blob: 74f78fb575079cb5959635e44891809aa646e2cb [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj9eecbbb2010-05-03 21:37:12 +000011 Copyright (C) 2007-2010 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
sewardj9eecbbb2010-05-03 21:37:12 +000014 Copyright (C) 2007-2010 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj61bc2c52011-02-09 10:34:00 +000055#include "pub_tool_libcproc.h" // VG_(atfork)
sewardj234e5582011-02-09 12:47:23 +000056#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
sewardjb4112022007-11-09 22:49:28 +000057
sewardjf98e1c02008-10-25 16:22:41 +000058#include "hg_basics.h"
59#include "hg_wordset.h"
60#include "hg_lock_n_thread.h"
61#include "hg_errors.h"
62
63#include "libhb.h"
64
sewardjb4112022007-11-09 22:49:28 +000065#include "helgrind.h"
66
sewardjf98e1c02008-10-25 16:22:41 +000067
68// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
69
70// FIXME: when client destroys a lock or a CV, remove these
71// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000072
73/*----------------------------------------------------------------*/
74/*--- ---*/
75/*----------------------------------------------------------------*/
76
sewardj11e352f2007-11-30 11:11:02 +000077/* Note this needs to be compiled with -fno-strict-aliasing, since it
78 contains a whole bunch of calls to lookupFM etc which cast between
79 Word and pointer types. gcc rightly complains this breaks ANSI C
80 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
81 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000082*/
sewardjb4112022007-11-09 22:49:28 +000083
84// FIXME what is supposed to happen to locks in memory which
85// is relocated as a result of client realloc?
86
sewardjb4112022007-11-09 22:49:28 +000087// FIXME put referencing ThreadId into Thread and get
88// rid of the slow reverse mapping function.
89
90// FIXME accesses to NoAccess areas: change state to Excl?
91
92// FIXME report errors for accesses of NoAccess memory?
93
94// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
95// the thread still holds the lock.
96
97/* ------------ Debug/trace options ------------ */
98
sewardjb4112022007-11-09 22:49:28 +000099// 0 for silent, 1 for some stuff, 2 for lots of stuff
100#define SHOW_EVENTS 0
101
sewardjb4112022007-11-09 22:49:28 +0000102
103static void all__sanity_check ( Char* who ); /* fwds */
104
105#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
106
107// 0 for none, 1 for dump at end of run
108#define SHOW_DATA_STRUCTURES 0
109
110
sewardjb4112022007-11-09 22:49:28 +0000111/* ------------ Misc comments ------------ */
112
113// FIXME: don't hardwire initial entries for root thread.
114// Instead, let the pre_thread_ll_create handler do this.
115
sewardjb4112022007-11-09 22:49:28 +0000116
117/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000118/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000119/*----------------------------------------------------------------*/
120
sewardjb4112022007-11-09 22:49:28 +0000121/* Admin linked list of Threads */
122static Thread* admin_threads = NULL;
123
sewardj1d7c3322011-02-28 09:22:51 +0000124/* Admin double linked list of Locks */
125/* We need a double linked list to properly and efficiently
126 handle del_LockN. */
sewardjb4112022007-11-09 22:49:28 +0000127static Lock* admin_locks = NULL;
128
sewardjb4112022007-11-09 22:49:28 +0000129/* Mapping table for core ThreadIds to Thread* */
130static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
131
sewardjb4112022007-11-09 22:49:28 +0000132/* Mapping table for lock guest addresses to Lock* */
133static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
134
sewardj0f64c9e2011-03-10 17:40:22 +0000135/* The word-set universes for lock sets. */
sewardjb4112022007-11-09 22:49:28 +0000136static WordSetU* univ_lsets = NULL; /* sets of Lock* */
137static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
138
sewardjb4112022007-11-09 22:49:28 +0000139
140/*----------------------------------------------------------------*/
141/*--- Simple helpers for the data structures ---*/
142/*----------------------------------------------------------------*/
143
144static UWord stats__lockN_acquires = 0;
145static UWord stats__lockN_releases = 0;
146
sewardjf98e1c02008-10-25 16:22:41 +0000147static
148ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000149
150/* --------- Constructors --------- */
151
sewardjf98e1c02008-10-25 16:22:41 +0000152static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000153 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000154 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000155 thread->locksetA = HG_(emptyWS)( univ_lsets );
156 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000157 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000158 thread->hbthr = hbthr;
159 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000160 thread->created_at = NULL;
161 thread->announced = False;
162 thread->errmsg_index = indx++;
163 thread->admin = admin_threads;
164 admin_threads = thread;
165 return thread;
166}
sewardjf98e1c02008-10-25 16:22:41 +0000167
sewardjb4112022007-11-09 22:49:28 +0000168// Make a new lock which is unlocked (hence ownerless)
sewardj1d7c3322011-02-28 09:22:51 +0000169// and insert the new lock in admin_locks double linked list.
sewardjb4112022007-11-09 22:49:28 +0000170static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
171 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000172 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardj0f64c9e2011-03-10 17:40:22 +0000173 /* begin: add to double linked list */
sewardj1d7c3322011-02-28 09:22:51 +0000174 if (admin_locks)
175 admin_locks->admin_prev = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000176 lock->admin_next = admin_locks;
177 lock->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000178 admin_locks = lock;
sewardj0f64c9e2011-03-10 17:40:22 +0000179 /* end: add */
sewardjb4112022007-11-09 22:49:28 +0000180 lock->unique = unique++;
181 lock->magic = LockN_MAGIC;
182 lock->appeared_at = NULL;
183 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000184 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000185 lock->guestaddr = guestaddr;
186 lock->kind = kind;
187 lock->heldW = False;
188 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000189 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000190 return lock;
191}
sewardjb4112022007-11-09 22:49:28 +0000192
193/* Release storage for a Lock. Also release storage in .heldBy, if
sewardj1d7c3322011-02-28 09:22:51 +0000194 any. Removes from admin_locks double linked list. */
sewardjb4112022007-11-09 22:49:28 +0000195static void del_LockN ( Lock* lk )
196{
sewardjf98e1c02008-10-25 16:22:41 +0000197 tl_assert(HG_(is_sane_LockN)(lk));
198 tl_assert(lk->hbso);
199 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000200 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000201 VG_(deleteBag)( lk->heldBy );
sewardj0f64c9e2011-03-10 17:40:22 +0000202 /* begin: del lock from double linked list */
203 if (lk == admin_locks) {
204 tl_assert(lk->admin_prev == NULL);
205 if (lk->admin_next)
206 lk->admin_next->admin_prev = NULL;
sewardj1d7c3322011-02-28 09:22:51 +0000207 admin_locks = lk->admin_next;
sewardj1d7c3322011-02-28 09:22:51 +0000208 }
209 else {
sewardj0f64c9e2011-03-10 17:40:22 +0000210 tl_assert(lk->admin_prev != NULL);
sewardj1d7c3322011-02-28 09:22:51 +0000211 lk->admin_prev->admin_next = lk->admin_next;
sewardj0f64c9e2011-03-10 17:40:22 +0000212 if (lk->admin_next)
213 lk->admin_next->admin_prev = lk->admin_prev;
sewardj1d7c3322011-02-28 09:22:51 +0000214 }
sewardj0f64c9e2011-03-10 17:40:22 +0000215 /* end: del */
sewardjb4112022007-11-09 22:49:28 +0000216 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000217 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000218}
219
220/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
221 it. This is done strictly: only combinations resulting from
222 correct program and libpthread behaviour are allowed. */
223static void lockN_acquire_writer ( Lock* lk, Thread* thr )
224{
sewardjf98e1c02008-10-25 16:22:41 +0000225 tl_assert(HG_(is_sane_LockN)(lk));
226 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000227
228 stats__lockN_acquires++;
229
230 /* EXPOSITION only */
231 /* We need to keep recording snapshots of where the lock was
232 acquired, so as to produce better lock-order error messages. */
233 if (lk->acquired_at == NULL) {
234 ThreadId tid;
235 tl_assert(lk->heldBy == NULL);
236 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
237 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000238 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000239 } else {
240 tl_assert(lk->heldBy != NULL);
241 }
242 /* end EXPOSITION only */
243
244 switch (lk->kind) {
245 case LK_nonRec:
246 case_LK_nonRec:
247 tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
248 tl_assert(!lk->heldW);
249 lk->heldW = True;
sewardjf98e1c02008-10-25 16:22:41 +0000250 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
sewardj896f6f92008-08-19 08:38:52 +0000251 VG_(addToBag)( lk->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +0000252 break;
253 case LK_mbRec:
254 if (lk->heldBy == NULL)
255 goto case_LK_nonRec;
256 /* 2nd and subsequent locking of a lock by its owner */
257 tl_assert(lk->heldW);
258 /* assert: lk is only held by one thread .. */
sewardj896f6f92008-08-19 08:38:52 +0000259 tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
sewardjb4112022007-11-09 22:49:28 +0000260 /* assert: .. and that thread is 'thr'. */
sewardj896f6f92008-08-19 08:38:52 +0000261 tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
262 == VG_(sizeTotalBag)(lk->heldBy));
263 VG_(addToBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000264 break;
265 case LK_rdwr:
266 tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
267 goto case_LK_nonRec;
268 default:
269 tl_assert(0);
270 }
sewardjf98e1c02008-10-25 16:22:41 +0000271 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000272}
273
274static void lockN_acquire_reader ( Lock* lk, Thread* thr )
275{
sewardjf98e1c02008-10-25 16:22:41 +0000276 tl_assert(HG_(is_sane_LockN)(lk));
277 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000278 /* can only add reader to a reader-writer lock. */
279 tl_assert(lk->kind == LK_rdwr);
280 /* lk must be free or already r-held. */
281 tl_assert(lk->heldBy == NULL
282 || (lk->heldBy != NULL && !lk->heldW));
283
284 stats__lockN_acquires++;
285
286 /* EXPOSITION only */
287 /* We need to keep recording snapshots of where the lock was
288 acquired, so as to produce better lock-order error messages. */
289 if (lk->acquired_at == NULL) {
290 ThreadId tid;
291 tl_assert(lk->heldBy == NULL);
292 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
293 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000294 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000295 } else {
296 tl_assert(lk->heldBy != NULL);
297 }
298 /* end EXPOSITION only */
299
300 if (lk->heldBy) {
sewardj896f6f92008-08-19 08:38:52 +0000301 VG_(addToBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000302 } else {
303 lk->heldW = False;
sewardjf98e1c02008-10-25 16:22:41 +0000304 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
sewardj896f6f92008-08-19 08:38:52 +0000305 VG_(addToBag)( lk->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +0000306 }
307 tl_assert(!lk->heldW);
sewardjf98e1c02008-10-25 16:22:41 +0000308 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000309}
310
311/* Update 'lk' to reflect a release of it by 'thr'. This is done
312 strictly: only combinations resulting from correct program and
313 libpthread behaviour are allowed. */
314
315static void lockN_release ( Lock* lk, Thread* thr )
316{
317 Bool b;
sewardjf98e1c02008-10-25 16:22:41 +0000318 tl_assert(HG_(is_sane_LockN)(lk));
319 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000320 /* lock must be held by someone */
321 tl_assert(lk->heldBy);
322 stats__lockN_releases++;
323 /* Remove it from the holder set */
sewardj896f6f92008-08-19 08:38:52 +0000324 b = VG_(delFromBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000325 /* thr must actually have been a holder of lk */
326 tl_assert(b);
327 /* normalise */
328 tl_assert(lk->acquired_at);
sewardj896f6f92008-08-19 08:38:52 +0000329 if (VG_(isEmptyBag)(lk->heldBy)) {
330 VG_(deleteBag)(lk->heldBy);
sewardjb4112022007-11-09 22:49:28 +0000331 lk->heldBy = NULL;
332 lk->heldW = False;
333 lk->acquired_at = NULL;
334 }
sewardjf98e1c02008-10-25 16:22:41 +0000335 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000336}
337
338static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
339{
340 Thread* thr;
341 if (!lk->heldBy) {
342 tl_assert(!lk->heldW);
343 return;
344 }
345 /* for each thread that holds this lock do ... */
sewardj896f6f92008-08-19 08:38:52 +0000346 VG_(initIterBag)( lk->heldBy );
347 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
sewardjf98e1c02008-10-25 16:22:41 +0000348 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000349 tl_assert(HG_(elemWS)( univ_lsets,
350 thr->locksetA, (Word)lk ));
351 thr->locksetA
352 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );
353
354 if (lk->heldW) {
355 tl_assert(HG_(elemWS)( univ_lsets,
356 thr->locksetW, (Word)lk ));
357 thr->locksetW
358 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
359 }
360 }
sewardj896f6f92008-08-19 08:38:52 +0000361 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000362}
363
sewardjb4112022007-11-09 22:49:28 +0000364
365/*----------------------------------------------------------------*/
366/*--- Print out the primary data structures ---*/
367/*----------------------------------------------------------------*/
368
sewardjb4112022007-11-09 22:49:28 +0000369#define PP_THREADS (1<<1)
370#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000371#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000372
373
374static const Int sHOW_ADMIN = 0;
375
376static void space ( Int n )
377{
378 Int i;
379 Char spaces[128+1];
380 tl_assert(n >= 0 && n < 128);
381 if (n == 0)
382 return;
383 for (i = 0; i < n; i++)
384 spaces[i] = ' ';
385 spaces[i] = 0;
386 tl_assert(i < 128+1);
387 VG_(printf)("%s", spaces);
388}
389
390static void pp_Thread ( Int d, Thread* t )
391{
392 space(d+0); VG_(printf)("Thread %p {\n", t);
393 if (sHOW_ADMIN) {
394 space(d+3); VG_(printf)("admin %p\n", t->admin);
395 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
396 }
397 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
398 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
sewardjb4112022007-11-09 22:49:28 +0000399 space(d+0); VG_(printf)("}\n");
400}
401
402static void pp_admin_threads ( Int d )
403{
404 Int i, n;
405 Thread* t;
406 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
407 /* nothing */
408 }
409 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
410 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
411 if (0) {
412 space(n);
413 VG_(printf)("admin_threads record %d of %d:\n", i, n);
414 }
415 pp_Thread(d+3, t);
416 }
barta0b6b2c2008-07-07 06:49:24 +0000417 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000418}
419
420static void pp_map_threads ( Int d )
421{
njn4c245e52009-03-15 23:25:38 +0000422 Int i, n = 0;
sewardjb4112022007-11-09 22:49:28 +0000423 space(d); VG_(printf)("map_threads ");
sewardjb4112022007-11-09 22:49:28 +0000424 for (i = 0; i < VG_N_THREADS; i++) {
425 if (map_threads[i] != NULL)
426 n++;
427 }
428 VG_(printf)("(%d entries) {\n", n);
429 for (i = 0; i < VG_N_THREADS; i++) {
430 if (map_threads[i] == NULL)
431 continue;
432 space(d+3);
433 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
434 }
435 space(d); VG_(printf)("}\n");
436}
437
438static const HChar* show_LockKind ( LockKind lkk ) {
439 switch (lkk) {
440 case LK_mbRec: return "mbRec";
441 case LK_nonRec: return "nonRec";
442 case LK_rdwr: return "rdwr";
443 default: tl_assert(0);
444 }
445}
446
447static void pp_Lock ( Int d, Lock* lk )
448{
barta0b6b2c2008-07-07 06:49:24 +0000449 space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
sewardjb4112022007-11-09 22:49:28 +0000450 if (sHOW_ADMIN) {
sewardj1d7c3322011-02-28 09:22:51 +0000451 space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
452 space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
453 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
sewardjb4112022007-11-09 22:49:28 +0000454 }
455 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
456 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
457 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
458 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
459 if (lk->heldBy) {
460 Thread* thr;
461 Word count;
462 VG_(printf)(" { ");
sewardj896f6f92008-08-19 08:38:52 +0000463 VG_(initIterBag)( lk->heldBy );
464 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
sewardjb4112022007-11-09 22:49:28 +0000465 VG_(printf)("%lu:%p ", count, thr);
sewardj896f6f92008-08-19 08:38:52 +0000466 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000467 VG_(printf)("}");
468 }
469 VG_(printf)("\n");
470 space(d+0); VG_(printf)("}\n");
471}
472
473static void pp_admin_locks ( Int d )
474{
475 Int i, n;
476 Lock* lk;
sewardj1d7c3322011-02-28 09:22:51 +0000477 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000478 /* nothing */
479 }
480 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
sewardj1d7c3322011-02-28 09:22:51 +0000481 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000482 if (0) {
483 space(n);
484 VG_(printf)("admin_locks record %d of %d:\n", i, n);
485 }
486 pp_Lock(d+3, lk);
487 }
barta0b6b2c2008-07-07 06:49:24 +0000488 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000489}
490
491static void pp_map_locks ( Int d )
492{
493 void* gla;
494 Lock* lk;
495 space(d); VG_(printf)("map_locks (%d entries) {\n",
sewardj896f6f92008-08-19 08:38:52 +0000496 (Int)VG_(sizeFM)( map_locks ));
497 VG_(initIterFM)( map_locks );
498 while (VG_(nextIterFM)( map_locks, (Word*)&gla,
sewardjb5f29642007-11-16 12:02:43 +0000499 (Word*)&lk )) {
sewardjb4112022007-11-09 22:49:28 +0000500 space(d+3);
501 VG_(printf)("guest %p -> Lock %p\n", gla, lk);
502 }
sewardj896f6f92008-08-19 08:38:52 +0000503 VG_(doneIterFM)( map_locks );
sewardjb4112022007-11-09 22:49:28 +0000504 space(d); VG_(printf)("}\n");
505}
506
sewardjb4112022007-11-09 22:49:28 +0000507static void pp_everything ( Int flags, Char* caller )
508{
509 Int d = 0;
510 VG_(printf)("\n");
511 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
512 if (flags & PP_THREADS) {
513 VG_(printf)("\n");
514 pp_admin_threads(d+3);
515 VG_(printf)("\n");
516 pp_map_threads(d+3);
517 }
518 if (flags & PP_LOCKS) {
519 VG_(printf)("\n");
520 pp_admin_locks(d+3);
521 VG_(printf)("\n");
522 pp_map_locks(d+3);
523 }
sewardjb4112022007-11-09 22:49:28 +0000524
525 VG_(printf)("\n");
526 VG_(printf)("}\n");
527 VG_(printf)("\n");
528}
529
530#undef SHOW_ADMIN
531
532
533/*----------------------------------------------------------------*/
534/*--- Initialise the primary data structures ---*/
535/*----------------------------------------------------------------*/
536
sewardjf98e1c02008-10-25 16:22:41 +0000537static void initialise_data_structures ( Thr* hbthr_root )
sewardjb4112022007-11-09 22:49:28 +0000538{
sewardjb4112022007-11-09 22:49:28 +0000539 Thread* thr;
540
541 /* Get everything initialised and zeroed. */
542 tl_assert(admin_threads == NULL);
543 tl_assert(admin_locks == NULL);
sewardjb4112022007-11-09 22:49:28 +0000544
545 tl_assert(sizeof(Addr) == sizeof(Word));
sewardjb4112022007-11-09 22:49:28 +0000546
547 tl_assert(map_threads == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000548 map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
sewardjb4112022007-11-09 22:49:28 +0000549 tl_assert(map_threads != NULL);
550
sewardjb4112022007-11-09 22:49:28 +0000551 tl_assert(sizeof(Addr) == sizeof(Word));
552 tl_assert(map_locks == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000553 map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
554 NULL/*unboxed Word cmp*/);
sewardjb4112022007-11-09 22:49:28 +0000555 tl_assert(map_locks != NULL);
556
sewardjb4112022007-11-09 22:49:28 +0000557 tl_assert(univ_lsets == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000558 univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
559 8/*cacheSize*/ );
sewardjb4112022007-11-09 22:49:28 +0000560 tl_assert(univ_lsets != NULL);
561
562 tl_assert(univ_laog == NULL);
sewardjc1fb9d22011-02-28 09:03:44 +0000563 if (HG_(clo_track_lockorders)) {
564 univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
565 HG_(free), 24/*cacheSize*/ );
566 tl_assert(univ_laog != NULL);
567 }
sewardjb4112022007-11-09 22:49:28 +0000568
569 /* Set up entries for the root thread */
570 // FIXME: this assumes that the first real ThreadId is 1
571
sewardjb4112022007-11-09 22:49:28 +0000572 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +0000573 thr = mk_Thread(hbthr_root);
574 thr->coretid = 1; /* FIXME: hardwires an assumption about the
575 identity of the root thread. */
sewardj60626642011-03-10 15:14:37 +0000576 tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
577 libhb_set_Thr_hgthread(hbthr_root, thr);
sewardjb4112022007-11-09 22:49:28 +0000578
sewardjf98e1c02008-10-25 16:22:41 +0000579 /* and bind it in the thread-map table. */
580 tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
581 tl_assert(thr->coretid != VG_INVALID_THREADID);
sewardjb4112022007-11-09 22:49:28 +0000582
sewardjf98e1c02008-10-25 16:22:41 +0000583 map_threads[thr->coretid] = thr;
sewardjb4112022007-11-09 22:49:28 +0000584
585 tl_assert(VG_INVALID_THREADID == 0);
586
sewardjb4112022007-11-09 22:49:28 +0000587 all__sanity_check("initialise_data_structures");
588}
589
590
591/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000592/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000593/*----------------------------------------------------------------*/
594
595/* Doesn't assert if the relevant map_threads entry is NULL. */
596static Thread* map_threads_maybe_lookup ( ThreadId coretid )
597{
598 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000599 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000600 thr = map_threads[coretid];
601 return thr;
602}
603
604/* Asserts if the relevant map_threads entry is NULL. */
605static inline Thread* map_threads_lookup ( ThreadId coretid )
606{
607 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000608 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000609 thr = map_threads[coretid];
610 tl_assert(thr);
611 return thr;
612}
613
sewardjf98e1c02008-10-25 16:22:41 +0000614/* Do a reverse lookup. Does not assert if 'thr' is not found in
615 map_threads. */
sewardjb4112022007-11-09 22:49:28 +0000616static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
617{
sewardjf98e1c02008-10-25 16:22:41 +0000618 ThreadId tid;
619 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000620 /* Check nobody used the invalid-threadid slot */
621 tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
622 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000623 tid = thr->coretid;
624 tl_assert(HG_(is_sane_ThreadId)(tid));
625 return tid;
sewardjb4112022007-11-09 22:49:28 +0000626}
627
628/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
629 is not found in map_threads. */
630static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
631{
632 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
633 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000634 tl_assert(map_threads[tid]);
635 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000636 return tid;
637}
638
639static void map_threads_delete ( ThreadId coretid )
640{
641 Thread* thr;
642 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000643 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000644 thr = map_threads[coretid];
645 tl_assert(thr);
646 map_threads[coretid] = NULL;
647}
648
649
650/*----------------------------------------------------------------*/
651/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
652/*----------------------------------------------------------------*/
653
654/* Make sure there is a lock table entry for the given (lock) guest
655 address. If not, create one of the stated 'kind' in unheld state.
656 In any case, return the address of the existing or new Lock. */
657static
658Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
659{
660 Bool found;
661 Lock* oldlock = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000662 tl_assert(HG_(is_sane_ThreadId)(tid));
sewardj896f6f92008-08-19 08:38:52 +0000663 found = VG_(lookupFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000664 NULL, (Word*)&oldlock, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000665 if (!found) {
666 Lock* lock = mk_LockN(lkk, ga);
667 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
sewardjf98e1c02008-10-25 16:22:41 +0000668 tl_assert(HG_(is_sane_LockN)(lock));
sewardj896f6f92008-08-19 08:38:52 +0000669 VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
sewardjb4112022007-11-09 22:49:28 +0000670 tl_assert(oldlock == NULL);
sewardjb4112022007-11-09 22:49:28 +0000671 return lock;
672 } else {
673 tl_assert(oldlock != NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000674 tl_assert(HG_(is_sane_LockN)(oldlock));
sewardjb4112022007-11-09 22:49:28 +0000675 tl_assert(oldlock->guestaddr == ga);
sewardjb4112022007-11-09 22:49:28 +0000676 return oldlock;
677 }
678}
679
680static Lock* map_locks_maybe_lookup ( Addr ga )
681{
682 Bool found;
683 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000684 found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000685 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000686 return lk;
687}
688
689static void map_locks_delete ( Addr ga )
690{
691 Addr ga2 = 0;
692 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000693 VG_(delFromFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000694 (Word*)&ga2, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000695 /* delFromFM produces the val which is being deleted, if it is
696 found. So assert it is non-null; that in effect asserts that we
697 are deleting a (ga, Lock) pair which actually exists. */
698 tl_assert(lk != NULL);
699 tl_assert(ga2 == ga);
700}
701
702
sewardjb4112022007-11-09 22:49:28 +0000703
704/*----------------------------------------------------------------*/
705/*--- Sanity checking the data structures ---*/
706/*----------------------------------------------------------------*/
707
708static UWord stats__sanity_checks = 0;
709
sewardjb4112022007-11-09 22:49:28 +0000710static void laog__sanity_check ( Char* who ); /* fwds */
711
712/* REQUIRED INVARIANTS:
713
714 Thread vs Segment/Lock/SecMaps
715
716 for each t in Threads {
717
718 // Thread.lockset: each element is really a valid Lock
719
720 // Thread.lockset: each Lock in set is actually held by that thread
721 for lk in Thread.lockset
722 lk == LockedBy(t)
723
724 // Thread.csegid is a valid SegmentID
725 // and the associated Segment has .thr == t
726
727 }
728
729 all thread Locksets are pairwise empty under intersection
730 (that is, no lock is claimed to be held by more than one thread)
731 -- this is guaranteed if all locks in locksets point back to their
732 owner threads
733
734 Lock vs Thread/Segment/SecMaps
735
736 for each entry (gla, la) in map_locks
737 gla == la->guest_addr
738
739 for each lk in Locks {
740
741 lk->tag is valid
742 lk->guest_addr does not have shadow state NoAccess
743 if lk == LockedBy(t), then t->lockset contains lk
744 if lk == UnlockedBy(segid) then segid is valid SegmentID
745 and can be mapped to a valid Segment(seg)
746 and seg->thr->lockset does not contain lk
747 if lk == UnlockedNew then (no lockset contains lk)
748
749 secmaps for lk has .mbHasLocks == True
750
751 }
752
753 Segment vs Thread/Lock/SecMaps
754
755 the Segment graph is a dag (no cycles)
756 all of the Segment graph must be reachable from the segids
757 mentioned in the Threads
758
759 for seg in Segments {
760
761 seg->thr is a sane Thread
762
763 }
764
765 SecMaps vs Segment/Thread/Lock
766
767 for sm in SecMaps {
768
769 sm properly aligned
770 if any shadow word is ShR or ShM then .mbHasShared == True
771
772 for each Excl(segid) state
773 map_segments_lookup maps to a sane Segment(seg)
774 for each ShM/ShR(tsetid,lsetid) state
775 each lk in lset is a valid Lock
776 each thr in tset is a valid thread, which is non-dead
777
778 }
779*/
780
781
782/* Return True iff 'thr' holds 'lk' in some mode. */
783static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
784{
785 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000786 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000787 else
788 return False;
789}
790
791/* Sanity check Threads, as far as possible */
792__attribute__((noinline))
793static void threads__sanity_check ( Char* who )
794{
795#define BAD(_str) do { how = (_str); goto bad; } while (0)
796 Char* how = "no error";
797 Thread* thr;
798 WordSetID wsA, wsW;
sewardj250ec2e2008-02-15 22:02:30 +0000799 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +0000800 Word ls_size, i;
801 Lock* lk;
sewardjb4112022007-11-09 22:49:28 +0000802 for (thr = admin_threads; thr; thr = thr->admin) {
sewardjf98e1c02008-10-25 16:22:41 +0000803 if (!HG_(is_sane_Thread)(thr)) BAD("1");
sewardjb4112022007-11-09 22:49:28 +0000804 wsA = thr->locksetA;
805 wsW = thr->locksetW;
806 // locks held in W mode are a subset of all locks held
807 if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
808 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
809 for (i = 0; i < ls_size; i++) {
810 lk = (Lock*)ls_words[i];
811 // Thread.lockset: each element is really a valid Lock
sewardjf98e1c02008-10-25 16:22:41 +0000812 if (!HG_(is_sane_LockN)(lk)) BAD("2");
sewardjb4112022007-11-09 22:49:28 +0000813 // Thread.lockset: each Lock in set is actually held by that
814 // thread
815 if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
sewardjb4112022007-11-09 22:49:28 +0000816 }
817 }
818 return;
819 bad:
820 VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
821 tl_assert(0);
822#undef BAD
823}
824
825
826/* Sanity check Locks, as far as possible */
827__attribute__((noinline))
828static void locks__sanity_check ( Char* who )
829{
830#define BAD(_str) do { how = (_str); goto bad; } while (0)
831 Char* how = "no error";
832 Addr gla;
833 Lock* lk;
834 Int i;
835 // # entries in admin_locks == # entries in map_locks
sewardj1d7c3322011-02-28 09:22:51 +0000836 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next)
sewardjb4112022007-11-09 22:49:28 +0000837 ;
sewardj896f6f92008-08-19 08:38:52 +0000838 if (i != VG_(sizeFM)(map_locks)) BAD("1");
sewardjb4112022007-11-09 22:49:28 +0000839 // for each entry (gla, lk) in map_locks
840 // gla == lk->guest_addr
sewardj896f6f92008-08-19 08:38:52 +0000841 VG_(initIterFM)( map_locks );
842 while (VG_(nextIterFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000843 (Word*)&gla, (Word*)&lk )) {
sewardjb4112022007-11-09 22:49:28 +0000844 if (lk->guestaddr != gla) BAD("2");
845 }
sewardj896f6f92008-08-19 08:38:52 +0000846 VG_(doneIterFM)( map_locks );
sewardjb4112022007-11-09 22:49:28 +0000847 // scan through admin_locks ...
sewardj1d7c3322011-02-28 09:22:51 +0000848 for (lk = admin_locks; lk; lk = lk->admin_next) {
sewardjb4112022007-11-09 22:49:28 +0000849 // lock is sane. Quite comprehensive, also checks that
850 // referenced (holder) threads are sane.
sewardjf98e1c02008-10-25 16:22:41 +0000851 if (!HG_(is_sane_LockN)(lk)) BAD("3");
sewardjb4112022007-11-09 22:49:28 +0000852 // map_locks binds guest address back to this lock
853 if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
sewardjb4112022007-11-09 22:49:28 +0000854 // look at all threads mentioned as holders of this lock. Ensure
855 // this lock is mentioned in their locksets.
856 if (lk->heldBy) {
857 Thread* thr;
858 Word count;
sewardj896f6f92008-08-19 08:38:52 +0000859 VG_(initIterBag)( lk->heldBy );
860 while (VG_(nextIterBag)( lk->heldBy,
sewardjb5f29642007-11-16 12:02:43 +0000861 (Word*)&thr, &count )) {
sewardjf98e1c02008-10-25 16:22:41 +0000862 // HG_(is_sane_LockN) above ensures these
sewardjb4112022007-11-09 22:49:28 +0000863 tl_assert(count >= 1);
sewardjf98e1c02008-10-25 16:22:41 +0000864 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000865 if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
866 BAD("6");
867 // also check the w-only lockset
868 if (lk->heldW
869 && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
870 BAD("7");
871 if ((!lk->heldW)
872 && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
873 BAD("8");
874 }
sewardj896f6f92008-08-19 08:38:52 +0000875 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000876 } else {
877 /* lock not held by anybody */
878 if (lk->heldW) BAD("9"); /* should be False if !heldBy */
879 // since lk is unheld, then (no lockset contains lk)
880 // hmm, this is really too expensive to check. Hmm.
881 }
sewardjb4112022007-11-09 22:49:28 +0000882 }
883
884 return;
885 bad:
886 VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
887 tl_assert(0);
888#undef BAD
889}
890
891
sewardjb4112022007-11-09 22:49:28 +0000892static void all_except_Locks__sanity_check ( Char* who ) {
893 stats__sanity_checks++;
894 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
895 threads__sanity_check(who);
sewardjc1fb9d22011-02-28 09:03:44 +0000896 if (HG_(clo_track_lockorders))
897 laog__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000898}
899static void all__sanity_check ( Char* who ) {
900 all_except_Locks__sanity_check(who);
901 locks__sanity_check(who);
902}
903
904
905/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +0000906/*--- Shadow value and address range handlers ---*/
907/*----------------------------------------------------------------*/
908
909static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +0000910//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000911static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +0000912__attribute__((noinline))
913static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000914
sewardjb4112022007-11-09 22:49:28 +0000915
916/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +0000917/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
918 Is that a problem? (hence 'scopy' rather than 'ccopy') */
919static void shadow_mem_scopy_range ( Thread* thr,
920 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +0000921{
922 Thr* hbthr = thr->hbthr;
923 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000924 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +0000925}
926
sewardj23f12002009-07-24 08:45:08 +0000927static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
928{
sewardjf98e1c02008-10-25 16:22:41 +0000929 Thr* hbthr = thr->hbthr;
930 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +0000931 LIBHB_CREAD_N(hbthr, a, len);
932}
933
934static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
935 Thr* hbthr = thr->hbthr;
936 tl_assert(hbthr);
937 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +0000938}
939
940static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
941{
sewardj23f12002009-07-24 08:45:08 +0000942 libhb_srange_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +0000943}
944
sewardjb4112022007-11-09 22:49:28 +0000945static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
946{
sewardjb4112022007-11-09 22:49:28 +0000947 if (0 && len > 500)
barta0b6b2c2008-07-07 06:49:24 +0000948 VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
sewardj23f12002009-07-24 08:45:08 +0000949 libhb_srange_noaccess( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +0000950}
951
sewardj406bac82010-03-03 23:03:40 +0000952static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
953{
954 if (0 && len > 500)
955 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
956 libhb_srange_untrack( thr->hbthr, aIN, len );
957}
958
sewardjb4112022007-11-09 22:49:28 +0000959
960/*----------------------------------------------------------------*/
961/*--- Event handlers (evh__* functions) ---*/
962/*--- plus helpers (evhH__* functions) ---*/
963/*----------------------------------------------------------------*/
964
965/*--------- Event handler helpers (evhH__* functions) ---------*/
966
967/* Create a new segment for 'thr', making it depend (.prev) on its
968 existing segment, bind together the SegmentID and Segment, and
969 return both of them. Also update 'thr' so it references the new
970 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +0000971//zz static
972//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
973//zz /*OUT*/Segment** new_segP,
974//zz Thread* thr )
975//zz {
976//zz Segment* cur_seg;
977//zz tl_assert(new_segP);
978//zz tl_assert(new_segidP);
979//zz tl_assert(HG_(is_sane_Thread)(thr));
980//zz cur_seg = map_segments_lookup( thr->csegid );
981//zz tl_assert(cur_seg);
982//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
983//zz at their owner thread. */
984//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
985//zz *new_segidP = alloc_SegmentID();
986//zz map_segments_add( *new_segidP, *new_segP );
987//zz thr->csegid = *new_segidP;
988//zz }
sewardjb4112022007-11-09 22:49:28 +0000989
990
991/* The lock at 'lock_ga' has acquired a writer. Make all necessary
992 updates, and also do all possible error checks. */
993static
994void evhH__post_thread_w_acquires_lock ( Thread* thr,
995 LockKind lkk, Addr lock_ga )
996{
997 Lock* lk;
998
999 /* Basically what we need to do is call lockN_acquire_writer.
1000 However, that will barf if any 'invalid' lock states would
1001 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001002 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001003 routine.
1004
1005 Because this routine is only called after successful lock
1006 acquisition, we should not be asked to move the lock into any
1007 invalid states. Requests to do so are bugs in libpthread, since
1008 that should have rejected any such requests. */
1009
sewardjf98e1c02008-10-25 16:22:41 +00001010 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001011 /* Try to find the lock. If we can't, then create a new one with
1012 kind 'lkk'. */
1013 lk = map_locks_lookup_or_create(
1014 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001015 tl_assert( HG_(is_sane_LockN)(lk) );
1016
1017 /* check libhb level entities exist */
1018 tl_assert(thr->hbthr);
1019 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001020
1021 if (lk->heldBy == NULL) {
1022 /* the lock isn't held. Simple. */
1023 tl_assert(!lk->heldW);
1024 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001025 /* acquire a dependency from the lock's VCs */
1026 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001027 goto noerror;
1028 }
1029
1030 /* So the lock is already held. If held as a r-lock then
1031 libpthread must be buggy. */
1032 tl_assert(lk->heldBy);
1033 if (!lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001034 HG_(record_error_Misc)(
1035 thr, "Bug in libpthread: write lock "
1036 "granted on rwlock which is currently rd-held");
sewardjb4112022007-11-09 22:49:28 +00001037 goto error;
1038 }
1039
1040 /* So the lock is held in w-mode. If it's held by some other
1041 thread, then libpthread must be buggy. */
sewardj896f6f92008-08-19 08:38:52 +00001042 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
sewardjb4112022007-11-09 22:49:28 +00001043
sewardj896f6f92008-08-19 08:38:52 +00001044 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
sewardjf98e1c02008-10-25 16:22:41 +00001045 HG_(record_error_Misc)(
1046 thr, "Bug in libpthread: write lock "
1047 "granted on mutex/rwlock which is currently "
1048 "wr-held by a different thread");
sewardjb4112022007-11-09 22:49:28 +00001049 goto error;
1050 }
1051
1052 /* So the lock is already held in w-mode by 'thr'. That means this
1053 is an attempt to lock it recursively, which is only allowable
1054 for LK_mbRec kinded locks. Since this routine is called only
1055 once the lock has been acquired, this must also be a libpthread
1056 bug. */
1057 if (lk->kind != LK_mbRec) {
sewardjf98e1c02008-10-25 16:22:41 +00001058 HG_(record_error_Misc)(
1059 thr, "Bug in libpthread: recursive write lock "
1060 "granted on mutex/wrlock which does not "
1061 "support recursion");
sewardjb4112022007-11-09 22:49:28 +00001062 goto error;
1063 }
1064
1065 /* So we are recursively re-locking a lock we already w-hold. */
1066 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001067 /* acquire a dependency from the lock's VC. Probably pointless,
1068 but also harmless. */
1069 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001070 goto noerror;
1071
1072 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001073 if (HG_(clo_track_lockorders)) {
1074 /* check lock order acquisition graph, and update. This has to
1075 happen before the lock is added to the thread's locksetA/W. */
1076 laog__pre_thread_acquires_lock( thr, lk );
1077 }
sewardjb4112022007-11-09 22:49:28 +00001078 /* update the thread's held-locks set */
1079 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1080 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
1081 /* fall through */
1082
1083 error:
sewardjf98e1c02008-10-25 16:22:41 +00001084 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001085}
1086
1087
1088/* The lock at 'lock_ga' has acquired a reader. Make all necessary
1089 updates, and also do all possible error checks. */
1090static
1091void evhH__post_thread_r_acquires_lock ( Thread* thr,
1092 LockKind lkk, Addr lock_ga )
1093{
1094 Lock* lk;
1095
1096 /* Basically what we need to do is call lockN_acquire_reader.
1097 However, that will barf if any 'invalid' lock states would
1098 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001099 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001100 routine.
1101
1102 Because this routine is only called after successful lock
1103 acquisition, we should not be asked to move the lock into any
1104 invalid states. Requests to do so are bugs in libpthread, since
1105 that should have rejected any such requests. */
1106
sewardjf98e1c02008-10-25 16:22:41 +00001107 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001108 /* Try to find the lock. If we can't, then create a new one with
1109 kind 'lkk'. Only a reader-writer lock can be read-locked,
1110 hence the first assertion. */
1111 tl_assert(lkk == LK_rdwr);
1112 lk = map_locks_lookup_or_create(
1113 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001114 tl_assert( HG_(is_sane_LockN)(lk) );
1115
1116 /* check libhb level entities exist */
1117 tl_assert(thr->hbthr);
1118 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001119
1120 if (lk->heldBy == NULL) {
1121 /* the lock isn't held. Simple. */
1122 tl_assert(!lk->heldW);
1123 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001124 /* acquire a dependency from the lock's VC */
1125 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001126 goto noerror;
1127 }
1128
1129 /* So the lock is already held. If held as a w-lock then
1130 libpthread must be buggy. */
1131 tl_assert(lk->heldBy);
1132 if (lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001133 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1134 "granted on rwlock which is "
1135 "currently wr-held");
sewardjb4112022007-11-09 22:49:28 +00001136 goto error;
1137 }
1138
1139 /* Easy enough. In short anybody can get a read-lock on a rwlock
1140 provided it is either unlocked or already in rd-held. */
1141 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001142 /* acquire a dependency from the lock's VC. Probably pointless,
1143 but also harmless. */
1144 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001145 goto noerror;
1146
1147 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001148 if (HG_(clo_track_lockorders)) {
1149 /* check lock order acquisition graph, and update. This has to
1150 happen before the lock is added to the thread's locksetA/W. */
1151 laog__pre_thread_acquires_lock( thr, lk );
1152 }
sewardjb4112022007-11-09 22:49:28 +00001153 /* update the thread's held-locks set */
1154 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1155 /* but don't update thr->locksetW, since lk is only rd-held */
1156 /* fall through */
1157
1158 error:
sewardjf98e1c02008-10-25 16:22:41 +00001159 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001160}
1161
1162
1163/* The lock at 'lock_ga' is just about to be unlocked. Make all
1164 necessary updates, and also do all possible error checks. */
1165static
1166void evhH__pre_thread_releases_lock ( Thread* thr,
1167 Addr lock_ga, Bool isRDWR )
1168{
1169 Lock* lock;
1170 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001171 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001172
1173 /* This routine is called prior to a lock release, before
1174 libpthread has had a chance to validate the call. Hence we need
1175 to detect and reject any attempts to move the lock into an
1176 invalid state. Such attempts are bugs in the client.
1177
1178 isRDWR is True if we know from the wrapper context that lock_ga
1179 should refer to a reader-writer lock, and is False if [ditto]
1180 lock_ga should refer to a standard mutex. */
1181
sewardjf98e1c02008-10-25 16:22:41 +00001182 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001183 lock = map_locks_maybe_lookup( lock_ga );
1184
1185 if (!lock) {
1186 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1187 the client is trying to unlock it. So complain, then ignore
1188 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001189 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001190 return;
1191 }
1192
1193 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001194 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001195
1196 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001197 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1198 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001199 }
1200 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001201 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1202 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001203 }
1204
1205 if (!lock->heldBy) {
1206 /* The lock is not held. This indicates a serious bug in the
1207 client. */
1208 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001209 HG_(record_error_UnlockUnlocked)( thr, lock );
sewardjb4112022007-11-09 22:49:28 +00001210 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1211 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1212 goto error;
1213 }
1214
sewardjf98e1c02008-10-25 16:22:41 +00001215 /* test just above dominates */
1216 tl_assert(lock->heldBy);
1217 was_heldW = lock->heldW;
1218
sewardjb4112022007-11-09 22:49:28 +00001219 /* The lock is held. Is this thread one of the holders? If not,
1220 report a bug in the client. */
sewardj896f6f92008-08-19 08:38:52 +00001221 n = VG_(elemBag)( lock->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +00001222 tl_assert(n >= 0);
1223 if (n == 0) {
1224 /* We are not a current holder of the lock. This is a bug in
1225 the guest, and (per POSIX pthread rules) the unlock
1226 attempt will fail. So just complain and do nothing
1227 else. */
sewardj896f6f92008-08-19 08:38:52 +00001228 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001229 tl_assert(HG_(is_sane_Thread)(realOwner));
sewardjb4112022007-11-09 22:49:28 +00001230 tl_assert(realOwner != thr);
1231 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1232 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
sewardjf98e1c02008-10-25 16:22:41 +00001233 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
sewardjb4112022007-11-09 22:49:28 +00001234 goto error;
1235 }
1236
1237 /* Ok, we hold the lock 'n' times. */
1238 tl_assert(n >= 1);
1239
1240 lockN_release( lock, thr );
1241
1242 n--;
1243 tl_assert(n >= 0);
1244
1245 if (n > 0) {
1246 tl_assert(lock->heldBy);
sewardj896f6f92008-08-19 08:38:52 +00001247 tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
sewardjb4112022007-11-09 22:49:28 +00001248 /* We still hold the lock. So either it's a recursive lock
1249 or a rwlock which is currently r-held. */
1250 tl_assert(lock->kind == LK_mbRec
1251 || (lock->kind == LK_rdwr && !lock->heldW));
1252 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1253 if (lock->heldW)
1254 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1255 else
1256 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1257 } else {
sewardj983f3022009-05-21 14:49:55 +00001258 /* n is zero. This means we don't hold the lock any more. But
1259 if it's a rwlock held in r-mode, someone else could still
1260 hold it. Just do whatever sanity checks we can. */
1261 if (lock->kind == LK_rdwr && lock->heldBy) {
1262 /* It's a rwlock. We no longer hold it but we used to;
1263 nevertheless it still appears to be held by someone else.
1264 The implication is that, prior to this release, it must
1265 have been shared by us and and whoever else is holding it;
1266 which in turn implies it must be r-held, since a lock
1267 can't be w-held by more than one thread. */
1268 /* The lock is now R-held by somebody else: */
1269 tl_assert(lock->heldW == False);
1270 } else {
1271 /* Normal case. It's either not a rwlock, or it's a rwlock
1272 that we used to hold in w-mode (which is pretty much the
1273 same thing as a non-rwlock.) Since this transaction is
1274 atomic (V does not allow multiple threads to run
1275 simultaneously), it must mean the lock is now not held by
1276 anybody. Hence assert for it. */
1277 /* The lock is now not held by anybody: */
1278 tl_assert(!lock->heldBy);
1279 tl_assert(lock->heldW == False);
1280 }
sewardjf98e1c02008-10-25 16:22:41 +00001281 //if (lock->heldBy) {
1282 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1283 //}
sewardjb4112022007-11-09 22:49:28 +00001284 /* update this thread's lockset accordingly. */
1285 thr->locksetA
1286 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1287 thr->locksetW
1288 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001289 /* push our VC into the lock */
1290 tl_assert(thr->hbthr);
1291 tl_assert(lock->hbso);
1292 /* If the lock was previously W-held, then we want to do a
1293 strong send, and if previously R-held, then a weak send. */
1294 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001295 }
1296 /* fall through */
1297
1298 error:
sewardjf98e1c02008-10-25 16:22:41 +00001299 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001300}
1301
1302
sewardj9f569b72008-11-13 13:33:09 +00001303/* ---------------------------------------------------------- */
1304/* -------- Event handlers proper (evh__* functions) -------- */
1305/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001306
1307/* What is the Thread* for the currently running thread? This is
1308 absolutely performance critical. We receive notifications from the
1309 core for client code starts/stops, and cache the looked-up result
1310 in 'current_Thread'. Hence, for the vast majority of requests,
1311 finding the current thread reduces to a read of a global variable,
1312 provided get_current_Thread_in_C_C is inlined.
1313
1314 Outside of client code, current_Thread is NULL, and presumably
1315 any uses of it will cause a segfault. Hence:
1316
1317 - for uses definitely within client code, use
1318 get_current_Thread_in_C_C.
1319
1320 - for all other uses, use get_current_Thread.
1321*/
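/* For instance (an illustrative sketch, not code from this file):
   handlers that only ever run from generated code can take the fast
   path, whereas callbacks that may arrive outside client execution
   must take the general one:

      static VG_REGPARM(1) void some_codegen_helper ( Addr a ) {
         Thread* thr = get_current_Thread_in_C_C(); // just a global read
         ...
      }
      static void some_core_callback ( Addr a, SizeT len ) {
         Thread* thr = get_current_Thread(); // falls back to slow lookup
         ...
      }

   Compare evh__mem_help_cread_1 (fast) with evh__new_mem (general)
   below; 'some_codegen_helper' and 'some_core_callback' are
   hypothetical names. */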
1322
sewardj23f12002009-07-24 08:45:08 +00001323static Thread *current_Thread = NULL,
1324 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001325
1326static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1327 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1328 tl_assert(current_Thread == NULL);
1329 current_Thread = map_threads_lookup( tid );
1330 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001331 if (current_Thread != current_Thread_prev) {
1332 libhb_Thr_resumes( current_Thread->hbthr );
1333 current_Thread_prev = current_Thread;
1334 }
sewardjb4112022007-11-09 22:49:28 +00001335}
1336static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1337 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1338 tl_assert(current_Thread != NULL);
1339 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001340 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001341}
1342static inline Thread* get_current_Thread_in_C_C ( void ) {
1343 return current_Thread;
1344}
1345static inline Thread* get_current_Thread ( void ) {
1346 ThreadId coretid;
1347 Thread* thr;
1348 thr = get_current_Thread_in_C_C();
1349 if (LIKELY(thr))
1350 return thr;
1351 /* evidently not in client code. Do it the slow way. */
1352 coretid = VG_(get_running_tid)();
1353 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001354 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001355 of initial memory layout) and VG_(get_running_tid)() returns
1356 VG_INVALID_THREADID at that point. */
1357 if (coretid == VG_INVALID_THREADID)
1358 coretid = 1; /* KLUDGE */
1359 thr = map_threads_lookup( coretid );
1360 return thr;
1361}
1362
1363static
1364void evh__new_mem ( Addr a, SizeT len ) {
1365 if (SHOW_EVENTS >= 2)
1366 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1367 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001368 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001369 all__sanity_check("evh__new_mem-post");
1370}
1371
1372static
sewardj1f77fec2010-04-12 19:51:04 +00001373void evh__new_mem_stack ( Addr a, SizeT len ) {
1374 if (SHOW_EVENTS >= 2)
1375 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1376 shadow_mem_make_New( get_current_Thread(),
1377 -VG_STACK_REDZONE_SZB + a, len );
1378 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1379 all__sanity_check("evh__new_mem_stack-post");
1380}
1381
1382static
sewardj7cf4e6b2008-05-01 20:24:26 +00001383void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1384 if (SHOW_EVENTS >= 2)
1385 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1386 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001387 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001388 all__sanity_check("evh__new_mem_w_tid-post");
1389}
1390
1391static
sewardjb4112022007-11-09 22:49:28 +00001392void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001393 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001394 if (SHOW_EVENTS >= 1)
1395 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1396 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1397 if (rr || ww || xx)
1398 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001399 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001400 all__sanity_check("evh__new_mem_w_perms-post");
1401}
1402
1403static
1404void evh__set_perms ( Addr a, SizeT len,
1405 Bool rr, Bool ww, Bool xx ) {
1406 if (SHOW_EVENTS >= 1)
1407 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1408 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1409 /* Hmm. What should we do here, that actually makes any sense?
1410 Let's say: if neither readable nor writable, then declare it
1411 NoAccess, else leave it alone. */
1412 if (!(rr || ww))
1413 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001414 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001415 all__sanity_check("evh__set_perms-post");
1416}
1417
1418static
1419void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001420 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001421 if (SHOW_EVENTS >= 2)
1422 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1423 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001424 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001425 all__sanity_check("evh__die_mem-post");
1426}
1427
1428static
sewardj406bac82010-03-03 23:03:40 +00001429void evh__untrack_mem ( Addr a, SizeT len ) {
1430 // whereas it doesn't ignore this
1431 if (SHOW_EVENTS >= 2)
1432 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1433 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1434 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1435 all__sanity_check("evh__untrack_mem-post");
1436}
1437
1438static
sewardj23f12002009-07-24 08:45:08 +00001439void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1440 if (SHOW_EVENTS >= 2)
1441 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1442 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1443 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1444 all__sanity_check("evh__copy_mem-post");
1445}
1446
1447static
sewardjb4112022007-11-09 22:49:28 +00001448void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1449{
1450 if (SHOW_EVENTS >= 1)
1451 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1452 (Int)parent, (Int)child );
1453
1454 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001455 Thread* thr_p;
1456 Thread* thr_c;
1457 Thr* hbthr_p;
1458 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001459
sewardjf98e1c02008-10-25 16:22:41 +00001460 tl_assert(HG_(is_sane_ThreadId)(parent));
1461 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001462 tl_assert(parent != child);
1463
1464 thr_p = map_threads_maybe_lookup( parent );
1465 thr_c = map_threads_maybe_lookup( child );
1466
1467 tl_assert(thr_p != NULL);
1468 tl_assert(thr_c == NULL);
1469
sewardjf98e1c02008-10-25 16:22:41 +00001470 hbthr_p = thr_p->hbthr;
1471 tl_assert(hbthr_p != NULL);
sewardj60626642011-03-10 15:14:37 +00001472 tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001473
sewardjf98e1c02008-10-25 16:22:41 +00001474 hbthr_c = libhb_create ( hbthr_p );
1475
1476 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001477 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001478 thr_c = mk_Thread( hbthr_c );
sewardj60626642011-03-10 15:14:37 +00001479 tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
1480 libhb_set_Thr_hgthread(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001481
1482 /* and bind it in the thread-map table */
1483 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001484 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1485 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001486
1487 /* Record where the parent is so we can later refer to this in
1488 error messages.
1489
1490 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1491 The stack snapshot is taken immediately after the parent has
1492 returned from its sys_clone call. Unfortunately there is no
1493 unwind info for the insn following "syscall" - reading the
1494 glibc sources confirms this. So we ask for a snapshot to be
1495 taken as if RIP was 3 bytes earlier, in a place where there
1496 is unwind info. Sigh.
1497 */
1498 { Word first_ip_delta = 0;
1499# if defined(VGP_amd64_linux)
1500 first_ip_delta = -3;
1501# endif
1502 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1503 }
sewardjb4112022007-11-09 22:49:28 +00001504 }
1505
sewardjf98e1c02008-10-25 16:22:41 +00001506 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001507 all__sanity_check("evh__pre_thread_create-post");
1508}
1509
1510static
1511void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1512{
1513 Int nHeld;
1514 Thread* thr_q;
1515 if (SHOW_EVENTS >= 1)
1516 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1517 (Int)quit_tid );
1518
1519 /* quit_tid has disappeared without joining to any other thread.
1520 Therefore there is no synchronisation event associated with its
1521 exit and so we have to pretty much treat it as if it was still
1522 alive but mysteriously making no progress. That is because, if
1523 we don't know when it really exited, then we can never say there
1524 is a point in time when we're sure the thread really has
1525 finished, and so we need to consider the possibility that it
1526 lingers indefinitely and continues to interact with other
1527 threads. */
1528 /* However, it might have rendezvous'd with a thread that called
1529 pthread_join with this one as arg, prior to this point (that's
1530 how NPTL works). In which case there has already been a prior
1531 sync event. So in any case, just let the thread exit. On NPTL,
1532 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001533 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001534 thr_q = map_threads_maybe_lookup( quit_tid );
1535 tl_assert(thr_q != NULL);
1536
1537 /* Complain if this thread holds any locks. */
1538 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1539 tl_assert(nHeld >= 0);
1540 if (nHeld > 0) {
1541 HChar buf[80];
1542 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1543 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001544 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001545 }
1546
sewardj23f12002009-07-24 08:45:08 +00001547 /* Not much to do here:
1548 - tell libhb the thread is gone
1549 - clear the map_threads entry, in order that the Valgrind core
1550 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001551 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1552 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001553 tl_assert(thr_q->hbthr);
1554 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001555 tl_assert(thr_q->coretid == quit_tid);
1556 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001557 map_threads_delete( quit_tid );
1558
sewardjf98e1c02008-10-25 16:22:41 +00001559 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001560 all__sanity_check("evh__pre_thread_ll_exit-post");
1561}
1562
sewardj61bc2c52011-02-09 10:34:00 +00001563/* This is called immediately after fork, for the child only. 'tid'
1564 is the only surviving thread (as per POSIX rules on fork() in
1565 threaded programs), so we have to clean up map_threads to remove
1566 entries for any other threads. */
1567static
1568void evh__atfork_child ( ThreadId tid )
1569{
1570 UInt i;
1571 Thread* thr;
1572 /* Slot 0 should never be used. */
1573 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1574 tl_assert(!thr);
1575 /* Clean up all other slots except 'tid'. */
1576 for (i = 1; i < VG_N_THREADS; i++) {
1577 if (i == tid)
1578 continue;
1579 thr = map_threads_maybe_lookup(i);
1580 if (!thr)
1581 continue;
1582 /* Cleanup actions (next 5 lines) copied from end of
1583 evh__pre_thread_ll_exit; keep in sync. */
1584 tl_assert(thr->hbthr);
1585 libhb_async_exit(thr->hbthr);
1586 tl_assert(thr->coretid == i);
1587 thr->coretid = VG_INVALID_THREADID;
1588 map_threads_delete(i);
1589 }
1590}
1591
sewardjf98e1c02008-10-25 16:22:41 +00001592
sewardjb4112022007-11-09 22:49:28 +00001593static
1594void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1595{
sewardjb4112022007-11-09 22:49:28 +00001596 Thread* thr_s;
1597 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001598 Thr* hbthr_s;
1599 Thr* hbthr_q;
1600 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001601
1602 if (SHOW_EVENTS >= 1)
1603 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1604 (Int)stay_tid, quit_thr );
1605
sewardjf98e1c02008-10-25 16:22:41 +00001606 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001607
1608 thr_s = map_threads_maybe_lookup( stay_tid );
1609 thr_q = quit_thr;
1610 tl_assert(thr_s != NULL);
1611 tl_assert(thr_q != NULL);
1612 tl_assert(thr_s != thr_q);
1613
sewardjf98e1c02008-10-25 16:22:41 +00001614 hbthr_s = thr_s->hbthr;
1615 hbthr_q = thr_q->hbthr;
1616 tl_assert(hbthr_s != hbthr_q);
sewardj60626642011-03-10 15:14:37 +00001617 tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
1618 tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001619
sewardjf98e1c02008-10-25 16:22:41 +00001620 /* Allocate a temporary synchronisation object and use it to send
1621 an imaginary message from the quitter to the stayer, the purpose
1622 being to generate a dependence from the quitter to the
1623 stayer. */
1624 so = libhb_so_alloc();
1625 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001626 /* Both the send and the recv are strong, so the stayer acquires
1627 a full dependency on everything the quitter did. Note the send
1628 is done on behalf of a thread which no longer exists. */
sewardjf98e1c02008-10-25 16:22:41 +00001629 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1630 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1631 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001632
sewardjf98e1c02008-10-25 16:22:41 +00001633 /* evh__pre_thread_ll_exit issues an error message if the exiting
1634 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001635
1636 /* This holds because, at least when using NPTL as the thread
1637 library, we should be notified the low level thread exit before
1638 we hear of any join event on it. The low level exit
1639 notification feeds through into evh__pre_thread_ll_exit,
1640 which should clear the map_threads entry for it. Hence we
1641 expect there to be no map_threads entry at this point. */
1642 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1643 == VG_INVALID_THREADID);
1644
sewardjf98e1c02008-10-25 16:22:41 +00001645 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001646 all__sanity_check("evh__post_thread_join-post");
1647}
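/* Client-side view of the above (an illustrative sketch; 'x' and
   'worker' are hypothetical names, not from this file):

      int x = 0;  // shared
      void* worker ( void* v ) { x = 42; return NULL; }
      ...
      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);  // strong recv from the quitter's SO
      assert(x == 42);        // ordered after the quitter's write

   The strong send/recv pair through the temporary SO is what makes
   the read of 'x' after the join appear ordered rather than racing. */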
1648
1649static
1650void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1651 Addr a, SizeT size) {
1652 if (SHOW_EVENTS >= 2
1653 || (SHOW_EVENTS >= 1 && size != 1))
1654 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1655 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001656 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001657 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001658 all__sanity_check("evh__pre_mem_read-post");
1659}
1660
1661static
1662void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1663 Char* s, Addr a ) {
1664 Int len;
1665 if (SHOW_EVENTS >= 1)
1666 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1667 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001668 // Don't segfault if the string starts in an obviously stupid
1669 // place. Actually we should check the whole string, not just
1670 // the start address, but that's too much trouble. At least
1671 // checking the first byte is better than nothing. See #255009.
1672 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1673 return;
sewardjb4112022007-11-09 22:49:28 +00001674 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001675 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001676 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001677 all__sanity_check("evh__pre_mem_read_asciiz-post");
1678}
1679
1680static
1681void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1682 Addr a, SizeT size ) {
1683 if (SHOW_EVENTS >= 1)
1684 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1685 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001686 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001687 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001688 all__sanity_check("evh__pre_mem_write-post");
1689}
1690
1691static
1692void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1693 if (SHOW_EVENTS >= 1)
1694 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1695 (void*)a, len, (Int)is_inited );
1696 // FIXME: this is kinda stupid: both branches are identical, so 'is_inited' is ignored
1697 if (is_inited) {
1698 shadow_mem_make_New(get_current_Thread(), a, len);
1699 } else {
1700 shadow_mem_make_New(get_current_Thread(), a, len);
1701 }
sewardjf98e1c02008-10-25 16:22:41 +00001702 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001703 all__sanity_check("evh__pre_mem_read-post");
1704}
1705
1706static
1707void evh__die_mem_heap ( Addr a, SizeT len ) {
1708 if (SHOW_EVENTS >= 1)
1709 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1710 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001711 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001712 all__sanity_check("evh__pre_mem_read-post");
1713}
1714
sewardj23f12002009-07-24 08:45:08 +00001715/* --- Event handlers called from generated code --- */
1716
sewardjb4112022007-11-09 22:49:28 +00001717static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001718void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001719 Thread* thr = get_current_Thread_in_C_C();
1720 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001721 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001722}
sewardjf98e1c02008-10-25 16:22:41 +00001723
sewardjb4112022007-11-09 22:49:28 +00001724static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001725void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001726 Thread* thr = get_current_Thread_in_C_C();
1727 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001728 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001729}
sewardjf98e1c02008-10-25 16:22:41 +00001730
sewardjb4112022007-11-09 22:49:28 +00001731static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001732void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001733 Thread* thr = get_current_Thread_in_C_C();
1734 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001735 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001736}
sewardjf98e1c02008-10-25 16:22:41 +00001737
sewardjb4112022007-11-09 22:49:28 +00001738static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001739void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001740 Thread* thr = get_current_Thread_in_C_C();
1741 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001742 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001743}
sewardjf98e1c02008-10-25 16:22:41 +00001744
sewardjb4112022007-11-09 22:49:28 +00001745static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001746void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001747 Thread* thr = get_current_Thread_in_C_C();
1748 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001749 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001750}
1751
1752static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001753void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001754 Thread* thr = get_current_Thread_in_C_C();
1755 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001756 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001757}
sewardjf98e1c02008-10-25 16:22:41 +00001758
sewardjb4112022007-11-09 22:49:28 +00001759static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001760void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001761 Thread* thr = get_current_Thread_in_C_C();
1762 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001763 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001764}
sewardjf98e1c02008-10-25 16:22:41 +00001765
sewardjb4112022007-11-09 22:49:28 +00001766static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001767void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001768 Thread* thr = get_current_Thread_in_C_C();
1769 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001770 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001771}
sewardjf98e1c02008-10-25 16:22:41 +00001772
sewardjb4112022007-11-09 22:49:28 +00001773static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001774void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001775 Thread* thr = get_current_Thread_in_C_C();
1776 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001777 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001778}
sewardjf98e1c02008-10-25 16:22:41 +00001779
sewardjb4112022007-11-09 22:49:28 +00001780static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001781void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001782 Thread* thr = get_current_Thread_in_C_C();
1783 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001784 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001785}
1786
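/* How the helpers above get called (a sketch; the IR shown is
   illustrative, not literal instrumentation output): the
   instrumentation pass plants a dirty helper call in front of each
   guest memory access, so that a 4-byte client load of address 'a'
   is executed roughly as

      (dirty call) evh__mem_help_cread_4(a);
      t1 = LDle:I32(a);

   i.e. the access is reported to libhb just before it happens, while
   current_Thread is guaranteed to be valid. */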
sewardjb4112022007-11-09 22:49:28 +00001787
sewardj9f569b72008-11-13 13:33:09 +00001788/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001789/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001790/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001791
1792/* EXPOSITION only: by intercepting lock init events we can show the
1793 user where the lock was initialised, rather than only being able to
1794 show where it was first locked. Intercepting lock initialisations
1795 is not necessary for the basic operation of the race checker. */
1796static
1797void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1798 void* mutex, Word mbRec )
1799{
1800 if (SHOW_EVENTS >= 1)
1801 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1802 (Int)tid, mbRec, (void*)mutex );
1803 tl_assert(mbRec == 0 || mbRec == 1);
1804 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1805 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001806 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001807 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1808}
1809
1810static
1811void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1812{
1813 Thread* thr;
1814 Lock* lk;
1815 if (SHOW_EVENTS >= 1)
1816 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1817 (Int)tid, (void*)mutex );
1818
1819 thr = map_threads_maybe_lookup( tid );
1820 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001821 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001822
1823 lk = map_locks_maybe_lookup( (Addr)mutex );
1824
1825 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001826 HG_(record_error_Misc)(
1827 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001828 }
1829
1830 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001831 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001832 tl_assert( lk->guestaddr == (Addr)mutex );
1833 if (lk->heldBy) {
1834 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001835 HG_(record_error_Misc)(
1836 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001837 /* remove lock from locksets of all owning threads */
1838 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001839 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001840 lk->heldBy = NULL;
1841 lk->heldW = False;
1842 lk->acquired_at = NULL;
1843 }
1844 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001845 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001846
1847 if (HG_(clo_track_lockorders))
1848 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001849 map_locks_delete( lk->guestaddr );
1850 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001851 }
1852
sewardjf98e1c02008-10-25 16:22:41 +00001853 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001854 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1855}
1856
1857static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1858 void* mutex, Word isTryLock )
1859{
1860 /* Just check the mutex is sane; nothing else to do. */
1861 // 'mutex' may be invalid - not checked by wrapper
1862 Thread* thr;
1863 Lock* lk;
1864 if (SHOW_EVENTS >= 1)
1865 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1866 (Int)tid, (void*)mutex );
1867
1868 tl_assert(isTryLock == 0 || isTryLock == 1);
1869 thr = map_threads_maybe_lookup( tid );
1870 tl_assert(thr); /* cannot fail - Thread* must already exist */
1871
1872 lk = map_locks_maybe_lookup( (Addr)mutex );
1873
1874 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001875 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1876 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001877 }
1878
1879 if ( lk
1880 && isTryLock == 0
1881 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1882 && lk->heldBy
1883 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001884 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001885 /* uh, it's a non-recursive lock and we already w-hold it, and
1886 this is a real lock operation (not a speculative "tryLock"
1887 kind of thing). Duh. Deadlock coming up; but at least
1888 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00001889 HChar* errstr = "Attempt to re-lock a "
1890 "non-recursive lock I already hold";
1891 HChar* auxstr = "Lock was previously acquired";
1892 if (lk->acquired_at) {
1893 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
1894 } else {
1895 HG_(record_error_Misc)( thr, errstr );
1896 }
sewardjb4112022007-11-09 22:49:28 +00001897 }
1898}
1899
1900static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1901{
1902 // only called if the real library call succeeded - so mutex is sane
1903 Thread* thr;
1904 if (SHOW_EVENTS >= 1)
1905 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1906 (Int)tid, (void*)mutex );
1907
1908 thr = map_threads_maybe_lookup( tid );
1909 tl_assert(thr); /* cannot fail - Thread* must already exist */
1910
1911 evhH__post_thread_w_acquires_lock(
1912 thr,
1913 LK_mbRec, /* if not known, create new lock with this LockKind */
1914 (Addr)mutex
1915 );
1916}
1917
1918static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1919{
1920 // 'mutex' may be invalid - not checked by wrapper
1921 Thread* thr;
1922 if (SHOW_EVENTS >= 1)
1923 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1924 (Int)tid, (void*)mutex );
1925
1926 thr = map_threads_maybe_lookup( tid );
1927 tl_assert(thr); /* cannot fail - Thread* must already exist */
1928
1929 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1930}
1931
1932static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1933{
1934 // only called if the real library call succeeded - so mutex is sane
1935 Thread* thr;
1936 if (SHOW_EVENTS >= 1)
1937 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1938 (Int)tid, (void*)mutex );
1939 thr = map_threads_maybe_lookup( tid );
1940 tl_assert(thr); /* cannot fail - Thread* must already exist */
1941
1942 // anything we should do here?
1943}
1944
1945
sewardj5a644da2009-08-11 10:35:58 +00001946/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00001947/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00001948/* ------------------------------------------------------- */
1949
1950/* All a bit of a kludge. Pretend we're really dealing with ordinary
1951 pthread_mutex_t's instead, for the most part. */
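/* Concretely (a sketch of the glibc behaviour being modelled, not
   code from this file): both of

      pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE);
      pthread_spin_unlock(&sl);

   boil down to storing the 'unlocked' value into the spinlock word,
   which is why a single INIT_OR_UNLOCK pre/post pair below covers
   both entry points. */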
1952
1953static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
1954 void* slock )
1955{
1956 Thread* thr;
1957 Lock* lk;
1958 /* In glibc's kludgey world, we're either initialising or unlocking
1959 it. Since this is the pre-routine, if it is locked, unlock it
1960 and take a dependence edge. Otherwise, do nothing. */
1961
1962 if (SHOW_EVENTS >= 1)
1963 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
1964 "(ctid=%d, slock=%p)\n",
1965 (Int)tid, (void*)slock );
1966
1967 thr = map_threads_maybe_lookup( tid );
1968 /* cannot fail - Thread* must already exist */
1969 tl_assert( HG_(is_sane_Thread)(thr) );
1970
1971 lk = map_locks_maybe_lookup( (Addr)slock );
1972 if (lk && lk->heldBy) {
1973 /* it's held. So do the normal pre-unlock actions, as copied
1974 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
1975 duplicates the map_locks_maybe_lookup. */
1976 evhH__pre_thread_releases_lock( thr, (Addr)slock,
1977 False/*!isRDWR*/ );
1978 }
1979}
1980
1981static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
1982 void* slock )
1983{
1984 Lock* lk;
1985 /* More kludgery. If the lock has never been seen before, do
1986 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
1987 nothing. */
1988
1989 if (SHOW_EVENTS >= 1)
1990 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
1991 "(ctid=%d, slock=%p)\n",
1992 (Int)tid, (void*)slock );
1993
1994 lk = map_locks_maybe_lookup( (Addr)slock );
1995 if (!lk) {
1996 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
1997 }
1998}
1999
2000static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2001 void* slock, Word isTryLock )
2002{
2003 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2004}
2005
2006static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2007 void* slock )
2008{
2009 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2010}
2011
2012static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2013 void* slock )
2014{
2015 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2016}
2017
2018
sewardj9f569b72008-11-13 13:33:09 +00002019/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002020/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002021/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002022
sewardj02114542009-07-28 20:52:36 +00002023/* A mapping from CV to (the SO associated with it, plus some
2024 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002025 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2026 wait on it completes, we do a 'recv' from the SO. This is believed
2027 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002028 signallings/broadcasts.
2029*/
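/* As a concrete sketch (hypothetical client code; 'ready', 'cv' and
   'mx' are illustrative names):

      // signalling thread:
      pthread_mutex_lock(&mx);
      ready = 1;
      pthread_cond_signal(&cv);        // 'send' on the CV's SO
      pthread_mutex_unlock(&mx);

      // waiting thread:
      pthread_mutex_lock(&mx);
      while (!ready)
         pthread_cond_wait(&cv, &mx);  // on success: 'recv' from the SO
      pthread_mutex_unlock(&mx);

   The recv gives the waiter a dependency on everything the signaller
   did before signalling, so its later use of 'ready' (and any data
   published alongside it) is seen as ordered, not racing. */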
2030
sewardj02114542009-07-28 20:52:36 +00002031/* .so is the SO for this CV.
2032 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002033
sewardj02114542009-07-28 20:52:36 +00002034 POSIX says effectively that the first pthread_cond_{timed}wait call
2035 causes a dynamic binding between the CV and the mutex, and that
2036 lasts until such time as the waiter count falls to zero. Hence
2037 need to keep track of the number of waiters in order to do
2038 consistency tracking. */
2039typedef
2040 struct {
2041 SO* so; /* libhb-allocated SO */
2042 void* mx_ga; /* addr of associated mutex, if any */
2043 UWord nWaiters; /* # threads waiting on the CV */
2044 }
2045 CVInfo;
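/* The binding rule, sketched with hypothetical client code:

      // Thread A:
      pthread_cond_wait(&cv, &mx1);  // nWaiters 0 -> 1: binds cv to mx1

      // Thread B, while A is still waiting:
      pthread_cond_wait(&cv, &mx2);  // mx2 != mx1: reported as an error

   Once nWaiters falls back to zero the binding lapses, and cv may
   legitimately be re-bound to a different mutex by a later wait. */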
2046
2047
2048/* pthread_cond_t* -> CVInfo* */
2049static WordFM* map_cond_to_CVInfo = NULL;
2050
2051static void map_cond_to_CVInfo_INIT ( void ) {
2052 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2053 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2054 "hg.mctCI.1", HG_(free), NULL );
2055 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002056 }
2057}
2058
sewardj02114542009-07-28 20:52:36 +00002059static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002060 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002061 map_cond_to_CVInfo_INIT();
2062 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002063 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002064 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002065 } else {
sewardj02114542009-07-28 20:52:36 +00002066 SO* so = libhb_so_alloc();
2067 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2068 cvi->so = so;
2069 cvi->mx_ga = 0;
2070 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2071 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002072 }
2073}
2074
sewardj02114542009-07-28 20:52:36 +00002075static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002076 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002077 map_cond_to_CVInfo_INIT();
2078 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2079 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002080 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002081 tl_assert(cvi);
2082 tl_assert(cvi->so);
2083 libhb_so_dealloc(cvi->so);
2084 cvi->mx_ga = 0;
2085 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002086 }
2087}
2088
2089static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2090{
sewardjf98e1c02008-10-25 16:22:41 +00002091 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2092 cond to a SO if it is not already so bound, and 'send' on the
2093 SO. This is later used by other thread(s) which successfully
2094 exit from a pthread_cond_wait on the same cv; then they 'recv'
2095 from the SO, thereby acquiring a dependency on this signalling
2096 event. */
sewardjb4112022007-11-09 22:49:28 +00002097 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002098 CVInfo* cvi;
2099 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002100
2101 if (SHOW_EVENTS >= 1)
2102 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2103 (Int)tid, (void*)cond );
2104
sewardjb4112022007-11-09 22:49:28 +00002105 thr = map_threads_maybe_lookup( tid );
2106 tl_assert(thr); /* cannot fail - Thread* must already exist */
2107
sewardj02114542009-07-28 20:52:36 +00002108 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2109 tl_assert(cvi);
2110 tl_assert(cvi->so);
2111
sewardjb4112022007-11-09 22:49:28 +00002112 // error-if: mutex is bogus
2113 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002114 // Hmm. POSIX doesn't actually say that it's an error to call
2115 // pthread_cond_signal with the associated mutex being unlocked.
2116 // Although it does say the mutex should be held "if predictable
2117 // scheduling behavior is desired."
2118 //
2119 // For the moment, disable these checks.
2120 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2121 //if (lk == NULL || cvi->mx_ga == 0) {
2122 // HG_(record_error_Misc)( thr,
2123 // "pthread_cond_{signal,broadcast}: "
2124 // "no or invalid mutex associated with cond");
2125 //}
2126 ///* note: lk could be NULL. Be careful. */
2127 //if (lk) {
2128 // if (lk->kind == LK_rdwr) {
2129 // HG_(record_error_Misc)(thr,
2130 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2131 // }
2132 // if (lk->heldBy == NULL) {
2133 // HG_(record_error_Misc)(thr,
2134 // "pthread_cond_{signal,broadcast}: "
2135 // "associated lock is not held by any thread");
2136 // }
2137 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2138 // HG_(record_error_Misc)(thr,
2139 // "pthread_cond_{signal,broadcast}: "
2140 // "associated lock is not held by calling thread");
2141 // }
2142 //}
sewardjb4112022007-11-09 22:49:28 +00002143
sewardj02114542009-07-28 20:52:36 +00002144 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002145}
2146
2147/* returns True if it reckons 'mutex' is valid and held by this
2148 thread, else False */
2149static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2150 void* cond, void* mutex )
2151{
2152 Thread* thr;
2153 Lock* lk;
2154 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002155 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002156
2157 if (SHOW_EVENTS >= 1)
2158 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2159 "(ctid=%d, cond=%p, mutex=%p)\n",
2160 (Int)tid, (void*)cond, (void*)mutex );
2161
sewardjb4112022007-11-09 22:49:28 +00002162 thr = map_threads_maybe_lookup( tid );
2163 tl_assert(thr); /* cannot fail - Thread* must already exist */
2164
2165 lk = map_locks_maybe_lookup( (Addr)mutex );
2166
2167 /* Check for stupid mutex arguments. There are various ways to be
2168 a bozo. Only complain once, though, even if more than one thing
2169 is wrong. */
2170 if (lk == NULL) {
2171 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002172 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002173 thr,
2174 "pthread_cond_{timed}wait called with invalid mutex" );
2175 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002176 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002177 if (lk->kind == LK_rdwr) {
2178 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002179 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002180 thr, "pthread_cond_{timed}wait called with mutex "
2181 "of type pthread_rwlock_t*" );
2182 } else
2183 if (lk->heldBy == NULL) {
2184 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002185 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002186 thr, "pthread_cond_{timed}wait called with un-held mutex");
2187 } else
2188 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002189 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002190 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002191 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002192 thr, "pthread_cond_{timed}wait called with mutex "
2193 "held by a different thread" );
2194 }
2195 }
2196
2197 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002198 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2199 tl_assert(cvi);
2200 tl_assert(cvi->so);
2201 if (cvi->nWaiters == 0) {
2202 /* form initial (CV,MX) binding */
2203 cvi->mx_ga = mutex;
2204 }
2205 else /* check existing (CV,MX) binding */
2206 if (cvi->mx_ga != mutex) {
2207 HG_(record_error_Misc)(
2208 thr, "pthread_cond_{timed}wait: cond is associated "
2209 "with a different mutex");
2210 }
2211 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002212
2213 return lk_valid;
2214}
2215
2216static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2217 void* cond, void* mutex )
2218{
sewardjf98e1c02008-10-25 16:22:41 +00002219 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2220 the SO for this cond, and 'recv' from it so as to acquire a
2221 dependency edge back to the signaller/broadcaster. */
2222 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002223 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002224
2225 if (SHOW_EVENTS >= 1)
2226 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2227 "(ctid=%d, cond=%p, mutex=%p)\n",
2228 (Int)tid, (void*)cond, (void*)mutex );
2229
sewardjb4112022007-11-09 22:49:28 +00002230 thr = map_threads_maybe_lookup( tid );
2231 tl_assert(thr); /* cannot fail - Thread* must already exist */
2232
2233 // error-if: cond is also associated with a different mutex
2234
sewardj02114542009-07-28 20:52:36 +00002235 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2236 tl_assert(cvi);
2237 tl_assert(cvi->so);
2238 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002239
sewardj02114542009-07-28 20:52:36 +00002240 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002241 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2242 it? If this happened it would surely be a bug in the threads
2243 library. Or one of those fabled "spurious wakeups". */
2244 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2245 "succeeded on"
2246 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002247 }
sewardjf98e1c02008-10-25 16:22:41 +00002248
2249 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002250 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2251
2252 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002253}
2254
2255static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2256 void* cond )
2257{
2258 /* Deal with destroy events. The only purpose is to free storage
2259 associated with the CV, so as to avoid any possible resource
2260 leaks. */
2261 if (SHOW_EVENTS >= 1)
2262 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2263 "(ctid=%d, cond=%p)\n",
2264 (Int)tid, (void*)cond );
2265
sewardj02114542009-07-28 20:52:36 +00002266 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002267}
2268
2269
sewardj9f569b72008-11-13 13:33:09 +00002270/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002271/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002272/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002273
2274/* EXPOSITION only */
2275static
2276void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2277{
2278 if (SHOW_EVENTS >= 1)
2279 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2280 (Int)tid, (void*)rwl );
2281 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002282 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002283 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2284}
2285
2286static
2287void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2288{
2289 Thread* thr;
2290 Lock* lk;
2291 if (SHOW_EVENTS >= 1)
2292 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2293 (Int)tid, (void*)rwl );
2294
2295 thr = map_threads_maybe_lookup( tid );
2296 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002297 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002298
2299 lk = map_locks_maybe_lookup( (Addr)rwl );
2300
2301 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002302 HG_(record_error_Misc)(
2303 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002304 }
2305
2306 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002307 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002308 tl_assert( lk->guestaddr == (Addr)rwl );
2309 if (lk->heldBy) {
2310 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002311 HG_(record_error_Misc)(
2312 thr, "pthread_rwlock_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00002313 /* remove lock from locksets of all owning threads */
2314 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002315 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002316 lk->heldBy = NULL;
2317 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002318 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002319 }
2320 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002321 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002322
2323 if (HG_(clo_track_lockorders))
2324 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002325 map_locks_delete( lk->guestaddr );
2326 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002327 }
2328
sewardjf98e1c02008-10-25 16:22:41 +00002329 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002330 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2331}
2332
2333static
sewardj789c3c52008-02-25 12:10:07 +00002334void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2335 void* rwl,
2336 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002337{
2338 /* Just check the rwl is sane; nothing else to do. */
2339 // 'rwl' may be invalid - not checked by wrapper
2340 Thread* thr;
2341 Lock* lk;
2342 if (SHOW_EVENTS >= 1)
2343 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2344 (Int)tid, (Int)isW, (void*)rwl );
2345
2346 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002347 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002348 thr = map_threads_maybe_lookup( tid );
2349 tl_assert(thr); /* cannot fail - Thread* must already exist */
2350
2351 lk = map_locks_maybe_lookup( (Addr)rwl );
2352 if ( lk
2353 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2354 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002355 HG_(record_error_Misc)(
2356 thr, "pthread_rwlock_{rd,rw}lock with a "
2357 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002358 }
2359}
2360
2361static
2362void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2363{
2364 // only called if the real library call succeeded - so mutex is sane
2365 Thread* thr;
2366 if (SHOW_EVENTS >= 1)
2367 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2368 (Int)tid, (Int)isW, (void*)rwl );
2369
2370 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2371 thr = map_threads_maybe_lookup( tid );
2372 tl_assert(thr); /* cannot fail - Thread* must already exist */
2373
2374 (isW ? evhH__post_thread_w_acquires_lock
2375 : evhH__post_thread_r_acquires_lock)(
2376 thr,
2377 LK_rdwr, /* if not known, create new lock with this LockKind */
2378 (Addr)rwl
2379 );
2380}
2381
2382static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2383{
2384 // 'rwl' may be invalid - not checked by wrapper
2385 Thread* thr;
2386 if (SHOW_EVENTS >= 1)
2387 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2388 (Int)tid, (void*)rwl );
2389
2390 thr = map_threads_maybe_lookup( tid );
2391 tl_assert(thr); /* cannot fail - Thread* must already exist */
2392
2393 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2394}
2395
2396static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2397{
2398 // only called if the real library call succeeded - so mutex is sane
2399 Thread* thr;
2400 if (SHOW_EVENTS >= 1)
2401 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2402 (Int)tid, (void*)rwl );
2403 thr = map_threads_maybe_lookup( tid );
2404 tl_assert(thr); /* cannot fail - Thread* must already exist */
2405
2406 // anything we should do here?
2407}
2408
2409
sewardj9f569b72008-11-13 13:33:09 +00002410/* ---------------------------------------------------------- */
2411/* -------------- events to do with semaphores -------------- */
2412/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002413
sewardj11e352f2007-11-30 11:11:02 +00002414/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002415 variables. */
2416
sewardjf98e1c02008-10-25 16:22:41 +00002417/* For each semaphore, we maintain a stack of SOs. When a 'post'
2418 operation is done on a semaphore (unlocking, essentially), a new SO
2419 is created for the posting thread, the posting thread does a strong
2420 send to it (which merely installs the posting thread's VC in the
2421 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002422
2423 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002424 semaphore, we pop a SO off the semaphore's stack (which should be
2425 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002426 dependencies between posters and waiters of the semaphore.
2427
sewardjf98e1c02008-10-25 16:22:41 +00002428 It may not be necessary to use a stack - perhaps a bag of SOs would
2429 do. But we do need to keep track of how many unconsumed posts have
2430 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002431
sewardjf98e1c02008-10-25 16:22:41 +00002432 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002433 twice on S. T3 cannot complete its waits without both T1 and T2
2434 posting. The above mechanism will ensure that T3 acquires
2435 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002436
sewardjf98e1c02008-10-25 16:22:41 +00002437 When a semaphore is initialised with value N, we do as if we'd
2438 posted N times on the semaphore: basically create N SOs and do a
2439 strong send to all of then. This allows up to N waits on the
2440 semaphore to acquire a dependency on the initialisation point,
2441 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002442
2443 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2444 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002445*/
2446
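/* The T1/T2/T3 scenario above, as a sketch (hypothetical client
   code):

      sem_t s;
      sem_init(&s, 0, 0);
      // T1: sem_post(&s);  -> alloc an SO, strong send, push it
      // T2: sem_post(&s);  -> alloc another SO, strong send, push it
      // T3: sem_wait(&s);  -> pop an SO, strong recv from it
      //     sem_wait(&s);  -> pop the other SO, strong recv from it

   After its second wait, T3 has received from both SOs and so
   carries dependencies on both posting threads, as required. */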
sewardjf98e1c02008-10-25 16:22:41 +00002447/* sem_t* -> XArray* SO* */
2448static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002449
sewardjf98e1c02008-10-25 16:22:41 +00002450static void map_sem_to_SO_stack_INIT ( void ) {
2451 if (map_sem_to_SO_stack == NULL) {
2452 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2453 HG_(free), NULL );
2454 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002455 }
2456}
2457
sewardjf98e1c02008-10-25 16:22:41 +00002458static void push_SO_for_sem ( void* sem, SO* so ) {
2459 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002460 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002461 tl_assert(so);
2462 map_sem_to_SO_stack_INIT();
2463 if (VG_(lookupFM)( map_sem_to_SO_stack,
2464 &keyW, (UWord*)&xa, (UWord)sem )) {
2465 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002466 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002467 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002468 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002469 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2470 VG_(addToXA)( xa, &so );
2471 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002472 }
2473}
2474
sewardjf98e1c02008-10-25 16:22:41 +00002475static SO* mb_pop_SO_for_sem ( void* sem ) {
2476 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002477 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002478 SO* so;
2479 map_sem_to_SO_stack_INIT();
2480 if (VG_(lookupFM)( map_sem_to_SO_stack,
2481 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002482 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002483 Word sz;
2484 tl_assert(keyW == (UWord)sem);
2485 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002486 tl_assert(sz >= 0);
2487 if (sz == 0)
2488 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002489 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2490 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002491 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002492 return so;
sewardjb4112022007-11-09 22:49:28 +00002493 } else {
2494 /* hmm, that's odd. No stack for this semaphore. */
2495 return NULL;
2496 }
2497}
2498
sewardj11e352f2007-11-30 11:11:02 +00002499static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002500{
sewardjf98e1c02008-10-25 16:22:41 +00002501 UWord keyW, valW;
2502 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002503
sewardjb4112022007-11-09 22:49:28 +00002504 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002505 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002506 (Int)tid, (void*)sem );
2507
sewardjf98e1c02008-10-25 16:22:41 +00002508 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002509
sewardjf98e1c02008-10-25 16:22:41 +00002510 /* Empty out the semaphore's SO stack. This way of doing it is
2511 stupid, but at least it's easy. */
2512 while (1) {
2513 so = mb_pop_SO_for_sem( sem );
2514 if (!so) break;
2515 libhb_so_dealloc(so);
2516 }
2517
2518 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2519 XArray* xa = (XArray*)valW;
2520 tl_assert(keyW == (UWord)sem);
2521 tl_assert(xa);
2522 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2523 VG_(deleteXA)(xa);
2524 }
sewardjb4112022007-11-09 22:49:28 +00002525}
2526
sewardj11e352f2007-11-30 11:11:02 +00002527static
2528void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2529{
sewardjf98e1c02008-10-25 16:22:41 +00002530 SO* so;
2531 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002532
2533 if (SHOW_EVENTS >= 1)
2534 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2535 (Int)tid, (void*)sem, value );
2536
sewardjf98e1c02008-10-25 16:22:41 +00002537 thr = map_threads_maybe_lookup( tid );
2538 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002539
sewardjf98e1c02008-10-25 16:22:41 +00002540 /* Empty out the semaphore's SO stack. This way of doing it is
2541 stupid, but at least it's easy. */
2542 while (1) {
2543 so = mb_pop_SO_for_sem( sem );
2544 if (!so) break;
2545 libhb_so_dealloc(so);
2546 }
sewardj11e352f2007-11-30 11:11:02 +00002547
sewardjf98e1c02008-10-25 16:22:41 +00002548 /* If we don't do this check, the following loop runs us out
2549 of memory for stupid initial values of 'value'. */
2550 if (value > 10000) {
2551 HG_(record_error_Misc)(
2552 thr, "sem_init: initial value exceeds 10000; using 10000" );
2553 value = 10000;
2554 }
sewardj11e352f2007-11-30 11:11:02 +00002555
sewardjf98e1c02008-10-25 16:22:41 +00002556 /* Now create 'value' new SOs for the thread, do a strong send to
2557 each of them, and push them all on the stack. */
2558 for (; value > 0; value--) {
2559 Thr* hbthr = thr->hbthr;
2560 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002561
sewardjf98e1c02008-10-25 16:22:41 +00002562 so = libhb_so_alloc();
2563 libhb_so_send( hbthr, so, True/*strong send*/ );
2564 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002565 }
2566}
2567
2568static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002569{
sewardjf98e1c02008-10-25 16:22:41 +00002570 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2571 it (iow, write our VC into it, then tick ours), and push the SO
2572 on on a stack of SOs associated with 'sem'. This is later used
2573 by other thread(s) which successfully exit from a sem_wait on
2574 the same sem; by doing a strong recv from SOs popped of the
2575 stack, they acquire dependencies on the posting thread
2576 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002577
sewardjf98e1c02008-10-25 16:22:41 +00002578 Thread* thr;
2579 SO* so;
2580 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002581
2582 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002583 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002584 (Int)tid, (void*)sem );
2585
2586 thr = map_threads_maybe_lookup( tid );
2587 tl_assert(thr); /* cannot fail - Thread* must already exist */
2588
2589 // error-if: sem is bogus
2590
sewardjf98e1c02008-10-25 16:22:41 +00002591 hbthr = thr->hbthr;
2592 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002593
sewardjf98e1c02008-10-25 16:22:41 +00002594 so = libhb_so_alloc();
2595 libhb_so_send( hbthr, so, True/*strong send*/ );
2596 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002597}
2598
sewardj11e352f2007-11-30 11:11:02 +00002599static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002600{
sewardjf98e1c02008-10-25 16:22:41 +00002601 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2602 the 'sem' from this semaphore's SO-stack, and do a strong recv
2603 from it. This creates a dependency back to one of the post-ers
2604 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002605
sewardjf98e1c02008-10-25 16:22:41 +00002606 Thread* thr;
2607 SO* so;
2608 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002609
2610 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002611 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002612 (Int)tid, (void*)sem );
2613
2614 thr = map_threads_maybe_lookup( tid );
2615 tl_assert(thr); /* cannot fail - Thread* must already exist */
2616
2617 // error-if: sem is bogus
2618
sewardjf98e1c02008-10-25 16:22:41 +00002619 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002620
sewardjf98e1c02008-10-25 16:22:41 +00002621 if (so) {
2622 hbthr = thr->hbthr;
2623 tl_assert(hbthr);
2624
2625 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2626 libhb_so_dealloc(so);
2627 } else {
2628 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2629 If this happened it would surely be a bug in the threads
2630 library. */
2631 HG_(record_error_Misc)(
2632 thr, "Bug in libpthread: sem_wait succeeded on"
2633 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002634 }
2635}
2636
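/* Illustrative only: the net effect of the SEM_POST_PRE/SEM_WAIT_POST
   pair above is that a successful waiter inherits the poster's vector
   clock, so writes made before a post are ordered before reads made
   after the corresponding wait.  A minimal sketch of hypothetical
   client code (assumes the usual <pthread.h>/<semaphore.h> API): */
//
// int   shared;    /* plain, unlocked data */
// sem_t sem;       /* assume sem_init(&sem, 0, 0) has been done */
//
// void* poster ( void* v ) {
//    shared = 42;          /* (1) */
//    sem_post(&sem);       /* hook above: strong send on a fresh SO */
//    return NULL;
// }
// void* waiter ( void* v ) {
//    sem_wait(&sem);       /* hook above: strong recv from popped SO */
//    int copy = shared;    /* (2) ordered after (1) -- not a race */
//    return NULL;
// }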
2637
sewardj9f569b72008-11-13 13:33:09 +00002638/* -------------------------------------------------------- */
2639/* -------------- events to do with barriers -------------- */
2640/* -------------------------------------------------------- */
2641
2642typedef
2643 struct {
2644 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002645 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002646 UWord size; /* declared size */
                2647      XArray* waiting; /* XA of Thread*.  # present is 0 .. size */
2648 }
2649 Bar;
2650
2651static Bar* new_Bar ( void ) {
2652 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2653 tl_assert(bar);
2654 /* all fields are zero */
2655 tl_assert(bar->initted == False);
2656 return bar;
2657}
2658
2659static void delete_Bar ( Bar* bar ) {
2660 tl_assert(bar);
2661 if (bar->waiting)
2662 VG_(deleteXA)(bar->waiting);
2663 HG_(free)(bar);
2664}
2665
2666/* A mapping which stores auxiliary data for barriers. */
2667
2668/* pthread_barrier_t* -> Bar* */
2669static WordFM* map_barrier_to_Bar = NULL;
2670
2671static void map_barrier_to_Bar_INIT ( void ) {
2672 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2673 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2674 "hg.mbtBI.1", HG_(free), NULL );
2675 tl_assert(map_barrier_to_Bar != NULL);
2676 }
2677}
2678
2679static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2680 UWord key, val;
2681 map_barrier_to_Bar_INIT();
2682 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2683 tl_assert(key == (UWord)barrier);
2684 return (Bar*)val;
2685 } else {
2686 Bar* bar = new_Bar();
2687 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2688 return bar;
2689 }
2690}
2691
2692static void map_barrier_to_Bar_delete ( void* barrier ) {
2693 UWord keyW, valW;
2694 map_barrier_to_Bar_INIT();
2695 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2696 Bar* bar = (Bar*)valW;
2697 tl_assert(keyW == (UWord)barrier);
2698 delete_Bar(bar);
2699 }
2700}
2701
2702
2703static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2704 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002705 UWord count,
2706 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002707{
2708 Thread* thr;
2709 Bar* bar;
2710
2711 if (SHOW_EVENTS >= 1)
2712 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002713 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2714 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002715
2716 thr = map_threads_maybe_lookup( tid );
2717 tl_assert(thr); /* cannot fail - Thread* must already exist */
2718
2719 if (count == 0) {
2720 HG_(record_error_Misc)(
2721 thr, "pthread_barrier_init: 'count' argument is zero"
2722 );
2723 }
2724
sewardj406bac82010-03-03 23:03:40 +00002725 if (resizable != 0 && resizable != 1) {
2726 HG_(record_error_Misc)(
2727 thr, "pthread_barrier_init: invalid 'resizable' argument"
2728 );
2729 }
2730
sewardj9f569b72008-11-13 13:33:09 +00002731 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2732 tl_assert(bar);
2733
2734 if (bar->initted) {
2735 HG_(record_error_Misc)(
2736 thr, "pthread_barrier_init: barrier is already initialised"
2737 );
2738 }
2739
2740 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2741 tl_assert(bar->initted);
2742 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002743 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002744 );
2745 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2746 }
2747 if (!bar->waiting) {
2748 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2749 sizeof(Thread*) );
2750 }
2751
2752 tl_assert(bar->waiting);
2753 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002754 bar->initted = True;
2755 bar->resizable = resizable == 1 ? True : False;
2756 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002757}
2758
2759
2760static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2761 void* barrier )
2762{
sewardj553655c2008-11-14 19:41:19 +00002763 Thread* thr;
2764 Bar* bar;
2765
sewardj9f569b72008-11-13 13:33:09 +00002766 /* Deal with destroy events. The only purpose is to free storage
2767 associated with the barrier, so as to avoid any possible
2768 resource leaks. */
2769 if (SHOW_EVENTS >= 1)
2770 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2771 "(tid=%d, barrier=%p)\n",
2772 (Int)tid, (void*)barrier );
2773
sewardj553655c2008-11-14 19:41:19 +00002774 thr = map_threads_maybe_lookup( tid );
2775 tl_assert(thr); /* cannot fail - Thread* must already exist */
2776
2777 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2778 tl_assert(bar);
2779
2780 if (!bar->initted) {
2781 HG_(record_error_Misc)(
2782 thr, "pthread_barrier_destroy: barrier was never initialised"
2783 );
2784 }
2785
2786 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2787 HG_(record_error_Misc)(
2788 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2789 );
2790 }
2791
sewardj9f569b72008-11-13 13:33:09 +00002792 /* Maybe we shouldn't do this; just let it persist, so that when it
2793 is reinitialised we don't need to do any dynamic memory
2794 allocation? The downside is a potentially unlimited space leak,
2795 if the client creates (in turn) a large number of barriers all
2796 at different locations. Note that if we do later move to the
2797 don't-delete-it scheme, we need to mark the barrier as
2798 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002799 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002800 map_barrier_to_Bar_delete( barrier );
2801}
2802
2803
sewardj406bac82010-03-03 23:03:40 +00002804/* All the threads have arrived. Now do the Interesting Bit. Get a
2805 new synchronisation object and do a weak send to it from all the
2806 participating threads. This makes its vector clocks be the join of
2807 all the individual threads' vector clocks. Then do a strong
2808 receive from it back to all threads, so that their VCs are a copy
2809 of it (hence are all equal to the join of their original VCs.) */
2810static void do_barrier_cross_sync_and_empty ( Bar* bar )
2811{
2812 /* XXX check bar->waiting has no duplicates */
2813 UWord i;
2814 SO* so = libhb_so_alloc();
2815
2816 tl_assert(bar->waiting);
2817 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2818
2819 /* compute the join ... */
2820 for (i = 0; i < bar->size; i++) {
2821 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2822 Thr* hbthr = t->hbthr;
2823 libhb_so_send( hbthr, so, False/*weak send*/ );
2824 }
2825 /* ... and distribute to all threads */
2826 for (i = 0; i < bar->size; i++) {
2827 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2828 Thr* hbthr = t->hbthr;
2829 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2830 }
2831
2832 /* finally, we must empty out the waiting vector */
2833 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2834
2835 /* and we don't need this any more. Perhaps a stack-allocated
2836 SO would be better? */
2837 libhb_so_dealloc(so);
2838}
2839
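/* Worked example (illustrative): suppose three threads arrive at a
   size-3 barrier with vector clocks T1=[3,1,0], T2=[0,5,2] and
   T3=[1,0,4].  The three weak sends leave the SO holding the
   pointwise maximum [3,5,4]; the three strong recvs then copy that
   join back, so every thread leaves the barrier with clock [3,5,4]
   (modulo any per-thread tick libhb applies as part of the recv). */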
2840
sewardj9f569b72008-11-13 13:33:09 +00002841static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2842 void* barrier )
2843{
sewardj1c466b72008-11-19 11:52:14 +00002844 /* This function gets called after a client thread calls
2845 pthread_barrier_wait but before it arrives at the real
2846 pthread_barrier_wait.
2847
2848 Why is the following correct? It's a bit subtle.
2849
2850 If this is not the last thread arriving at the barrier, we simply
2851 note its presence and return. Because valgrind (at least as of
2852 Nov 08) is single threaded, we are guaranteed safe from any race
2853 conditions when in this function -- no other client threads are
2854 running.
2855
2856 If this is the last thread, then we are again the only running
2857 thread. All the other threads will have either arrived at the
2858 real pthread_barrier_wait or are on their way to it, but in any
2859 case are guaranteed not to be able to move past it, because this
2860 thread is currently in this function and so has not yet arrived
2861 at the real pthread_barrier_wait. That means that:
2862
2863 1. While we are in this function, none of the other threads
2864 waiting at the barrier can move past it.
2865
2866 2. When this function returns (and simulated execution resumes),
2867 this thread and all other waiting threads will be able to move
2868 past the real barrier.
2869
2870 Because of this, it is now safe to update the vector clocks of
2871 all threads, to represent the fact that they all arrived at the
2872 barrier and have all moved on. There is no danger of any
2873 complications to do with some threads leaving the barrier and
2874 racing back round to the front, whilst others are still leaving
2875 (which is the primary source of complication in correct handling/
2876 implementation of barriers). That can't happen because we update
2877 here our data structures so as to indicate that the threads have
2878 passed the barrier, even though, as per (2) above, they are
2879 guaranteed not to pass the barrier until we return.
2880
2881 This relies crucially on Valgrind being single threaded. If that
2882 changes, this will need to be reconsidered.
2883 */
sewardj9f569b72008-11-13 13:33:09 +00002884 Thread* thr;
2885 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00002886 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00002887
2888 if (SHOW_EVENTS >= 1)
2889 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2890 "(tid=%d, barrier=%p)\n",
2891 (Int)tid, (void*)barrier );
2892
2893 thr = map_threads_maybe_lookup( tid );
2894 tl_assert(thr); /* cannot fail - Thread* must already exist */
2895
2896 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2897 tl_assert(bar);
2898
2899 if (!bar->initted) {
2900 HG_(record_error_Misc)(
2901 thr, "pthread_barrier_wait: barrier is uninitialised"
2902 );
2903 return; /* client is broken .. avoid assertions below */
2904 }
2905
2906 /* guaranteed by _INIT_PRE above */
2907 tl_assert(bar->size > 0);
2908 tl_assert(bar->waiting);
2909
2910 VG_(addToXA)( bar->waiting, &thr );
2911
2912 /* guaranteed by this function */
2913 present = VG_(sizeXA)(bar->waiting);
2914 tl_assert(present > 0 && present <= bar->size);
2915
2916 if (present < bar->size)
2917 return;
2918
sewardj406bac82010-03-03 23:03:40 +00002919 do_barrier_cross_sync_and_empty(bar);
2920}
sewardj9f569b72008-11-13 13:33:09 +00002921
sewardj9f569b72008-11-13 13:33:09 +00002922
sewardj406bac82010-03-03 23:03:40 +00002923static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
2924 void* barrier,
2925 UWord newcount )
2926{
2927 Thread* thr;
2928 Bar* bar;
2929 UWord present;
2930
2931 if (SHOW_EVENTS >= 1)
2932 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
2933 "(tid=%d, barrier=%p, newcount=%lu)\n",
2934 (Int)tid, (void*)barrier, newcount );
2935
2936 thr = map_threads_maybe_lookup( tid );
2937 tl_assert(thr); /* cannot fail - Thread* must already exist */
2938
2939 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2940 tl_assert(bar);
2941
2942 if (!bar->initted) {
2943 HG_(record_error_Misc)(
2944 thr, "pthread_barrier_resize: barrier is uninitialised"
2945 );
2946 return; /* client is broken .. avoid assertions below */
2947 }
2948
2949 if (!bar->resizable) {
2950 HG_(record_error_Misc)(
                2951         thr, "pthread_barrier_resize: barrier may not be resized"
2952 );
2953 return; /* client is broken .. avoid assertions below */
2954 }
2955
2956 if (newcount == 0) {
2957 HG_(record_error_Misc)(
2958 thr, "pthread_barrier_resize: 'newcount' argument is zero"
2959 );
2960 return; /* client is broken .. avoid assertions below */
2961 }
2962
2963 /* guaranteed by _INIT_PRE above */
2964 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00002965 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00002966 /* Guaranteed by this fn */
2967 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00002968
sewardj406bac82010-03-03 23:03:40 +00002969 if (newcount >= bar->size) {
2970 /* Increasing the capacity. There's no possibility of threads
2971 moving on from the barrier in this situation, so just note
2972 the fact and do nothing more. */
2973 bar->size = newcount;
2974 } else {
2975 /* Decreasing the capacity. If we decrease it to be equal or
2976 below the number of waiting threads, they will now move past
2977 the barrier, so need to mess with dep edges in the same way
2978 as if the barrier had filled up normally. */
2979 present = VG_(sizeXA)(bar->waiting);
                2980      tl_assert(present <= bar->size);
2981 if (newcount <= present) {
2982 bar->size = present; /* keep the cross_sync call happy */
2983 do_barrier_cross_sync_and_empty(bar);
2984 }
2985 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00002986 }
sewardj9f569b72008-11-13 13:33:09 +00002987}
2988
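/* Worked example (illustrative): consider a barrier of size 4 with 3
   threads already waiting.  A resize to newcount=2 takes the
   decreasing branch: since 2 <= present (3), 'size' is momentarily
   set to 3 to satisfy the assertion in
   do_barrier_cross_sync_and_empty, all three waiters are released by
   the usual cross-sync, and 'size' is then recorded as 2. */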
2989
sewardjed2e72e2009-08-14 11:08:24 +00002990/* ----------------------------------------------------- */
2991/* ----- events to do with user-specified HB edges ----- */
2992/* ----------------------------------------------------- */
2993
2994/* A mapping from arbitrary UWord tag to the SO associated with it.
2995 The UWord tags are meaningless to us, interpreted only by the
2996 user. */
2997
2998
2999
3000/* UWord -> SO* */
3001static WordFM* map_usertag_to_SO = NULL;
3002
3003static void map_usertag_to_SO_INIT ( void ) {
3004 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3005 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3006 "hg.mutS.1", HG_(free), NULL );
3007 tl_assert(map_usertag_to_SO != NULL);
3008 }
3009}
3010
3011static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3012 UWord key, val;
3013 map_usertag_to_SO_INIT();
3014 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3015 tl_assert(key == (UWord)usertag);
3016 return (SO*)val;
3017 } else {
3018 SO* so = libhb_so_alloc();
3019 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3020 return so;
3021 }
3022}
3023
3024// If it's ever needed (XXX check before use)
3025//static void map_usertag_to_SO_delete ( UWord usertag ) {
3026// UWord keyW, valW;
3027// map_usertag_to_SO_INIT();
3028// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3029// SO* so = (SO*)valW;
3030// tl_assert(keyW == usertag);
3031// tl_assert(so);
3032// libhb_so_dealloc(so);
3033// }
3034//}
3035
3036
3037static
3038void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3039{
3040 /* TID is just about to notionally sent a message on a notional
3041 abstract synchronisation object whose identity is given by
3042 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003043 bound, and do a 'weak send' on the SO. This joins the vector
3044 clocks from this thread into any vector clocks already present
3045 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003046 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003047 thereby acquiring a dependency on all the events that have
3048 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003049 Thread* thr;
3050 SO* so;
3051
3052 if (SHOW_EVENTS >= 1)
3053 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3054 (Int)tid, usertag );
3055
3056 thr = map_threads_maybe_lookup( tid );
3057 tl_assert(thr); /* cannot fail - Thread* must already exist */
3058
3059 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3060 tl_assert(so);
3061
sewardj8c50d3c2011-03-11 18:38:12 +00003062 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003063}
3064
3065static
3066void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3067{
3068 /* TID has just notionally received a message from a notional
3069 abstract synchronisation object whose identity is given by
3070 USERTAG. Bind USERTAG to a real SO if it is not already so
3071 bound. If the SO has at some point in the past been 'sent' on,
3072 to a 'strong receive' on it, thereby acquiring a dependency on
3073 the sender. */
3074 Thread* thr;
3075 SO* so;
3076
3077 if (SHOW_EVENTS >= 1)
3078 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3079 (Int)tid, usertag );
3080
3081 thr = map_threads_maybe_lookup( tid );
3082 tl_assert(thr); /* cannot fail - Thread* must already exist */
3083
3084 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3085 tl_assert(so);
3086
3087 /* Acquire a dependency on it. If the SO has never so far been
3088 sent on, then libhb_so_recv will do nothing. So we're safe
3089 regardless of SO's history. */
3090 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3091}
3092
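/* Illustrative only: helgrind.h exposes these two hooks to clients as
   ANNOTATE_HAPPENS_BEFORE(tag) / ANNOTATE_HAPPENS_AFTER(tag), where
   'tag' is any address the client picks as the usertag.  A sketch of
   a hand-rolled publish/consume protocol annotated so that Helgrind
   sees the ordering (the atomic_ helpers are hypothetical stand-ins
   for whatever the client really uses): */
//
// /* producer */
// q->slot = msg;                     /* payload write */
// ANNOTATE_HAPPENS_BEFORE(q);        /* weak send on usertag 'q' */
// atomic_store(&q->ready, 1);        /* hand-rolled publication */
//
// /* consumer */
// while (!atomic_load(&q->ready)) ;  /* spin until published */
// ANNOTATE_HAPPENS_AFTER(q);         /* strong recv on usertag 'q' */
// use(q->slot);                      /* ordered after the payload write */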
3093
sewardjb4112022007-11-09 22:49:28 +00003094/*--------------------------------------------------------------*/
3095/*--- Lock acquisition order monitoring ---*/
3096/*--------------------------------------------------------------*/
3097
3098/* FIXME: here are some optimisations still to do in
3099 laog__pre_thread_acquires_lock.
3100
3101 The graph is structured so that if L1 --*--> L2 then L1 must be
3102 acquired before L2.
3103
3104 The common case is that some thread T holds (eg) L1 L2 and L3 and
3105 is repeatedly acquiring and releasing Ln, and there is no ordering
                3106      error in what it is doing.  Hence it repeatedly:
3107
3108 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3109 produces the answer No (because there is no error).
3110
3111 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3112 (because they already got added the first time T acquired Ln).
3113
3114 Hence cache these two events:
3115
3116 (1) Cache result of the query from last time. Invalidate the cache
3117 any time any edges are added to or deleted from laog.
3118
3119 (2) Cache these add-edge requests and ignore them if said edges
3120 have already been added to laog. Invalidate the cache any time
3121 any edges are deleted from laog.
3122*/
3123
3124typedef
3125 struct {
3126 WordSetID inns; /* in univ_laog */
3127 WordSetID outs; /* in univ_laog */
3128 }
3129 LAOGLinks;
3130
3131/* lock order acquisition graph */
3132static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3133
3134/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3135 where that edge was created, so that we can show the user later if
3136 we need to. */
3137typedef
3138 struct {
3139 Addr src_ga; /* Lock guest addresses for */
3140 Addr dst_ga; /* src/dst of the edge */
3141 ExeContext* src_ec; /* And corresponding places where that */
3142 ExeContext* dst_ec; /* ordering was established */
3143 }
3144 LAOGLinkExposition;
3145
sewardj250ec2e2008-02-15 22:02:30 +00003146static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003147 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3148 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3149 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3150 if (llx1->src_ga < llx2->src_ga) return -1;
3151 if (llx1->src_ga > llx2->src_ga) return 1;
3152 if (llx1->dst_ga < llx2->dst_ga) return -1;
3153 if (llx1->dst_ga > llx2->dst_ga) return 1;
3154 return 0;
3155}
3156
3157static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3158/* end EXPOSITION ONLY */
3159
3160
sewardja65db102009-01-26 10:45:16 +00003161__attribute__((noinline))
3162static void laog__init ( void )
3163{
3164 tl_assert(!laog);
3165 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003166 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003167
3168 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3169 HG_(free), NULL/*unboxedcmp*/ );
3170
3171 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3172 cmp_LAOGLinkExposition );
3173 tl_assert(laog);
3174 tl_assert(laog_exposition);
3175}
3176
sewardjb4112022007-11-09 22:49:28 +00003177static void laog__show ( Char* who ) {
3178 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003179 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003180 Lock* me;
3181 LAOGLinks* links;
3182 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003183 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003184 me = NULL;
3185 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003186 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003187 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003188 tl_assert(me);
3189 tl_assert(links);
3190 VG_(printf)(" node %p:\n", me);
3191 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3192 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003193 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003194 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3195 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003196 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003197 me = NULL;
3198 links = NULL;
3199 }
sewardj896f6f92008-08-19 08:38:52 +00003200 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003201 VG_(printf)("}\n");
3202}
3203
3204__attribute__((noinline))
3205static void laog__add_edge ( Lock* src, Lock* dst ) {
3206 Word keyW;
3207 LAOGLinks* links;
3208 Bool presentF, presentR;
3209 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3210
3211 /* Take the opportunity to sanity check the graph. Record in
3212 presentF if there is already a src->dst mapping in this node's
3213 forwards links, and presentR if there is already a src->dst
3214 mapping in this node's backwards links. They should agree!
3215 Also, we need to know whether the edge was already present so as
3216 to decide whether or not to update the link details mapping. We
3217 can compute presentF and presentR essentially for free, so may
3218 as well do this always. */
3219 presentF = presentR = False;
3220
3221 /* Update the out edges for src */
3222 keyW = 0;
3223 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003224 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003225 WordSetID outs_new;
3226 tl_assert(links);
3227 tl_assert(keyW == (Word)src);
3228 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3229 presentF = outs_new == links->outs;
3230 links->outs = outs_new;
3231 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003232 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003233 links->inns = HG_(emptyWS)( univ_laog );
3234 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003235 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003236 }
3237 /* Update the in edges for dst */
3238 keyW = 0;
3239 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003240 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003241 WordSetID inns_new;
3242 tl_assert(links);
3243 tl_assert(keyW == (Word)dst);
3244 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3245 presentR = inns_new == links->inns;
3246 links->inns = inns_new;
3247 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003248 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003249 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3250 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003251 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003252 }
3253
3254 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3255
3256 if (!presentF && src->acquired_at && dst->acquired_at) {
3257 LAOGLinkExposition expo;
3258 /* If this edge is entering the graph, and we have acquired_at
3259 information for both src and dst, record those acquisition
3260 points. Hence, if there is later a violation of this
3261 ordering, we can show the user the two places in which the
3262 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003263 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003264 src->guestaddr, dst->guestaddr);
3265 expo.src_ga = src->guestaddr;
3266 expo.dst_ga = dst->guestaddr;
3267 expo.src_ec = NULL;
3268 expo.dst_ec = NULL;
3269 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003270 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003271 /* we already have it; do nothing */
3272 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003273 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3274 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003275 expo2->src_ga = src->guestaddr;
3276 expo2->dst_ga = dst->guestaddr;
3277 expo2->src_ec = src->acquired_at;
3278 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003279 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003280 }
3281 }
3282}
3283
3284__attribute__((noinline))
3285static void laog__del_edge ( Lock* src, Lock* dst ) {
3286 Word keyW;
3287 LAOGLinks* links;
3288 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3289 /* Update the out edges for src */
3290 keyW = 0;
3291 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003292 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003293 tl_assert(links);
3294 tl_assert(keyW == (Word)src);
3295 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3296 }
3297 /* Update the in edges for dst */
3298 keyW = 0;
3299 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003300 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003301 tl_assert(links);
3302 tl_assert(keyW == (Word)dst);
3303 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3304 }
3305}
3306
3307__attribute__((noinline))
3308static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3309 Word keyW;
3310 LAOGLinks* links;
3311 keyW = 0;
3312 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003313 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003314 tl_assert(links);
3315 tl_assert(keyW == (Word)lk);
3316 return links->outs;
3317 } else {
3318 return HG_(emptyWS)( univ_laog );
3319 }
3320}
3321
3322__attribute__((noinline))
3323static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3324 Word keyW;
3325 LAOGLinks* links;
3326 keyW = 0;
3327 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003328 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003329 tl_assert(links);
3330 tl_assert(keyW == (Word)lk);
3331 return links->inns;
3332 } else {
3333 return HG_(emptyWS)( univ_laog );
3334 }
3335}
3336
3337__attribute__((noinline))
3338static void laog__sanity_check ( Char* who ) {
3339 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003340 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003341 Lock* me;
3342 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003343 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003344 me = NULL;
3345 links = NULL;
3346 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003347 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003348 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003349 tl_assert(me);
3350 tl_assert(links);
3351 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3352 for (i = 0; i < ws_size; i++) {
3353 if ( ! HG_(elemWS)( univ_laog,
3354 laog__succs( (Lock*)ws_words[i] ),
3355 (Word)me ))
3356 goto bad;
3357 }
3358 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3359 for (i = 0; i < ws_size; i++) {
3360 if ( ! HG_(elemWS)( univ_laog,
3361 laog__preds( (Lock*)ws_words[i] ),
3362 (Word)me ))
3363 goto bad;
3364 }
3365 me = NULL;
3366 links = NULL;
3367 }
sewardj896f6f92008-08-19 08:38:52 +00003368 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003369 return;
3370
3371 bad:
3372 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3373 laog__show(who);
3374 tl_assert(0);
3375}
3376
3377/* If there is a path in laog from 'src' to any of the elements in
3378 'dst', return an arbitrarily chosen element of 'dst' reachable from
                3379      'src'.  If no path exists from 'src' to any element in 'dst', return
3380 NULL. */
3381__attribute__((noinline))
3382static
3383Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3384{
3385 Lock* ret;
3386 Word i, ssz;
3387 XArray* stack; /* of Lock* */
3388 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3389 Lock* here;
3390 WordSetID succs;
3391 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003392 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003393 //laog__sanity_check();
3394
3395 /* If the destination set is empty, we can never get there from
3396 'src' :-), so don't bother to try */
3397 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3398 return NULL;
3399
3400 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003401 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3402 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003403
3404 (void) VG_(addToXA)( stack, &src );
3405
3406 while (True) {
3407
3408 ssz = VG_(sizeXA)( stack );
3409
3410 if (ssz == 0) { ret = NULL; break; }
3411
3412 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3413 VG_(dropTailXA)( stack, 1 );
3414
3415 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3416
sewardj896f6f92008-08-19 08:38:52 +00003417 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003418 continue;
3419
sewardj896f6f92008-08-19 08:38:52 +00003420 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003421
3422 succs = laog__succs( here );
3423 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3424 for (i = 0; i < succs_size; i++)
3425 (void) VG_(addToXA)( stack, &succs_words[i] );
3426 }
3427
sewardj896f6f92008-08-19 08:38:52 +00003428 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003429 VG_(deleteXA)( stack );
3430 return ret;
3431}
3432
3433
3434/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3435 between 'lk' and the locks already held by 'thr' and issue a
3436 complaint if so. Also, update the ordering graph appropriately.
3437*/
3438__attribute__((noinline))
3439static void laog__pre_thread_acquires_lock (
3440 Thread* thr, /* NB: BEFORE lock is added */
3441 Lock* lk
3442 )
3443{
sewardj250ec2e2008-02-15 22:02:30 +00003444 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003445 Word ls_size, i;
3446 Lock* other;
3447
3448 /* It may be that 'thr' already holds 'lk' and is recursively
                3449      relocking it.  In this case we just ignore the call. */
3450 /* NB: univ_lsets really is correct here */
3451 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3452 return;
3453
sewardjb4112022007-11-09 22:49:28 +00003454 /* First, the check. Complain if there is any path in laog from lk
3455 to any of the locks already held by thr, since if any such path
3456 existed, it would mean that previously lk was acquired before
3457 (rather than after, as we are doing here) at least one of those
3458 locks.
3459 */
3460 other = laog__do_dfs_from_to(lk, thr->locksetA);
3461 if (other) {
3462 LAOGLinkExposition key, *found;
3463 /* So we managed to find a path lk --*--> other in the graph,
3464 which implies that 'lk' should have been acquired before
3465 'other' but is in fact being acquired afterwards. We present
3466 the lk/other arguments to record_error_LockOrder in the order
3467 in which they should have been acquired. */
3468 /* Go look in the laog_exposition mapping, to find the allocation
3469 points for this edge, so we can show the user. */
3470 key.src_ga = lk->guestaddr;
3471 key.dst_ga = other->guestaddr;
3472 key.src_ec = NULL;
3473 key.dst_ec = NULL;
3474 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003475 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003476 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003477 tl_assert(found != &key);
3478 tl_assert(found->src_ga == key.src_ga);
3479 tl_assert(found->dst_ga == key.dst_ga);
3480 tl_assert(found->src_ec);
3481 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003482 HG_(record_error_LockOrder)(
3483 thr, lk->guestaddr, other->guestaddr,
3484 found->src_ec, found->dst_ec );
sewardjb4112022007-11-09 22:49:28 +00003485 } else {
3486 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003487 HG_(record_error_LockOrder)(
3488 thr, lk->guestaddr, other->guestaddr,
3489 NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003490 }
3491 }
3492
3493 /* Second, add to laog the pairs
3494 (old, lk) | old <- locks already held by thr
3495 Since both old and lk are currently held by thr, their acquired_at
3496 fields must be non-NULL.
3497 */
3498 tl_assert(lk->acquired_at);
3499 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3500 for (i = 0; i < ls_size; i++) {
3501 Lock* old = (Lock*)ls_words[i];
3502 tl_assert(old->acquired_at);
3503 laog__add_edge( old, lk );
3504 }
3505
3506 /* Why "except_Locks" ? We're here because a lock is being
3507 acquired by a thread, and we're in an inconsistent state here.
3508 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3509 When called in this inconsistent state, locks__sanity_check duly
3510 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003511 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003512 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3513}
3514
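/* Illustrative only (hypothetical client code): the classic pattern
   the check above reports.  Thread A installs the edge L1 --> L2 in
   laog; thread B then acquires the two locks in the opposite order,
   and the DFS from L1 finds L2 in B's lockset: */
//
// /* Thread A: establishes L1-before-L2 in laog */
// pthread_mutex_lock(&L1);
// pthread_mutex_lock(&L2);
//
// /* Thread B: the opposite order */
// pthread_mutex_lock(&L2);
// pthread_mutex_lock(&L1);  /* L1 --*--> L2 exists: LockOrder error */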
3515
3516/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3517
3518__attribute__((noinline))
3519static void laog__handle_one_lock_deletion ( Lock* lk )
3520{
3521 WordSetID preds, succs;
3522 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003523 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003524
3525 preds = laog__preds( lk );
3526 succs = laog__succs( lk );
3527
3528 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3529 for (i = 0; i < preds_size; i++)
3530 laog__del_edge( (Lock*)preds_words[i], lk );
3531
3532 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3533 for (j = 0; j < succs_size; j++)
3534 laog__del_edge( lk, (Lock*)succs_words[j] );
3535
3536 for (i = 0; i < preds_size; i++) {
3537 for (j = 0; j < succs_size; j++) {
3538 if (preds_words[i] != succs_words[j]) {
3539 /* This can pass unlocked locks to laog__add_edge, since
3540 we're deleting stuff. So their acquired_at fields may
3541 be NULL. */
3542 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3543 }
3544 }
3545 }
3546}
3547
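/* Illustrative example: if laog currently holds A --> B --> C and
   lock B is destroyed, the loops above first delete the edges
   A --> B and B --> C, then add the bypass edge A --> C, so the
   previously established A-before-C ordering survives B's removal. */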
sewardj1cbc12f2008-11-10 16:16:46 +00003548//__attribute__((noinline))
3549//static void laog__handle_lock_deletions (
3550// WordSetID /* in univ_laog */ locksToDelete
3551// )
3552//{
3553// Word i, ws_size;
3554// UWord* ws_words;
3555//
sewardj1cbc12f2008-11-10 16:16:46 +00003556//
3557// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3558// for (i = 0; i < ws_size; i++)
3559// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3560//
3561// if (HG_(clo_sanity_flags) & SCE_LAOG)
3562// all__sanity_check("laog__handle_lock_deletions-post");
3563//}
sewardjb4112022007-11-09 22:49:28 +00003564
3565
3566/*--------------------------------------------------------------*/
3567/*--- Malloc/free replacements ---*/
3568/*--------------------------------------------------------------*/
3569
3570typedef
3571 struct {
3572 void* next; /* required by m_hashtable */
3573 Addr payload; /* ptr to actual block */
3574 SizeT szB; /* size requested */
3575 ExeContext* where; /* where it was allocated */
3576 Thread* thr; /* allocating thread */
3577 }
3578 MallocMeta;
3579
3580/* A hash table of MallocMetas, used to track malloc'd blocks
3581 (obviously). */
3582static VgHashTable hg_mallocmeta_table = NULL;
3583
3584
3585static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003586 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003587 tl_assert(md);
3588 return md;
3589}
3590static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003591 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003592}
3593
3594
3595/* Allocate a client block and set up the metadata for it. */
3596
3597static
3598void* handle_alloc ( ThreadId tid,
3599 SizeT szB, SizeT alignB, Bool is_zeroed )
3600{
3601 Addr p;
3602 MallocMeta* md;
3603
3604 tl_assert( ((SSizeT)szB) >= 0 );
3605 p = (Addr)VG_(cli_malloc)(alignB, szB);
3606 if (!p) {
3607 return NULL;
3608 }
3609 if (is_zeroed)
3610 VG_(memset)((void*)p, 0, szB);
3611
3612 /* Note that map_threads_lookup must succeed (cannot assert), since
3613 memory can only be allocated by currently alive threads, hence
3614 they must have an entry in map_threads. */
3615 md = new_MallocMeta();
3616 md->payload = p;
3617 md->szB = szB;
3618 md->where = VG_(record_ExeContext)( tid, 0 );
3619 md->thr = map_threads_lookup( tid );
3620
3621 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3622
3623 /* Tell the lower level memory wranglers. */
3624 evh__new_mem_heap( p, szB, is_zeroed );
3625
3626 return (void*)p;
3627}
3628
3629/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3630 Cast to a signed type to catch any unexpectedly negative args.
3631 We're assuming here that the size asked for is not greater than
3632 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3633 platforms). */
3634static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3635 if (((SSizeT)n) < 0) return NULL;
3636 return handle_alloc ( tid, n, VG_(clo_alignment),
3637 /*is_zeroed*/False );
3638}
3639static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3640 if (((SSizeT)n) < 0) return NULL;
3641 return handle_alloc ( tid, n, VG_(clo_alignment),
3642 /*is_zeroed*/False );
3643}
3644static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3645 if (((SSizeT)n) < 0) return NULL;
3646 return handle_alloc ( tid, n, VG_(clo_alignment),
3647 /*is_zeroed*/False );
3648}
3649static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3650 if (((SSizeT)n) < 0) return NULL;
3651 return handle_alloc ( tid, n, align,
3652 /*is_zeroed*/False );
3653}
3654static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3655 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3656 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3657 /*is_zeroed*/True );
3658}
3659
3660
3661/* Free a client block, including getting rid of the relevant
3662 metadata. */
3663
3664static void handle_free ( ThreadId tid, void* p )
3665{
3666 MallocMeta *md, *old_md;
3667 SizeT szB;
3668
3669 /* First see if we can find the metadata for 'p'. */
3670 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3671 if (!md)
3672 return; /* apparently freeing a bogus address. Oh well. */
3673
3674 tl_assert(md->payload == (Addr)p);
3675 szB = md->szB;
3676
3677 /* Nuke the metadata block */
3678 old_md = (MallocMeta*)
3679 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3680 tl_assert(old_md); /* it must be present - we just found it */
3681 tl_assert(old_md == md);
3682 tl_assert(old_md->payload == (Addr)p);
3683
3684 VG_(cli_free)((void*)old_md->payload);
3685 delete_MallocMeta(old_md);
3686
3687 /* Tell the lower level memory wranglers. */
3688 evh__die_mem_heap( (Addr)p, szB );
3689}
3690
3691static void hg_cli__free ( ThreadId tid, void* p ) {
3692 handle_free(tid, p);
3693}
3694static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3695 handle_free(tid, p);
3696}
3697static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3698 handle_free(tid, p);
3699}
3700
3701
3702static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3703{
3704 MallocMeta *md, *md_new, *md_tmp;
3705 SizeT i;
3706
3707 Addr payload = (Addr)payloadV;
3708
3709 if (((SSizeT)new_size) < 0) return NULL;
3710
3711 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3712 if (!md)
3713 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3714
3715 tl_assert(md->payload == payload);
3716
3717 if (md->szB == new_size) {
3718 /* size unchanged */
3719 md->where = VG_(record_ExeContext)(tid, 0);
3720 return payloadV;
3721 }
3722
3723 if (md->szB > new_size) {
3724 /* new size is smaller */
3725 md->szB = new_size;
3726 md->where = VG_(record_ExeContext)(tid, 0);
3727 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3728 return payloadV;
3729 }
3730
3731 /* else */ {
3732 /* new size is bigger */
3733 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3734
3735 /* First half kept and copied, second half new */
3736 // FIXME: shouldn't we use a copier which implements the
3737 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003738 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003739 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003740 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003741 /* FIXME: can anything funny happen here? specifically, if the
3742 old range contained a lock, then die_mem_heap will complain.
3743 Is that the correct behaviour? Not sure. */
3744 evh__die_mem_heap( payload, md->szB );
3745
3746 /* Copy from old to new */
3747 for (i = 0; i < md->szB; i++)
3748 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3749
                3750      /* Because the metadata hash table is indexed by payload address,
3751 we have to get rid of the old hash table entry and make a new
3752 one. We can't just modify the existing metadata in place,
3753 because then it would (almost certainly) be in the wrong hash
3754 chain. */
3755 md_new = new_MallocMeta();
3756 *md_new = *md;
3757
3758 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3759 tl_assert(md_tmp);
3760 tl_assert(md_tmp == md);
3761
3762 VG_(cli_free)((void*)md->payload);
3763 delete_MallocMeta(md);
3764
3765 /* Update fields */
3766 md_new->where = VG_(record_ExeContext)( tid, 0 );
3767 md_new->szB = new_size;
3768 md_new->payload = p_new;
3769 md_new->thr = map_threads_lookup( tid );
3770
3771 /* and add */
3772 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3773
3774 return (void*)p_new;
3775 }
3776}
3777
njn8b140de2009-02-17 04:31:18 +00003778static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3779{
3780 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3781
3782 // There may be slop, but pretend there isn't because only the asked-for
3783 // area will have been shadowed properly.
3784 return ( md ? md->szB : 0 );
3785}
3786
sewardjb4112022007-11-09 22:49:28 +00003787
sewardj095d61e2010-03-11 13:43:18 +00003788/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003789 Slow linear search. With a bit of hash table help if 'data_addr'
3790 is either the start of a block or up to 15 word-sized steps along
3791 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003792
3793static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3794{
sewardjc8028ad2010-05-05 09:34:42 +00003795 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3796 right at it. */
3797 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3798 return True;
3799 /* else normal interval rules apply */
3800 if (LIKELY(a < mm->payload)) return False;
3801 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3802 return True;
sewardj095d61e2010-03-11 13:43:18 +00003803}
3804
sewardjc8028ad2010-05-05 09:34:42 +00003805Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003806 /*OUT*/Addr* payload,
3807 /*OUT*/SizeT* szB,
3808 Addr data_addr )
3809{
3810 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003811 Int i;
3812 const Int n_fast_check_words = 16;
3813
3814 /* First, do a few fast searches on the basis that data_addr might
3815 be exactly the start of a block or up to 15 words inside. This
3816 can happen commonly via the creq
3817 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3818 for (i = 0; i < n_fast_check_words; i++) {
3819 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3820 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3821 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3822 goto found;
3823 }
3824
sewardj095d61e2010-03-11 13:43:18 +00003825 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003826 some such, it's hard to see how to do better. We have to check
3827 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003828 VG_(HT_ResetIter)(hg_mallocmeta_table);
3829 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003830 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3831 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003832 }
sewardjc8028ad2010-05-05 09:34:42 +00003833
3834 /* Not found. Bah. */
3835 return False;
3836 /*NOTREACHED*/
3837
3838 found:
3839 tl_assert(mm);
3840 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3841 if (where) *where = mm->where;
3842 if (payload) *payload = mm->payload;
3843 if (szB) *szB = mm->szB;
3844 return True;
sewardj095d61e2010-03-11 13:43:18 +00003845}
3846
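/* Illustrative example: on a 64-bit target, a query for
   data_addr = block+40 probes the hash table at block+40, block+32,
   ..., that is at data_addr - i*8 for i = 0 .. 15, and hits the
   block's start on the probe with i == 5, avoiding the full linear
   scan below. */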
3847
sewardjb4112022007-11-09 22:49:28 +00003848/*--------------------------------------------------------------*/
3849/*--- Instrumentation ---*/
3850/*--------------------------------------------------------------*/
3851
3852static void instrument_mem_access ( IRSB* bbOut,
3853 IRExpr* addr,
3854 Int szB,
3855 Bool isStore,
3856 Int hWordTy_szB )
3857{
3858 IRType tyAddr = Ity_INVALID;
3859 HChar* hName = NULL;
3860 void* hAddr = NULL;
3861 Int regparms = 0;
3862 IRExpr** argv = NULL;
3863 IRDirty* di = NULL;
3864
3865 tl_assert(isIRAtom(addr));
3866 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3867
3868 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3869 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3870
3871 /* So the effective address is in 'addr' now. */
3872 regparms = 1; // unless stated otherwise
3873 if (isStore) {
3874 switch (szB) {
3875 case 1:
sewardj23f12002009-07-24 08:45:08 +00003876 hName = "evh__mem_help_cwrite_1";
3877 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00003878 argv = mkIRExprVec_1( addr );
3879 break;
3880 case 2:
sewardj23f12002009-07-24 08:45:08 +00003881 hName = "evh__mem_help_cwrite_2";
3882 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00003883 argv = mkIRExprVec_1( addr );
3884 break;
3885 case 4:
sewardj23f12002009-07-24 08:45:08 +00003886 hName = "evh__mem_help_cwrite_4";
3887 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00003888 argv = mkIRExprVec_1( addr );
3889 break;
3890 case 8:
sewardj23f12002009-07-24 08:45:08 +00003891 hName = "evh__mem_help_cwrite_8";
3892 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00003893 argv = mkIRExprVec_1( addr );
3894 break;
3895 default:
3896 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3897 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003898 hName = "evh__mem_help_cwrite_N";
3899 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00003900 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3901 break;
3902 }
3903 } else {
3904 switch (szB) {
3905 case 1:
sewardj23f12002009-07-24 08:45:08 +00003906 hName = "evh__mem_help_cread_1";
3907 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00003908 argv = mkIRExprVec_1( addr );
3909 break;
3910 case 2:
sewardj23f12002009-07-24 08:45:08 +00003911 hName = "evh__mem_help_cread_2";
3912 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00003913 argv = mkIRExprVec_1( addr );
3914 break;
3915 case 4:
sewardj23f12002009-07-24 08:45:08 +00003916 hName = "evh__mem_help_cread_4";
3917 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00003918 argv = mkIRExprVec_1( addr );
3919 break;
3920 case 8:
sewardj23f12002009-07-24 08:45:08 +00003921 hName = "evh__mem_help_cread_8";
3922 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00003923 argv = mkIRExprVec_1( addr );
3924 break;
3925 default:
3926 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3927 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003928 hName = "evh__mem_help_cread_N";
3929 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00003930 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3931 break;
3932 }
3933 }
3934
3935 /* Add the helper. */
3936 tl_assert(hName);
3937 tl_assert(hAddr);
3938 tl_assert(argv);
3939 di = unsafeIRDirty_0_N( regparms,
3940 hName, VG_(fnptr_to_fnentry)( hAddr ),
3941 argv );
3942 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
3943}
3944
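/* Illustrative only (printed IR format approximate): given a guest
   4-byte load

      t1 = LDle:I32(t0)

   the case analysis above picks evh__mem_help_cread_4 and emits a
   preceding dirty helper call, roughly

      DIRTY 1:::: evh__mem_help_cread_4(t0)
      t1 = LDle:I32(t0)

   so the helper observes the effective address just before the guest
   load executes. */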
3945
sewardja0eee322009-07-31 08:46:35 +00003946/* Figure out if GA is a guest code address in the dynamic linker, and
3947 if so return True. Otherwise (and in case of any doubt) return
                3948    False.  (Fails safely, with False as the safe value.) */
3949static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
3950{
3951 DebugInfo* dinfo;
3952 const UChar* soname;
3953 if (0) return False;
3954
sewardje3f1e592009-07-31 09:41:29 +00003955 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00003956 if (!dinfo) return False;
3957
sewardje3f1e592009-07-31 09:41:29 +00003958 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00003959 tl_assert(soname);
3960 if (0) VG_(printf)("%s\n", soname);
3961
3962# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00003963 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00003964 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
3965 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
3966 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
3967 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
3968# elif defined(VGO_darwin)
3969 if (VG_STREQ(soname, VG_U_DYLD)) return True;
3970# else
3971# error "Unsupported OS"
3972# endif
3973 return False;
3974}
3975
sewardjb4112022007-11-09 22:49:28 +00003976static
3977IRSB* hg_instrument ( VgCallbackClosure* closure,
3978 IRSB* bbIn,
3979 VexGuestLayout* layout,
3980 VexGuestExtents* vge,
3981 IRType gWordTy, IRType hWordTy )
3982{
sewardj1c0ce7a2009-07-01 08:10:49 +00003983 Int i;
3984 IRSB* bbOut;
3985 Addr64 cia; /* address of current insn */
3986 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00003987 Bool inLDSO = False;
3988 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00003989
3990 if (gWordTy != hWordTy) {
3991 /* We don't currently support this case. */
3992 VG_(tool_panic)("host/guest word size mismatch");
3993 }
3994
sewardja0eee322009-07-31 08:46:35 +00003995 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
3996 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
3997 }
3998
sewardjb4112022007-11-09 22:49:28 +00003999 /* Set up BB */
4000 bbOut = emptyIRSB();
4001 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4002 bbOut->next = deepCopyIRExpr(bbIn->next);
4003 bbOut->jumpkind = bbIn->jumpkind;
4004
4005 // Copy verbatim any IR preamble preceding the first IMark
4006 i = 0;
4007 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4008 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4009 i++;
4010 }
4011
sewardj1c0ce7a2009-07-01 08:10:49 +00004012 // Get the first statement, and initial cia from it
4013 tl_assert(bbIn->stmts_used > 0);
4014 tl_assert(i < bbIn->stmts_used);
4015 st = bbIn->stmts[i];
4016 tl_assert(Ist_IMark == st->tag);
4017 cia = st->Ist.IMark.addr;
4018 st = NULL;
4019
sewardjb4112022007-11-09 22:49:28 +00004020 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004021 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004022 tl_assert(st);
4023 tl_assert(isFlatIRStmt(st));
4024 switch (st->tag) {
4025 case Ist_NoOp:
4026 case Ist_AbiHint:
4027 case Ist_Put:
4028 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004029 case Ist_Exit:
4030 /* None of these can contain any memory references. */
4031 break;
4032
sewardj1c0ce7a2009-07-01 08:10:49 +00004033 case Ist_IMark:
4034 /* no mem refs, but note the insn address. */
4035 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004036 /* Don't instrument the dynamic linker. It generates a
4037 lot of races which we just expensively suppress, so
4038 it's pointless.
4039
4040 Avoid flooding is_in_dynamic_linker_shared_object with
4041 requests by only checking at transitions between 4K
4042 pages. */
4043 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4044 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4045 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4046 inLDSO = is_in_dynamic_linker_shared_object(cia);
4047 } else {
4048 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4049 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004050 break;
4051
sewardjb4112022007-11-09 22:49:28 +00004052 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004053 switch (st->Ist.MBE.event) {
4054 case Imbe_Fence:
4055 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004056 default:
4057 goto unhandled;
4058 }
sewardjb4112022007-11-09 22:49:28 +00004059 break;
4060
         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
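            /* For exposition: a double CAS on two 32-bit halves
               (oldHi != IRTemp_INVALID) is reported as a single
               2 * 4 = 8 byte read at cas->addr, whereas a single CAS
               on one 32-bit word is reported as a 4 byte read. */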
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /* ignore */
            }
            break;
         }

         case Ist_Store:
            /* A plain store; instrument it.  (Store-conditionals
               were already dealt with -- by being ignored -- in the
               Ist_LLSC case above.) */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* A plain load.  (Load-linkeds were handled in the
               Ist_LLSC case above, where they are treated the same
               as normal loads.) */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
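
/* For exposition only, a sketch of the transformation above: a block

      ------ IMark(0x4000A00, 3) ------
      t1 = LDle:I32(t0)

   comes out with a helper call emitted ahead of the access, roughly

      ------ IMark(0x4000A00, 3) ------
      DIRTY <guard> ::: <read-check helper from instrument_mem_access>(t0)
      t1 = LDle:I32(t0)

   so the race detector observes each load/store just before the
   guest performs it.  (The exact helper names and argument lists are
   whatever instrument_mem_access chooses; they are elided here.) */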

/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}

static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

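      /* For exposition: client code reaches the request above via the
         VALGRIND_HG_CLEAN_MEMORY macro from helgrind.h, e.g.

            #include "helgrind.h"
            reuse_buffer(buf);                      // hypothetical helper
            VALGRIND_HG_CLEAN_MEMORY(buf, buflen);  // forget its history

         which makes Helgrind treat the range as freshly allocated. */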
      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB    = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

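      /* For exposition: these two requests are what the
         VALGRIND_HG_DISABLE_CHECKING / VALGRIND_HG_ENABLE_CHECKING
         macros in helgrind.h boil down to, e.g.

            VALGRIND_HG_DISABLE_CHECKING(&stats_counter,
                                         sizeof stats_counter);

         to silence reports on a range the program knows to be
         benignly racy.  (stats_counter is a made-up variable.) */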
      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

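      /* For exposition: the sending side lives in the client-side
         interceptor library (hg_intercepts.c); assuming its
         DO_CREQ_v_W request-issuing macro, thread_wrapper does roughly

            DO_CREQ_v_W(_VG_USERREQ__HG_SET_MY_PTHREAD_T,
                        pthread_t, pthread_self());

         once the new thread is running on its own stack. */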
      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

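      /* For exposition: the PRE/POST pairs above bracket the real
         call inside the pthread_mutex_lock interceptor, roughly

            ..._MUTEX_LOCK_PRE(mx, isTryLock);  // may flag order errors
            ret = call_real_lock(mx);           // illustrative name
            if (ret == 0)
               ..._MUTEX_LOCK_POST(mx);         // records ownership

         so Helgrind sees the attempt before, and the outcome after,
         the library performs the actual locking. */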
      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

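      /* For exposition: these arbitrary-tag requests underlie the
         ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER macros in
         helgrind.h.  A hand-rolled message queue might use them as

            // producer                      // consumer
            fill(&msg);                      msg = dequeue(q);
            ANNOTATE_HAPPENS_BEFORE(q);      ANNOTATE_HAPPENS_AFTER(q);
            enqueue(q, msg);                 use(msg);

         creating a synchronisation edge Helgrind cannot otherwise
         see.  (fill/enqueue/dequeue/use are made-up names.) */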
      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* Parse a string of six '0'/'1' characters into a 6-bit flags
      word, MSB first: e.g. "010010" --> binary 010010. */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

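/* For exposition: a typical invocation exercising the options parsed
   above might be

      valgrind --tool=helgrind --history-level=approx \
               --conflict-cache-size=5000000 --track-lockorders=no ./app

   where "./app" stands for the client program. */
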
static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog, "univ_laog" );
         }
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("   locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("  univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("           LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)("LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)("      locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/