
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
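
/* For example, the map_locks lookups below do
      VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   with 'lk' a Lock*; reading a Lock* object through a Word* lvalue is
   exactly the sort of access the strict-aliasing rules disallow. */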

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin doubly-linked list of Locks.  We need a doubly-linked list
   to handle del_LockN efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
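
/* A sketch of how these universes get used (the real uses are in the
   lock event handlers further down): a WordSetID is just a small
   integer naming a set within its universe, so adding a lock to a
   thread's lockset and testing membership look like
      thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
      if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk )) { ... }
   which is why locksets are cheap to store and compare. */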


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert it into the admin_locks doubly-linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to doubly-linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes it from the admin_locks doubly-linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from doubly-linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}
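
/* A plausible teardown sequence for a client lock at guest address
   'ga' (a sketch only; the destroy-event handlers further down are
   authoritative) would be:
      Lock* lk = map_locks_maybe_lookup( ga );
      if (lk) {
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         map_locks_delete( ga );
         del_LockN( lk );
      }
   i.e. unhook the lock from all locksets and from map_locks before
   freeing it. */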

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy = NULL;
      lk->heldW = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS (1<<1)
#define PP_LOCKS   (1<<2)
#define PP_ALL     (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n", t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int i, n;
   Thread* t;
   for (n = 0, t = admin_threads; t; n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads; t; i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
      space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}



/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr ga2 = 0;
   Lock* lk = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   Word ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}
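
/* A sketch of the intended realloc() usage (the actual handler lives
   with the malloc/free machinery later in this file): when a client
   realloc moves a payload, the shadow state of the surviving prefix
   is carried across with something like
      shadow_mem_scopy_range( thr, payload_old, payload_new,
                              oldsz < newsz ? oldsz : newsz );
   where 'payload_old', 'payload_new', 'oldsz' and 'newsz' are
   illustrative names, not identifiers from this file. */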

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word n;
   Bool was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
1266 which in turn implies it must be r-held, since a lock
1267 can't be w-held by more than one thread. */
1268 /* The lock is now R-held by somebody else: */
1269 tl_assert(lock->heldW == False);
1270 } else {
1271 /* Normal case. It's either not a rwlock, or it's a rwlock
1272 that we used to hold in w-mode (which is pretty much the
1273 same thing as a non-rwlock.) Since this transaction is
1274 atomic (V does not allow multiple threads to run
1275 simultaneously), it must mean the lock is now not held by
1276 anybody. Hence assert for it. */
1277 /* The lock is now not held by anybody: */
1278 tl_assert(!lock->heldBy);
1279 tl_assert(lock->heldW == False);
1280 }
sewardjf98e1c02008-10-25 16:22:41 +00001281 //if (lock->heldBy) {
1282 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1283 //}
sewardjb4112022007-11-09 22:49:28 +00001284 /* update this thread's lockset accordingly. */
1285 thr->locksetA
1286 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1287 thr->locksetW
1288 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001289 /* push our VC into the lock */
1290 tl_assert(thr->hbthr);
1291 tl_assert(lock->hbso);
1292 /* If the lock was previously W-held, then we want to do a
1293 strong send, and if previously R-held, then a weak send. */
1294 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001295 }
1296 /* fall through */
1297
1298 error:
sewardjf98e1c02008-10-25 16:22:41 +00001299 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001300}


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // whereas it doesn't ignore this
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
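/* A minimal client-side sketch (hypothetical code, kept out of the
   build via #if 0) of the situation the lockset check above complains
   about: a thread that terminates while still holding a mutex. */
#if 0
#include <pthread.h>

static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;

static void* still_holding ( void* arg )
{
   pthread_mutex_lock(&mx);
   return NULL;   /* exits without unlocking: "Exiting thread still
                     holds 1 lock" is reported at this exit */
}
#endif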

/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
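/* For context: this handler only takes effect if it is registered
   with the core's fork-notification machinery.  The sketch below
   (hypothetical code, kept out of the build via #if 0) shows the
   intended wiring, assuming the three-callback form of VG_(atfork)
   (pre, parent, child) declared in pub_tool_libcproc.h; the actual
   registration is done in this file's initialisation code. */
#if 0
static void hg_setup_atfork ( void )
{
   VG_(atfork)( NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/ );
}
#endif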


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Strong send: the stayer must pick up the quitter's entire
      vector clock.  (Arguably the last arg of _so_send should be
      False here, since the sending thread doesn't actually exist any
      more and there is no point in _so_send trying to take stack
      snapshots of it; but a strong send is what is done.) */
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit
      before we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
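/* The SO send/recv pair above models the client-visible guarantee
   sketched below (hypothetical code, kept out of the build via
   #if 0): after pthread_join returns, everything the quitter did
   happens-before the stayer's subsequent accesses, so the read of
   'data' is not reported as a race. */
#if 0
#include <pthread.h>

static int data = 0;

static void* worker ( void* arg )
{
   data = 42;                 /* written by the quitter-to-be */
   return NULL;
}

static int join_example ( void )
{
   pthread_t th;
   pthread_create(&th, NULL, worker, NULL);
   pthread_join(th, NULL);    /* evh__HG_PTHREAD_JOIN_POST fires here */
   return data;               /* ordered after the write, by the join */
}
#endif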

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_read_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   len = VG_(strlen)( (Char*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid - both branches currently do the
   // same thing; ideally the uninitialised case would be handled
   // differently.
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free.  This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}
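/* The kind of bug the free-is-write treatment above is aimed at, as a
   minimal client-side sketch (hypothetical code, kept out of the
   build via #if 0): one thread reads a heap block while another frees
   it, with no synchronisation in between.  Treating the free as a
   write makes the read/free pair show up as an ordinary race. */
#if 0
#include <pthread.h>
#include <stdlib.h>

static int* blk;

static void* rd_fn ( void* arg ) { return (void*)(long)blk[0]; }
static void* fr_fn ( void* arg ) { free(blk); return NULL; }

static void provoke ( void )
{
   pthread_t a, b;
   blk = malloc(16 * sizeof(int));
   pthread_create(&a, NULL, rd_fn, NULL);   /* racy read ... */
   pthread_create(&b, NULL, fr_fn, NULL);   /* ... vs unsynchronised free */
   pthread_join(a, NULL);
   pthread_join(b, NULL);
}
#endif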

/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}


/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument" );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HChar* errstr = "Attempt to re-lock a "
                      "non-recursive lock I already hold";
      HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
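/* The relock error above, as a minimal client-side sketch
   (hypothetical code, kept out of the build via #if 0).  A default
   (non-recursive) mutex is locked twice by the same thread; the
   second lock is reported just before the thread self-deadlocks. */
#if 0
#include <pthread.h>

static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;

static void relock ( void )
{
   pthread_mutex_lock(&mx);
   pthread_mutex_lock(&mx);   /* "Attempt to re-lock a non-recursive
                                 lock I already hold" fires here */
}
#endif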

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                      False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}

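/* Why "INIT_OR_UNLOCK"?  In glibc, pthread_spin_init and
   pthread_spin_unlock both just release the lock word, and on some
   targets reportedly share the same entry point, which is presumably
   why a single intercept has to cover both.  The client-side sketch
   below (hypothetical code, kept out of the build via #if 0) shows
   the call sites that end up in the handlers above. */
#if 0
#include <pthread.h>

static pthread_spinlock_t sl;

static void spin_example ( void )
{
   pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE); /* INIT_OR_UNLOCK */
   pthread_spin_lock(&sl);                          /* SPIN_LOCK_PRE/POST */
   pthread_spin_unlock(&sl);                        /* INIT_OR_UNLOCK again */
   pthread_spin_destroy(&sl);                       /* SPIN_DESTROY_PRE */
}
#endif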
/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence we
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;
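/* The (CV,MX) binding rules above correspond to this canonical
   client-side usage (hypothetical code, kept out of the build via
   #if 0): all waiters use the same mutex with the CV, and waits
   happen inside a predicate loop to tolerate spurious wakeups. */
#if 0
#include <pthread.h>

static pthread_mutex_t mx   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cv   = PTHREAD_COND_INITIALIZER;
static int             done = 0;

static void* waiter ( void* arg )
{
   pthread_mutex_lock(&mx);
   while (!done)                    /* guards against spurious wakeups */
      pthread_cond_wait(&cv, &mx);  /* WAIT_PRE binds (cv,mx);
                                       WAIT_POST does the 'recv' */
   pthread_mutex_unlock(&mx);
   return NULL;
}

static void signaller ( void )
{
   pthread_mutex_lock(&mx);
   done = 1;
   pthread_cond_signal(&cv);        /* SIGNAL_PRE does the 'send' */
   pthread_mutex_unlock(&mx);
}
#endif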


/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
      tl_assert(map_cond_to_CVInfo != NULL);
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so     = so;
      cvi->mx_ga  = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static void map_cond_to_CVInfo_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_CVInfo_INIT();
   if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be, "if consistent
   // scheduling is desired."
   //
   // For the moment, disable these checks.
   //lk = map_locks_maybe_lookup(cvi->mx_ga);
   //if (lk == NULL || cvi->mx_ga == 0) {
   //   HG_(record_error_Misc)( thr,
   //      "pthread_cond_{signal,broadcast}: "
   //      "no or invalid mutex associated with cond");
   //}
   ///* note: lk could be NULL.  Be careful. */
   //if (lk) {
   //   if (lk->kind == LK_rdwr) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
   //   }
   //   if (lk->heldBy == NULL) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by any thread");
   //   }
   //   if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by calling thread");
   //   }
   //}

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}

static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded"
                                   " without prior pthread_cond_signal");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}

static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_CVInfo_delete( cond );
}


/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument" );
   }
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so rwlock is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so rwlock is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs.  When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it.  This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do.  But we do need to keep track of how many posts have not yet
   been consumed for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S.  T3 cannot complete its waits without both T1 and T2
   posting.  The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about.  We should. */
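/* The T1/T2/T3 scenario from the comment above, as a client-side
   sketch (hypothetical code, kept out of the build via #if 0).  T3's
   two sem_wait calls each pop one SO, so T3 acquires a dependency on
   both posters and may safely read what they wrote. */
#if 0
#include <pthread.h>
#include <semaphore.h>

static sem_t s;
static int   x1, x2;

static void* t1 ( void* arg ) { x1 = 1; sem_post(&s); return NULL; }
static void* t2 ( void* arg ) { x2 = 2; sem_post(&s); return NULL; }

static int t3_example ( void )
{
   pthread_t a, b;
   sem_init(&s, 0, 0);         /* SEM_INIT_POST pushes zero SOs */
   pthread_create(&a, NULL, t1, NULL);
   pthread_create(&b, NULL, t2, NULL);
   sem_wait(&s);               /* strong recv from one poster's SO */
   sem_wait(&s);               /* ... and from the other's */
   pthread_join(a, NULL);
   pthread_join(b, NULL);
   return x1 + x2;             /* ordered after both writes */
}
#endif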
2457
sewardjf98e1c02008-10-25 16:22:41 +00002458/* sem_t* -> XArray* SO* */
2459static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002460
sewardjf98e1c02008-10-25 16:22:41 +00002461static void map_sem_to_SO_stack_INIT ( void ) {
2462 if (map_sem_to_SO_stack == NULL) {
2463 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2464 HG_(free), NULL );
2465 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002466 }
2467}
2468
sewardjf98e1c02008-10-25 16:22:41 +00002469static void push_SO_for_sem ( void* sem, SO* so ) {
2470 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002471 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002472 tl_assert(so);
2473 map_sem_to_SO_stack_INIT();
2474 if (VG_(lookupFM)( map_sem_to_SO_stack,
2475 &keyW, (UWord*)&xa, (UWord)sem )) {
2476 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002477 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002478 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002479 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002480 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2481 VG_(addToXA)( xa, &so );
2482 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002483 }
2484}
2485
sewardjf98e1c02008-10-25 16:22:41 +00002486static SO* mb_pop_SO_for_sem ( void* sem ) {
2487 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002488 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002489 SO* so;
2490 map_sem_to_SO_stack_INIT();
2491 if (VG_(lookupFM)( map_sem_to_SO_stack,
2492 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002493 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002494 Word sz;
2495 tl_assert(keyW == (UWord)sem);
2496 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002497 tl_assert(sz >= 0);
2498 if (sz == 0)
2499 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002500 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2501 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002502 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002503 return so;
sewardjb4112022007-11-09 22:49:28 +00002504 } else {
2505 /* hmm, that's odd. No stack for this semaphore. */
2506 return NULL;
2507 }
2508}
2509
sewardj11e352f2007-11-30 11:11:02 +00002510static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002511{
sewardjf98e1c02008-10-25 16:22:41 +00002512 UWord keyW, valW;
2513 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002514
sewardjb4112022007-11-09 22:49:28 +00002515 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002516 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002517 (Int)tid, (void*)sem );
2518
sewardjf98e1c02008-10-25 16:22:41 +00002519 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002520
sewardjf98e1c02008-10-25 16:22:41 +00002521 /* Empty out the semaphore's SO stack. This way of doing it is
2522 stupid, but at least it's easy. */
2523 while (1) {
2524 so = mb_pop_SO_for_sem( sem );
2525 if (!so) break;
2526 libhb_so_dealloc(so);
2527 }
2528
2529 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2530 XArray* xa = (XArray*)valW;
2531 tl_assert(keyW == (UWord)sem);
2532 tl_assert(xa);
2533 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2534 VG_(deleteXA)(xa);
2535 }
sewardjb4112022007-11-09 22:49:28 +00002536}
2537
sewardj11e352f2007-11-30 11:11:02 +00002538static
2539void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2540{
sewardjf98e1c02008-10-25 16:22:41 +00002541 SO* so;
2542 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002543
2544 if (SHOW_EVENTS >= 1)
2545 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2546 (Int)tid, (void*)sem, value );
2547
sewardjf98e1c02008-10-25 16:22:41 +00002548 thr = map_threads_maybe_lookup( tid );
2549 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002550
sewardjf98e1c02008-10-25 16:22:41 +00002551 /* Empty out the semaphore's SO stack. This way of doing it is
2552 stupid, but at least it's easy. */
2553 while (1) {
2554 so = mb_pop_SO_for_sem( sem );
2555 if (!so) break;
2556 libhb_so_dealloc(so);
2557 }
sewardj11e352f2007-11-30 11:11:02 +00002558
sewardjf98e1c02008-10-25 16:22:41 +00002559 /* If we don't do this check, the following while loop runs us out
2560 of memory for stupid initial values of 'value'. */
2561 if (value > 10000) {
2562 HG_(record_error_Misc)(
2563 thr, "sem_init: initial value exceeds 10000; using 10000" );
2564 value = 10000;
2565 }
sewardj11e352f2007-11-30 11:11:02 +00002566
sewardjf98e1c02008-10-25 16:22:41 +00002567 /* Now create 'valid' new SOs for the thread, do a strong send to
2568 each of them, and push them all on the stack. */
2569 for (; value > 0; value--) {
2570 Thr* hbthr = thr->hbthr;
2571 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002572
sewardjf98e1c02008-10-25 16:22:41 +00002573 so = libhb_so_alloc();
2574 libhb_so_send( hbthr, so, True/*strong send*/ );
2575 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002576 }
2577}
2578
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used by
      other thread(s) which successfully exit from a sem_wait on the
      same sem; by doing a strong recv from SOs popped off the stack,
      they acquire dependencies on the posting thread's
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}

static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}


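/* Illustrative sketch (not part of the tool): the client-side
   pattern which the three handlers above model.  One possible
   schedule is shown; 'shared' is ordinary unprotected data.  Thread
   A's sem_post enters evh__HG_POSIX_SEM_POST_PRE, which pushes a
   strongly-sent SO for 'sem'; thread B's successful sem_wait enters
   evh__HG_POSIX_SEM_WAIT_POST, which pops that SO and does a strong
   recv.  Hence the write to 'shared' happens-before the read, and
   no race is reported.

      #include <semaphore.h>

      static sem_t sem;    // sem_init(&sem, 0, 0) done elsewhere
      static int   shared;

      static void* poster ( void* v ) {
         shared = 42;      // write ...
         sem_post(&sem);   // ... then post: SO pushed here
         return 0;
      }
      static void* waiter ( void* v ) {
         sem_wait(&sem);   // SO popped, strong recv here
         return (void*)(long)shared;  // read ordered after the write
      }
*/
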
/* -------------------------------------------------------- */
/* -------------- events to do with barriers -------------- */
/* -------------------------------------------------------- */

typedef
   struct {
      Bool    initted;   /* has it yet been initted by guest? */
      Bool    resizable; /* is resizing allowed? */
      UWord   size;      /* declared size */
      XArray* waiting;   /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;

static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   tl_assert(bar);
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}

static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}

/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;

static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
      tl_assert(map_barrier_to_Bar != NULL);
   }
}

static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
   UWord key, val;
   map_barrier_to_Bar_INIT();
   if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
      tl_assert(key == (UWord)barrier);
      return (Bar*)val;
   } else {
      Bar* bar = new_Bar();
      VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
      return bar;
   }
}

static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      delete_Bar(bar);
   }
}


static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}


static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}

2814
sewardj406bac82010-03-03 23:03:40 +00002815/* All the threads have arrived. Now do the Interesting Bit. Get a
2816 new synchronisation object and do a weak send to it from all the
2817 participating threads. This makes its vector clocks be the join of
2818 all the individual threads' vector clocks. Then do a strong
2819 receive from it back to all threads, so that their VCs are a copy
2820 of it (hence are all equal to the join of their original VCs.) */
2821static void do_barrier_cross_sync_and_empty ( Bar* bar )
2822{
2823 /* XXX check bar->waiting has no duplicates */
2824 UWord i;
2825 SO* so = libhb_so_alloc();
2826
2827 tl_assert(bar->waiting);
2828 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2829
2830 /* compute the join ... */
2831 for (i = 0; i < bar->size; i++) {
2832 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2833 Thr* hbthr = t->hbthr;
2834 libhb_so_send( hbthr, so, False/*weak send*/ );
2835 }
2836 /* ... and distribute to all threads */
2837 for (i = 0; i < bar->size; i++) {
2838 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2839 Thr* hbthr = t->hbthr;
2840 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2841 }
2842
2843 /* finally, we must empty out the waiting vector */
2844 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2845
2846 /* and we don't need this any more. Perhaps a stack-allocated
2847 SO would be better? */
2848 libhb_so_dealloc(so);
2849}
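
/* Illustrative sketch (not part of the tool): the effect of the
   weak-send / strong-recv sequence above in plain vector-clock
   terms, ignoring per-thread clock ticks.  'VC', 'MAX_THR' and
   'vc_join' are invented names; libhb's real VTS representation
   differs.

      #define MAX_THR 64
      typedef struct { unsigned long t[MAX_THR]; } VC;

      static void vc_join ( VC* dst, const VC* src ) {
         // pointwise maximum: dst := dst JOIN src
         unsigned i;
         for (i = 0; i < MAX_THR; i++)
            if (src->t[i] > dst->t[i]) dst->t[i] = src->t[i];
      }

      // weak sends accumulate:    vc_join(&so_vc, &thr_vc[i]) for all i
      // strong recvs distribute:  vc_join(&thr_vc[i], &so_vc), which
      //                           leaves thr_vc[i] equal to so_vc

   After the two loops every participant's VC equals the join of all
   participants' VCs on entry, which is exactly the barrier
   semantics. */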


static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      our data structures here so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}


static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity.  There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity.  If we decrease it to be equal or
         below the number of waiting threads, they will now move past
         the barrier, so need to mess with dep edges in the same way
         as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present >= 0 && present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      }
      bar->size = newcount;
   }
}


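/* Worked example of the resize logic above (illustrative numbers):
   suppose bar->size is 4 and 3 threads are already waiting.
   Resizing to newcount=5 just sets bar->size = 5 and the waiters
   keep waiting.  Resizing to newcount=3 makes newcount <= present
   (3), so bar->size is temporarily set to present to satisfy the
   size check in do_barrier_cross_sync_and_empty, the 3 waiters are
   cross-synced and released, and bar->size finishes as 3. */
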
/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */



/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
      tl_assert(map_usertag_to_SO != NULL);
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

static void map_usertag_to_SO_delete ( UWord usertag ) {
   UWord keyW, valW;
   map_usertag_to_SO_INIT();
   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
      SO* so = (SO*)valW;
      tl_assert(keyW == usertag);
      tl_assert(so);
      libhb_so_dealloc(so);
   }
}

static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'weak send' on the SO.  This joins the vector
      clocks from this thread into any vector clocks already present
      in the SO.  The resulting SO vector clocks are later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on all the events that have
      previously signalled on this SO. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
}

static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}

static
void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
{
   /* TID declares that any happens-before edges notionally stored in
      USERTAG can be deleted.  If (as would normally be the case) a
      SO is associated with USERTAG, then the association is removed
      and all resources associated with SO are freed.  Importantly,
      that frees up any VTSs stored in SO. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   map_usertag_to_SO_delete( usertag );
}


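/* Illustrative sketch (not part of the tool): how a client typically
   reaches the three handlers above, using the ANNOTATE_HAPPENS_BEFORE
   and ANNOTATE_HAPPENS_AFTER macros from helgrind.h.  The lock-free
   queue and its functions are hypothetical; any stable address can
   serve as the tag.

      #include "helgrind.h"

      void producer ( Queue* q, Item* it ) {
         it->ready = 1;               // writes to be published
         ANNOTATE_HAPPENS_BEFORE(q);  // -> evh__HG_USERSO_SEND_PRE(q)
         enqueue(q, it);              // hypothetical lock-free publish
      }
      Item* consumer ( Queue* q ) {
         Item* it = dequeue(q);       // hypothetical lock-free consume
         ANNOTATE_HAPPENS_AFTER(q);   // -> evh__HG_USERSO_RECV_POST(q)
         return it;                   // safe to read it->ready now
      }

   When the queue dies, ANNOTATE_HAPPENS_BEFORE_FORGET_ALL(q) routes
   to evh__HG_USERSO_FORGET_ALL, freeing the SO and its VTSs. */
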
/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
   laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/

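/* Minimal sketch of the caching proposed above (illustrative only,
   not wired in; all names here are invented).  A single-entry cache
   covers the common case described:

      typedef struct {
         Lock*     lk;      // lock being acquired
         WordSetID held;    // thr->locksetA at query time
         Bool      answer;  // cached result of the reachability query
         Bool      valid;
      } LaogQueryCache;

      static LaogQueryCache laog_qcache; // .valid = False initially

   laog__add_edge and laog__del_edge would set laog_qcache.valid =
   False whenever they change the graph, and
   laog__pre_thread_acquires_lock would consult the cache before
   calling laog__do_dfs_from_to, refilling it on a miss. */
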
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */


__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}

static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}

__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}

__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}

/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      i, ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   Word      succs_size;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}


/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word   ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking it.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}


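/* Illustrative sketch (not part of the tool): the classic client bug
   the check above reports.

      #include <pthread.h>

      static pthread_mutex_t L1 = PTHREAD_MUTEX_INITIALIZER;
      static pthread_mutex_t L2 = PTHREAD_MUTEX_INITIALIZER;

      static void* t1 ( void* v ) {
         pthread_mutex_lock(&L1);   // edge L1 --> L2 is added to laog
         pthread_mutex_lock(&L2);
         pthread_mutex_unlock(&L2);
         pthread_mutex_unlock(&L1);
         return 0;
      }
      static void* t2 ( void* v ) {
         pthread_mutex_lock(&L2);   // acquiring L1 below runs the DFS
         pthread_mutex_lock(&L1);   // from L1 to {L2}; the L1 --> L2
         pthread_mutex_unlock(&L1); // path is found, so a LockOrder
         pthread_mutex_unlock(&L2); // error is reported
         return 0;
      }

   This is reported even if the two threads never actually deadlock
   in the observed run: the inconsistent order alone is the error. */
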
/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}

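/* Worked example of the deletion logic above: suppose laog contains
   A --> lk --> B and lk is being destroyed.  The two del_edge loops
   remove A --> lk and lk --> B; the nested loop then adds A --> B
   directly.  So the ordering fact "A before B", previously implied
   transitively through lk, survives even though lk has left the
   graph. */
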
//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}


/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;


static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}


/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}


/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT       szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}


static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT       i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller; retire the tail while md->szB still
         holds the old size, then update the metadata */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}

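/* A note on the ordering in the grow path above: the shadow state of
   the old range must be copied (evh__copy_mem) before
   evh__die_mem_heap retires that range, since retiring first would
   destroy the very state the copy is meant to preserve.  For
   realloc(p, 100 -> 200) the sequence is: copy shadow for [p, p+100)
   to [p_new, p_new+100), mark [p_new+100, p_new+200) as fresh heap,
   then retire [p, p+100).  The client data bytes are copied
   separately afterwards; that is safe because die_mem_heap only
   affects shadow state, not the underlying data. */
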
static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}


/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search.  With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}


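/* Worked example of the fast path above (illustrative only): on a
   64-bit host, suppose a block was allocated at payload 0x1000 with
   szB 0x40, and data_addr is 0x1010.  The loop probes the hash table
   at 0x1010, 0x1008 and then 0x1000; the third probe (i == 2) hits
   the block's payload key, addr_is_in_MM_Chunk confirms 0x1010 lies
   inside [0x1000, 0x1040), and the slow full-table scan is
   avoided. */
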
/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

static void instrument_mem_access ( IRSB*   bbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}


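/* Illustrative before/after of the transformation performed by
   instrument_mem_access (VEX IR shown informally; the temp numbering
   is invented).  For a 4-byte client store

      STle(t3) = t5

   the instrumented block contains, immediately before the store,

      DIRTY 1:I1 ::: evh__mem_help_cwrite_4{...}(t3)

   i.e. a dirty helper call carrying just the effective address, with
   the access size encoded in the choice of helper.  Accesses wider
   than 8 bytes instead get the _N variant, which takes the size as a
   second argument. */
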
/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (This fails safe: False is the safe value.) */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const UChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1))            return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1))              return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}

sewardjb4112022007-11-09 22:49:28 +00004001static
4002IRSB* hg_instrument ( VgCallbackClosure* closure,
4003 IRSB* bbIn,
4004 VexGuestLayout* layout,
4005 VexGuestExtents* vge,
4006 IRType gWordTy, IRType hWordTy )
4007{
sewardj1c0ce7a2009-07-01 08:10:49 +00004008 Int i;
4009 IRSB* bbOut;
4010 Addr64 cia; /* address of current insn */
4011 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004012 Bool inLDSO = False;
4013 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004014
4015 if (gWordTy != hWordTy) {
4016 /* We don't currently support this case. */
4017 VG_(tool_panic)("host/guest word size mismatch");
4018 }
4019
sewardja0eee322009-07-31 08:46:35 +00004020 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4021 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4022 }
4023
sewardjb4112022007-11-09 22:49:28 +00004024 /* Set up BB */
4025 bbOut = emptyIRSB();
4026 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4027 bbOut->next = deepCopyIRExpr(bbIn->next);
4028 bbOut->jumpkind = bbIn->jumpkind;
4029
4030 // Copy verbatim any IR preamble preceding the first IMark
4031 i = 0;
4032 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4033 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4034 i++;
4035 }
4036
sewardj1c0ce7a2009-07-01 08:10:49 +00004037 // Get the first statement, and initial cia from it
4038 tl_assert(bbIn->stmts_used > 0);
4039 tl_assert(i < bbIn->stmts_used);
4040 st = bbIn->stmts[i];
4041 tl_assert(Ist_IMark == st->tag);
4042 cia = st->Ist.IMark.addr;
4043 st = NULL;
4044
sewardjb4112022007-11-09 22:49:28 +00004045 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004046 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004047 tl_assert(st);
4048 tl_assert(isFlatIRStmt(st));
4049 switch (st->tag) {
4050 case Ist_NoOp:
4051 case Ist_AbiHint:
4052 case Ist_Put:
4053 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004054 case Ist_Exit:
4055 /* None of these can contain any memory references. */
4056 break;
4057
sewardj1c0ce7a2009-07-01 08:10:49 +00004058 case Ist_IMark:
4059 /* no mem refs, but note the insn address. */
4060 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004061 /* Don't instrument the dynamic linker. It generates a
4062 lot of races which we just expensively suppress, so
4063 it's pointless.
4064
4065 Avoid flooding is_in_dynamic_linker_shared_object with
4066 requests by only checking at transitions between 4K
4067 pages. */
4068 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4069 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4070 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4071 inLDSO = is_in_dynamic_linker_shared_object(cia);
4072 } else {
4073 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4074 }
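            /* Worked example (added; illustrative values): with
               cia == 0x400123AB, cia & ~(Addr64)0xFFF == 0x40012000.
               Any later cia in 0x40012000 .. 0x40012FFF equals
               inLDSOmask4K and so skips the lookup.  The initial
               value 1 can never equal a 4K-aligned page base, which
               forces a lookup on the first IMark. */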
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }
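         /* Sizing example (added; illustrative): for a double CAS
            with dataLo of type Ity_I64, the access is reported as
            2 * 8 == 16 bytes at cas->addr; a single CAS on an
            Ity_I32 value is reported as 4 bytes. */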

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }

         case Ist_Store:
            /* A plain store.  (Store-conditionals are handled above,
               in the Ist_LLSC case, where they are simply ignored.) */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* A plain load.  (Load-linkeds appear as Ist_LLSC and
               are handled above, so only vanilla loads show up
               here.) */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
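
/* Summary of the policy above (added commentary): loads,
   load-linkeds, CASes and memory-reading dirty helpers are reported
   to the race detector as reads; stores and memory-writing dirty
   helpers as writes; store-conditionals are ignored; and nothing at
   all is reported for code inside the dynamic linker. */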


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}
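
/* Note (added): the map is created lazily, on first use, rather than
   in hg_post_clo_init.  No locking is needed here since Valgrind
   serialises execution of client threads, so tool code never runs
   concurrently with itself (an assumption about core behaviour, not
   something enforced in this file). */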


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
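
      /* Client-side usage (added, for reference): client code
         typically reaches the case above via the helper macro in
         helgrind.h, along the lines of
            VALGRIND_HG_CLEAN_MEMORY(buf, sizeof buf);
         which issues this request with args[1] = start address and
         args[2] = length. */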

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_FORGET_ALL:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_FORGET_ALL( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
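   /* Worked example (added): "--hg-sanity-flags=010000" sets only
      bit (1 << 4), so HG_(clo_sanity_flags) == 0x10: sanity checks
      then run after changes to the lock-order-acquisition-graph (see
      hg_print_debug_usage below). */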

   else if VG_BOOL_CLO(arg, "--free-is-write",
                            HG_(clo_free_is_write)) {}
   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --free-is-write=no|yes    treat heap frees as writes [no]\n"
"    --track-lockorders=no|yes show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog, "univ_laog" );
         }
      }

      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
      //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz             stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("            LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)(" LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
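
/* Behaviour note (added): if the unwinder produces fewer than
   nRequest frames (say 3 of 8), the loop above zero-fills
   frames[3..7], so libhb always receives a fully-initialised,
   zero-padded buffer of exactly nRequest entries. */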

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/