
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2013 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2013 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_gdbserver.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)
#include "pub_tool_poolalloc.h"

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_addrdescr.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
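
/* An illustrative sketch of the pattern in question (not compiled;
   'example_aliasing_lookup' is an invented name, but the call shape
   matches map_locks_maybe_lookup further down).  The out-parameter of
   VG_(lookupFM) is written through a (UWord*) alias of a Lock*
   variable, which is exactly what gcc's strict-aliasing analysis
   objects to. */
#if 0
static Lock* example_aliasing_lookup ( WordFM* fm, Addr ga )
{
   Lock* lk = NULL;
   /* (UWord*)&lk creates a UWord* alias of a Lock* object. */
   if (VG_(lookupFM)( fm, NULL, (UWord*)&lk, (UWord)ga ))
      return lk;
   return NULL;
}
#endif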

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( const HChar* who ); /* fwds */

#define HG_CLI__DEFAULT_MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;
Thread* get_admin_threads ( void ) { return admin_threads; }

/* Admin doubly-linked list of Locks */
/* We need a doubly-linked list to handle del_LockN properly and
   efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for lock sets. */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */
static Int next_gc_univ_laog = 1;
/* univ_laog will be garbage-collected when the number of elements in
   univ_laog is >= next_gc_univ_laog. */

/* Allow libhb to get at the universe of locksets stored
   here.  Sigh. */
WordSetU* HG_(get_univ_lsets) ( void ) { return univ_lsets; }

/* Allow libhb to get at the list of locks stored here.  Ditto
   sigh. */
Lock* HG_(get_admin_locks) ( void ) { return admin_locks; }

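/* Illustrative sketch (not compiled; 'thr' and 'lk' stand for any
   valid Thread and Lock): the typical word-set operations applied to
   the per-thread locksets living in univ_lsets, as used by the event
   handlers further down this file. */
#if 0
   /* record that thr now holds lk ... */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* ... test membership ... */
   tl_assert( HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ) );
   /* ... and remove it again when lk is released. */
   thr->locksetA = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );
#endif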

/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert the new lock into the admin_locks doubly-linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   /* begin: add to doubly-linked list */
   if (admin_locks)
      admin_locks->admin_prev = lock;
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   admin_locks = lock;
   /* end: add */
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes it from the admin_locks doubly-linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   /* begin: del lock from doubly-linked list */
   if (lk == admin_locks) {
      tl_assert(lk->admin_prev == NULL);
      if (lk->admin_next)
         lk->admin_next->admin_prev = NULL;
      admin_locks = lk->admin_next;
   }
   else {
      tl_assert(lk->admin_prev != NULL);
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next)
         lk->admin_next->admin_prev = lk->admin_prev;
   }
   /* end: del */
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (UWord)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (UWord)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (UWord)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (UWord)thr);
   } else {
      lk->heldW = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (UWord)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (UWord)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy = NULL;
      lk->heldW = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (UWord)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (UWord)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int i;
   HChar spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n", t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

/* Pretty-print lock lk.
   If show_lock_addrdescr, also describe the (guest) lock address
   (this description will be more complete with --read-var-info=yes).
   If show_internal_data, also show Helgrind-internal information.
   d is the level at which output is indented. */
static void pp_Lock ( Int d, Lock* lk,
                      Bool show_lock_addrdescr,
                      Bool show_internal_data)
{
   space(d+0);
   if (show_internal_data)
      VG_(printf)("Lock %p (ga %#lx) {", lk, lk->guestaddr);
   else
      VG_(printf)("Lock ga %#lx {", lk->guestaddr);
   if (!show_lock_addrdescr
       || !HG_(get_and_pp_addrdescr) ("lock", (Addr) lk->guestaddr))
      VG_(printf)("\n");

   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
      space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   }
   space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
   }
   if (show_internal_data) {
      space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   }
   if (lk->heldBy) {
      Thread* thr;
      UWord count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (UWord*)&thr, &count )) {
         if (show_internal_data)
            VG_(printf)("%lu:%p ", count, thr);
         else {
            VG_(printf)("%c%lu:thread #%d ",
                        lk->heldW ? 'W' : 'R',
                        count, thr->errmsg_index);
            if (thr->coretid == VG_INVALID_THREADID)
               VG_(printf)("tid (exited) ");
            else
               VG_(printf)("tid %d ", thr->coretid);

         }
      }
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}\n");
   }
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk,
              False /* show_lock_addrdescr */,
              True /* show_internal_data */);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (UWord*)&gla,
                           (UWord*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, const HChar* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;
   WordSetID wsid;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(UWord));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);
   /* Ensure that univ_lsets is non-empty, with lockset zero being the
      empty lockset.  hg_errors.c relies on the assumption that
      lockset number zero in univ_lsets is always valid. */
   wsid = HG_(emptyWS)(univ_lsets);
   tl_assert(wsid == 0);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (UWord*)&oldlock, (UWord)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (UWord)ga, (UWord)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

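/* Illustrative usage sketch (not compiled): this is how the event
   handlers further down obtain the Lock* when a lock acquisition at
   guest address 'lock_ga' of kind 'lkk' is observed on thread 'thr'. */
#if 0
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );
#endif
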
static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (UWord*)&lk, (UWord)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr ga2 = 0;
   Lock* lk = NULL;
   VG_(delFromFM)( map_locks,
                   (UWord*)&ga2, (UWord*)&lk, (UWord)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( const HChar* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   UWord ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( const HChar* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   const HChar* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (UWord*)&gla, (UWord*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         UWord count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (UWord*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (UWord)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (UWord)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( const HChar* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( const HChar* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

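/* Illustrative sketch (not compiled; 'old_ga', 'new_ga', 'old_size'
   and 'new_size' are assumed names): a growing client realloc would
   block-copy the old block's shadow state to the new block and mark
   the excess as New. */
#if 0
   if (new_size > old_size) {
      shadow_mem_scopy_range( thr, old_ga, new_ga, old_size );
      shadow_mem_make_New( thr, new_ga + old_size, new_size - old_size );
   }
#endif
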
static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess_NoFX ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_NoFX ( %#lx, %ld )\n", aIN, len );
   // has no effect (NoFX)
   libhb_srange_noaccess_NoFX( thr->hbthr, aIN, len );
}

static void shadow_mem_make_NoAccess_AHAE ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess_AHAE ( %#lx, %ld )\n", aIN, len );
   // Actually Has An Effect (AHAE)
   libhb_srange_noaccess_AHAE( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (UWord)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (UWord)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word n;
   Bool was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (UWord)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (UWord)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (UWord)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
1324 /* It's a rwlock. We no longer hold it but we used to;
1325 nevertheless it still appears to be held by someone else.
1326 The implication is that, prior to this release, it must
1327 have been shared by us and and whoever else is holding it;
1328 which in turn implies it must be r-held, since a lock
1329 can't be w-held by more than one thread. */
1330 /* The lock is now R-held by somebody else: */
1331 tl_assert(lock->heldW == False);
1332 } else {
1333 /* Normal case. It's either not a rwlock, or it's a rwlock
1334 that we used to hold in w-mode (which is pretty much the
1335 same thing as a non-rwlock.) Since this transaction is
1336 atomic (V does not allow multiple threads to run
1337 simultaneously), it must mean the lock is now not held by
1338 anybody. Hence assert for it. */
1339 /* The lock is now not held by anybody: */
1340 tl_assert(!lock->heldBy);
1341 tl_assert(lock->heldW == False);
1342 }
sewardjf98e1c02008-10-25 16:22:41 +00001343 //if (lock->heldBy) {
florian6bf37262012-10-21 03:23:36 +00001344 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (UWord)thr ));
sewardjf98e1c02008-10-25 16:22:41 +00001345 //}
sewardjb4112022007-11-09 22:49:28 +00001346 /* update this thread's lockset accordingly. */
1347 thr->locksetA
florian6bf37262012-10-21 03:23:36 +00001348 = HG_(delFromWS)( univ_lsets, thr->locksetA, (UWord)lock );
sewardjb4112022007-11-09 22:49:28 +00001349 thr->locksetW
florian6bf37262012-10-21 03:23:36 +00001350 = HG_(delFromWS)( univ_lsets, thr->locksetW, (UWord)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001351 /* push our VC into the lock */
1352 tl_assert(thr->hbthr);
1353 tl_assert(lock->hbso);
1354 /* If the lock was previously W-held, then we want to do a
1355 strong send, and if previously R-held, then a weak send. */
1356 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001357 }
1358 /* fall through */
1359
1360 error:
sewardjf98e1c02008-10-25 16:22:41 +00001361 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001362}
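
/* Illustrative sketch (not from the original source) of client code
   that exercises the error paths above; 'mx' and 'worker' are made-up
   names.

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;

      void* worker ( void* v ) {
         pthread_mutex_unlock(&mx);  // this thread never locked 'mx':
         return NULL;                // if another thread holds it, an
      }                              // UnlockForeign report; if nobody
                                     // holds it, UnlockUnlocked.

   Unlocking an address that was never used as a lock at all instead
   produces an UnlockBogus report, from the !lock case at the top. */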


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   // This handles mprotect requests.  If the memory is being put
   // into no-R no-W state, paint it as NoAccess, for the reasons
   // documented at evh__die_mem_munmap().
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, r=%d w=%d x=%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // Urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_NoFX( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__die_mem_munmap ( Addr a, SizeT len ) {
   // It's important that libhb doesn't ignore this.  If, as is likely,
   // the client is subject to address space layout randomization,
   // then unmapped areas may never get remapped over, even in long
   // runs.  If we just ignore them we wind up with large resource
   // (VTS) leaks in libhb.  So force them to NoAccess, so that all
   // VTS references in the affected area are dropped.  Marking memory
   // as NoAccess is expensive, but we assume that munmap is sufficiently
   // rare that the space gains of doing this are worth the costs.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem_munmap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess_AHAE( get_current_Thread(), a, len );
}
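
/* Illustrative sketch (not from the original source): the usage
   pattern the above is defending against.  'use' stands for any
   memory-accessing client code.

      for (;;) {
         void* p = mmap(NULL, 1024*1024, PROT_READ|PROT_WRITE,
                        MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
         use(p);                // accesses leave VTS references behind
         munmap(p, 1024*1024);  // under ASLR this region may never be
      }                         // remapped, so without the NoAccess
                                // sweep those VTSs would leak forever
*/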

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // Libhb doesn't ignore this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}

/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
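
/* Illustrative sketch (not from the original source): why the child
   side of fork needs this scrub.  'worker' is a made-up name.

      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);  // two threads exist
      pid_t pid = fork();
      if (pid == 0) {
         // POSIX: only the forking thread survives here; 't' does
         // not exist in the child, yet its map_threads slot would
         // still be populated unless retired exactly as in
         // evh__pre_thread_ll_exit above.
      }
*/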


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Send last arg of _so_send as False, since the sending thread
      doesn't actually exist any more, so we don't want _so_send to
      try taking stack snapshots of it. */
   libhb_so_send(hbthr_q, so, True/*strong_send*//*?!? wrt comment above*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* Tell libhb that the quitter has been reaped.  Note that we might
      have to be cleverer about this, to exclude 2nd and subsequent
      notifications for the same hbthr_q, in the case where the app is
      buggy (calls pthread_join twice or more on the same thread) AND
      where libpthread is also buggy and doesn't return ESRCH on
      subsequent calls.  (If libpthread isn't thusly buggy, then the
      wrapper for pthread_join in hg_intercepts.c will stop us getting
      notified here multiple times for the same joinee.)  See also
      comments in helgrind/tests/jointwice.c. */
   libhb_joinedwith_done(hbthr_q);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit
      before we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
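
/* Illustrative sketch (not from the original source): the edge
   created above is what makes the following client pattern race-free.
   'worker' and 'result' are made-up names.

      int result;                          // shared; no lock involved

      void* worker ( void* v ) {
         result = 42;                      // write in the quitter
         return NULL;
      }

      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);
      pthread_join(t, NULL);               // send on quitter's clock,
                                           // recv'd by the stayer
      printf("%d\n", result);              // ordered after the write:
                                           // no race is reported
*/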

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, const HChar* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                const HChar* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   len = VG_(strlen)( (HChar*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, const HChar* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid: both branches are identical, so
   // 'is_inited' is effectively ignored.
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   thr = get_current_Thread();
   tl_assert(thr);
   if (HG_(clo_free_is_write)) {
      /* Treat frees as if the memory was written immediately prior to
         the free.  This shakes out more races, specifically, cases
         where memory is referenced by one thread, and freed by
         another, and there's no observable synchronisation event to
         guarantee that the reference happens before the free. */
      shadow_mem_cwrite_range(thr, a, len);
   }
   shadow_mem_make_NoAccess_NoFX( thr, a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}


/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex,
                                        Bool mutex_is_init )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE"
                  "(ctid=%d, %p, isInit=%d)\n",
                  (Int)tid, (void*)mutex, (Int)mutex_is_init );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL && mutex_is_init) {
      /* We're destroying a mutex which we don't have any record of,
         and which appears to have the value PTHREAD_MUTEX_INITIALIZER.
         Assume it never got used, and so we don't need to do anything
         more. */
      goto out;
   }

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

  out:
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (UWord)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      const HChar* errstr = "Attempt to re-lock a "
                            "non-recursive lock I already hold";
      const HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
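
/* Illustrative sketch (not from the original source): the self-
   deadlock reported by the check above, just before it happens.

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;  // non-recursive

      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);  // same thread, no unlock in between:
                                // "Attempt to re-lock a non-recursive
                                // lock I already hold", then a genuine
                                // deadlock.

   A pthread_mutex_trylock at the second call site is exempt, since it
   arrives here with isTryLock == 1 and fails harmlessly with EBUSY. */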

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */;
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock, 0/*!isInit*/ );
}
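
/* Illustrative sketch (not from the original source): how a typical
   spinlock lifetime maps onto the handlers above, assuming glibc,
   where init and unlock are effectively the same operation (both
   store the released value) and hence share one pair of handlers.

      pthread_spinlock_t sl;
      pthread_spin_init(&sl, 0);   // _PRE: not held, no-op;
                                   // _POST: creates an LK_nonRec lock
      pthread_spin_lock(&sl);      // SPIN_LOCK_PRE/POST, as for a mutex
      pthread_spin_unlock(&sl);    // _PRE: held, so release + send edge;
                                   // _POST: lock already known, no-op
      pthread_spin_destroy(&sl);   // SPIN_DESTROY_PRE, as mutex destroy
*/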


/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;

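/* Illustrative sketch (not from the original source): the scheme
   above in terms of client events.  'mx' protects 'ready'; all names
   are made up.

      // waiter                          // signaller
      pthread_mutex_lock(&mx);           pthread_mutex_lock(&mx);
      while (!ready)                     ready = 1;
         pthread_cond_wait(&cv, &mx);    pthread_cond_signal(&cv);
      ...                                pthread_mutex_unlock(&mx);
      pthread_mutex_unlock(&mx);

   The signal does libhb_so_send on cv's SO and the completed wait
   does libhb_so_recv on the same SO, so everything the signaller did
   before signalling happens-before everything the waiter does after
   waking. */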

/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
      tl_assert(map_cond_to_CVInfo != NULL);
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so     = so;
      cvi->mx_ga  = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_NO_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      return NULL;
   }
}

static void map_cond_to_CVInfo_delete ( ThreadId tid,
                                        void* cond, Bool cond_is_init ) {
   Thread* thr;
   UWord   keyW, valW;

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      if (cvi->nWaiters > 0) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy:"
            " destruction of condition variable being waited upon");
         /* Destroying a cond var that is being waited upon yields
            EBUSY; the variable is not destroyed. */
         return;
      }
      if (!VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond ))
         tl_assert(0); // cond var found above, and not here ???
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   } else {
      /* We have no record of this CV.  So complain about it
         .. except, don't bother to complain if it has exactly the
         value PTHREAD_COND_INITIALIZER, since it might be that the CV
         was initialised like that but never used. */
      if (!cond_is_init) {
         HG_(record_error_Misc)(
            thr, "pthread_cond_destroy: destruction of unknown cond var");
      }
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."  For that reason, print "dubious" if the lock isn't
   // held by any thread.  Skip the "dubious" if it is held by some
   // other thread; that sounds straight-out wrong.
   //
   // Anybody who writes code that signals on a CV without holding
   // the associated MX needs to be shipped off to a lunatic asylum
   // ASAP, even though POSIX doesn't actually declare such behaviour
   // illegal -- it makes code extremely difficult to understand/
   // reason about.  In particular it puts the signalling thread in
   // a situation where it is racing against the released waiter
   // as soon as the signalling is done, and so there needs to be
   // some auxiliary synchronisation mechanism in the program that
   // makes this safe -- or the race(s) need to be harmless, or
   // probably nonexistent.
   //
   if (1) {
      Lock* lk = NULL;
      if (cvi->mx_ga != 0) {
         lk = map_locks_maybe_lookup( (Addr)cvi->mx_ga );
      }
      /* note: lk could be NULL.  Be careful. */
      if (lk) {
         if (lk->kind == LK_rdwr) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
         }
         if (lk->heldBy == NULL) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: dubious: "
               "associated lock is not held by any thread");
         }
         if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (UWord)thr)) {
            HG_(record_error_Misc)(thr,
               "pthread_cond_{signal,broadcast}: "
               "associated lock is not held by calling thread");
         }
      } else {
         /* Couldn't even find the damn thing. */
         // But actually .. that's not necessarily an error.  We don't
         // know the (CV,MX) binding until a pthread_cond_wait or bcast
         // shows us what it is, and that may not have happened yet.
         // So just keep quiet in this circumstance.
         //HG_(record_error_Misc)( thr,
         //   "pthread_cond_{signal,broadcast}: "
         //   "no or invalid mutex associated with cond");
      }
   }

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
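
/* Illustrative sketch (not from the original source): the pattern the
   "dubious" complaint above is aimed at.  Signalling without holding
   the bound mutex; all names are made up.

      // signaller                    // waiter
      ready = 1;                      pthread_mutex_lock(&mx);
      pthread_cond_signal(&cv);       while (!ready)
                                         pthread_cond_wait(&cv, &mx);
                                      pthread_mutex_unlock(&mx);

   Nothing orders the signaller's unlocked write of 'ready' against
   the waiter's initial read of it, so the two race unless some
   auxiliary synchronisation makes it safe. */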

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (UWord)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}

static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex,
                                             Bool timeout)
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p, timeout=%d)\n",
                  (Int)tid, (void*)cond, (void*)mutex, (Int)timeout );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_NO_alloc( cond );
   if (!cvi) {
      /* This could be a bug in helgrind, or an error in the guest
         application (e.g. the cond var was destroyed by another
         thread).  Let's assume helgrind is perfect ...
         Note that this is similar to drd behaviour. */
      HG_(record_error_Misc)(thr, "condition variable has been destroyed while"
                             " being waited upon");
      return;
   }

   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!timeout && !libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded"
                                   " without prior pthread_cond_signal");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}
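
/* Illustrative sketch (not from the original source): why 'timeout'
   suppresses the complaint above.  Assuming the interceptor passes
   timeout == True when a timed wait ends with ETIMEDOUT:

      struct timespec ts = { time(NULL) + 1, 0 };
      pthread_mutex_lock(&mx);
      int r = pthread_cond_timedwait(&cv, &mx, &ts);  // nobody signals
      // r == ETIMEDOUT: the wait "completed" although no send was
      // ever performed on cv's SO -- a legitimate outcome, not a
      // libpthread bug, so no complaint is warranted.

   Untimed waits (timeout == False) that return without a prior send
   still draw the "Bug in libpthread" report. */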
2435
philippe19dfe032013-03-24 20:10:23 +00002436static void evh__HG_PTHREAD_COND_INIT_POST ( ThreadId tid,
2437 void* cond, void* cond_attr )
2438{
2439 CVInfo* cvi;
2440
2441 if (SHOW_EVENTS >= 1)
2442 VG_(printf)("evh__HG_PTHREAD_COND_INIT_POST"
2443 "(ctid=%d, cond=%p, cond_attr=%p)\n",
2444 (Int)tid, (void*)cond, (void*) cond_attr );
2445
2446 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2447 tl_assert (cvi);
2448 tl_assert (cvi->so);
2449}
2450
2451
sewardjf98e1c02008-10-25 16:22:41 +00002452static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
sewardjc02f6c42013-10-14 13:51:25 +00002453 void* cond, Bool cond_is_init )
sewardjf98e1c02008-10-25 16:22:41 +00002454{
2455 /* Deal with destroy events. The only purpose is to free storage
2456 associated with the CV, so as to avoid any possible resource
2457 leaks. */
2458 if (SHOW_EVENTS >= 1)
2459 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
sewardjc02f6c42013-10-14 13:51:25 +00002460 "(ctid=%d, cond=%p, cond_is_init=%d)\n",
2461 (Int)tid, (void*)cond, (Int)cond_is_init );
sewardjf98e1c02008-10-25 16:22:41 +00002462
sewardjc02f6c42013-10-14 13:51:25 +00002463 map_cond_to_CVInfo_delete( tid, cond, cond_is_init );
sewardjb4112022007-11-09 22:49:28 +00002464}
2465
2466
sewardj9f569b72008-11-13 13:33:09 +00002467/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002468/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002469/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002470
2471/* EXPOSITION only */
2472static
2473void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2474{
2475 if (SHOW_EVENTS >= 1)
2476 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2477 (Int)tid, (void*)rwl );
2478 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002479 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002480 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2481}
2482
2483static
2484void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2485{
2486 Thread* thr;
2487 Lock* lk;
2488 if (SHOW_EVENTS >= 1)
2489 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2490 (Int)tid, (void*)rwl );
2491
2492 thr = map_threads_maybe_lookup( tid );
2493 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002494 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002495
2496 lk = map_locks_maybe_lookup( (Addr)rwl );
2497
2498 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002499 HG_(record_error_Misc)(
2500 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002501 }
2502
2503 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002504 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002505 tl_assert( lk->guestaddr == (Addr)rwl );
2506 if (lk->heldBy) {
2507 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002508 HG_(record_error_Misc)(
2509         thr, "pthread_rwlock_destroy of a locked rwlock" );
sewardjb4112022007-11-09 22:49:28 +00002510 /* remove lock from locksets of all owning threads */
2511 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002512 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002513 lk->heldBy = NULL;
2514 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002515 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002516 }
2517 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002518 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002519
2520 if (HG_(clo_track_lockorders))
2521 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002522 map_locks_delete( lk->guestaddr );
2523 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002524 }
2525
sewardjf98e1c02008-10-25 16:22:41 +00002526 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002527 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2528}
2529
2530static
sewardj789c3c52008-02-25 12:10:07 +00002531void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2532 void* rwl,
2533 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002534{
2535 /* Just check the rwl is sane; nothing else to do. */
2536 // 'rwl' may be invalid - not checked by wrapper
2537 Thread* thr;
2538 Lock* lk;
2539 if (SHOW_EVENTS >= 1)
2540 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2541 (Int)tid, (Int)isW, (void*)rwl );
2542
2543   tl_assert(isW == 0 || isW == 1); /* assured by the wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002544   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured by the wrapper */
sewardjb4112022007-11-09 22:49:28 +00002545 thr = map_threads_maybe_lookup( tid );
2546 tl_assert(thr); /* cannot fail - Thread* must already exist */
2547
2548 lk = map_locks_maybe_lookup( (Addr)rwl );
2549 if ( lk
2550 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2551 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002552 HG_(record_error_Misc)(
2553         thr, "pthread_rwlock_{rd,wr}lock with a "
2554              "pthread_mutex_t* argument" );
sewardjb4112022007-11-09 22:49:28 +00002555 }
2556}
2557
2558static
2559void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2560{
2561   // only called if the real library call succeeded - so the rwlock is sane
2562 Thread* thr;
2563 if (SHOW_EVENTS >= 1)
2564 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2565 (Int)tid, (Int)isW, (void*)rwl );
2566
2567   tl_assert(isW == 0 || isW == 1); /* assured by the wrapper */
2568 thr = map_threads_maybe_lookup( tid );
2569 tl_assert(thr); /* cannot fail - Thread* must already exist */
2570
2571 (isW ? evhH__post_thread_w_acquires_lock
2572 : evhH__post_thread_r_acquires_lock)(
2573 thr,
2574 LK_rdwr, /* if not known, create new lock with this LockKind */
2575 (Addr)rwl
2576 );
2577}
2578
2579static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2580{
2581 // 'rwl' may be invalid - not checked by wrapper
2582 Thread* thr;
2583 if (SHOW_EVENTS >= 1)
2584 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2585 (Int)tid, (void*)rwl );
2586
2587 thr = map_threads_maybe_lookup( tid );
2588 tl_assert(thr); /* cannot fail - Thread* must already exist */
2589
2590 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2591}
2592
2593static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2594{
2595   // only called if the real library call succeeded - so the rwlock is sane
2596 Thread* thr;
2597 if (SHOW_EVENTS >= 1)
2598 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2599 (Int)tid, (void*)rwl );
2600 thr = map_threads_maybe_lookup( tid );
2601 tl_assert(thr); /* cannot fail - Thread* must already exist */
2602
2603 // anything we should do here?
2604}
2605
2606
sewardj9f569b72008-11-13 13:33:09 +00002607/* ---------------------------------------------------------- */
2608/* -------------- events to do with semaphores -------------- */
2609/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002610
sewardj11e352f2007-11-30 11:11:02 +00002611/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002612 variables. */
2613
sewardjf98e1c02008-10-25 16:22:41 +00002614/* For each semaphore, we maintain a stack of SOs. When a 'post'
2615 operation is done on a semaphore (unlocking, essentially), a new SO
2616 is created for the posting thread, the posting thread does a strong
2617 send to it (which merely installs the posting thread's VC in the
2618 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002619
2620 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002621 semaphore, we pop a SO off the semaphore's stack (which should be
2622 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002623 dependencies between posters and waiters of the semaphore.
2624
sewardjf98e1c02008-10-25 16:22:41 +00002625 It may not be necessary to use a stack - perhaps a bag of SOs would
2626 do. But we do need to keep track of how many unused-up posts have
2627 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002628
sewardjf98e1c02008-10-25 16:22:41 +00002629 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002630 twice on S. T3 cannot complete its waits without both T1 and T2
2631 posting. The above mechanism will ensure that T3 acquires
2632 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002633
sewardjf98e1c02008-10-25 16:22:41 +00002634   When a semaphore is initialised with value N, we act as if we'd
2635   posted N times on the semaphore: basically create N SOs and do a
2636   strong send to all of them. This allows up to N waits on the
2637 semaphore to acquire a dependency on the initialisation point,
2638 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002639
2640 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2641 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002642*/
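/* A minimal client-side sketch of the T1/T2/T3 scenario above
   (hypothetical test code, not part of Helgrind itself):

      static sem_t s;        // assume sem_init(&s, 0, 0) was done
      static int d1, d2;
      static void* t1 ( void* v ) { d1 = 1; sem_post(&s); return NULL; }
      static void* t2 ( void* v ) { d2 = 1; sem_post(&s); return NULL; }
      static void* t3 ( void* v ) {
         sem_wait(&s);       // strong recv from one popped SO
         sem_wait(&s);       // strong recv from the other popped SO
         return (void*)(long)(d1 + d2);  // ordered after both writes
      }

   Each sem_post pushes a fresh SO carrying the poster's vector clock;
   each sem_wait pops one SO and does a strong recv from it, so after
   both waits T3's clock dominates both posting points and its reads
   of d1/d2 race with nothing. */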
2643
sewardjf98e1c02008-10-25 16:22:41 +00002644/* sem_t* -> XArray* SO* */
2645static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002646
sewardjf98e1c02008-10-25 16:22:41 +00002647static void map_sem_to_SO_stack_INIT ( void ) {
2648 if (map_sem_to_SO_stack == NULL) {
2649 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2650 HG_(free), NULL );
2651 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002652 }
2653}
2654
sewardjf98e1c02008-10-25 16:22:41 +00002655static void push_SO_for_sem ( void* sem, SO* so ) {
2656 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002657 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002658 tl_assert(so);
2659 map_sem_to_SO_stack_INIT();
2660 if (VG_(lookupFM)( map_sem_to_SO_stack,
2661 &keyW, (UWord*)&xa, (UWord)sem )) {
2662 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002663 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002664 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002665 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002666 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2667 VG_(addToXA)( xa, &so );
florian6bf37262012-10-21 03:23:36 +00002668 VG_(addToFM)( map_sem_to_SO_stack, (UWord)sem, (UWord)xa );
sewardjb4112022007-11-09 22:49:28 +00002669 }
2670}
2671
sewardjf98e1c02008-10-25 16:22:41 +00002672static SO* mb_pop_SO_for_sem ( void* sem ) {
2673 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002674 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002675 SO* so;
2676 map_sem_to_SO_stack_INIT();
2677 if (VG_(lookupFM)( map_sem_to_SO_stack,
2678 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002679 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002680 Word sz;
2681 tl_assert(keyW == (UWord)sem);
2682 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002683 tl_assert(sz >= 0);
2684 if (sz == 0)
2685 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002686 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2687 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002688 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002689 return so;
sewardjb4112022007-11-09 22:49:28 +00002690 } else {
2691 /* hmm, that's odd. No stack for this semaphore. */
2692 return NULL;
2693 }
2694}
2695
sewardj11e352f2007-11-30 11:11:02 +00002696static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002697{
sewardjf98e1c02008-10-25 16:22:41 +00002698 UWord keyW, valW;
2699 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002700
sewardjb4112022007-11-09 22:49:28 +00002701 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002702 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002703 (Int)tid, (void*)sem );
2704
sewardjf98e1c02008-10-25 16:22:41 +00002705 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002706
sewardjf98e1c02008-10-25 16:22:41 +00002707 /* Empty out the semaphore's SO stack. This way of doing it is
2708 stupid, but at least it's easy. */
2709 while (1) {
2710 so = mb_pop_SO_for_sem( sem );
2711 if (!so) break;
2712 libhb_so_dealloc(so);
2713 }
2714
2715 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2716 XArray* xa = (XArray*)valW;
2717 tl_assert(keyW == (UWord)sem);
2718 tl_assert(xa);
2719 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2720 VG_(deleteXA)(xa);
2721 }
sewardjb4112022007-11-09 22:49:28 +00002722}
2723
sewardj11e352f2007-11-30 11:11:02 +00002724static
2725void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2726{
sewardjf98e1c02008-10-25 16:22:41 +00002727 SO* so;
2728 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002729
2730 if (SHOW_EVENTS >= 1)
2731 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2732 (Int)tid, (void*)sem, value );
2733
sewardjf98e1c02008-10-25 16:22:41 +00002734 thr = map_threads_maybe_lookup( tid );
2735 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002736
sewardjf98e1c02008-10-25 16:22:41 +00002737 /* Empty out the semaphore's SO stack. This way of doing it is
2738 stupid, but at least it's easy. */
2739 while (1) {
2740 so = mb_pop_SO_for_sem( sem );
2741 if (!so) break;
2742 libhb_so_dealloc(so);
2743 }
sewardj11e352f2007-11-30 11:11:02 +00002744
sewardjf98e1c02008-10-25 16:22:41 +00002745   /* If we don't do this check, the following loop runs us out
2746 of memory for stupid initial values of 'value'. */
2747 if (value > 10000) {
2748 HG_(record_error_Misc)(
2749 thr, "sem_init: initial value exceeds 10000; using 10000" );
2750 value = 10000;
2751 }
sewardj11e352f2007-11-30 11:11:02 +00002752
sewardjf98e1c02008-10-25 16:22:41 +00002753 /* Now create 'valid' new SOs for the thread, do a strong send to
2754 each of them, and push them all on the stack. */
2755 for (; value > 0; value--) {
2756 Thr* hbthr = thr->hbthr;
2757 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002758
sewardjf98e1c02008-10-25 16:22:41 +00002759 so = libhb_so_alloc();
2760 libhb_so_send( hbthr, so, True/*strong send*/ );
2761 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002762 }
2763}
2764
2765static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002766{
sewardjf98e1c02008-10-25 16:22:41 +00002767 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2768 it (iow, write our VC into it, then tick ours), and push the SO
2769      on a stack of SOs associated with 'sem'. This is later used
2770 by other thread(s) which successfully exit from a sem_wait on
2771      the same sem; by doing a strong recv from SOs popped off the
2772 stack, they acquire dependencies on the posting thread
2773 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002774
sewardjf98e1c02008-10-25 16:22:41 +00002775 Thread* thr;
2776 SO* so;
2777 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002778
2779 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002780 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002781 (Int)tid, (void*)sem );
2782
2783 thr = map_threads_maybe_lookup( tid );
2784 tl_assert(thr); /* cannot fail - Thread* must already exist */
2785
2786 // error-if: sem is bogus
2787
sewardjf98e1c02008-10-25 16:22:41 +00002788 hbthr = thr->hbthr;
2789 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002790
sewardjf98e1c02008-10-25 16:22:41 +00002791 so = libhb_so_alloc();
2792 libhb_so_send( hbthr, so, True/*strong send*/ );
2793 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002794}
2795
sewardj11e352f2007-11-30 11:11:02 +00002796static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002797{
sewardjf98e1c02008-10-25 16:22:41 +00002798 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2799 the 'sem' from this semaphore's SO-stack, and do a strong recv
2800 from it. This creates a dependency back to one of the post-ers
2801 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002802
sewardjf98e1c02008-10-25 16:22:41 +00002803 Thread* thr;
2804 SO* so;
2805 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002806
2807 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002808 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002809 (Int)tid, (void*)sem );
2810
2811 thr = map_threads_maybe_lookup( tid );
2812 tl_assert(thr); /* cannot fail - Thread* must already exist */
2813
2814 // error-if: sem is bogus
2815
sewardjf98e1c02008-10-25 16:22:41 +00002816 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002817
sewardjf98e1c02008-10-25 16:22:41 +00002818 if (so) {
2819 hbthr = thr->hbthr;
2820 tl_assert(hbthr);
2821
2822 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2823 libhb_so_dealloc(so);
2824 } else {
2825 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2826 If this happened it would surely be a bug in the threads
2827 library. */
2828 HG_(record_error_Misc)(
2829 thr, "Bug in libpthread: sem_wait succeeded on"
2830 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002831 }
2832}
2833
2834
sewardj9f569b72008-11-13 13:33:09 +00002835/* -------------------------------------------------------- */
2836/* -------------- events to do with barriers -------------- */
2837/* -------------------------------------------------------- */
2838
2839typedef
2840 struct {
2841 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002842 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002843 UWord size; /* declared size */
2844 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2845 }
2846 Bar;
2847
2848static Bar* new_Bar ( void ) {
2849 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2850 tl_assert(bar);
2851 /* all fields are zero */
2852 tl_assert(bar->initted == False);
2853 return bar;
2854}
2855
2856static void delete_Bar ( Bar* bar ) {
2857 tl_assert(bar);
2858 if (bar->waiting)
2859 VG_(deleteXA)(bar->waiting);
2860 HG_(free)(bar);
2861}
2862
2863/* A mapping which stores auxiliary data for barriers. */
2864
2865/* pthread_barrier_t* -> Bar* */
2866static WordFM* map_barrier_to_Bar = NULL;
2867
2868static void map_barrier_to_Bar_INIT ( void ) {
2869 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2870 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2871 "hg.mbtBI.1", HG_(free), NULL );
2872 tl_assert(map_barrier_to_Bar != NULL);
2873 }
2874}
2875
2876static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2877 UWord key, val;
2878 map_barrier_to_Bar_INIT();
2879 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2880 tl_assert(key == (UWord)barrier);
2881 return (Bar*)val;
2882 } else {
2883 Bar* bar = new_Bar();
2884 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2885 return bar;
2886 }
2887}
2888
2889static void map_barrier_to_Bar_delete ( void* barrier ) {
2890 UWord keyW, valW;
2891 map_barrier_to_Bar_INIT();
2892 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2893 Bar* bar = (Bar*)valW;
2894 tl_assert(keyW == (UWord)barrier);
2895 delete_Bar(bar);
2896 }
2897}
2898
2899
2900static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2901 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002902 UWord count,
2903 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002904{
2905 Thread* thr;
2906 Bar* bar;
2907
2908 if (SHOW_EVENTS >= 1)
2909 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002910 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2911 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002912
2913 thr = map_threads_maybe_lookup( tid );
2914 tl_assert(thr); /* cannot fail - Thread* must already exist */
2915
2916 if (count == 0) {
2917 HG_(record_error_Misc)(
2918 thr, "pthread_barrier_init: 'count' argument is zero"
2919 );
2920 }
2921
sewardj406bac82010-03-03 23:03:40 +00002922 if (resizable != 0 && resizable != 1) {
2923 HG_(record_error_Misc)(
2924 thr, "pthread_barrier_init: invalid 'resizable' argument"
2925 );
2926 }
2927
sewardj9f569b72008-11-13 13:33:09 +00002928 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2929 tl_assert(bar);
2930
2931 if (bar->initted) {
2932 HG_(record_error_Misc)(
2933 thr, "pthread_barrier_init: barrier is already initialised"
2934 );
2935 }
2936
2937 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2938 tl_assert(bar->initted);
2939 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002940 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002941 );
2942 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2943 }
2944 if (!bar->waiting) {
2945 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2946 sizeof(Thread*) );
2947 }
2948
2949 tl_assert(bar->waiting);
2950 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002951 bar->initted = True;
2952 bar->resizable = resizable == 1 ? True : False;
2953 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002954}
2955
2956
2957static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2958 void* barrier )
2959{
sewardj553655c2008-11-14 19:41:19 +00002960 Thread* thr;
2961 Bar* bar;
2962
sewardj9f569b72008-11-13 13:33:09 +00002963 /* Deal with destroy events. The only purpose is to free storage
2964 associated with the barrier, so as to avoid any possible
2965 resource leaks. */
2966 if (SHOW_EVENTS >= 1)
2967 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2968 "(tid=%d, barrier=%p)\n",
2969 (Int)tid, (void*)barrier );
2970
sewardj553655c2008-11-14 19:41:19 +00002971 thr = map_threads_maybe_lookup( tid );
2972 tl_assert(thr); /* cannot fail - Thread* must already exist */
2973
2974 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2975 tl_assert(bar);
2976
2977 if (!bar->initted) {
2978 HG_(record_error_Misc)(
2979 thr, "pthread_barrier_destroy: barrier was never initialised"
2980 );
2981 }
2982
2983 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2984 HG_(record_error_Misc)(
2985 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2986 );
2987 }
2988
sewardj9f569b72008-11-13 13:33:09 +00002989 /* Maybe we shouldn't do this; just let it persist, so that when it
2990 is reinitialised we don't need to do any dynamic memory
2991 allocation? The downside is a potentially unlimited space leak,
2992 if the client creates (in turn) a large number of barriers all
2993 at different locations. Note that if we do later move to the
2994 don't-delete-it scheme, we need to mark the barrier as
2995 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002996 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002997 map_barrier_to_Bar_delete( barrier );
2998}
2999
3000
sewardj406bac82010-03-03 23:03:40 +00003001/* All the threads have arrived. Now do the Interesting Bit. Get a
3002 new synchronisation object and do a weak send to it from all the
3003 participating threads. This makes its vector clocks be the join of
3004 all the individual threads' vector clocks. Then do a strong
3005 receive from it back to all threads, so that their VCs are a copy
3006 of it (hence are all equal to the join of their original VCs.) */
3007static void do_barrier_cross_sync_and_empty ( Bar* bar )
3008{
3009 /* XXX check bar->waiting has no duplicates */
3010 UWord i;
3011 SO* so = libhb_so_alloc();
3012
3013 tl_assert(bar->waiting);
3014 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
3015
3016 /* compute the join ... */
3017 for (i = 0; i < bar->size; i++) {
3018 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3019 Thr* hbthr = t->hbthr;
3020 libhb_so_send( hbthr, so, False/*weak send*/ );
3021 }
3022 /* ... and distribute to all threads */
3023 for (i = 0; i < bar->size; i++) {
3024 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
3025 Thr* hbthr = t->hbthr;
3026 libhb_so_recv( hbthr, so, True/*strong recv*/ );
3027 }
3028
3029 /* finally, we must empty out the waiting vector */
3030 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
3031
3032 /* and we don't need this any more. Perhaps a stack-allocated
3033 SO would be better? */
3034 libhb_so_dealloc(so);
3035}
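/* Worked example with hypothetical clock values (and ignoring the
   per-thread ticks the real send/recv operations also perform):
   suppose the three waiting threads arrive with vector clocks

      T1 = [2,0,0]   T2 = [0,3,0]   T3 = [0,0,1]

   The three weak sends leave the SO holding the join [2,3,1], and
   the three strong recvs copy that join back, so every thread leaves
   the barrier with clock [2,3,1] -- ordered after everything any of
   the participants did before arriving. */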
3036
3037
sewardj9f569b72008-11-13 13:33:09 +00003038static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
3039 void* barrier )
3040{
sewardj1c466b72008-11-19 11:52:14 +00003041 /* This function gets called after a client thread calls
3042 pthread_barrier_wait but before it arrives at the real
3043 pthread_barrier_wait.
3044
3045 Why is the following correct? It's a bit subtle.
3046
3047 If this is not the last thread arriving at the barrier, we simply
3048 note its presence and return. Because valgrind (at least as of
3049 Nov 08) is single threaded, we are guaranteed safe from any race
3050 conditions when in this function -- no other client threads are
3051 running.
3052
3053 If this is the last thread, then we are again the only running
3054 thread. All the other threads will have either arrived at the
3055 real pthread_barrier_wait or are on their way to it, but in any
3056 case are guaranteed not to be able to move past it, because this
3057 thread is currently in this function and so has not yet arrived
3058 at the real pthread_barrier_wait. That means that:
3059
3060 1. While we are in this function, none of the other threads
3061 waiting at the barrier can move past it.
3062
3063 2. When this function returns (and simulated execution resumes),
3064 this thread and all other waiting threads will be able to move
3065 past the real barrier.
3066
3067 Because of this, it is now safe to update the vector clocks of
3068 all threads, to represent the fact that they all arrived at the
3069 barrier and have all moved on. There is no danger of any
3070 complications to do with some threads leaving the barrier and
3071 racing back round to the front, whilst others are still leaving
3072 (which is the primary source of complication in correct handling/
3073 implementation of barriers). That can't happen because we update
3074 here our data structures so as to indicate that the threads have
3075 passed the barrier, even though, as per (2) above, they are
3076 guaranteed not to pass the barrier until we return.
3077
3078 This relies crucially on Valgrind being single threaded. If that
3079 changes, this will need to be reconsidered.
3080 */
sewardj9f569b72008-11-13 13:33:09 +00003081 Thread* thr;
3082 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003083 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003084
3085 if (SHOW_EVENTS >= 1)
3086 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3087 "(tid=%d, barrier=%p)\n",
3088 (Int)tid, (void*)barrier );
3089
3090 thr = map_threads_maybe_lookup( tid );
3091 tl_assert(thr); /* cannot fail - Thread* must already exist */
3092
3093 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3094 tl_assert(bar);
3095
3096 if (!bar->initted) {
3097 HG_(record_error_Misc)(
3098 thr, "pthread_barrier_wait: barrier is uninitialised"
3099 );
3100 return; /* client is broken .. avoid assertions below */
3101 }
3102
3103 /* guaranteed by _INIT_PRE above */
3104 tl_assert(bar->size > 0);
3105 tl_assert(bar->waiting);
3106
3107 VG_(addToXA)( bar->waiting, &thr );
3108
3109 /* guaranteed by this function */
3110 present = VG_(sizeXA)(bar->waiting);
3111 tl_assert(present > 0 && present <= bar->size);
3112
3113 if (present < bar->size)
3114 return;
3115
sewardj406bac82010-03-03 23:03:40 +00003116 do_barrier_cross_sync_and_empty(bar);
3117}
sewardj9f569b72008-11-13 13:33:09 +00003118
sewardj9f569b72008-11-13 13:33:09 +00003119
sewardj406bac82010-03-03 23:03:40 +00003120static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3121 void* barrier,
3122 UWord newcount )
3123{
3124 Thread* thr;
3125 Bar* bar;
3126 UWord present;
3127
3128 if (SHOW_EVENTS >= 1)
3129 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3130 "(tid=%d, barrier=%p, newcount=%lu)\n",
3131 (Int)tid, (void*)barrier, newcount );
3132
3133 thr = map_threads_maybe_lookup( tid );
3134 tl_assert(thr); /* cannot fail - Thread* must already exist */
3135
3136 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3137 tl_assert(bar);
3138
3139 if (!bar->initted) {
3140 HG_(record_error_Misc)(
3141 thr, "pthread_barrier_resize: barrier is uninitialised"
3142 );
3143 return; /* client is broken .. avoid assertions below */
3144 }
3145
3146 if (!bar->resizable) {
3147 HG_(record_error_Misc)(
3148         thr, "pthread_barrier_resize: barrier may not be resized"
3149 );
3150 return; /* client is broken .. avoid assertions below */
3151 }
3152
3153 if (newcount == 0) {
3154 HG_(record_error_Misc)(
3155 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3156 );
3157 return; /* client is broken .. avoid assertions below */
3158 }
3159
3160 /* guaranteed by _INIT_PRE above */
3161 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003162 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003163 /* Guaranteed by this fn */
3164 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003165
sewardj406bac82010-03-03 23:03:40 +00003166 if (newcount >= bar->size) {
3167 /* Increasing the capacity. There's no possibility of threads
3168 moving on from the barrier in this situation, so just note
3169 the fact and do nothing more. */
3170 bar->size = newcount;
3171 } else {
3172 /* Decreasing the capacity. If we decrease it to be equal or
3173 below the number of waiting threads, they will now move past
3174 the barrier, so need to mess with dep edges in the same way
3175 as if the barrier had filled up normally. */
3176 present = VG_(sizeXA)(bar->waiting);
3177 tl_assert(present >= 0 && present <= bar->size);
3178 if (newcount <= present) {
3179 bar->size = present; /* keep the cross_sync call happy */
3180 do_barrier_cross_sync_and_empty(bar);
3181 }
3182 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003183 }
sewardj9f569b72008-11-13 13:33:09 +00003184}
3185
3186
sewardjed2e72e2009-08-14 11:08:24 +00003187/* ----------------------------------------------------- */
3188/* ----- events to do with user-specified HB edges ----- */
3189/* ----------------------------------------------------- */
3190
3191/* A mapping from arbitrary UWord tag to the SO associated with it.
3192 The UWord tags are meaningless to us, interpreted only by the
3193 user. */
3194
3195
3196
3197/* UWord -> SO* */
3198static WordFM* map_usertag_to_SO = NULL;
3199
3200static void map_usertag_to_SO_INIT ( void ) {
3201 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3202 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3203 "hg.mutS.1", HG_(free), NULL );
3204 tl_assert(map_usertag_to_SO != NULL);
3205 }
3206}
3207
3208static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3209 UWord key, val;
3210 map_usertag_to_SO_INIT();
3211 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3212 tl_assert(key == (UWord)usertag);
3213 return (SO*)val;
3214 } else {
3215 SO* so = libhb_so_alloc();
3216 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3217 return so;
3218 }
3219}
3220
sewardj6015d0e2011-03-11 19:10:48 +00003221static void map_usertag_to_SO_delete ( UWord usertag ) {
3222 UWord keyW, valW;
3223 map_usertag_to_SO_INIT();
3224 if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3225 SO* so = (SO*)valW;
3226 tl_assert(keyW == usertag);
3227 tl_assert(so);
3228 libhb_so_dealloc(so);
3229 }
3230}
sewardjed2e72e2009-08-14 11:08:24 +00003231
3232
3233static
3234void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3235{
3236   /* TID is just about to notionally send a message on a notional
3237 abstract synchronisation object whose identity is given by
3238 USERTAG. Bind USERTAG to a real SO if it is not already so
sewardj8c50d3c2011-03-11 18:38:12 +00003239 bound, and do a 'weak send' on the SO. This joins the vector
3240 clocks from this thread into any vector clocks already present
3241 in the SO. The resulting SO vector clocks are later used by
sewardjed2e72e2009-08-14 11:08:24 +00003242 other thread(s) which successfully 'receive' from the SO,
sewardj8c50d3c2011-03-11 18:38:12 +00003243 thereby acquiring a dependency on all the events that have
3244 previously signalled on this SO. */
sewardjed2e72e2009-08-14 11:08:24 +00003245 Thread* thr;
3246 SO* so;
3247
3248 if (SHOW_EVENTS >= 1)
3249 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3250 (Int)tid, usertag );
3251
3252 thr = map_threads_maybe_lookup( tid );
3253 tl_assert(thr); /* cannot fail - Thread* must already exist */
3254
3255 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3256 tl_assert(so);
3257
sewardj8c50d3c2011-03-11 18:38:12 +00003258 libhb_so_send( thr->hbthr, so, False/*!strong_send*/ );
sewardjed2e72e2009-08-14 11:08:24 +00003259}
3260
3261static
3262void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3263{
3264 /* TID has just notionally received a message from a notional
3265 abstract synchronisation object whose identity is given by
3266 USERTAG. Bind USERTAG to a real SO if it is not already so
3267 bound. If the SO has at some point in the past been 'sent' on,
3268      do a 'strong receive' on it, thereby acquiring a dependency on
3269 the sender. */
3270 Thread* thr;
3271 SO* so;
3272
3273 if (SHOW_EVENTS >= 1)
3274 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3275 (Int)tid, usertag );
3276
3277 thr = map_threads_maybe_lookup( tid );
3278 tl_assert(thr); /* cannot fail - Thread* must already exist */
3279
3280 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3281 tl_assert(so);
3282
3283 /* Acquire a dependency on it. If the SO has never so far been
3284 sent on, then libhb_so_recv will do nothing. So we're safe
3285 regardless of SO's history. */
3286 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3287}
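/* A minimal client-side sketch of how these two handlers get driven,
   assuming the ANNOTATE_HAPPENS_BEFORE/AFTER macros from helgrind.h
   (hypothetical test code):

      static int payload;
      static void* producer ( void* v ) {
         payload = 42;
         ANNOTATE_HAPPENS_BEFORE(&payload); // evh__HG_USERSO_SEND_PRE
         return NULL;
      }
      static void* consumer ( void* v ) {
         // ... learns out-of-band that the producer has run ...
         ANNOTATE_HAPPENS_AFTER(&payload);  // evh__HG_USERSO_RECV_POST
         return (void*)(long)payload;       // read is ordered: no race
      }

   Here the address &payload serves as the arbitrary UWord usertag. */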
3288
sewardj6015d0e2011-03-11 19:10:48 +00003289static
3290void evh__HG_USERSO_FORGET_ALL ( ThreadId tid, UWord usertag )
3291{
3292 /* TID declares that any happens-before edges notionally stored in
3293 USERTAG can be deleted. If (as would normally be the case) a
3294      SO is associated with USERTAG, then the association is removed
3295 and all resources associated with SO are freed. Importantly,
3296 that frees up any VTSs stored in SO. */
3297 if (SHOW_EVENTS >= 1)
3298 VG_(printf)("evh__HG_USERSO_FORGET_ALL(ctid=%d, usertag=%#lx)\n",
3299 (Int)tid, usertag );
3300
3301 map_usertag_to_SO_delete( usertag );
3302}
3303
sewardjed2e72e2009-08-14 11:08:24 +00003304
sewardjb4112022007-11-09 22:49:28 +00003305/*--------------------------------------------------------------*/
3306/*--- Lock acquisition order monitoring ---*/
3307/*--------------------------------------------------------------*/
3308
3309/* FIXME: here are some optimisations still to do in
3310 laog__pre_thread_acquires_lock.
3311
3312 The graph is structured so that if L1 --*--> L2 then L1 must be
3313 acquired before L2.
3314
3315 The common case is that some thread T holds (eg) L1 L2 and L3 and
3316 is repeatedly acquiring and releasing Ln, and there is no ordering
3317 error in what it is doing. Hence it repeatly:
3318   error in what it is doing. Hence it repeatedly:
3319 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3320 produces the answer No (because there is no error).
3321
3322 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3323 (because they already got added the first time T acquired Ln).
3324
3325 Hence cache these two events:
3326
3327 (1) Cache result of the query from last time. Invalidate the cache
3328 any time any edges are added to or deleted from laog.
3329
3330 (2) Cache these add-edge requests and ignore them if said edges
3331 have already been added to laog. Invalidate the cache any time
3332 any edges are deleted from laog.
3333*/
3334
3335typedef
3336 struct {
3337 WordSetID inns; /* in univ_laog */
3338 WordSetID outs; /* in univ_laog */
3339 }
3340 LAOGLinks;
3341
3342/* lock order acquisition graph */
3343static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3344
3345/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3346 where that edge was created, so that we can show the user later if
3347 we need to. */
3348typedef
3349 struct {
3350 Addr src_ga; /* Lock guest addresses for */
3351 Addr dst_ga; /* src/dst of the edge */
3352 ExeContext* src_ec; /* And corresponding places where that */
3353 ExeContext* dst_ec; /* ordering was established */
3354 }
3355 LAOGLinkExposition;
3356
sewardj250ec2e2008-02-15 22:02:30 +00003357static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003358 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3359 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3360 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3361 if (llx1->src_ga < llx2->src_ga) return -1;
3362 if (llx1->src_ga > llx2->src_ga) return 1;
3363 if (llx1->dst_ga < llx2->dst_ga) return -1;
3364 if (llx1->dst_ga > llx2->dst_ga) return 1;
3365 return 0;
3366}
3367
3368static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3369/* end EXPOSITION ONLY */
3370
3371
sewardja65db102009-01-26 10:45:16 +00003372__attribute__((noinline))
3373static void laog__init ( void )
3374{
3375 tl_assert(!laog);
3376 tl_assert(!laog_exposition);
sewardjc1fb9d22011-02-28 09:03:44 +00003377 tl_assert(HG_(clo_track_lockorders));
sewardja65db102009-01-26 10:45:16 +00003378
3379 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3380 HG_(free), NULL/*unboxedcmp*/ );
3381
3382 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3383 cmp_LAOGLinkExposition );
3384 tl_assert(laog);
3385 tl_assert(laog_exposition);
3386}
3387
florian6bf37262012-10-21 03:23:36 +00003388static void laog__show ( const HChar* who ) {
3389 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003390 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003391 Lock* me;
3392 LAOGLinks* links;
3393 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003394 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003395 me = NULL;
3396 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003397 while (VG_(nextIterFM)( laog, (UWord*)&me,
3398 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003399 tl_assert(me);
3400 tl_assert(links);
3401 VG_(printf)(" node %p:\n", me);
3402 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3403 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003404 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003405 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3406 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003407 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003408 me = NULL;
3409 links = NULL;
3410 }
sewardj896f6f92008-08-19 08:38:52 +00003411 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003412 VG_(printf)("}\n");
3413}
3414
sewardj866c80c2011-10-22 19:29:51 +00003415static void univ_laog_do_GC ( void ) {
3416 Word i;
3417 LAOGLinks* links;
3418 Word seen = 0;
3419 Int prev_next_gc_univ_laog = next_gc_univ_laog;
3420 const UWord univ_laog_cardinality = HG_(cardinalityWSU)( univ_laog);
3421
3422 Bool *univ_laog_seen = HG_(zalloc) ( "hg.gc_univ_laog.1",
3423 (Int) univ_laog_cardinality
3424 * sizeof(Bool) );
3425 // univ_laog_seen[*] set to 0 (False) by zalloc.
3426
3427 if (VG_(clo_stats))
3428 VG_(message)(Vg_DebugMsg,
3429 "univ_laog_do_GC enter cardinality %'10d\n",
3430 (Int)univ_laog_cardinality);
3431
3432 VG_(initIterFM)( laog );
3433 links = NULL;
3434 while (VG_(nextIterFM)( laog, NULL, (UWord*)&links )) {
3435 tl_assert(links);
3436 tl_assert(links->inns >= 0 && links->inns < univ_laog_cardinality);
3437 univ_laog_seen[links->inns] = True;
3438 tl_assert(links->outs >= 0 && links->outs < univ_laog_cardinality);
3439 univ_laog_seen[links->outs] = True;
3440 links = NULL;
3441 }
3442 VG_(doneIterFM)( laog );
3443
3444 for (i = 0; i < (Int)univ_laog_cardinality; i++) {
3445 if (univ_laog_seen[i])
3446 seen++;
3447 else
3448 HG_(dieWS) ( univ_laog, (WordSet)i );
3449 }
3450
3451 HG_(free) (univ_laog_seen);
3452
3453 // We need to decide the value of the next_gc.
3454 // 3 solutions were looked at:
3455 // Sol 1: garbage collect at seen * 2
3456 // This solution was a lot slower, probably because we both do a lot of
3457 // garbage collection and do not keep long enough laog WV that will become
3458   // garbage collection and do not keep laog WSs alive long enough for
3459   // them to become useful again soon.
3460 // (with a min increase of 1)
3461 // Trials on a small test program with 1%, 5% and 10% increase was done.
3462   // Trials on a small test program with 1%, 5% and 10% increases were done.
3463 // However, on a big application, this caused the memory to be exhausted,
3464 // as even a 1% increase of size at each gc becomes a lot, when many gc
3465   // as even a 1% size increase at each GC becomes a lot when many GCs
3466 // Sol 3: always garbage collect at current cardinality + 1.
3467 // This solution was the fastest of the 3 solutions, and caused no memory
3468 // exhaustion in the big application.
3469 //
3470 // With regards to cost introduced by gc: on the t2t perf test (doing only
3471 // lock/unlock operations), t2t 50 10 2 was about 25% faster than the
3472 // version with garbage collection. With t2t 50 20 2, my machine started
3473 // to page out, and so the garbage collected version was much faster.
3474 // On smaller lock sets (e.g. t2t 20 5 2, giving about 100 locks), the
3475   // performance difference is insignificant (~ 0.1 s).
3476 // Of course, it might be that real life programs are not well represented
3477 // by t2t.
3478
3479 // If ever we want to have a more sophisticated control
3480 // (e.g. clo options to control the percentage increase or fixed increased),
3481   // (e.g. clo options to control the percentage increase or a fixed increase),
3482 // next_gc_univ_laog = prev_next_gc_univ_laog + VG_(clo_laog_gc_fixed);
3483 // Currently, we just hard-code the solution 3 above.
3484 next_gc_univ_laog = prev_next_gc_univ_laog + 1;
3485
3486 if (VG_(clo_stats))
3487 VG_(message)
3488 (Vg_DebugMsg,
3489 "univ_laog_do_GC exit seen %'8d next gc at cardinality %'10d\n",
3490 (Int)seen, next_gc_univ_laog);
3491}
3492
3493
sewardjb4112022007-11-09 22:49:28 +00003494__attribute__((noinline))
3495static void laog__add_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003496 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003497 LAOGLinks* links;
3498 Bool presentF, presentR;
3499 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3500
3501 /* Take the opportunity to sanity check the graph. Record in
3502 presentF if there is already a src->dst mapping in this node's
3503 forwards links, and presentR if there is already a src->dst
3504 mapping in this node's backwards links. They should agree!
3505 Also, we need to know whether the edge was already present so as
3506 to decide whether or not to update the link details mapping. We
3507 can compute presentF and presentR essentially for free, so may
3508 as well do this always. */
3509 presentF = presentR = False;
3510
3511 /* Update the out edges for src */
3512 keyW = 0;
3513 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003514 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003515 WordSetID outs_new;
3516 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003517 tl_assert(keyW == (UWord)src);
3518 outs_new = HG_(addToWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003519 presentF = outs_new == links->outs;
3520 links->outs = outs_new;
3521 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003522 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003523 links->inns = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003524 links->outs = HG_(singletonWS)( univ_laog, (UWord)dst );
3525 VG_(addToFM)( laog, (UWord)src, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003526 }
3527 /* Update the in edges for dst */
3528 keyW = 0;
3529 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003530 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003531 WordSetID inns_new;
3532 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003533 tl_assert(keyW == (UWord)dst);
3534 inns_new = HG_(addToWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003535 presentR = inns_new == links->inns;
3536 links->inns = inns_new;
3537 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003538 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
florian6bf37262012-10-21 03:23:36 +00003539 links->inns = HG_(singletonWS)( univ_laog, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003540 links->outs = HG_(emptyWS)( univ_laog );
florian6bf37262012-10-21 03:23:36 +00003541 VG_(addToFM)( laog, (UWord)dst, (UWord)links );
sewardjb4112022007-11-09 22:49:28 +00003542 }
3543
3544 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3545
3546 if (!presentF && src->acquired_at && dst->acquired_at) {
3547 LAOGLinkExposition expo;
3548 /* If this edge is entering the graph, and we have acquired_at
3549 information for both src and dst, record those acquisition
3550 points. Hence, if there is later a violation of this
3551 ordering, we can show the user the two places in which the
3552 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003553 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003554 src->guestaddr, dst->guestaddr);
3555 expo.src_ga = src->guestaddr;
3556 expo.dst_ga = dst->guestaddr;
3557 expo.src_ec = NULL;
3558 expo.dst_ec = NULL;
3559 tl_assert(laog_exposition);
florian6bf37262012-10-21 03:23:36 +00003560 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (UWord)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003561 /* we already have it; do nothing */
3562 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003563 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3564 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003565 expo2->src_ga = src->guestaddr;
3566 expo2->dst_ga = dst->guestaddr;
3567 expo2->src_ec = src->acquired_at;
3568 expo2->dst_ec = dst->acquired_at;
florian6bf37262012-10-21 03:23:36 +00003569 VG_(addToFM)( laog_exposition, (UWord)expo2, (UWord)NULL );
sewardjb4112022007-11-09 22:49:28 +00003570 }
3571 }
sewardj866c80c2011-10-22 19:29:51 +00003572
3573 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3574 univ_laog_do_GC();
sewardjb4112022007-11-09 22:49:28 +00003575}
3576
3577__attribute__((noinline))
3578static void laog__del_edge ( Lock* src, Lock* dst ) {
florian6bf37262012-10-21 03:23:36 +00003579 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003580 LAOGLinks* links;
sewardj866c80c2011-10-22 19:29:51 +00003581 if (0) VG_(printf)("laog__del_edge enter %p %p\n", src, dst);
sewardjb4112022007-11-09 22:49:28 +00003582 /* Update the out edges for src */
3583 keyW = 0;
3584 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003585 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)src )) {
sewardjb4112022007-11-09 22:49:28 +00003586 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003587 tl_assert(keyW == (UWord)src);
3588 links->outs = HG_(delFromWS)( univ_laog, links->outs, (UWord)dst );
sewardjb4112022007-11-09 22:49:28 +00003589 }
3590 /* Update the in edges for dst */
3591 keyW = 0;
3592 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003593 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003594 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003595 tl_assert(keyW == (UWord)dst);
3596 links->inns = HG_(delFromWS)( univ_laog, links->inns, (UWord)src );
sewardjb4112022007-11-09 22:49:28 +00003597 }
sewardj866c80c2011-10-22 19:29:51 +00003598
3599 /* Remove the exposition of src,dst (if present) */
3600 {
3601 LAOGLinkExposition *fm_expo;
3602
3603 LAOGLinkExposition expo;
3604 expo.src_ga = src->guestaddr;
3605 expo.dst_ga = dst->guestaddr;
3606 expo.src_ec = NULL;
3607 expo.dst_ec = NULL;
3608
3609 if (VG_(delFromFM) (laog_exposition,
3610 (UWord*)&fm_expo, NULL, (UWord)&expo )) {
3611 HG_(free) (fm_expo);
3612 }
3613 }
3614
3615   /* Deleting edges can increase the number of WSs, so check for GC. */
3616 if (HG_(cardinalityWSU) (univ_laog) >= next_gc_univ_laog)
3617 univ_laog_do_GC();
3618 if (0) VG_(printf)("laog__del_edge exit\n");
sewardjb4112022007-11-09 22:49:28 +00003619}
3620
3621__attribute__((noinline))
3622static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003623 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003624 LAOGLinks* links;
3625 keyW = 0;
3626 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003627 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003628 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003629 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003630 return links->outs;
3631 } else {
3632 return HG_(emptyWS)( univ_laog );
3633 }
3634}
3635
3636__attribute__((noinline))
3637static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
florian6bf37262012-10-21 03:23:36 +00003638 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00003639 LAOGLinks* links;
3640 keyW = 0;
3641 links = NULL;
florian6bf37262012-10-21 03:23:36 +00003642 if (VG_(lookupFM)( laog, &keyW, (UWord*)&links, (UWord)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003643 tl_assert(links);
florian6bf37262012-10-21 03:23:36 +00003644 tl_assert(keyW == (UWord)lk);
sewardjb4112022007-11-09 22:49:28 +00003645 return links->inns;
3646 } else {
3647 return HG_(emptyWS)( univ_laog );
3648 }
3649}
3650
3651__attribute__((noinline))
florian6bf37262012-10-21 03:23:36 +00003652static void laog__sanity_check ( const HChar* who ) {
3653 UWord i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003654 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003655 Lock* me;
3656 LAOGLinks* links;
sewardj896f6f92008-08-19 08:38:52 +00003657 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003658 me = NULL;
3659 links = NULL;
3660 if (0) VG_(printf)("laog sanity check\n");
florian6bf37262012-10-21 03:23:36 +00003661 while (VG_(nextIterFM)( laog, (UWord*)&me,
3662 (UWord*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003663 tl_assert(me);
3664 tl_assert(links);
3665 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3666 for (i = 0; i < ws_size; i++) {
3667 if ( ! HG_(elemWS)( univ_laog,
3668 laog__succs( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003669 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003670 goto bad;
3671 }
3672 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3673 for (i = 0; i < ws_size; i++) {
3674 if ( ! HG_(elemWS)( univ_laog,
3675 laog__preds( (Lock*)ws_words[i] ),
florian6bf37262012-10-21 03:23:36 +00003676 (UWord)me ))
sewardjb4112022007-11-09 22:49:28 +00003677 goto bad;
3678 }
3679 me = NULL;
3680 links = NULL;
3681 }
sewardj896f6f92008-08-19 08:38:52 +00003682 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003683 return;
3684
3685 bad:
3686 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3687 laog__show(who);
3688 tl_assert(0);
3689}
3690
3691/* If there is a path in laog from 'src' to any of the elements in
3692 'dst', return an arbitrarily chosen element of 'dst' reachable from
3693   'src'. If no path exists from 'src' to any element in 'dst', return
3694 NULL. */
3695__attribute__((noinline))
3696static
3697Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3698{
3699 Lock* ret;
florian6bf37262012-10-21 03:23:36 +00003700 Word ssz;
sewardjb4112022007-11-09 22:49:28 +00003701 XArray* stack; /* of Lock* */
3702 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3703 Lock* here;
3704 WordSetID succs;
florian6bf37262012-10-21 03:23:36 +00003705 UWord succs_size, i;
sewardj250ec2e2008-02-15 22:02:30 +00003706 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003707 //laog__sanity_check();
3708
3709 /* If the destination set is empty, we can never get there from
3710 'src' :-), so don't bother to try */
3711 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3712 return NULL;
3713
3714 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003715 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3716 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003717
3718 (void) VG_(addToXA)( stack, &src );
3719
3720 while (True) {
3721
3722 ssz = VG_(sizeXA)( stack );
3723
3724 if (ssz == 0) { ret = NULL; break; }
3725
3726 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3727 VG_(dropTailXA)( stack, 1 );
3728
florian6bf37262012-10-21 03:23:36 +00003729 if (HG_(elemWS)( univ_lsets, dsts, (UWord)here )) { ret = here; break; }
sewardjb4112022007-11-09 22:49:28 +00003730
florian6bf37262012-10-21 03:23:36 +00003731 if (VG_(lookupFM)( visited, NULL, NULL, (UWord)here ))
sewardjb4112022007-11-09 22:49:28 +00003732 continue;
3733
florian6bf37262012-10-21 03:23:36 +00003734 VG_(addToFM)( visited, (UWord)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003735
3736 succs = laog__succs( here );
3737 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3738 for (i = 0; i < succs_size; i++)
3739 (void) VG_(addToXA)( stack, &succs_words[i] );
3740 }
3741
sewardj896f6f92008-08-19 08:38:52 +00003742 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003743 VG_(deleteXA)( stack );
3744 return ret;
3745}
3746
3747
3748/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3749 between 'lk' and the locks already held by 'thr' and issue a
3750 complaint if so. Also, update the ordering graph appropriately.
3751*/
3752__attribute__((noinline))
3753static void laog__pre_thread_acquires_lock (
3754 Thread* thr, /* NB: BEFORE lock is added */
3755 Lock* lk
3756 )
3757{
sewardj250ec2e2008-02-15 22:02:30 +00003758 UWord* ls_words;
florian6bf37262012-10-21 03:23:36 +00003759 UWord ls_size, i;
sewardjb4112022007-11-09 22:49:28 +00003760 Lock* other;
3761
3762 /* It may be that 'thr' already holds 'lk' and is recursively
3763      relocking it. In this case we just ignore the call. */
3764 /* NB: univ_lsets really is correct here */
florian6bf37262012-10-21 03:23:36 +00003765 if (HG_(elemWS)( univ_lsets, thr->locksetA, (UWord)lk ))
sewardjb4112022007-11-09 22:49:28 +00003766 return;
3767
sewardjb4112022007-11-09 22:49:28 +00003768 /* First, the check. Complain if there is any path in laog from lk
3769 to any of the locks already held by thr, since if any such path
3770 existed, it would mean that previously lk was acquired before
3771 (rather than after, as we are doing here) at least one of those
3772 locks.
3773 */
3774 other = laog__do_dfs_from_to(lk, thr->locksetA);
3775 if (other) {
3776 LAOGLinkExposition key, *found;
3777 /* So we managed to find a path lk --*--> other in the graph,
3778 which implies that 'lk' should have been acquired before
3779 'other' but is in fact being acquired afterwards. We present
3780 the lk/other arguments to record_error_LockOrder in the order
3781 in which they should have been acquired. */
3782 /* Go look in the laog_exposition mapping, to find the allocation
3783 points for this edge, so we can show the user. */
3784 key.src_ga = lk->guestaddr;
3785 key.dst_ga = other->guestaddr;
3786 key.src_ec = NULL;
3787 key.dst_ec = NULL;
3788 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003789 if (VG_(lookupFM)( laog_exposition,
florian6bf37262012-10-21 03:23:36 +00003790 (UWord*)&found, NULL, (UWord)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003791 tl_assert(found != &key);
3792 tl_assert(found->src_ga == key.src_ga);
3793 tl_assert(found->dst_ga == key.dst_ga);
3794 tl_assert(found->src_ec);
3795 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003796 HG_(record_error_LockOrder)(
3797 thr, lk->guestaddr, other->guestaddr,
sewardjffce8152011-06-24 10:09:41 +00003798 found->src_ec, found->dst_ec, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003799 } else {
3800 /* Hmm. This can't happen (can it?) */
philippeebe25802013-01-30 23:21:34 +00003801 /* Yes, it can happen: see tests/tc14_laog_dinphils.
3802 Imagine we have 3 philosophers A B C, and the forks
3803 between them:
3804
3805 C
3806
3807 fCA fBC
3808
3809 A fAB B
3810
3811 Let's have the following actions:
3812 A takes fCA,fAB
3813 A releases fCA,fAB
3814 B takes fAB,fBC
3815 B releases fAB,fBC
3816 C takes fBC,fCA
3817 C releases fBC,fCA
3818
3819 Helgrind will report a lock order error when C takes fCA.
3820 Effectively, we have a deadlock if the following
3821 sequence is done:
3822 A takes fCA
3823 B takes fAB
3824 C takes fBC
3825
3826 The error reported is:
3827 Observed (incorrect) order fBC followed by fCA
3828 but the stack traces that have established the required order
3829 are not given.
3830
3831         This is because there is no pair (fCA, fBC) in laog_exposition:
3832 the laog_exposition records all pairs of locks between a new lock
3833 taken by a thread and all the already taken locks.
3834 So, there is no laog_exposition (fCA, fBC) as no thread ever
3835 first locked fCA followed by fBC.
3836
3837 In other words, when the deadlock cycle involves more than
3838 two locks, then helgrind does not report the sequence of
3839 operations that created the cycle.
3840
3841 However, we can report the current stack trace (where
3842 lk is being taken), and the stack trace where other was acquired:
3843 Effectively, the variable 'other' contains a lock currently
3844 held by this thread, with its 'acquired_at'. */
3845
sewardjf98e1c02008-10-25 16:22:41 +00003846 HG_(record_error_LockOrder)(
3847 thr, lk->guestaddr, other->guestaddr,
philippeebe25802013-01-30 23:21:34 +00003848 NULL, NULL, other->acquired_at );
sewardjb4112022007-11-09 22:49:28 +00003849 }
3850 }
3851
3852 /* Second, add to laog the pairs
3853 (old, lk) | old <- locks already held by thr
3854 Since both old and lk are currently held by thr, their acquired_at
3855 fields must be non-NULL.
3856 */
3857 tl_assert(lk->acquired_at);
3858 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3859 for (i = 0; i < ls_size; i++) {
3860 Lock* old = (Lock*)ls_words[i];
3861 tl_assert(old->acquired_at);
3862 laog__add_edge( old, lk );
3863 }
3864
3865 /* Why "except_Locks"? We're here because a lock is being
3866 acquired by a thread, and we're in an inconsistent state here.
3867 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3868 When called in this inconsistent state, locks__sanity_check duly
3869 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003870 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003871 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3872}
3873
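/* Editor's illustration (not from the original sources; mutex names
   are arbitrary): a minimal client showing the pattern the DFS check
   above detects.  Run under Helgrind, the second lock in t2 produces
   a LockOrder report. */
#if 0
#include <pthread.h>
static pthread_mutex_t mA = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t mB = PTHREAD_MUTEX_INITIALIZER;
static void* t1 ( void* v ) {
   pthread_mutex_lock(&mA);    /* ... then ...                     */
   pthread_mutex_lock(&mB);    /* records laog edge mA -> mB       */
   pthread_mutex_unlock(&mB);
   pthread_mutex_unlock(&mA);
   return NULL;
}
static void* t2 ( void* v ) {
   pthread_mutex_lock(&mB);    /* t2 now holds mB ...              */
   pthread_mutex_lock(&mA);    /* DFS from mA reaches mB: reported */
   pthread_mutex_unlock(&mA);
   pthread_mutex_unlock(&mB);
   return NULL;
}
#endif
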
sewardj866c80c2011-10-22 19:29:51 +00003874/* Allocates a duplicate of words. Caller must HG_(free) the result. */
3875static UWord* UWordV_dup(UWord* words, Word words_size)
3876{
3877 UInt i;
3878
3879 if (words_size == 0)
3880 return NULL;
3881
3882 UWord *dup = HG_(zalloc) ("hg.dup.1", (SizeT) words_size * sizeof(UWord));
3883
3884 for (i = 0; i < words_size; i++)
3885 dup[i] = words[i];
3886
3887 return dup;
3888}
sewardjb4112022007-11-09 22:49:28 +00003889
3890/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3891
3892__attribute__((noinline))
3893static void laog__handle_one_lock_deletion ( Lock* lk )
3894{
3895 WordSetID preds, succs;
florian6bf37262012-10-21 03:23:36 +00003896 UWord preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003897 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003898
3899 preds = laog__preds( lk );
3900 succs = laog__succs( lk );
3901
sewardj866c80c2011-10-22 19:29:51 +00003902 // We need to duplicate the payload vectors, as they can be
3903 // garbage collected during the del/add operations below.
sewardjb4112022007-11-09 22:49:28 +00003904 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
sewardj866c80c2011-10-22 19:29:51 +00003905 preds_words = UWordV_dup(preds_words, preds_size);
3906
3907 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3908 succs_words = UWordV_dup(succs_words, succs_size);
3909
sewardjb4112022007-11-09 22:49:28 +00003910 for (i = 0; i < preds_size; i++)
3911 laog__del_edge( (Lock*)preds_words[i], lk );
3912
sewardjb4112022007-11-09 22:49:28 +00003913 for (j = 0; j < succs_size; j++)
3914 laog__del_edge( lk, (Lock*)succs_words[j] );
3915
3916 for (i = 0; i < preds_size; i++) {
3917 for (j = 0; j < succs_size; j++) {
3918 if (preds_words[i] != succs_words[j]) {
3919 /* This can pass unlocked locks to laog__add_edge, since
3920 we're deleting stuff. So their acquired_at fields may
3921 be NULL. */
3922 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3923 }
3924 }
3925 }
sewardj866c80c2011-10-22 19:29:51 +00003926
3927 if (preds_words)
3928 HG_(free) (preds_words);
3929 if (succs_words)
3930 HG_(free) (succs_words);
3931
3932 // Remove lk information from laog links FM
3933 {
3934 LAOGLinks *links;
3935 Lock* linked_lk;
3936
3937 if (VG_(delFromFM) (laog,
3938 (UWord*)&linked_lk, (UWord*)&links, (UWord)lk)) {
3939 tl_assert (linked_lk == lk);
3940 HG_(free) (links);
3941 }
3942 }
3943 /* FIXME ??? What about removing lock lk data from EXPOSITION ??? */
sewardjb4112022007-11-09 22:49:28 +00003944}
3945
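/* Editor's note: a worked example of the reconnection loop above.
   If laog holds edges A -> L and L -> C and L is deleted, both edges
   are removed and the (preds x succs) loop inserts A -> C, so the
   transitive constraint "A before C" survives the deletion of L. */
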
sewardj1cbc12f2008-11-10 16:16:46 +00003946//__attribute__((noinline))
3947//static void laog__handle_lock_deletions (
3948// WordSetID /* in univ_laog */ locksToDelete
3949// )
3950//{
3951// Word i, ws_size;
3952// UWord* ws_words;
3953//
sewardj1cbc12f2008-11-10 16:16:46 +00003954//
3955// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
sewardj866c80c2011-10-22 19:29:51 +00003956// UWordV_dup call needed here ...
sewardj1cbc12f2008-11-10 16:16:46 +00003957// for (i = 0; i < ws_size; i++)
3958// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3959//
3960// if (HG_(clo_sanity_flags) & SCE_LAOG)
3961// all__sanity_check("laog__handle_lock_deletions-post");
3962//}
sewardjb4112022007-11-09 22:49:28 +00003963
3964
3965/*--------------------------------------------------------------*/
3966/*--- Malloc/free replacements ---*/
3967/*--------------------------------------------------------------*/
3968
3969typedef
3970 struct {
3971 void* next; /* required by m_hashtable */
3972 Addr payload; /* ptr to actual block */
3973 SizeT szB; /* size requested */
3974 ExeContext* where; /* where it was allocated */
3975 Thread* thr; /* allocating thread */
3976 }
3977 MallocMeta;
3978
3979/* A hash table of MallocMetas, used to track malloc'd blocks
3980 (obviously). */
3981static VgHashTable hg_mallocmeta_table = NULL;
3982
philippe5fbc9762013-12-01 19:28:48 +00003983/* MallocMeta are small elements. We use a pool to avoid
3984 the overhead of malloc for each MallocMeta. */
3985static PoolAlloc *MallocMeta_poolalloc = NULL;
sewardjb4112022007-11-09 22:49:28 +00003986
3987static MallocMeta* new_MallocMeta ( void ) {
philippe5fbc9762013-12-01 19:28:48 +00003988 MallocMeta* md = VG_(allocEltPA) (MallocMeta_poolalloc);
3989 VG_(memset)(md, 0, sizeof(MallocMeta));
sewardjb4112022007-11-09 22:49:28 +00003990 return md;
3991}
3992static void delete_MallocMeta ( MallocMeta* md ) {
philippe5fbc9762013-12-01 19:28:48 +00003993 VG_(freeEltPA)(MallocMeta_poolalloc, md);
sewardjb4112022007-11-09 22:49:28 +00003994}
3995
3996
3997/* Allocate a client block and set up the metadata for it. */
3998
3999static
4000void* handle_alloc ( ThreadId tid,
4001 SizeT szB, SizeT alignB, Bool is_zeroed )
4002{
4003 Addr p;
4004 MallocMeta* md;
4005
4006 tl_assert( ((SSizeT)szB) >= 0 );
4007 p = (Addr)VG_(cli_malloc)(alignB, szB);
4008 if (!p) {
4009 return NULL;
4010 }
4011 if (is_zeroed)
4012 VG_(memset)((void*)p, 0, szB);
4013
4014 /* Note that map_threads_lookup cannot fail here (it asserts on failure), since
4015 memory can only be allocated by currently alive threads, hence
4016 they must have an entry in map_threads. */
4017 md = new_MallocMeta();
4018 md->payload = p;
4019 md->szB = szB;
4020 md->where = VG_(record_ExeContext)( tid, 0 );
4021 md->thr = map_threads_lookup( tid );
4022
4023 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
4024
4025 /* Tell the lower level memory wranglers. */
4026 evh__new_mem_heap( p, szB, is_zeroed );
4027
4028 return (void*)p;
4029}
4030
4031/* Re the checks for less-than-zero (also in hg_cli__realloc below):
4032 Cast to a signed type to catch any unexpectedly negative args.
4033 We're assuming here that the size asked for is not greater than
4034 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
4035 platforms). */
4036static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
4037 if (((SSizeT)n) < 0) return NULL;
4038 return handle_alloc ( tid, n, VG_(clo_alignment),
4039 /*is_zeroed*/False );
4040}
4041static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
4042 if (((SSizeT)n) < 0) return NULL;
4043 return handle_alloc ( tid, n, VG_(clo_alignment),
4044 /*is_zeroed*/False );
4045}
4046static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
4047 if (((SSizeT)n) < 0) return NULL;
4048 return handle_alloc ( tid, n, VG_(clo_alignment),
4049 /*is_zeroed*/False );
4050}
4051static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
4052 if (((SSizeT)n) < 0) return NULL;
4053 return handle_alloc ( tid, n, align,
4054 /*is_zeroed*/False );
4055}
4056static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
4057 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
 /* Also reject requests where the product nmemb * size1 would
 overflow, since the wrapped-around size silently under-allocates. */
 if (nmemb != 0 && size1 > (~(SizeT)0) / nmemb) return NULL;
4058 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
4059 /*is_zeroed*/True );
4060}
4061
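/* Editor's illustration (hypothetical values, not from the original
   sources): how the (SSizeT) casts above reject a wrapped-around
   size request. */
#if 0
SizeT bogus = (SizeT)-1;           /* e.g. from an underflowed size  */
tl_assert( ((SSizeT)bogus) < 0 );  /* hence the < 0 tests above fire */
#endif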
4062
4063/* Free a client block, including getting rid of the relevant
4064 metadata. */
4065
4066static void handle_free ( ThreadId tid, void* p )
4067{
4068 MallocMeta *md, *old_md;
4069 SizeT szB;
4070
4071 /* First see if we can find the metadata for 'p'. */
4072 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4073 if (!md)
4074 return; /* apparently freeing a bogus address. Oh well. */
4075
4076 tl_assert(md->payload == (Addr)p);
4077 szB = md->szB;
4078
4079 /* Nuke the metadata block */
4080 old_md = (MallocMeta*)
4081 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
4082 tl_assert(old_md); /* it must be present - we just found it */
4083 tl_assert(old_md == md);
4084 tl_assert(old_md->payload == (Addr)p);
4085
4086 VG_(cli_free)((void*)old_md->payload);
4087 delete_MallocMeta(old_md);
4088
4089 /* Tell the lower level memory wranglers. */
4090 evh__die_mem_heap( (Addr)p, szB );
4091}
4092
4093static void hg_cli__free ( ThreadId tid, void* p ) {
4094 handle_free(tid, p);
4095}
4096static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
4097 handle_free(tid, p);
4098}
4099static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
4100 handle_free(tid, p);
4101}
4102
4103
4104static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
4105{
4106 MallocMeta *md, *md_new, *md_tmp;
4107 SizeT i;
4108
4109 Addr payload = (Addr)payloadV;
4110
4111 if (((SSizeT)new_size) < 0) return NULL;
4112
4113 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
4114 if (!md)
4115 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
4116
4117 tl_assert(md->payload == payload);
4118
4119 if (md->szB == new_size) {
4120 /* size unchanged */
4121 md->where = VG_(record_ExeContext)(tid, 0);
4122 return payloadV;
4123 }
4124
4125 if (md->szB > new_size) {
4126 /* new size is smaller */
4127 md->szB = new_size;
4128 md->where = VG_(record_ExeContext)(tid, 0);
4129 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
4130 return payloadV;
4131 }
4132
4133 /* else */ {
4134 /* new size is bigger */
4135 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
4136
4137 /* First half kept and copied, second half new */
4138 // FIXME: shouldn't we use a copier which implements the
4139 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00004140 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00004141 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00004142 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00004143 /* FIXME: can anything funny happen here? specifically, if the
4144 old range contained a lock, then die_mem_heap will complain.
4145 Is that the correct behaviour? Not sure. */
4146 evh__die_mem_heap( payload, md->szB );
4147
4148 /* Copy from old to new */
4149 for (i = 0; i < md->szB; i++)
4150 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
4151
4152 /* Because the metadata hash table is indexed by payload address,
4153 we have to get rid of the old hash table entry and make a new
4154 one. We can't just modify the existing metadata in place,
4155 because then it would (almost certainly) be in the wrong hash
4156 chain. */
4157 md_new = new_MallocMeta();
4158 *md_new = *md;
4159
4160 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
4161 tl_assert(md_tmp);
4162 tl_assert(md_tmp == md);
4163
4164 VG_(cli_free)((void*)md->payload);
4165 delete_MallocMeta(md);
4166
4167 /* Update fields */
4168 md_new->where = VG_(record_ExeContext)( tid, 0 );
4169 md_new->szB = new_size;
4170 md_new->payload = p_new;
4171 md_new->thr = map_threads_lookup( tid );
4172
4173 /* and add */
4174 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
4175
4176 return (void*)p_new;
4177 }
4178}
4179
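/* Editor's note: for a grow from 16 to 32 bytes, the calls above run
   in this order:
      evh__copy_mem(old, new, 16)    // shadow state of kept prefix
      evh__new_mem_heap(new+16, 16)  // fresh state for the tail
      evh__die_mem_heap(old, 16)     // retire the old range
   after which the bytes are copied and the MallocMeta is rehomed
   under the new payload address. */
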
njn8b140de2009-02-17 04:31:18 +00004180static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
4181{
4182 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
4183
4184 // There may be slop, but pretend there isn't because only the asked-for
4185 // area will have been shadowed properly.
4186 return ( md ? md->szB : 0 );
4187}
4188
sewardjb4112022007-11-09 22:49:28 +00004189
sewardj095d61e2010-03-11 13:43:18 +00004190/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00004191 Slow linear search. With a bit of hash table help if 'data_addr'
4192 is either the start of a block or up to 15 word-sized steps along
4193 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00004194
4195static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
4196{
sewardjc8028ad2010-05-05 09:34:42 +00004197 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
4198 right at it. */
4199 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
4200 return True;
4201 /* else normal interval rules apply */
4202 if (LIKELY(a < mm->payload)) return False;
4203 if (LIKELY(a >= mm->payload + mm->szB)) return False;
4204 return True;
sewardj095d61e2010-03-11 13:43:18 +00004205}
4206
sewardjc8028ad2010-05-05 09:34:42 +00004207Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00004208 /*OUT*/Addr* payload,
4209 /*OUT*/SizeT* szB,
4210 Addr data_addr )
4211{
4212 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00004213 Int i;
4214 const Int n_fast_check_words = 16;
4215
4216 /* First, do a few fast searches on the basis that data_addr might
4217 be exactly the start of a block or up to 15 words inside. This
4218 can happen commonly via the creq
4219 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
4220 for (i = 0; i < n_fast_check_words; i++) {
4221 mm = VG_(HT_lookup)( hg_mallocmeta_table,
4222 data_addr - (UWord)(UInt)i * sizeof(UWord) );
4223 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
4224 goto found;
4225 }
4226
sewardj095d61e2010-03-11 13:43:18 +00004227 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00004228 some such, it's hard to see how to do better. We have to check
4229 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00004230 VG_(HT_ResetIter)(hg_mallocmeta_table);
4231 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00004232 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
4233 goto found;
sewardj095d61e2010-03-11 13:43:18 +00004234 }
sewardjc8028ad2010-05-05 09:34:42 +00004235
4236 /* Not found. Bah. */
4237 return False;
4238 /*NOTREACHED*/
4239
4240 found:
4241 tl_assert(mm);
4242 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
4243 if (where) *where = mm->where;
4244 if (payload) *payload = mm->payload;
4245 if (szB) *szB = mm->szB;
4246 return True;
sewardj095d61e2010-03-11 13:43:18 +00004247}
4248
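/* Editor's illustration (hypothetical caller, not from the original
   sources): the fast path above probes the table at data_addr,
   data_addr - 1*W, ..., data_addr - 15*W (W = sizeof(UWord)), so
   addresses within the first 16 words of a block avoid the full
   scan. */
#if 0
ExeContext* where; Addr payload; SizeT szB;
if (HG_(mm_find_containing_block)( &where, &payload, &szB, data_addr ))
   VG_(printf)("in block %#lx of %lu bytes\n", payload, (UWord)szB);
#endif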
4249
sewardjb4112022007-11-09 22:49:28 +00004250/*--------------------------------------------------------------*/
4251/*--- Instrumentation ---*/
4252/*--------------------------------------------------------------*/
4253
sewardjcafe5052013-01-17 14:24:35 +00004254#define unop(_op, _arg1) IRExpr_Unop((_op),(_arg1))
sewardjffce8152011-06-24 10:09:41 +00004255#define binop(_op, _arg1, _arg2) IRExpr_Binop((_op),(_arg1),(_arg2))
4256#define mkexpr(_tmp) IRExpr_RdTmp((_tmp))
4257#define mkU32(_n) IRExpr_Const(IRConst_U32(_n))
4258#define mkU64(_n) IRExpr_Const(IRConst_U64(_n))
4259#define assign(_t, _e) IRStmt_WrTmp((_t), (_e))
4260
sewardjcafe5052013-01-17 14:24:35 +00004261/* This takes and returns atoms, of course. Not full IRExprs. */
4262static IRExpr* mk_And1 ( IRSB* sbOut, IRExpr* arg1, IRExpr* arg2 )
4263{
4264 tl_assert(arg1 && arg2);
4265 tl_assert(isIRAtom(arg1));
4266 tl_assert(isIRAtom(arg2));
4267 /* Generate 32to1(And32(1Uto32(arg1), 1Uto32(arg2))). Appalling
4268 code, I know. */
4269 IRTemp wide1 = newIRTemp(sbOut->tyenv, Ity_I32);
4270 IRTemp wide2 = newIRTemp(sbOut->tyenv, Ity_I32);
4271 IRTemp anded = newIRTemp(sbOut->tyenv, Ity_I32);
4272 IRTemp res = newIRTemp(sbOut->tyenv, Ity_I1);
4273 addStmtToIRSB(sbOut, assign(wide1, unop(Iop_1Uto32, arg1)));
4274 addStmtToIRSB(sbOut, assign(wide2, unop(Iop_1Uto32, arg2)));
4275 addStmtToIRSB(sbOut, assign(anded, binop(Iop_And32, mkexpr(wide1),
4276 mkexpr(wide2))));
4277 addStmtToIRSB(sbOut, assign(res, unop(Iop_32to1, mkexpr(anded))));
4278 return mkexpr(res);
4279}
4280
sewardjffce8152011-06-24 10:09:41 +00004281static void instrument_mem_access ( IRSB* sbOut,
sewardjb4112022007-11-09 22:49:28 +00004282 IRExpr* addr,
4283 Int szB,
4284 Bool isStore,
sewardjffce8152011-06-24 10:09:41 +00004285 Int hWordTy_szB,
sewardjcafe5052013-01-17 14:24:35 +00004286 Int goff_sp,
4287 IRExpr* guard ) /* NULL => True */
sewardjb4112022007-11-09 22:49:28 +00004288{
4289 IRType tyAddr = Ity_INVALID;
florian6bf37262012-10-21 03:23:36 +00004290 const HChar* hName = NULL;
sewardjb4112022007-11-09 22:49:28 +00004291 void* hAddr = NULL;
4292 Int regparms = 0;
4293 IRExpr** argv = NULL;
4294 IRDirty* di = NULL;
4295
sewardjffce8152011-06-24 10:09:41 +00004296 // THRESH is the size of the window above SP (well,
4297 // mostly above) that we assume implies a stack reference.
4298 const Int THRESH = 4096 * 4; // somewhat arbitrary
4299 const Int rz_szB = VG_STACK_REDZONE_SZB;
4300
sewardjb4112022007-11-09 22:49:28 +00004301 tl_assert(isIRAtom(addr));
4302 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4303
sewardjffce8152011-06-24 10:09:41 +00004304 tyAddr = typeOfIRExpr( sbOut->tyenv, addr );
sewardjb4112022007-11-09 22:49:28 +00004305 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4306
4307 /* So the effective address is in 'addr' now. */
4308 regparms = 1; // unless stated otherwise
4309 if (isStore) {
4310 switch (szB) {
4311 case 1:
sewardj23f12002009-07-24 08:45:08 +00004312 hName = "evh__mem_help_cwrite_1";
4313 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004314 argv = mkIRExprVec_1( addr );
4315 break;
4316 case 2:
sewardj23f12002009-07-24 08:45:08 +00004317 hName = "evh__mem_help_cwrite_2";
4318 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004319 argv = mkIRExprVec_1( addr );
4320 break;
4321 case 4:
sewardj23f12002009-07-24 08:45:08 +00004322 hName = "evh__mem_help_cwrite_4";
4323 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004324 argv = mkIRExprVec_1( addr );
4325 break;
4326 case 8:
sewardj23f12002009-07-24 08:45:08 +00004327 hName = "evh__mem_help_cwrite_8";
4328 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004329 argv = mkIRExprVec_1( addr );
4330 break;
4331 default:
4332 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4333 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004334 hName = "evh__mem_help_cwrite_N";
4335 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004336 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4337 break;
4338 }
4339 } else {
4340 switch (szB) {
4341 case 1:
sewardj23f12002009-07-24 08:45:08 +00004342 hName = "evh__mem_help_cread_1";
4343 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004344 argv = mkIRExprVec_1( addr );
4345 break;
4346 case 2:
sewardj23f12002009-07-24 08:45:08 +00004347 hName = "evh__mem_help_cread_2";
4348 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004349 argv = mkIRExprVec_1( addr );
4350 break;
4351 case 4:
sewardj23f12002009-07-24 08:45:08 +00004352 hName = "evh__mem_help_cread_4";
4353 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004354 argv = mkIRExprVec_1( addr );
4355 break;
4356 case 8:
sewardj23f12002009-07-24 08:45:08 +00004357 hName = "evh__mem_help_cread_8";
4358 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004359 argv = mkIRExprVec_1( addr );
4360 break;
4361 default:
4362 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4363 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004364 hName = "evh__mem_help_cread_N";
4365 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004366 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4367 break;
4368 }
4369 }
4370
sewardjffce8152011-06-24 10:09:41 +00004371 /* Create the helper. */
sewardjb4112022007-11-09 22:49:28 +00004372 tl_assert(hName);
4373 tl_assert(hAddr);
4374 tl_assert(argv);
4375 di = unsafeIRDirty_0_N( regparms,
4376 hName, VG_(fnptr_to_fnentry)( hAddr ),
4377 argv );
sewardjffce8152011-06-24 10:09:41 +00004378
4379 if (! HG_(clo_check_stack_refs)) {
4380 /* We're ignoring memory references which are (obviously) to the
4381 stack. In fact just skip stack refs that are within 4 pages
4382 of SP (SP - the redzone, really), as that's simple, easy, and
4383 filters out most stack references. */
4384 /* Generate the guard condition: "(addr - (SP - RZ)) >u N", for
4385 some arbitrary N. If that is true then addr is outside the
4386 range (SP - RZ .. SP + N - RZ). If N is smallish (a few
4387 pages) then we can say addr is within a few pages of SP and
4388 so can't possibly be a heap access, and so can be skipped.
4389
4390 Note that the condition simplifies to
4391 (addr - SP + RZ) >u N
4392 which generates better code in x86/amd64 backends, but it does
4393 not unfortunately simplify to
4394 (addr - SP) >u (N - RZ)
4395 (would be beneficial because N - RZ is a constant) because
4396 wraparound arithmetic messes up the comparison. eg.
4397 20 >u 10 == True,
4398 but (20 - 15) >u (10 - 15) == 5 >u (MAXINT-5) == False.
4399 */
4400 IRTemp sp = newIRTemp(sbOut->tyenv, tyAddr);
4401 addStmtToIRSB( sbOut, assign(sp, IRExpr_Get(goff_sp, tyAddr)));
4402
4403 /* "addr - SP" */
4404 IRTemp addr_minus_sp = newIRTemp(sbOut->tyenv, tyAddr);
4405 addStmtToIRSB(
4406 sbOut,
4407 assign(addr_minus_sp,
4408 tyAddr == Ity_I32
4409 ? binop(Iop_Sub32, addr, mkexpr(sp))
4410 : binop(Iop_Sub64, addr, mkexpr(sp)))
4411 );
4412
4413 /* "addr - SP + RZ" */
4414 IRTemp diff = newIRTemp(sbOut->tyenv, tyAddr);
4415 addStmtToIRSB(
4416 sbOut,
4417 assign(diff,
4418 tyAddr == Ity_I32
4419 ? binop(Iop_Add32, mkexpr(addr_minus_sp), mkU32(rz_szB))
4420 : binop(Iop_Add64, mkexpr(addr_minus_sp), mkU64(rz_szB)))
4421 );
4422
sewardjcafe5052013-01-17 14:24:35 +00004423 /* guardA == "guard on the address" */
4424 IRTemp guardA = newIRTemp(sbOut->tyenv, Ity_I1);
sewardjffce8152011-06-24 10:09:41 +00004425 addStmtToIRSB(
4426 sbOut,
sewardjcafe5052013-01-17 14:24:35 +00004427 assign(guardA,
sewardjffce8152011-06-24 10:09:41 +00004428 tyAddr == Ity_I32
4429 ? binop(Iop_CmpLT32U, mkU32(THRESH), mkexpr(diff))
4430 : binop(Iop_CmpLT64U, mkU64(THRESH), mkexpr(diff)))
4431 );
sewardjcafe5052013-01-17 14:24:35 +00004432 di->guard = mkexpr(guardA);
4433 }
4434
4435 /* If there's a guard on the access itself (as supplied by the
4436 caller of this routine), we need to AND that in to any guard we
4437 might already have. */
4438 if (guard) {
4439 di->guard = mk_And1(sbOut, di->guard, guard);
sewardjffce8152011-06-24 10:09:41 +00004440 }
4441
4442 /* Add the helper. */
4443 addStmtToIRSB( sbOut, IRStmt_Dirty(di) );
sewardjb4112022007-11-09 22:49:28 +00004444}
4445
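/* Editor's sketch (schematic, not actual tool output): a 4-byte store
   with no access guard and --check-stack-refs=yes becomes a single
   unconditional dirty call,

      DIRTY ::: evh__mem_help_cwrite_4(addr)

   whereas with --check-stack-refs=no it is guarded, roughly:

      t_sp  = GET(offset_SP)
      t_dif = Add(Sub(addr, t_sp), RZ)
      t_g   = CmpLTU(THRESH, t_dif)      // THRESH = 4 pages
      if (t_g) DIRTY ::: evh__mem_help_cwrite_4(addr)
*/
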
4446
sewardja0eee322009-07-31 08:46:35 +00004447/* Figure out if GA is a guest code address in the dynamic linker, and
4448 if so return True. Otherwise (and in case of any doubt) return
4449 False. (Errs on the safe side: False is the safe value.) */
4450static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4451{
4452 DebugInfo* dinfo;
florian19f91bb2012-11-10 22:29:54 +00004453 const HChar* soname;
sewardja0eee322009-07-31 08:46:35 +00004454 if (0) return False;
4455
sewardje3f1e592009-07-31 09:41:29 +00004456 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004457 if (!dinfo) return False;
4458
sewardje3f1e592009-07-31 09:41:29 +00004459 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004460 tl_assert(soname);
4461 if (0) VG_(printf)("%s\n", soname);
4462
4463# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004464 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004465 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4466 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4467 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4468 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4469# elif defined(VGO_darwin)
4470 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4471# else
4472# error "Unsupported OS"
4473# endif
4474 return False;
4475}
4476
sewardjb4112022007-11-09 22:49:28 +00004477static
4478IRSB* hg_instrument ( VgCallbackClosure* closure,
4479 IRSB* bbIn,
4480 VexGuestLayout* layout,
4481 VexGuestExtents* vge,
florianca503be2012-10-07 21:59:42 +00004482 VexArchInfo* archinfo_host,
sewardjb4112022007-11-09 22:49:28 +00004483 IRType gWordTy, IRType hWordTy )
4484{
sewardj1c0ce7a2009-07-01 08:10:49 +00004485 Int i;
4486 IRSB* bbOut;
4487 Addr64 cia; /* address of current insn */
4488 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004489 Bool inLDSO = False;
4490 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004491
sewardjffce8152011-06-24 10:09:41 +00004492 const Int goff_sp = layout->offset_SP;
4493
sewardjb4112022007-11-09 22:49:28 +00004494 if (gWordTy != hWordTy) {
4495 /* We don't currently support this case. */
4496 VG_(tool_panic)("host/guest word size mismatch");
4497 }
4498
sewardja0eee322009-07-31 08:46:35 +00004499 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4500 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4501 }
4502
sewardjb4112022007-11-09 22:49:28 +00004503 /* Set up BB */
4504 bbOut = emptyIRSB();
4505 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4506 bbOut->next = deepCopyIRExpr(bbIn->next);
4507 bbOut->jumpkind = bbIn->jumpkind;
sewardj291849f2012-04-20 23:58:55 +00004508 bbOut->offsIP = bbIn->offsIP;
sewardjb4112022007-11-09 22:49:28 +00004509
4510 // Copy verbatim any IR preamble preceding the first IMark
4511 i = 0;
4512 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4513 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4514 i++;
4515 }
4516
sewardj1c0ce7a2009-07-01 08:10:49 +00004517 // Get the first statement, and initial cia from it
4518 tl_assert(bbIn->stmts_used > 0);
4519 tl_assert(i < bbIn->stmts_used);
4520 st = bbIn->stmts[i];
4521 tl_assert(Ist_IMark == st->tag);
4522 cia = st->Ist.IMark.addr;
4523 st = NULL;
4524
sewardjb4112022007-11-09 22:49:28 +00004525 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004526 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004527 tl_assert(st);
4528 tl_assert(isFlatIRStmt(st));
4529 switch (st->tag) {
4530 case Ist_NoOp:
4531 case Ist_AbiHint:
4532 case Ist_Put:
4533 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004534 case Ist_Exit:
4535 /* None of these can contain any memory references. */
4536 break;
4537
sewardj1c0ce7a2009-07-01 08:10:49 +00004538 case Ist_IMark:
4539 /* no mem refs, but note the insn address. */
4540 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004541 /* Don't instrument the dynamic linker. It generates a
4542 lot of races which we just expensively suppress, so
4543 it's pointless.
4544
4545 Avoid flooding is_in_dynamic_linker_shared_object with
4546 requests by only checking at transitions between 4K
4547 pages. */
4548 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4549 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4550 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4551 inLDSO = is_in_dynamic_linker_shared_object(cia);
4552 } else {
4553 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4554 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004555 break;
4556
sewardjb4112022007-11-09 22:49:28 +00004557 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004558 switch (st->Ist.MBE.event) {
4559 case Imbe_Fence:
4560 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004561 default:
4562 goto unhandled;
4563 }
sewardjb4112022007-11-09 22:49:28 +00004564 break;
4565
sewardj1c0ce7a2009-07-01 08:10:49 +00004566 case Ist_CAS: {
4567 /* Atomic read-modify-write cycle. Just pretend it's a
4568 read. */
4569 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004570 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4571 if (isDCAS) {
4572 tl_assert(cas->expdHi);
4573 tl_assert(cas->dataHi);
4574 } else {
4575 tl_assert(!cas->expdHi);
4576 tl_assert(!cas->dataHi);
4577 }
4578 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004579 if (!inLDSO) {
4580 instrument_mem_access(
4581 bbOut,
4582 cas->addr,
4583 (isDCAS ? 2 : 1)
4584 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4585 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004586 sizeofIRType(hWordTy), goff_sp,
4587 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004588 );
4589 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004590 break;
4591 }
4592
sewardjdb5907d2009-11-26 17:20:21 +00004593 case Ist_LLSC: {
4594 /* We pretend store-conditionals don't exist, viz, ignore
4595 them. Whereas load-linked's are treated the same as
4596 normal loads. */
4597 IRType dataTy;
4598 if (st->Ist.LLSC.storedata == NULL) {
4599 /* LL */
4600 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004601 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004602 instrument_mem_access(
4603 bbOut,
4604 st->Ist.LLSC.addr,
4605 sizeofIRType(dataTy),
4606 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004607 sizeofIRType(hWordTy), goff_sp,
4608 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004609 );
4610 }
sewardjdb5907d2009-11-26 17:20:21 +00004611 } else {
4612 /* SC */
4613 /*ignore */
4614 }
4615 break;
4616 }
4617
4618 case Ist_Store:
sewardjdb5907d2009-11-26 17:20:21 +00004619 if (!inLDSO) {
4620 instrument_mem_access(
4621 bbOut,
4622 st->Ist.Store.addr,
4623 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4624 True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004625 sizeofIRType(hWordTy), goff_sp,
4626 NULL/*no-guard*/
sewardjdb5907d2009-11-26 17:20:21 +00004627 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004628 }
njnb83caf22009-05-25 01:47:56 +00004629 break;
sewardjb4112022007-11-09 22:49:28 +00004630
sewardjcafe5052013-01-17 14:24:35 +00004631 case Ist_StoreG: {
4632 IRStoreG* sg = st->Ist.StoreG.details;
4633 IRExpr* data = sg->data;
4634 IRExpr* addr = sg->addr;
4635 IRType type = typeOfIRExpr(bbIn->tyenv, data);
4636 tl_assert(type != Ity_INVALID);
4637 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4638 True/*isStore*/,
4639 sizeofIRType(hWordTy),
4640 goff_sp, sg->guard );
4641 break;
4642 }
4643
4644 case Ist_LoadG: {
4645 IRLoadG* lg = st->Ist.LoadG.details;
4646 IRType type = Ity_INVALID; /* loaded type */
4647 IRType typeWide = Ity_INVALID; /* after implicit widening */
4648 IRExpr* addr = lg->addr;
4649 typeOfIRLoadGOp(lg->cvt, &typeWide, &type);
4650 tl_assert(type != Ity_INVALID);
4651 instrument_mem_access( bbOut, addr, sizeofIRType(type),
4652 False/*!isStore*/,
4653 sizeofIRType(hWordTy),
4654 goff_sp, lg->guard );
4655 break;
4656 }
4657
sewardjb4112022007-11-09 22:49:28 +00004658 case Ist_WrTmp: {
4659 IRExpr* data = st->Ist.WrTmp.data;
4660 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004661 if (!inLDSO) {
4662 instrument_mem_access(
4663 bbOut,
4664 data->Iex.Load.addr,
4665 sizeofIRType(data->Iex.Load.ty),
4666 False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004667 sizeofIRType(hWordTy), goff_sp,
4668 NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004669 );
4670 }
sewardjb4112022007-11-09 22:49:28 +00004671 }
4672 break;
4673 }
4674
4675 case Ist_Dirty: {
4676 Int dataSize;
4677 IRDirty* d = st->Ist.Dirty.details;
4678 if (d->mFx != Ifx_None) {
4679 /* This dirty helper accesses memory. Collect the
4680 details. */
4681 tl_assert(d->mAddr != NULL);
4682 tl_assert(d->mSize != 0);
4683 dataSize = d->mSize;
4684 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004685 if (!inLDSO) {
4686 instrument_mem_access(
4687 bbOut, d->mAddr, dataSize, False/*!isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004688 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004689 );
4690 }
sewardjb4112022007-11-09 22:49:28 +00004691 }
4692 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004693 if (!inLDSO) {
4694 instrument_mem_access(
4695 bbOut, d->mAddr, dataSize, True/*isStore*/,
sewardjcafe5052013-01-17 14:24:35 +00004696 sizeofIRType(hWordTy), goff_sp, NULL/*no-guard*/
sewardja0eee322009-07-31 08:46:35 +00004697 );
4698 }
sewardjb4112022007-11-09 22:49:28 +00004699 }
4700 } else {
4701 tl_assert(d->mAddr == NULL);
4702 tl_assert(d->mSize == 0);
4703 }
4704 break;
4705 }
4706
4707 default:
sewardjf98e1c02008-10-25 16:22:41 +00004708 unhandled:
4709 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004710 tl_assert(0);
4711
4712 } /* switch (st->tag) */
4713
4714 addStmtToIRSB( bbOut, st );
4715 } /* iterate over bbIn->stmts */
4716
4717 return bbOut;
4718}
4719
sewardjffce8152011-06-24 10:09:41 +00004720#undef binop
4721#undef mkexpr
4722#undef mkU32
4723#undef mkU64
4724#undef assign
4725
sewardjb4112022007-11-09 22:49:28 +00004726
4727/*----------------------------------------------------------------*/
4728/*--- Client requests ---*/
4729/*----------------------------------------------------------------*/
4730
4731/* Sheesh. Yet another goddam finite map. */
4732static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4733
4734static void map_pthread_t_to_Thread_INIT ( void ) {
4735 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004736 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4737 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004738 tl_assert(map_pthread_t_to_Thread != NULL);
4739 }
4740}
4741
philippef5774342014-05-03 11:12:50 +00004742static void print_monitor_help ( void )
4743{
4744 VG_(gdb_printf)
4745 (
4746"\n"
4747"helgrind monitor commands:\n"
4748" describe <addr> : outputs a description of <addr>\n"
4749" info locks : show list of locks and their status\n"
4750"\n");
4751}
4752
4753/* return True if request recognised, False otherwise */
4754static Bool handle_gdb_monitor_command (ThreadId tid, HChar *req)
4755{
philippef5774342014-05-03 11:12:50 +00004756 HChar* wcmd;
4757 HChar s[VG_(strlen(req)) + 1]; /* copy for strtok_r; +1 for the NUL */
4758 HChar *ssaveptr;
4759 Int kwdid;
4760
4761 VG_(strcpy) (s, req);
4762
4763 wcmd = VG_(strtok_r) (s, " ", &ssaveptr);
4764 /* NB: if possible, avoid introducing a new command below which
4765 starts with the same first letter(s) as an already existing
4766 command. This ensures a shorter abbreviation for the user. */
4767 switch (VG_(keyword_id)
4768 ("help info describe",
4769 wcmd, kwd_report_duplicated_matches)) {
4770 case -2: /* multiple matches */
4771 return True;
4772 case -1: /* not found */
4773 return False;
4774 case 0: /* help */
4775 print_monitor_help();
4776 return True;
4777 case 1: /* info */
philippef5774342014-05-03 11:12:50 +00004778 wcmd = VG_(strtok_r) (NULL, " ", &ssaveptr);
4779 switch (kwdid = VG_(keyword_id)
4780 ("locks",
4781 wcmd, kwd_report_all)) {
4782 case -2:
4783 case -1:
4784 break;
4785 case 0: // locks
4786 {
4787 Int i;
4788 Lock* lk;
4789 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
4790 pp_Lock(0, lk,
4791 True /* show_lock_addrdescr */,
4792 False /* show_internal_data */);
4793 }
4794 if (i == 0)
4795 VG_(gdb_printf) ("no locks\n");
4796 }
4797 break;
4798 default:
4799 tl_assert(0);
4800 }
4801 return True;
4802 case 2: { /* describe */
4803 Addr address;
4804 SizeT szB = 1;
4805 VG_(strtok_get_address_and_size) (&address, &szB, &ssaveptr);
4806 if (address == (Addr) 0 && szB == 0) return True;
4807 if (!HG_(get_and_pp_addrdescr) ("address", address))
4808 VG_(gdb_printf) ("No description found for address %p\n",
4809 (void*)address);
4810 return True;
4811 }
4812 default:
4813 tl_assert(0);
4814 return False;
4815 }
4816}
sewardjb4112022007-11-09 22:49:28 +00004817
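/* Editor's note: these monitor commands are issued from gdb through
   vgdb, for example (the address is hypothetical):

      (gdb) monitor help
      (gdb) monitor info locks
      (gdb) monitor describe 0x804a0d0

   "describe" goes through HG_(get_and_pp_addrdescr); "info locks"
   walks admin_locks, as coded above. */
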
4818static
4819Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4820{
philippef5774342014-05-03 11:12:50 +00004821 if (!VG_IS_TOOL_USERREQ('H','G',args[0])
4822 && VG_USERREQ__GDB_MONITOR_COMMAND != args[0])
sewardjb4112022007-11-09 22:49:28 +00004823 return False;
4824
4825 /* Anything that gets past the above check is one of ours, so we
4826 should be able to handle it. */
4827
4828 /* default, meaningless return value, unless otherwise set */
4829 *ret = 0;
4830
4831 switch (args[0]) {
4832
4833 /* --- --- User-visible client requests --- --- */
4834
4835 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004836 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004837 args[1], args[2]);
4838 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004839 are any held locks etc in the area. Calling evh__die_mem
4840 and then evh__new_mem is a bit inefficient; probably just
4841 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004842 if (args[2] > 0) { /* length */
4843 evh__die_mem(args[1], args[2]);
4844 /* and then set it to New */
4845 evh__new_mem(args[1], args[2]);
4846 }
4847 break;
4848
sewardjc8028ad2010-05-05 09:34:42 +00004849 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4850 Addr payload = 0;
4851 SizeT pszB = 0;
4852 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4853 args[1]);
4854 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4855 if (pszB > 0) {
4856 evh__die_mem(payload, pszB);
4857 evh__new_mem(payload, pszB);
4858 }
4859 *ret = pszB;
4860 } else {
4861 *ret = (UWord)-1;
4862 }
4863 break;
4864 }
4865
sewardj406bac82010-03-03 23:03:40 +00004866 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4867 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4868 args[1], args[2]);
4869 if (args[2] > 0) { /* length */
4870 evh__untrack_mem(args[1], args[2]);
4871 }
4872 break;
4873
4874 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4875 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4876 args[1], args[2]);
4877 if (args[2] > 0) { /* length */
4878 evh__new_mem(args[1], args[2]);
4879 }
4880 break;
4881
sewardjb4112022007-11-09 22:49:28 +00004882 /* --- --- Client requests for Helgrind's use only --- --- */
4883
4884 /* Some thread is telling us its pthread_t value. Record the
4885 binding between that and the associated Thread*, so we can
4886 later find the Thread* again when notified of a join by the
4887 thread. */
4888 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4889 Thread* my_thr = NULL;
4890 if (0)
4891 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4892 (void*)args[1]);
4893 map_pthread_t_to_Thread_INIT();
4894 my_thr = map_threads_maybe_lookup( tid );
4895 /* This assertion should hold because the map_threads (tid to
4896 Thread*) binding should have been made at the point of
4897 low-level creation of this thread, which should have
4898 happened prior to us getting this client request for it.
4899 That's because this client request is sent from
4900 client-world from the 'thread_wrapper' function, which
4901 only runs once the thread has been low-level created. */
4902 tl_assert(my_thr != NULL);
4903 /* So now we know that (pthread_t)args[1] is associated with
4904 (Thread*)my_thr. Note that down. */
4905 if (0)
4906 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4907 (void*)args[1], (void*)my_thr );
florian6bf37262012-10-21 03:23:36 +00004908 VG_(addToFM)( map_pthread_t_to_Thread, (UWord)args[1], (UWord)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004909 break;
4910 }
4911
4912 case _VG_USERREQ__HG_PTH_API_ERROR: {
4913 Thread* my_thr = NULL;
4914 map_pthread_t_to_Thread_INIT();
4915 my_thr = map_threads_maybe_lookup( tid );
4916 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004917 HG_(record_error_PthAPIerror)(
florian6bf37262012-10-21 03:23:36 +00004918 my_thr, (HChar*)args[1], (UWord)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004919 break;
4920 }
4921
4922 /* This thread (tid) has completed a join with the quitting
4923 thread whose pthread_t is in args[1]. */
4924 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4925 Thread* thr_q = NULL; /* quitter Thread* */
4926 Bool found = False;
4927 if (0)
4928 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4929 (void*)args[1]);
4930 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004931 found = VG_(lookupFM)( map_pthread_t_to_Thread,
florian6bf37262012-10-21 03:23:36 +00004932 NULL, (UWord*)&thr_q, (UWord)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004933 /* Can this fail? It would mean that our pthread_join
4934 wrapper observed a successful join on args[1] yet that
4935 thread never existed (or at least, it never lodged an
4936 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4937 sounds like a bug in the threads library. */
4938 // FIXME: get rid of this assertion; handle properly
4939 tl_assert(found);
4940 if (found) {
4941 if (0)
4942 VG_(printf)(".................... quitter Thread* = %p\n",
4943 thr_q);
4944 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4945 }
4946 break;
4947 }
4948
4949 /* EXPOSITION only: by intercepting lock init events we can show
4950 the user where the lock was initialised, rather than only
4951 being able to show where it was first locked. Intercepting
4952 lock initialisations is not necessary for the basic operation
4953 of the race checker. */
4954 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
4955 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
4956 break;
4957
sewardjc02f6c42013-10-14 13:51:25 +00004958 /* mutex=arg[1], mutex_is_init=arg[2] */
sewardjb4112022007-11-09 22:49:28 +00004959 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00004960 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjb4112022007-11-09 22:49:28 +00004961 break;
4962
4963 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
4964 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
4965 break;
4966
4967 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
4968 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
4969 break;
4970
4971 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
4972 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
4973 break;
4974
4975 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
4976 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
4977 break;
4978
4979 /* This thread is about to do pthread_cond_signal on the
4980 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
4981 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
4982 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
4983 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
4984 break;
4985
4986 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
4987 Returns a flag indicating whether or not the mutex is believed to be
4988 valid for this operation. */
4989 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
4990 Bool mutex_is_valid
4991 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
4992 (void*)args[2] );
4993 *ret = mutex_is_valid ? 1 : 0;
4994 break;
4995 }
4996
philippe19dfe032013-03-24 20:10:23 +00004997 /* Thread successfully completed pthread_cond_init:
4998 cond=arg[1], cond_attr=arg[2] */
4999 case _VG_USERREQ__HG_PTHREAD_COND_INIT_POST:
5000 evh__HG_PTHREAD_COND_INIT_POST( tid,
5001 (void*)args[1], (void*)args[2] );
5002 break;
5003
sewardjc02f6c42013-10-14 13:51:25 +00005004 /* cond=arg[1], cond_is_init=arg[2] */
sewardjf98e1c02008-10-25 16:22:41 +00005005 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
sewardjc02f6c42013-10-14 13:51:25 +00005006 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1], args[2] != 0 );
sewardjf98e1c02008-10-25 16:22:41 +00005007 break;
5008
sewardjb4112022007-11-09 22:49:28 +00005009 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
5010 mutex=arg[2] */
5011 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
5012 evh__HG_PTHREAD_COND_WAIT_POST( tid,
sewardjff427c92013-10-14 12:13:52 +00005013 (void*)args[1], (void*)args[2],
5014 (Bool)args[3] );
sewardjb4112022007-11-09 22:49:28 +00005015 break;
5016
5017 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
5018 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
5019 break;
5020
5021 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
5022 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
5023 break;
5024
sewardj789c3c52008-02-25 12:10:07 +00005025 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00005026 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00005027 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
5028 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00005029 break;
5030
5031 /* rwlock=arg[1], isW=arg[2] */
5032 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
5033 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
5034 break;
5035
5036 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
5037 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
5038 break;
5039
5040 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
5041 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
5042 break;
5043
sewardj11e352f2007-11-30 11:11:02 +00005044 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
5045 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00005046 break;
5047
sewardj11e352f2007-11-30 11:11:02 +00005048 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
5049 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005050 break;
5051
sewardj11e352f2007-11-30 11:11:02 +00005052 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
5053 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
5054 break;
5055
5056 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
5057 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00005058 break;
5059
sewardj9f569b72008-11-13 13:33:09 +00005060 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
sewardj406bac82010-03-03 23:03:40 +00005061 /* pth_bar_t*, ulong count, ulong resizable */
5062 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
5063 args[2], args[3] );
5064 break;
5065
5066 case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
5067 /* pth_bar_t*, ulong newcount */
5068 evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
5069 args[2] );
sewardj9f569b72008-11-13 13:33:09 +00005070 break;
5071
5072 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
5073 /* pth_bar_t* */
5074 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
5075 break;
5076
5077 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
5078 /* pth_bar_t* */
5079 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
5080 break;
sewardjb4112022007-11-09 22:49:28 +00005081
sewardj5a644da2009-08-11 10:35:58 +00005082 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
5083 /* pth_spinlock_t* */
5084 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
5085 break;
5086
5087 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
5088 /* pth_spinlock_t* */
5089 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
5090 break;
5091
5092 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
5093 /* pth_spinlock_t*, Word */
5094 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
5095 break;
5096
5097 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
5098 /* pth_spinlock_t* */
5099 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
5100 break;
5101
5102 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
5103 /* pth_spinlock_t* */
5104 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
5105 break;
5106
sewardjed2e72e2009-08-14 11:08:24 +00005107 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
florian19f91bb2012-11-10 22:29:54 +00005108 /* HChar* who */
sewardjed2e72e2009-08-14 11:08:24 +00005109 HChar* who = (HChar*)args[1];
5110 HChar buf[50 + 50];
5111 Thread* thr = map_threads_maybe_lookup( tid );
5112 tl_assert( thr ); /* I must be mapped */
5113 tl_assert( who );
5114 tl_assert( VG_(strlen)(who) <= 50 );
5115 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
5116 /* record_error_Misc strdup's buf, so this is safe: */
5117 HG_(record_error_Misc)( thr, buf );
5118 break;
5119 }
5120
5121 case _VG_USERREQ__HG_USERSO_SEND_PRE:
5122 /* UWord arbitrary-SO-tag */
5123 evh__HG_USERSO_SEND_PRE( tid, args[1] );
5124 break;
5125
5126 case _VG_USERREQ__HG_USERSO_RECV_POST:
5127 /* UWord arbitrary-SO-tag */
5128 evh__HG_USERSO_RECV_POST( tid, args[1] );
5129 break;
5130
sewardj6015d0e2011-03-11 19:10:48 +00005131 case _VG_USERREQ__HG_USERSO_FORGET_ALL:
5132 /* UWord arbitrary-SO-tag */
5133 evh__HG_USERSO_FORGET_ALL( tid, args[1] );
5134 break;
5135
philippef5774342014-05-03 11:12:50 +00005136 case VG_USERREQ__GDB_MONITOR_COMMAND: {
5137 Bool handled = handle_gdb_monitor_command (tid, (HChar*)args[1]);
5138 if (handled)
5139 *ret = 1;
5140 else
5141 *ret = 0;
5142 return handled;
5143 }
5144
sewardjb4112022007-11-09 22:49:28 +00005145 default:
5146 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00005147 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
5148 args[0]);
sewardjb4112022007-11-09 22:49:28 +00005149 }
5150
5151 return True;
5152}
5153
5154
5155/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00005156/*--- Setup ---*/
5157/*----------------------------------------------------------------*/
5158
florian19f91bb2012-11-10 22:29:54 +00005159static Bool hg_process_cmd_line_option ( const HChar* arg )
sewardjb4112022007-11-09 22:49:28 +00005160{
florian19f91bb2012-11-10 22:29:54 +00005161 const HChar* tmp_str;
sewardjb4112022007-11-09 22:49:28 +00005162
njn83df0b62009-02-25 01:01:05 +00005163 if VG_BOOL_CLO(arg, "--track-lockorders",
5164 HG_(clo_track_lockorders)) {}
5165 else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
5166 HG_(clo_cmp_race_err_addrs)) {}
sewardj23f12002009-07-24 08:45:08 +00005167
5168 else if VG_XACT_CLO(arg, "--history-level=none",
5169 HG_(clo_history_level), 0);
sewardjf3861392009-08-02 10:16:03 +00005170 else if VG_XACT_CLO(arg, "--history-level=approx",
sewardj23f12002009-07-24 08:45:08 +00005171 HG_(clo_history_level), 1);
5172 else if VG_XACT_CLO(arg, "--history-level=full",
5173 HG_(clo_history_level), 2);
sewardj849b0ed2008-12-21 10:43:10 +00005174
sewardjf585e482009-08-16 22:52:29 +00005175 /* If you change the 10k/30mill limits, remember to also change
sewardj849b0ed2008-12-21 10:43:10 +00005176 them in assertions at the top of event_map_maybe_GC. */
njn83df0b62009-02-25 01:01:05 +00005177 else if VG_BINT_CLO(arg, "--conflict-cache-size",
sewardjf585e482009-08-16 22:52:29 +00005178 HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}
sewardjb4112022007-11-09 22:49:28 +00005179
sewardj11e352f2007-11-30 11:11:02 +00005180 /* "stuvwx" --> stuvwx (binary) */
njn83df0b62009-02-25 01:01:05 +00005181 else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
sewardjb4112022007-11-09 22:49:28 +00005182 Int j;
sewardjb4112022007-11-09 22:49:28 +00005183
njn83df0b62009-02-25 01:01:05 +00005184 if (6 != VG_(strlen)(tmp_str)) {
sewardjb4112022007-11-09 22:49:28 +00005185 VG_(message)(Vg_UserMsg,
sewardj24118492009-07-15 14:50:02 +00005186 "--hg-sanity-flags argument must have 6 digits\n");
sewardjb4112022007-11-09 22:49:28 +00005187 return False;
5188 }
sewardj11e352f2007-11-30 11:11:02 +00005189 for (j = 0; j < 6; j++) {
njn83df0b62009-02-25 01:01:05 +00005190 if ('0' == tmp_str[j]) { /* do nothing */ }
5191 else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
sewardjb4112022007-11-09 22:49:28 +00005192 else {
sewardj11e352f2007-11-30 11:11:02 +00005193 VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
sewardj24118492009-07-15 14:50:02 +00005194 "only contain 0s and 1s\n");
sewardjb4112022007-11-09 22:49:28 +00005195 return False;
5196 }
5197 }
sewardjf98e1c02008-10-25 16:22:41 +00005198 if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
sewardjb4112022007-11-09 22:49:28 +00005199 }
5200
sewardj622fe492011-03-11 21:06:59 +00005201 else if VG_BOOL_CLO(arg, "--free-is-write",
5202 HG_(clo_free_is_write)) {}
sewardjffce8152011-06-24 10:09:41 +00005203
5204 else if VG_XACT_CLO(arg, "--vts-pruning=never",
5205 HG_(clo_vts_pruning), 0);
5206 else if VG_XACT_CLO(arg, "--vts-pruning=auto",
5207 HG_(clo_vts_pruning), 1);
5208 else if VG_XACT_CLO(arg, "--vts-pruning=always",
5209 HG_(clo_vts_pruning), 2);
5210
5211 else if VG_BOOL_CLO(arg, "--check-stack-refs",
5212 HG_(clo_check_stack_refs)) {}
5213
sewardjb4112022007-11-09 22:49:28 +00005214 else
5215 return VG_(replacement_malloc_process_cmd_line_option)(arg);
5216
5217 return True;
5218}
5219
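/* Editor's illustration (bit value inferred from the parser above and
   hg_print_debug_usage below): "--hg-sanity-flags=010000" puts a '1'
   at j == 1, setting bit (1 << 4) in HG_(clo_sanity_flags), i.e. the
   check run after changes to the lock-order-acquisition-graph (the
   SCE_LAOG test used in laog__pre_thread_acquires_lock). */
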
5220static void hg_print_usage ( void )
5221{
5222 VG_(printf)(
sewardj622fe492011-03-11 21:06:59 +00005223" --free-is-write=no|yes treat heap frees as writes [no]\n"
sewardj849b0ed2008-12-21 10:43:10 +00005224" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00005225" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00005226" full: show both stack traces for a data race (can be very slow)\n"
5227" approx: full trace for one thread, approx for the other (faster)\n"
5228" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00005229" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjffce8152011-06-24 10:09:41 +00005230" --check-stack-refs=no|yes race-check reads and writes on the\n"
5231" main stack and thread stacks? [yes]\n"
sewardjb4112022007-11-09 22:49:28 +00005232 );
sewardjb4112022007-11-09 22:49:28 +00005233}
5234
5235static void hg_print_debug_usage ( void )
5236{
sewardjb4112022007-11-09 22:49:28 +00005237 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
5238 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00005239 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00005240 " at events (X = 0|1) [000000]\n");
5241 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00005242 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00005243 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00005244 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
5245 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00005246 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00005247 VG_(printf)(" 000010 at lock/unlock events\n");
5248 VG_(printf)(" 000001 at thread create/join events\n");
sewardjffce8152011-06-24 10:09:41 +00005249 VG_(printf)(
5250" --vts-pruning=never|auto|always [auto]\n"
5251" never: is never done (may cause big space leaks in Helgrind)\n"
5252" auto: done just often enough to keep space usage under control\n"
5253" always: done after every VTS GC (mostly just a big time waster)\n"
5254 );
sewardjb4112022007-11-09 22:49:28 +00005255}

static void hg_print_stats (void)
{

   if (1) {
      VG_(printf)("\n");
      HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }
   }

   //zz VG_(printf)("\n");
   //zz VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
   //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
   //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
   //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
   //zz VG_(printf)(" hbefore: %'10lu   of which slow\n",
   //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
   //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
   //zz             stats__hbefore_stk_hwm);
   //zz VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
   //zz VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

   VG_(printf)("\n");
   VG_(printf)("        locksets: %'8d unique lock sets\n",
               (Int)HG_(cardinalityWSU)( univ_lsets ));
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));
   }

   //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
   //            stats__ga_LL_adds,
   //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

   VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
               HG_(stats__LockN_to_P_queries),
               HG_(stats__LockN_to_P_get_map_size)() );

   VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
               HG_(stats__string_table_queries),
               HG_(stats__string_table_get_map_size)() );
   if (HG_(clo_track_lockorders)) {
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
   }

   VG_(printf)("           locks: %'8lu acquires, "
               "%'lu releases\n",
               stats__lockN_acquires,
               stats__lockN_releases
              );
   VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

   VG_(printf)("\n");
   libhb_shutdown(True); // this in fact only prints stats
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats))
      hg_print_stats();
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   /* zero out any slots beyond the frames actually captured */
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}

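/* Added commentary: the two functions above are the callbacks handed
   to libhb_init() in hg_post_clo_init() below; they give libhb a way
   to obtain stack traces and ExeContexts for the Helgrind Thread
   behind a Thr.  A sketch of the caller's side, assuming an 8-deep
   request:

      Addr frames[8];
      for_libhb__get_stacktrace( hbt, frames, 8 );
      // slots beyond the frames actually captured are zeroed
*/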

static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2013, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info),
                                   HG_(print_extra_suppression_use),
                                   HG_(update_extra_suppression_use));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)    (hg_cheap_sanity_check,
   //                             hg_expensive_sanity_check);

   VG_(needs_print_stats)         (hg_print_stats);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__DEFAULT_MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes Helgrind to start
      more slowly and use significantly more memory, without very
      often providing useful results.  The user can request to load
      this information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem_munmap );
   VG_(track_die_mem_munmap)      ( evh__die_mem_munmap );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code) ( evh__start_client_code );
   VG_(track_stop_client_code)  ( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );
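
   /* Illustrative only (added commentary): the asserts above support
      the cast-based scheme from pub_tool_hashtable.h, in which every
      node stored in a VG_(HT_construct)'d table is assumed to begin
      with a chain pointer followed by a UWord key.  A hypothetical
      node type following that convention might look like:

         typedef struct _ExampleNode {
            struct _ExampleNode* next;  // hash-chain link, owned by the table
            UWord                key;   // lookup key, here a guest address
            // tool-specific payload fields follow
         } ExampleNode;

      The sizeof checks guarantee that pointers, UWords and Addrs are
      interchangeable, so MallocMeta nodes can be stored this way. */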

   MallocMeta_poolalloc = VG_(newPA) ( sizeof(MallocMeta),
                                       1000,
                                       HG_(zalloc),
                                       "hg_malloc_metadata_pool",
                                       HG_(free));

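   /* Illustrative only (added commentary): MallocMeta records are
      subsequently carved out of this pool rather than heap-allocated
      one at a time; the expected pattern, assuming the
      pub_tool_poolalloc.h API, is

         MallocMeta* md = VG_(allocEltPA)( MallocMeta_poolalloc );
         // ... use md ...
         VG_(freeEltPA)( MallocMeta_poolalloc, md );
   */
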
   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/