
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when the client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/

// FIXME catch sync signals (SEGV, basically) and unlock BHL,
// if held.  Otherwise a LOCK-prefixed insn which segfaults
// gets Helgrind into a total muddle as the BHL will not be
// released after the insn.

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for the root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin doubly-linked list of Locks */
/* We need a doubly-linked list in order to handle del_LockN
   properly and efficiently. */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* Never changed; we only care about its address.  It is treated as
   if it were a standard userspace lock.  Also we have a Lock*
   describing it so it can participate in lock sets in the usual
   way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

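/* Create a new Thread shell for the libhb thread 'hbthr', with empty
   locksets and no core ThreadId yet, and chain it onto the
   admin_threads list. */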
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
// and insert it at the head of the admin_locks doubly-linked list.
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin_next = admin_locks;
   lock->admin_prev = NULL;
   if (admin_locks)
      admin_locks->admin_prev = lock;
   admin_locks = lock;
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any.  Removes the lock from the admin_locks doubly-linked list. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   if (admin_locks == lk) {
      admin_locks = lk->admin_next;
      if (admin_locks)
         admin_locks->admin_prev = NULL;
   }
   else {
      lk->admin_prev->admin_next = lk->admin_next;
      if (lk->admin_next) /* lk might be the tail of the list */
         lk->admin_next->admin_prev = lk->admin_prev;
   }

   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

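/* Update 'lk' to reflect that 'thr' now has a read-acquisition of
   it.  Only reader-writer locks can be read-acquired, and, as with
   the writer case, this is done strictly. */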
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add a reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

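/* Remove 'lk' from the locksetA (and, if write-held, the locksetW)
   of every thread that currently holds it. */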
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

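/* Print 'n' space characters; 'n' must be in the range [0, 127]. */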
static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

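/* Print a Thread record, indented by 'd' spaces. */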
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin    %p\n",   t->admin);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

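/* Print the admin_threads list, indented by 'd' spaces. */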
static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

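/* Print the non-NULL entries of the map_threads table, indented by
   'd' spaces. */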
static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

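/* Render a LockKind as a short human-readable string. */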
static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

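/* Print a Lock record, including its holder bag if held, indented
   by 'd' spaces. */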
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin_n  %p\n",   lk->admin_next);
      space(d+3); VG_(printf)("admin_p  %p\n",   lk->admin_prev);
      space(d+3); VG_(printf)("magic    0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

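/* Print the admin_locks list, indented by 'd' spaces. */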
static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

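/* Print the map_locks mapping (guest address -> Lock*), indented by
   'd' spaces. */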
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

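/* Dump all the primary data structures selected by 'flags' (some
   combination of PP_THREADS and PP_LOCKS, e.g. PP_ALL). */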
static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_hgthread(hbthr_root) == NULL );
   libhb_set_Thr_hgthread(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

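/* Clear the map_threads entry for 'coretid'.  Asserts that the
   entry was actually in use. */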
static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

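/* Look up the Lock* for guest address 'ga', or return NULL if there
   isn't one. */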
static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

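/* Delete the map_locks entry for 'ga'.  Asserts that a (ga, Lock)
   pair was actually present. */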
static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


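/* Top-level sanity checks, minus the (relatively expensive) lock
   checks. */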
static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}


///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a good
//   chance that it is indeed the lock that the programmer intended to
//   use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This is in the case where there are many
//   transitions ShR -> ShR, all with an empty lockset, and only later
//   is there a transition to ShM.  So what we want to do is note the
//   lossage lock at the point where a ShR -> ShR transition empties out
//   the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates
//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
//   we do not record the Lock* directly as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
//
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
//
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

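/* Report a client read / write of [a, a+len) by 'thr' to libhb, so
   the race-detection state machine is applied to that range. */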
sewardj23f12002009-07-24 08:45:08 +00001078static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1079{
sewardjf98e1c02008-10-25 16:22:41 +00001080 Thr* hbthr = thr->hbthr;
1081 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001082 LIBHB_CREAD_N(hbthr, a, len);
1083}
1084
1085static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1086 Thr* hbthr = thr->hbthr;
1087 tl_assert(hbthr);
1088 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001089}
1090
1091static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1092{
sewardj23f12002009-07-24 08:45:08 +00001093 libhb_srange_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001094}
1095
sewardjb4112022007-11-09 22:49:28 +00001096static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
1097{
sewardjb4112022007-11-09 22:49:28 +00001098 if (0 && len > 500)
barta0b6b2c2008-07-07 06:49:24 +00001099 VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
sewardj23f12002009-07-24 08:45:08 +00001100 libhb_srange_noaccess( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001101}
1102
sewardj406bac82010-03-03 23:03:40 +00001103static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1104{
1105 if (0 && len > 500)
1106 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
1107 libhb_srange_untrack( thr->hbthr, aIN, len );
1108}
1109
sewardjb4112022007-11-09 22:49:28 +00001110
1111/*----------------------------------------------------------------*/
1112/*--- Event handlers (evh__* functions) ---*/
1113/*--- plus helpers (evhH__* functions) ---*/
1114/*----------------------------------------------------------------*/
1115
1116/*--------- Event handler helpers (evhH__* functions) ---------*/
1117
1118/* Create a new segment for 'thr', making it depend (.prev) on its
1119 existing segment, bind together the SegmentID and Segment, and
1120 return both of them. Also update 'thr' so it references the new
1121 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001122//zz static
1123//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1124//zz /*OUT*/Segment** new_segP,
1125//zz Thread* thr )
1126//zz {
1127//zz Segment* cur_seg;
1128//zz tl_assert(new_segP);
1129//zz tl_assert(new_segidP);
1130//zz tl_assert(HG_(is_sane_Thread)(thr));
1131//zz cur_seg = map_segments_lookup( thr->csegid );
1132//zz tl_assert(cur_seg);
1133//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1134//zz at their owner thread. */
1135//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1136//zz *new_segidP = alloc_SegmentID();
1137//zz map_segments_add( *new_segidP, *new_segP );
1138//zz thr->csegid = *new_segidP;
1139//zz }
sewardjb4112022007-11-09 22:49:28 +00001140
1141
1142/* The lock at 'lock_ga' has acquired a writer. Make all necessary
1143 updates, and also do all possible error checks. */
1144static
1145void evhH__post_thread_w_acquires_lock ( Thread* thr,
1146 LockKind lkk, Addr lock_ga )
1147{
1148 Lock* lk;
1149
1150 /* Basically what we need to do is call lockN_acquire_writer.
1151 However, that will barf if any 'invalid' lock states would
1152 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001153 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001154 routine.
1155
1156 Because this routine is only called after successful lock
1157 acquisition, we should not be asked to move the lock into any
1158 invalid states. Requests to do so are bugs in libpthread, since
1159 that should have rejected any such requests. */
1160
sewardjf98e1c02008-10-25 16:22:41 +00001161 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001162 /* Try to find the lock. If we can't, then create a new one with
1163 kind 'lkk'. */
1164 lk = map_locks_lookup_or_create(
1165 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001166 tl_assert( HG_(is_sane_LockN)(lk) );
1167
1168 /* check libhb level entities exist */
1169 tl_assert(thr->hbthr);
1170 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001171
1172 if (lk->heldBy == NULL) {
1173 /* the lock isn't held. Simple. */
1174 tl_assert(!lk->heldW);
1175 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001176 /* acquire a dependency from the lock's VCs */
1177 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001178 goto noerror;
1179 }
1180
1181 /* So the lock is already held. If held as a r-lock then
1182 libpthread must be buggy. */
1183 tl_assert(lk->heldBy);
1184 if (!lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001185 HG_(record_error_Misc)(
1186 thr, "Bug in libpthread: write lock "
1187 "granted on rwlock which is currently rd-held");
sewardjb4112022007-11-09 22:49:28 +00001188 goto error;
1189 }
1190
1191 /* So the lock is held in w-mode. If it's held by some other
1192 thread, then libpthread must be buggy. */
sewardj896f6f92008-08-19 08:38:52 +00001193 tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */
sewardjb4112022007-11-09 22:49:28 +00001194
sewardj896f6f92008-08-19 08:38:52 +00001195 if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
sewardjf98e1c02008-10-25 16:22:41 +00001196 HG_(record_error_Misc)(
1197 thr, "Bug in libpthread: write lock "
1198 "granted on mutex/rwlock which is currently "
1199 "wr-held by a different thread");
sewardjb4112022007-11-09 22:49:28 +00001200 goto error;
1201 }
1202
1203 /* So the lock is already held in w-mode by 'thr'. That means this
1204 is an attempt to lock it recursively, which is only allowable
1205 for LK_mbRec kinded locks. Since this routine is called only
1206 once the lock has been acquired, this must also be a libpthread
1207 bug. */
1208 if (lk->kind != LK_mbRec) {
sewardjf98e1c02008-10-25 16:22:41 +00001209 HG_(record_error_Misc)(
1210 thr, "Bug in libpthread: recursive write lock "
1211 "granted on mutex/wrlock which does not "
1212 "support recursion");
sewardjb4112022007-11-09 22:49:28 +00001213 goto error;
1214 }
1215
1216 /* So we are recursively re-locking a lock we already w-hold. */
1217 lockN_acquire_writer( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001218 /* acquire a dependency from the lock's VC. Probably pointless,
1219 but also harmless. */
1220 libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001221 goto noerror;
1222
1223 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001224 if (HG_(clo_track_lockorders)) {
1225 /* check lock order acquisition graph, and update. This has to
1226 happen before the lock is added to the thread's locksetA/W. */
1227 laog__pre_thread_acquires_lock( thr, lk );
1228 }
sewardjb4112022007-11-09 22:49:28 +00001229 /* update the thread's held-locks set */
1230 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1231 thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
1232 /* fall through */
1233
1234 error:
sewardjf98e1c02008-10-25 16:22:41 +00001235 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001236}
1237
1238
1239/* The lock at 'lock_ga' has acquired a reader. Make all necessary
1240 updates, and also do all possible error checks. */
1241static
1242void evhH__post_thread_r_acquires_lock ( Thread* thr,
1243 LockKind lkk, Addr lock_ga )
1244{
1245 Lock* lk;
1246
1247 /* Basically what we need to do is call lockN_acquire_reader.
1248 However, that will barf if any 'invalid' lock states would
1249 result. Therefore check before calling. Side effect is that
sewardjf98e1c02008-10-25 16:22:41 +00001250 'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
sewardjb4112022007-11-09 22:49:28 +00001251 routine.
1252
1253 Because this routine is only called after successful lock
1254 acquisition, we should not be asked to move the lock into any
1255 invalid states. Requests to do so are bugs in libpthread, since
1256 that should have rejected any such requests. */
1257
sewardjf98e1c02008-10-25 16:22:41 +00001258 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001259 /* Try to find the lock. If we can't, then create a new one with
1260 kind 'lkk'. Only a reader-writer lock can be read-locked,
1261 hence the first assertion. */
1262 tl_assert(lkk == LK_rdwr);
1263 lk = map_locks_lookup_or_create(
1264 lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
sewardjf98e1c02008-10-25 16:22:41 +00001265 tl_assert( HG_(is_sane_LockN)(lk) );
1266
1267 /* check libhb level entities exist */
1268 tl_assert(thr->hbthr);
1269 tl_assert(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +00001270
1271 if (lk->heldBy == NULL) {
1272 /* the lock isn't held. Simple. */
1273 tl_assert(!lk->heldW);
1274 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001275 /* acquire a dependency from the lock's VC */
1276 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001277 goto noerror;
1278 }
1279
1280 /* So the lock is already held. If held as a w-lock then
1281 libpthread must be buggy. */
1282 tl_assert(lk->heldBy);
1283 if (lk->heldW) {
sewardjf98e1c02008-10-25 16:22:41 +00001284 HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
1285 "granted on rwlock which is "
1286 "currently wr-held");
sewardjb4112022007-11-09 22:49:28 +00001287 goto error;
1288 }
1289
1290 /* Easy enough. In short anybody can get a read-lock on a rwlock
1291 provided it is either unlocked or already in rd-held. */
1292 lockN_acquire_reader( lk, thr );
sewardjf98e1c02008-10-25 16:22:41 +00001293 /* acquire a dependency from the lock's VC. Probably pointless,
1294 but also harmless. */
1295 libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
sewardjb4112022007-11-09 22:49:28 +00001296 goto noerror;
1297
1298 noerror:
sewardjc1fb9d22011-02-28 09:03:44 +00001299 if (HG_(clo_track_lockorders)) {
1300 /* check lock order acquisition graph, and update. This has to
1301 happen before the lock is added to the thread's locksetA/W. */
1302 laog__pre_thread_acquires_lock( thr, lk );
1303 }
sewardjb4112022007-11-09 22:49:28 +00001304 /* update the thread's held-locks set */
1305 thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
1306 /* but don't update thr->locksetW, since lk is only rd-held */
1307 /* fall through */
1308
1309 error:
sewardjf98e1c02008-10-25 16:22:41 +00001310 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +00001311}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static 
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
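
/* Illustrative example (hypothetical client code, not Helgrind code):
   the "foreign unlock" path above fires on client code such as

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      void* t1 ( void* arg ) { pthread_mutex_lock(&mx);   return NULL; }
      void* t2 ( void* arg ) { pthread_mutex_unlock(&mx); return NULL; }

   If t2 runs while t1 still holds mx, then n == 0 for t2's Thread, so
   HG_(record_error_UnlockForeign) is reported and the recorded lock
   state is left unchanged. */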


/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread?  This is
   absolutely performance critical.  We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'.  Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault.  Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code.  Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge.  It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm.  What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // whereas it doesn't ignore this
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_hgthread(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_hgthread(hbthr_c) == NULL );
      libhb_set_Thr_hgthread(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}

/* This is called immediately after fork, for the child only.  'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
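
/* Illustrative example (hypothetical client code, not Helgrind code):
   why the purge above is needed.  Consider

      pthread_t th;
      pthread_create(&th, NULL, worker, NULL);  // two threads now exist
      if (fork() == 0) {
         // In the child, only the forking thread survives (POSIX);
         // 'worker' is gone, and its stale map_threads slot would
         // otherwise linger and clash with later tid re-use.
      }

   Hence the child-side atfork handler clears every map_threads slot
   except the survivor's, using exactly the ll_exit cleanup actions. */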


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_hgthread(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_hgthread(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Both the send and the recv are strong, so that the stayer
      acquires a dependency on everything the quitter did before
      exiting. */
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit
      before we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}

static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_read_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // Don't segfault if the string starts in an obviously stupid
   // place.  Actually we should check the whole string, not just
   // the start address, but that's too much trouble.  At least
   // checking the first byte is better than nothing.  See #255009.
   if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
      return;
   len = VG_(strlen)( (Char*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid: is_inited is ignored, and both
   // branches do the same thing.
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread*  thr = get_current_Thread_in_C_C();
   Thr*     hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}
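
/* A sketch of how these helpers get called (an assumed typical usage,
   not a literal copy of the instrumenter): for each guest memory
   access, the instrumentation pass emits a dirty call to the
   size-matched helper, conceptually turning a 4-byte client store at
   address 'a' into

      evh__mem_help_cwrite_4(a);   // dirty call, VG_REGPARM(1) convention
      *(UInt*)a = data;            // the original store proceeds as usual

   This is why the helpers may assume current_Thread is set: they run
   only between evh__start_client_code and evh__stop_client_code. */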


/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked.  Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}
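
/* Illustrative example (hypothetical client code, not Helgrind code):
   the exposition payoff.  Given

      pthread_mutex_t mx;
      pthread_mutex_init(&mx, NULL);   // Lock record created here
      ...
      pthread_mutex_lock(&mx);         // not merely here

   later error reports involving mx can point at the init site, because
   the Lock was created at INIT_POST time rather than at first lock. */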

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HChar* errstr = "Attempt to re-lock a "
                      "non-recursive lock I already hold";
      HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
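
/* Illustrative example (hypothetical client code, not Helgrind code):
   the re-lock check above catches self-deadlocks of the form

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;  // non-recursive
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // would block forever; reported here

   A pthread_mutex_trylock on the same mutex arrives with
   isTryLock == 1 and is deliberately not reported, since it fails
   cleanly with EBUSY rather than deadlocking. */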

static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge.  Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery.  If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST.  Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}
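
/* A sketch of the glibc behaviour being worked around (an assumed
   simplification, not glibc's actual source): init and unlock both
   amount to storing the arch-specific "free" value into the lock word,

      int pthread_spin_init   ( pthread_spinlock_t* l, int ps )
         { *l = FREE_VALUE; return 0; }
      int pthread_spin_unlock ( pthread_spinlock_t* l )
         { *l = FREE_VALUE; return 0; }

   so the intercepts cannot tell the two apart.  Hence the single
   INIT_OR_UNLOCK event: "held" in the PRE handler means unlock,
   "never seen" in the POST handler means init. */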


/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;
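
/* Illustrative example (hypothetical client code, not Helgrind code):
   the send-on-signal / recv-on-wait scheme orders the classic pattern

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
      int ready = 0, data = 0;

      void* producer ( void* arg ) {
         pthread_mutex_lock(&mx);
         data  = 99;
         ready = 1;
         pthread_cond_signal(&cv);       // 'send' into cv's SO
         pthread_mutex_unlock(&mx);
         return NULL;
      }
      void* consumer ( void* arg ) {
         pthread_mutex_lock(&mx);
         while (!ready)
            pthread_cond_wait(&cv, &mx); // on success: 'recv' from cv's SO
         // reads of 'data' are now ordered after the producer's writes
         pthread_mutex_unlock(&mx);
         return NULL;
      }
*/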


/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
      tl_assert(map_cond_to_CVInfo != NULL);
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so    = so;
      cvi->mx_ga = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static void map_cond_to_CVInfo_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_CVInfo_INIT();
   if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked;
   // it only says the mutex should be held "if predictable scheduling
   // behavior is required".
   //
   // For the moment, disable these checks.
   //lk = map_locks_maybe_lookup(cvi->mx_ga);
   //if (lk == NULL || cvi->mx_ga == 0) {
   //   HG_(record_error_Misc)( thr,
   //      "pthread_cond_{signal,broadcast}: "
   //      "no or invalid mutex associated with cond");
   //}
   ///* note: lk could be NULL.  Be careful. */
   //if (lk) {
   //   if (lk->kind == LK_rdwr) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
   //   }
   //   if (lk->heldBy == NULL) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by any thread");
   //   }
   //   if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by calling thread");
   //   }
   //}

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}

static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully.  Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!libhb_so_everSent(cvi->so)) {
      /* Hmm.  How can a wait on 'cond' succeed if nobody signalled
         it?  If this happened it would surely be a bug in the threads
         library.  Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded without prior "
                                   "pthread_cond_signal");
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}
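
/* Illustrative example (hypothetical client code, not Helgrind code):
   the (CV,MX) binding check in WAIT_PRE complains about usage like

      pthread_cond_wait(&cv, &mx1);   // first waiter binds cv to mx1
      ...
      pthread_cond_wait(&cv, &mx2);   // while waiters remain: error

   The binding is dynamic per POSIX: once nWaiters falls back to zero,
   the CV may legitimately be re-bound to a different mutex, which is
   why a waiter count is tracked instead of a permanent binding. */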

static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_CVInfo_delete( cond );
}


/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured by the wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured by the wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,wr}lock with a "
              "pthread_mutex_t* argument " );
   }
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured by the wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so rwl is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs. When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it. This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do. But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S. T3 cannot complete its waits without both T1 and T2
   posting. The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we act as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them. This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about. We should.
*/

/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
      tl_assert(map_sem_to_SO_stack != NULL);
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord keyW;
   XArray* xa;
   SO* so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd. No stack for this semaphore. */
      return NULL;
   }
}

static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack. This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}

static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO* so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack. This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the loop that follows runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'valid' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}

static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'. This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped off the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO* so;
   Thr* hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}

static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully. Pop the posting SO
      off this semaphore's SO-stack, and do a strong recv from it.
      This creates a dependency back to one of the post-ers for the
      semaphore. */

   Thread* thr;
   SO* so;
   Thr* hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}


/* -------------------------------------------------------- */
/* -------------- events to do with barriers -------------- */
/* -------------------------------------------------------- */

typedef
   struct {
      Bool initted;    /* has it yet been initted by guest? */
      Bool resizable;  /* is resizing allowed? */
      UWord size;      /* declared size */
      XArray* waiting; /* XA of Thread*. # present is 0 .. size */
   }
   Bar;

static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   tl_assert(bar);
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}

static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}

/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;

static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
      tl_assert(map_barrier_to_Bar != NULL);
   }
}

static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
   UWord key, val;
   map_barrier_to_Bar_INIT();
   if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
      tl_assert(key == (UWord)barrier);
      return (Bar*)val;
   } else {
      Bar* bar = new_Bar();
      VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
      return bar;
   }
}

static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      delete_Bar(bar);
   }
}


static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar* bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}


static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar* bar;

   /* Deal with destroy events. The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation? The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations. Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}


/* All the threads have arrived. Now do the Interesting Bit. Get a
   new synchronisation object and do a weak send to it from all the
   participating threads. This makes its vector clocks be the join of
   all the individual threads' vector clocks. Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO* so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more. Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
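
/* A worked example of the above (illustrative numbers only): suppose
   three threads arrive at a size-3 barrier with vector clocks
   [5,0,0], [0,7,0] and [0,0,2]. The three weak sends leave the SO
   holding the join [5,7,2]; the three strong recvs then copy that
   join back into each thread. Every thread's VC now dominates every
   pre-barrier access, which is exactly the ordering that
   pthread_barrier_wait is supposed to provide. */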


static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct? It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return. Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread. All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait. That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on. There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct
      handling / implementation of barriers). That can't happen
      because we update our data structures here so as to indicate
      that the threads have passed the barrier, even though, as per
      (2) above, they are guaranteed not to pass the barrier until we
      return.

      This relies crucially on Valgrind being single threaded. If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar* bar;
   UWord present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}


static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar* bar;
   UWord present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity. There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity. If we decrease it to be equal or
         below the number of waiting threads, they will now move past
         the barrier, so we need to mess with dep edges in the same
         way as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present >= 0 && present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      }
      bar->size = newcount;
   }
}
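
/* Example of the two resize cases (illustrative only): a barrier of
   size 4 currently has 3 threads waiting. Resizing to 5 just raises
   the threshold; nobody can move yet, so only 'size' changes.
   Resizing to 3 (or fewer) means the 3 waiters can now proceed, so
   we must do the same cross-sync as if a 4th thread had arrived,
   and only then adopt the new size. */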


/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */

/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
      tl_assert(map_usertag_to_SO != NULL);
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

// If it's ever needed (XXX check before use)
//static void map_usertag_to_SO_delete ( UWord usertag ) {
//   UWord keyW, valW;
//   map_usertag_to_SO_INIT();
//   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
//      SO* so = (SO*)valW;
//      tl_assert(keyW == usertag);
//      tl_assert(so);
//      libhb_so_dealloc(so);
//   }
//}

static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG. Bind USERTAG to a real SO if it is not already so
      bound, and do a 'strong send' on the SO. This is later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on this signalling event. */
   Thread* thr;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}

static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG. Bind USERTAG to a real SO if it is not already so
      bound. If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO* so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it. If the SO has never so far been
      sent on, then libhb_so_recv will do nothing. So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
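
/* Client-side sketch of how these two handlers are reached (an
   illustration only; it assumes the ANNOTATE_HAPPENS_BEFORE/_AFTER
   macros from helgrind.h, which issue the USERSO_SEND_PRE and
   USERSO_RECV_POST client requests respectively; 'data' and 'flag'
   are hypothetical):

      // producer thread:
      data = compute();
      ANNOTATE_HAPPENS_BEFORE(&flag);   // strong send on SO for &flag
      flag = 1;

      // consumer thread:
      while (!flag) ;
      ANNOTATE_HAPPENS_AFTER(&flag);    // strong recv on the same SO
      use(data);

   The usertag is just the address &flag. The send/recv pair gives
   the consumer a dependency on the producer, so the unprotected
   accesses to 'data' are not reported as a race. (Real code would
   also need to deal with the spin on 'flag' itself, e.g. by marking
   that access benign.) */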


/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
   laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1, L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing. Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time. Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog. Invalidate the cache any time
       any edges are deleted from laog.
*/

typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
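
/* To illustrate the shape of these structures (hypothetical locks A
   and B): after some thread does lock(A); lock(B), 'laog' maps A to
   { inns:{}, outs:{B} } and B to { inns:{A}, outs:{} }, while
   'laog_exposition' maps the pair (ga(A), ga(B)) to the two stack
   traces at which A and B were held when that edge first entered
   the graph. */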


__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}

static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}

__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph. Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links. They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping. We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points. Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}

__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   Word keyW;
   LAOGLinks* links;
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   Word keyW;
   LAOGLinks* links;
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}

/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'. If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock* ret;
   Word i, ssz;
   XArray* stack;   /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock* here;
   WordSetID succs;
   Word succs_size;
   UWord* succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret = NULL;
   stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free),
                         NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}


/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so. Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word ls_size, i;
   Lock* other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking it. In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* First, the check. Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards. We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm. This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ? We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
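
/* Example of the violation this detects (illustrative only): thread
   T1 does lock(A); lock(B), which installs the edge A --> B in laog.
   Later, thread T2, already holding B, calls lock(A). The DFS above
   then finds the path A --*--> B ending at a member of T2's lockset,
   so HG_(record_error_LockOrder) fires, and the two acquisition
   points saved in laog_exposition show where the A-before-B ordering
   was first established. */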


/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff. So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word i, ws_size;
//   UWord* ws_words;
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}


/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;


static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}


/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}


/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address. Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}


static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address. Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller; report the dying tail *before*
         updating szB, otherwise the computed length is zero */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here? specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour? Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one. We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}


/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search. With a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/Addr* payload,
                                    /*OUT*/SizeT* szB,
                                    Addr data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside. This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks. But without using an interval tree or
      some such, it's hard to see how to do better. We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found. Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}


sewardjb4112022007-11-09 22:49:28 +00003996/*--------------------------------------------------------------*/
3997/*--- Instrumentation ---*/
3998/*--------------------------------------------------------------*/
3999
4000static void instrument_mem_access ( IRSB* bbOut,
4001 IRExpr* addr,
4002 Int szB,
4003 Bool isStore,
4004 Int hWordTy_szB )
4005{
4006 IRType tyAddr = Ity_INVALID;
4007 HChar* hName = NULL;
4008 void* hAddr = NULL;
4009 Int regparms = 0;
4010 IRExpr** argv = NULL;
4011 IRDirty* di = NULL;
4012
4013 tl_assert(isIRAtom(addr));
4014 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4015
4016 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
4017 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4018
4019 /* So the effective address is in 'addr' now. */
4020 regparms = 1; // unless stated otherwise
4021 if (isStore) {
4022 switch (szB) {
4023 case 1:
sewardj23f12002009-07-24 08:45:08 +00004024 hName = "evh__mem_help_cwrite_1";
4025 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004026 argv = mkIRExprVec_1( addr );
4027 break;
4028 case 2:
sewardj23f12002009-07-24 08:45:08 +00004029 hName = "evh__mem_help_cwrite_2";
4030 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004031 argv = mkIRExprVec_1( addr );
4032 break;
4033 case 4:
sewardj23f12002009-07-24 08:45:08 +00004034 hName = "evh__mem_help_cwrite_4";
4035 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004036 argv = mkIRExprVec_1( addr );
4037 break;
4038 case 8:
sewardj23f12002009-07-24 08:45:08 +00004039 hName = "evh__mem_help_cwrite_8";
4040 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004041 argv = mkIRExprVec_1( addr );
4042 break;
4043 default:
4044 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4045 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004046 hName = "evh__mem_help_cwrite_N";
4047 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004048 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4049 break;
4050 }
4051 } else {
4052 switch (szB) {
4053 case 1:
sewardj23f12002009-07-24 08:45:08 +00004054 hName = "evh__mem_help_cread_1";
4055 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004056 argv = mkIRExprVec_1( addr );
4057 break;
4058 case 2:
sewardj23f12002009-07-24 08:45:08 +00004059 hName = "evh__mem_help_cread_2";
4060 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004061 argv = mkIRExprVec_1( addr );
4062 break;
4063 case 4:
sewardj23f12002009-07-24 08:45:08 +00004064 hName = "evh__mem_help_cread_4";
4065 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004066 argv = mkIRExprVec_1( addr );
4067 break;
4068 case 8:
sewardj23f12002009-07-24 08:45:08 +00004069 hName = "evh__mem_help_cread_8";
4070 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004071 argv = mkIRExprVec_1( addr );
4072 break;
4073 default:
4074 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4075 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004076 hName = "evh__mem_help_cread_N";
4077 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004078 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4079 break;
4080 }
4081 }
4082
4083 /* Add the helper. */
4084 tl_assert(hName);
4085 tl_assert(hAddr);
4086 tl_assert(argv);
4087 di = unsafeIRDirty_0_N( regparms,
4088 hName, VG_(fnptr_to_fnentry)( hAddr ),
4089 argv );
4090 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
4091}
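
/* Illustrative sketch, not verbatim ppIRStmt output: for a 4-byte
   store to an address held in temporary t3, the statement appended
   above amounts to

      DIRTY 1 ::: evh__mem_help_cwrite_4(t3)

   i.e. an always-executed helper call taking the address as its
   single argument, passed in a register (regparms == 1).  Only the
   catch-all >8-byte cases pass the size as a second argument, hence
   regparms == 2 there. */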


/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (Fail-safe: False is the safe answer.) */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const UChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2))        return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1))            return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1))              return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}
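
/* For instance (soname values as defined in pub_tool_redir.h): on an
   x86_64-linux target the test above matches "ld-linux-x86-64.so.2",
   and on ppc64-linux "ld64.so.1"; any soname not listed falls through
   to the safe answer, False. */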

static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   Addr64  cia; /* address of current insn */
   IRStmt* st;
   Bool    inLDSO = False;
   Addr64  inLDSOmask4K = 1; /* mismatches on first check */

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linkeds are treated the same as
               normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }

         case Ist_Store:
            /* Plain store.  (Store-conditionals were already dealt
               with, by being ignored, in the Ist_LLSC case above) ... */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* ... whereas here we don't care whether a load is a
               vanilla one or a load-linked. */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
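
/* Worked example (illustrative; the IR text is approximate).  For
   guest code copying a 32-bit word, "*p = *q", the incoming
   superblock might contain

      t1 = LDle:I32(t9)
      STle(t8) = t1

   Provided the code is not in the dynamic linker, the loop above
   emits each helper call just before the access it describes, since
   instrument_mem_access runs before the original statement is copied
   into bbOut:

      DIRTY 1 ::: evh__mem_help_cread_4(t9)
      t1 = LDle:I32(t9)
      DIRTY 1 ::: evh__mem_help_cwrite_4(t8)
      STle(t8) = t1
*/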


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

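      /* Client-side sketch for the request above, using the
         VALGRIND_HG_CLEAN_MEMORY macro that helgrind.h provides as
         the wrapper for it ('pool_recycle' and the buffer names are
         illustrative):

            #include "helgrind.h"

            void pool_recycle ( void* buf, size_t buf_szB ) {
               // buf is about to get a new owner; discard its
               // happens-before history so the new owner doesn't
               // inherit stale state and trigger false race reports.
               VALGRIND_HG_CLEAN_MEMORY(buf, buf_szB);
            }
      */
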
      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB    = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

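      /* Usage sketch for the SEND/RECV pair above.  helgrind.h's
         ANNOTATE_HAPPENS_BEFORE/ANNOTATE_HAPPENS_AFTER macros are
         believed to expand to exactly these two requests; 'ready'
         and 'msg' are illustrative names:

            // producer thread                // consumer thread
            msg = make_message();             while (!ready) ;
            ANNOTATE_HAPPENS_BEFORE(&msg);    ANNOTATE_HAPPENS_AFTER(&msg);
            ready = 1;                        consume(msg);

         The matching tag (&msg here) is what pairs a send with a
         receive: it gives Helgrind a happens-before edge for a
         lock-free handover it could not otherwise see, so the
         accesses to msg are not reported as a race. */
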
      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

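/* Example invocation exercising the options parsed above (a sketch;
   'prog' stands for the client program):

      valgrind --tool=helgrind --history-level=approx \
               --conflict-cache-size=5000000 \
               --hg-sanity-flags=000010 prog args...

   i.e. approximate conflicting-access traces, an enlarged history
   cache, and sanity checking at lock/unlock events. */
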
static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog,  "univ_laog" );
         }
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("            LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)(" LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_hgthread( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 320 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/