blob: 21a473d210ce19b91256052b72a61102f105b185 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj9eecbbb2010-05-03 21:37:12 +000011 Copyright (C) 2007-2010 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
sewardj9eecbbb2010-05-03 21:37:12 +000014 Copyright (C) 2007-2010 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardj61bc2c52011-02-09 10:34:00 +000055#include "pub_tool_libcproc.h" // VG_(atfork)
sewardj234e5582011-02-09 12:47:23 +000056#include "pub_tool_aspacemgr.h" // VG_(am_is_valid_for_client)
sewardjb4112022007-11-09 22:49:28 +000057
sewardjf98e1c02008-10-25 16:22:41 +000058#include "hg_basics.h"
59#include "hg_wordset.h"
60#include "hg_lock_n_thread.h"
61#include "hg_errors.h"
62
63#include "libhb.h"
64
sewardjb4112022007-11-09 22:49:28 +000065#include "helgrind.h"
66
sewardjf98e1c02008-10-25 16:22:41 +000067
68// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
69
70// FIXME: when client destroys a lock or a CV, remove these
71// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000072
73/*----------------------------------------------------------------*/
74/*--- ---*/
75/*----------------------------------------------------------------*/
76
sewardj11e352f2007-11-30 11:11:02 +000077/* Note this needs to be compiled with -fno-strict-aliasing, since it
78 contains a whole bunch of calls to lookupFM etc which cast between
79 Word and pointer types. gcc rightly complains this breaks ANSI C
80 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
81 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000082*/
sewardjb4112022007-11-09 22:49:28 +000083
sewardjefd3b4d2007-12-02 02:05:23 +000084// FIXME catch sync signals (SEGV, basically) and unlock BHL,
85// if held. Otherwise a LOCK-prefixed insn which segfaults
86// gets Helgrind into a total muddle as the BHL will not be
87// released after the insn.
88
sewardjb4112022007-11-09 22:49:28 +000089// FIXME what is supposed to happen to locks in memory which
90// is relocated as a result of client realloc?
91
sewardjb4112022007-11-09 22:49:28 +000092// FIXME put referencing ThreadId into Thread and get
93// rid of the slow reverse mapping function.
94
95// FIXME accesses to NoAccess areas: change state to Excl?
96
97// FIXME report errors for accesses of NoAccess memory?
98
99// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
100// the thread still holds the lock.
101
102/* ------------ Debug/trace options ------------ */
103
104// this is:
105// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
106// happens_before_wrk: 1000
107// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
108#define SHOW_EXPENSIVE_STUFF 0
109
110// 0 for silent, 1 for some stuff, 2 for lots of stuff
111#define SHOW_EVENTS 0
112
sewardjb4112022007-11-09 22:49:28 +0000113
114static void all__sanity_check ( Char* who ); /* fwds */
115
116#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
117
118// 0 for none, 1 for dump at end of run
119#define SHOW_DATA_STRUCTURES 0
120
121
sewardjb4112022007-11-09 22:49:28 +0000122/* ------------ Misc comments ------------ */
123
124// FIXME: don't hardwire initial entries for root thread.
125// Instead, let the pre_thread_ll_create handler do this.
126
sewardjb4112022007-11-09 22:49:28 +0000127
128/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000129/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000130/*----------------------------------------------------------------*/
131
sewardjb4112022007-11-09 22:49:28 +0000132/* Admin linked list of Threads */
133static Thread* admin_threads = NULL;
134
sewardj1d7c3322011-02-28 09:22:51 +0000135/* Admin double linked list of Locks */
136/* We need a double linked list to properly and efficiently
137 handle del_LockN. */
sewardjb4112022007-11-09 22:49:28 +0000138static Lock* admin_locks = NULL;
139
sewardjb4112022007-11-09 22:49:28 +0000140/* Mapping table for core ThreadIds to Thread* */
141static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
142
sewardjb4112022007-11-09 22:49:28 +0000143/* Mapping table for lock guest addresses to Lock* */
144static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
145
146/* The word-set universes for thread sets and lock sets. */
147static WordSetU* univ_tsets = NULL; /* sets of Thread* */
148static WordSetU* univ_lsets = NULL; /* sets of Lock* */
149static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
150
151/* never changed; we only care about its address. Is treated as if it
152 was a standard userspace lock. Also we have a Lock* describing it
153 so it can participate in lock sets in the usual way. */
154static Int __bus_lock = 0;
155static Lock* __bus_lock_Lock = NULL;
156
157
158/*----------------------------------------------------------------*/
159/*--- Simple helpers for the data structures ---*/
160/*----------------------------------------------------------------*/
161
162static UWord stats__lockN_acquires = 0;
163static UWord stats__lockN_releases = 0;
164
sewardjf98e1c02008-10-25 16:22:41 +0000165static
166ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000167
168/* --------- Constructors --------- */
169
sewardjf98e1c02008-10-25 16:22:41 +0000170static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000171 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000172 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000173 thread->locksetA = HG_(emptyWS)( univ_lsets );
174 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000175 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000176 thread->hbthr = hbthr;
177 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000178 thread->created_at = NULL;
179 thread->announced = False;
180 thread->errmsg_index = indx++;
181 thread->admin = admin_threads;
182 admin_threads = thread;
183 return thread;
184}
sewardjf98e1c02008-10-25 16:22:41 +0000185
sewardjb4112022007-11-09 22:49:28 +0000186// Make a new lock which is unlocked (hence ownerless)
sewardj1d7c3322011-02-28 09:22:51 +0000187// and insert the new lock in admin_locks double linked list.
sewardjb4112022007-11-09 22:49:28 +0000188static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
189 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000190 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardj1d7c3322011-02-28 09:22:51 +0000191 lock->admin_next = admin_locks;
192 lock->admin_prev = NULL;
193 if (admin_locks)
194 admin_locks->admin_prev = lock;
195 admin_locks = lock;
sewardjb4112022007-11-09 22:49:28 +0000196 lock->unique = unique++;
197 lock->magic = LockN_MAGIC;
198 lock->appeared_at = NULL;
199 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000200 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000201 lock->guestaddr = guestaddr;
202 lock->kind = kind;
203 lock->heldW = False;
204 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000205 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000206 return lock;
207}
sewardjb4112022007-11-09 22:49:28 +0000208
209/* Release storage for a Lock. Also release storage in .heldBy, if
sewardj1d7c3322011-02-28 09:22:51 +0000210 any. Removes from admin_locks double linked list. */
sewardjb4112022007-11-09 22:49:28 +0000211static void del_LockN ( Lock* lk )
212{
sewardjf98e1c02008-10-25 16:22:41 +0000213 tl_assert(HG_(is_sane_LockN)(lk));
214 tl_assert(lk->hbso);
215 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000216 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000217 VG_(deleteBag)( lk->heldBy );
sewardj1d7c3322011-02-28 09:22:51 +0000218 if (admin_locks == lk) {
219 admin_locks = lk->admin_next;
220 if (admin_locks)
221 admin_locks->admin_prev = NULL;
222 }
223 else {
224 lk->admin_prev->admin_next = lk->admin_next;
225 lk->admin_next->admin_prev = lk->admin_prev;
226 }
227
sewardjb4112022007-11-09 22:49:28 +0000228 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000229 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000230}
231
232/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
233 it. This is done strictly: only combinations resulting from
234 correct program and libpthread behaviour are allowed. */
235static void lockN_acquire_writer ( Lock* lk, Thread* thr )
236{
sewardjf98e1c02008-10-25 16:22:41 +0000237 tl_assert(HG_(is_sane_LockN)(lk));
238 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000239
240 stats__lockN_acquires++;
241
242 /* EXPOSITION only */
243 /* We need to keep recording snapshots of where the lock was
244 acquired, so as to produce better lock-order error messages. */
245 if (lk->acquired_at == NULL) {
246 ThreadId tid;
247 tl_assert(lk->heldBy == NULL);
248 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
249 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000250 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000251 } else {
252 tl_assert(lk->heldBy != NULL);
253 }
254 /* end EXPOSITION only */
255
256 switch (lk->kind) {
257 case LK_nonRec:
258 case_LK_nonRec:
259 tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
260 tl_assert(!lk->heldW);
261 lk->heldW = True;
sewardjf98e1c02008-10-25 16:22:41 +0000262 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
sewardj896f6f92008-08-19 08:38:52 +0000263 VG_(addToBag)( lk->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +0000264 break;
265 case LK_mbRec:
266 if (lk->heldBy == NULL)
267 goto case_LK_nonRec;
268 /* 2nd and subsequent locking of a lock by its owner */
269 tl_assert(lk->heldW);
270 /* assert: lk is only held by one thread .. */
sewardj896f6f92008-08-19 08:38:52 +0000271 tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
sewardjb4112022007-11-09 22:49:28 +0000272 /* assert: .. and that thread is 'thr'. */
sewardj896f6f92008-08-19 08:38:52 +0000273 tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
274 == VG_(sizeTotalBag)(lk->heldBy));
275 VG_(addToBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000276 break;
277 case LK_rdwr:
278 tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
279 goto case_LK_nonRec;
280 default:
281 tl_assert(0);
282 }
sewardjf98e1c02008-10-25 16:22:41 +0000283 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000284}
285
286static void lockN_acquire_reader ( Lock* lk, Thread* thr )
287{
sewardjf98e1c02008-10-25 16:22:41 +0000288 tl_assert(HG_(is_sane_LockN)(lk));
289 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000290 /* can only add reader to a reader-writer lock. */
291 tl_assert(lk->kind == LK_rdwr);
292 /* lk must be free or already r-held. */
293 tl_assert(lk->heldBy == NULL
294 || (lk->heldBy != NULL && !lk->heldW));
295
296 stats__lockN_acquires++;
297
298 /* EXPOSITION only */
299 /* We need to keep recording snapshots of where the lock was
300 acquired, so as to produce better lock-order error messages. */
301 if (lk->acquired_at == NULL) {
302 ThreadId tid;
303 tl_assert(lk->heldBy == NULL);
304 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
305 lk->acquired_at
sewardjf98e1c02008-10-25 16:22:41 +0000306 = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
sewardjb4112022007-11-09 22:49:28 +0000307 } else {
308 tl_assert(lk->heldBy != NULL);
309 }
310 /* end EXPOSITION only */
311
312 if (lk->heldBy) {
sewardj896f6f92008-08-19 08:38:52 +0000313 VG_(addToBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000314 } else {
315 lk->heldW = False;
sewardjf98e1c02008-10-25 16:22:41 +0000316 lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
sewardj896f6f92008-08-19 08:38:52 +0000317 VG_(addToBag)( lk->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +0000318 }
319 tl_assert(!lk->heldW);
sewardjf98e1c02008-10-25 16:22:41 +0000320 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000321}
322
323/* Update 'lk' to reflect a release of it by 'thr'. This is done
324 strictly: only combinations resulting from correct program and
325 libpthread behaviour are allowed. */
326
327static void lockN_release ( Lock* lk, Thread* thr )
328{
329 Bool b;
sewardjf98e1c02008-10-25 16:22:41 +0000330 tl_assert(HG_(is_sane_LockN)(lk));
331 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000332 /* lock must be held by someone */
333 tl_assert(lk->heldBy);
334 stats__lockN_releases++;
335 /* Remove it from the holder set */
sewardj896f6f92008-08-19 08:38:52 +0000336 b = VG_(delFromBag)(lk->heldBy, (Word)thr);
sewardjb4112022007-11-09 22:49:28 +0000337 /* thr must actually have been a holder of lk */
338 tl_assert(b);
339 /* normalise */
340 tl_assert(lk->acquired_at);
sewardj896f6f92008-08-19 08:38:52 +0000341 if (VG_(isEmptyBag)(lk->heldBy)) {
342 VG_(deleteBag)(lk->heldBy);
sewardjb4112022007-11-09 22:49:28 +0000343 lk->heldBy = NULL;
344 lk->heldW = False;
345 lk->acquired_at = NULL;
346 }
sewardjf98e1c02008-10-25 16:22:41 +0000347 tl_assert(HG_(is_sane_LockN)(lk));
sewardjb4112022007-11-09 22:49:28 +0000348}
349
350static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
351{
352 Thread* thr;
353 if (!lk->heldBy) {
354 tl_assert(!lk->heldW);
355 return;
356 }
357 /* for each thread that holds this lock do ... */
sewardj896f6f92008-08-19 08:38:52 +0000358 VG_(initIterBag)( lk->heldBy );
359 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
sewardjf98e1c02008-10-25 16:22:41 +0000360 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000361 tl_assert(HG_(elemWS)( univ_lsets,
362 thr->locksetA, (Word)lk ));
363 thr->locksetA
364 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );
365
366 if (lk->heldW) {
367 tl_assert(HG_(elemWS)( univ_lsets,
368 thr->locksetW, (Word)lk ));
369 thr->locksetW
370 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
371 }
372 }
sewardj896f6f92008-08-19 08:38:52 +0000373 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000374}
375
sewardjb4112022007-11-09 22:49:28 +0000376
377/*----------------------------------------------------------------*/
378/*--- Print out the primary data structures ---*/
379/*----------------------------------------------------------------*/
380
sewardjd52392d2008-11-08 20:36:26 +0000381//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000382
383#define PP_THREADS (1<<1)
384#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000385#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000386
387
388static const Int sHOW_ADMIN = 0;
389
390static void space ( Int n )
391{
392 Int i;
393 Char spaces[128+1];
394 tl_assert(n >= 0 && n < 128);
395 if (n == 0)
396 return;
397 for (i = 0; i < n; i++)
398 spaces[i] = ' ';
399 spaces[i] = 0;
400 tl_assert(i < 128+1);
401 VG_(printf)("%s", spaces);
402}
403
/* Pretty-print one Thread record, indented by d columns. */
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      /* admin-list linkage and magic only shown when requested */
      space(d+3); VG_(printf)("admin %p\n", t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   /* these print the WordSetIDs, not the set contents */
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
415
/* Pretty-print every Thread on the admin_threads list, indented by
   d columns. */
static void pp_admin_threads ( Int d )
{
   Int i, n;
   Thread* t;
   /* first pass: count the records so the header can show a total */
   for (n = 0, t = admin_threads; t; n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads; t; i++, t = t->admin) {
      if (0) {
         /* dead code (if (0)); note it indents by n, not d */
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
433
/* Pretty-print the map_threads table: one line per occupied
   coretid -> Thread* slot, indented by d columns. */
static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   /* count occupied slots first, so the header can show the total */
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
451
452static const HChar* show_LockKind ( LockKind lkk ) {
453 switch (lkk) {
454 case LK_mbRec: return "mbRec";
455 case LK_nonRec: return "nonRec";
456 case LK_rdwr: return "rdwr";
457 default: tl_assert(0);
458 }
459}
460
/* Pretty-print one Lock record, indented by d columns. */
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      /* admin-list linkage and magic only shown when requested */
      space(d+3); VG_(printf)("admin_n %p\n", lk->admin_next);
      space(d+3); VG_(printf)("admin_p %p\n", lk->admin_prev);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      /* dump the holder bag as count:Thread* pairs */
      Thread* thr;
      Word count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
486
/* Pretty-print every Lock on the admin_locks list, indented by
   d columns. */
static void pp_admin_locks ( Int d )
{
   Int i, n;
   Lock* lk;
   /* first pass: count the records so the header can show a total */
   for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin_next) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin_next) {
      if (0) {
         /* dead code (if (0)); note it indents by n, not d */
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}
504
/* Pretty-print the map_locks mapping (guest address -> Lock*),
   indented by d columns. */
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
520
/* Dump all primary data structures selected by 'flags' (a bitmask of
   PP_THREADS / PP_LOCKS); 'caller' identifies the call site in the
   output header. */
static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
543
544#undef SHOW_ADMIN
545
546
547/*----------------------------------------------------------------*/
548/*--- Initialise the primary data structures ---*/
549/*----------------------------------------------------------------*/
550
/* One-time setup of all primary data structures (map_threads,
   map_locks, the word-set universes and the bus-lock shadow), and
   creation of the Thread record for the root thread.  'hbthr_root'
   is the libhb handle for the root thread. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   /* A Lock shadowing the process-wide bus lock (see __bus_lock). */
   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   /* The LAOG (lock-order graph) universe is only needed when
      lock-order tracking is enabled. */
   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}
616
617
618/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000619/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000620/*----------------------------------------------------------------*/
621
622/* Doesn't assert if the relevant map_threads entry is NULL. */
623static Thread* map_threads_maybe_lookup ( ThreadId coretid )
624{
625 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000626 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000627 thr = map_threads[coretid];
628 return thr;
629}
630
631/* Asserts if the relevant map_threads entry is NULL. */
632static inline Thread* map_threads_lookup ( ThreadId coretid )
633{
634 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000635 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000636 thr = map_threads[coretid];
637 tl_assert(thr);
638 return thr;
639}
640
sewardjf98e1c02008-10-25 16:22:41 +0000641/* Do a reverse lookup. Does not assert if 'thr' is not found in
642 map_threads. */
sewardjb4112022007-11-09 22:49:28 +0000643static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
644{
sewardjf98e1c02008-10-25 16:22:41 +0000645 ThreadId tid;
646 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +0000647 /* Check nobody used the invalid-threadid slot */
648 tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
649 tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000650 tid = thr->coretid;
651 tl_assert(HG_(is_sane_ThreadId)(tid));
652 return tid;
sewardjb4112022007-11-09 22:49:28 +0000653}
654
/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   /* the forward and reverse mappings must agree */
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}
665
666static void map_threads_delete ( ThreadId coretid )
667{
668 Thread* thr;
669 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000670 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000671 thr = map_threads[coretid];
672 tl_assert(thr);
673 map_threads[coretid] = NULL;
674}
675
676
677/*----------------------------------------------------------------*/
678/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
679/*----------------------------------------------------------------*/
680
681/* Make sure there is a lock table entry for the given (lock) guest
682 address. If not, create one of the stated 'kind' in unheld state.
683 In any case, return the address of the existing or new Lock. */
684static
685Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
686{
687 Bool found;
688 Lock* oldlock = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000689 tl_assert(HG_(is_sane_ThreadId)(tid));
sewardj896f6f92008-08-19 08:38:52 +0000690 found = VG_(lookupFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000691 NULL, (Word*)&oldlock, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000692 if (!found) {
693 Lock* lock = mk_LockN(lkk, ga);
694 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
sewardjf98e1c02008-10-25 16:22:41 +0000695 tl_assert(HG_(is_sane_LockN)(lock));
sewardj896f6f92008-08-19 08:38:52 +0000696 VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
sewardjb4112022007-11-09 22:49:28 +0000697 tl_assert(oldlock == NULL);
sewardjb4112022007-11-09 22:49:28 +0000698 return lock;
699 } else {
700 tl_assert(oldlock != NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000701 tl_assert(HG_(is_sane_LockN)(oldlock));
sewardjb4112022007-11-09 22:49:28 +0000702 tl_assert(oldlock->guestaddr == ga);
sewardjb4112022007-11-09 22:49:28 +0000703 return oldlock;
704 }
705}
706
707static Lock* map_locks_maybe_lookup ( Addr ga )
708{
709 Bool found;
710 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000711 found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000712 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000713 return lk;
714}
715
716static void map_locks_delete ( Addr ga )
717{
718 Addr ga2 = 0;
719 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000720 VG_(delFromFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000721 (Word*)&ga2, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000722 /* delFromFM produces the val which is being deleted, if it is
723 found. So assert it is non-null; that in effect asserts that we
724 are deleting a (ga, Lock) pair which actually exists. */
725 tl_assert(lk != NULL);
726 tl_assert(ga2 == ga);
727}
728
729
sewardjb4112022007-11-09 22:49:28 +0000730
731/*----------------------------------------------------------------*/
732/*--- Sanity checking the data structures ---*/
733/*----------------------------------------------------------------*/
734
735static UWord stats__sanity_checks = 0;
736
sewardjb4112022007-11-09 22:49:28 +0000737static void laog__sanity_check ( Char* who ); /* fwds */
738
739/* REQUIRED INVARIANTS:
740
741 Thread vs Segment/Lock/SecMaps
742
743 for each t in Threads {
744
745 // Thread.lockset: each element is really a valid Lock
746
747 // Thread.lockset: each Lock in set is actually held by that thread
748 for lk in Thread.lockset
749 lk == LockedBy(t)
750
751 // Thread.csegid is a valid SegmentID
752 // and the associated Segment has .thr == t
753
754 }
755
756 all thread Locksets are pairwise empty under intersection
757 (that is, no lock is claimed to be held by more than one thread)
758 -- this is guaranteed if all locks in locksets point back to their
759 owner threads
760
761 Lock vs Thread/Segment/SecMaps
762
763 for each entry (gla, la) in map_locks
764 gla == la->guest_addr
765
766 for each lk in Locks {
767
768 lk->tag is valid
769 lk->guest_addr does not have shadow state NoAccess
770 if lk == LockedBy(t), then t->lockset contains lk
771 if lk == UnlockedBy(segid) then segid is valid SegmentID
772 and can be mapped to a valid Segment(seg)
773 and seg->thr->lockset does not contain lk
774 if lk == UnlockedNew then (no lockset contains lk)
775
776 secmaps for lk has .mbHasLocks == True
777
778 }
779
780 Segment vs Thread/Lock/SecMaps
781
782 the Segment graph is a dag (no cycles)
783 all of the Segment graph must be reachable from the segids
784 mentioned in the Threads
785
786 for seg in Segments {
787
788 seg->thr is a sane Thread
789
790 }
791
792 SecMaps vs Segment/Thread/Lock
793
794 for sm in SecMaps {
795
796 sm properly aligned
797 if any shadow word is ShR or ShM then .mbHasShared == True
798
799 for each Excl(segid) state
800 map_segments_lookup maps to a sane Segment(seg)
801 for each ShM/ShR(tsetid,lsetid) state
802 each lk in lset is a valid Lock
803 each thr in tset is a valid thread, which is non-dead
804
805 }
806*/
807
808
809/* Return True iff 'thr' holds 'lk' in some mode. */
810static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
811{
812 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000813 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000814 else
815 return False;
816}
817
/* Sanity check Threads, as far as possible.  Walks the global
   admin_threads list and checks, for each thread: the thread itself
   is sane, its write-held lockset is a subset of its all-held
   lockset, and every lock in the all-held set is a sane Lock which
   itself records this thread as a holder.  On any failure, prints
   'who' (a tag identifying the caller) plus a short failure code,
   then asserts. */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
/* Record a short failure code and jump to the report-and-assert
   code at the bottom. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;   /* all locks held, in any mode */
      wsW = thr->locksetW;   /* locks held in write mode only */
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      /* get the underlying element array of the all-held set */
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
851
852
/* Sanity check Locks, as far as possible.  Verifies the mutual
   consistency of the three lock-related structures: the admin_locks
   list, the map_locks (guest address -> Lock*) mapping, and the
   per-thread locksets.  On any failure, prints 'who' plus a short
   failure code and asserts. */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
/* Record a short failure code and jump to the report-and-assert
   code at the bottom. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Addr      gla;
   Lock*     lk;
   Int       i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin_next)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin_next) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            // each holder's all-held lockset must mention this lock
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
917
918
sewardjb4112022007-11-09 22:49:28 +0000919static void all_except_Locks__sanity_check ( Char* who ) {
920 stats__sanity_checks++;
921 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
922 threads__sanity_check(who);
sewardjc1fb9d22011-02-28 09:03:44 +0000923 if (HG_(clo_track_lockorders))
924 laog__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000925}
926static void all__sanity_check ( Char* who ) {
927 all_except_Locks__sanity_check(who);
928 locks__sanity_check(who);
929}
930
931
932/*----------------------------------------------------------------*/
933/*--- the core memory state machine (msm__* functions) ---*/
934/*----------------------------------------------------------------*/
935
sewardjd52392d2008-11-08 20:36:26 +0000936//static WordSetID add_BHL ( WordSetID lockset ) {
937// return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
938//}
939//static WordSetID del_BHL ( WordSetID lockset ) {
940// return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
941//}
sewardjb4112022007-11-09 22:49:28 +0000942
943
sewardjd52392d2008-11-08 20:36:26 +0000944///* Last-lock-lossage records. This mechanism exists to help explain
945// to programmers why we are complaining about a race. The idea is to
946// monitor all lockset transitions. When a previously nonempty
947// lockset becomes empty, the lock(s) that just disappeared (the
948// "lossage") are the locks that have consistently protected the
949// location (ga_of_access) in question for the longest time. Most of
950// the time the lossage-set is a single lock. Because the
// lossage-lock is the one that has survived longest, there is
// a good chance that it is indeed the lock that the programmer
953// intended to use to protect the location.
954//
955// Note that we cannot in general just look at the lossage set when we
956// see a transition to ShM(...,empty-set), because a transition to an
957// empty lockset can happen arbitrarily far before the point where we
958// want to report an error. This is in the case where there are many
959// transitions ShR -> ShR, all with an empty lockset, and only later
960// is there a transition to ShM. So what we want to do is note the
961// lossage lock at the point where a ShR -> ShR transition empties out
962// the lockset, so we can present it later if there should be a
963// transition to ShM.
964//
965// So this function finds such transitions. For each, it associates
966// in ga_to_lastlock, the guest address and the lossage lock. In fact
967// we do not record the Lock* directly as that may disappear later,
968// but instead the ExeContext inside the Lock which says where it was
969// initialised or first locked. ExeContexts are permanent so keeping
970// them indefinitely is safe.
971//
972// A boring detail: the hardware bus lock is not interesting in this
973// respect, so we first remove that from the pre/post locksets.
974//*/
975//
976//static UWord stats__ga_LL_adds = 0;
977//
978//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
979//
980//static
981//void record_last_lock_lossage ( Addr ga_of_access,
982// WordSetID lset_old, WordSetID lset_new )
983//{
984// Lock* lk;
985// Int card_old, card_new;
986//
987// tl_assert(lset_old != lset_new);
988//
989// if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
990// (Int)lset_old,
991// HG_(cardinalityWS)(univ_lsets,lset_old),
992// (Int)lset_new,
993// HG_(cardinalityWS)(univ_lsets,lset_new),
994// ga_of_access );
995//
996// /* This is slow, but at least it's simple. The bus hardware lock
997// just confuses the logic, so remove it from the locksets we're
998// considering before doing anything else. */
999// lset_new = del_BHL( lset_new );
1000//
1001// if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
1002// /* The post-transition lock set is not empty. So we are not
1003// interested. We're only interested in spotting transitions
1004// that make locksets become empty. */
1005// return;
1006// }
1007//
1008// /* lset_new is now empty */
1009// card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
1010// tl_assert(card_new == 0);
1011//
1012// lset_old = del_BHL( lset_old );
1013// card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
1014//
1015// if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
1016// (Int)lset_old, card_old, (Int)lset_new, card_new );
1017//
1018// if (card_old == 0) {
1019// /* The old lockset was also empty. Not interesting. */
1020// return;
1021// }
1022//
1023// tl_assert(card_old > 0);
1024// tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
1025//
1026// /* Now we know we've got a transition from a nonempty lockset to an
1027// empty one. So lset_old must be the set of locks lost. Record
1028// some details. If there is more than one element in the lossage
1029// set, just choose one arbitrarily -- not the best, but at least
1030// it's simple. */
1031//
1032// lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
1033// if (0) VG_(printf)("lossage %ld %p\n",
1034// HG_(cardinalityWS)( univ_lsets, lset_old), lk );
1035// if (lk->appeared_at) {
1036// if (ga_to_lastlock == NULL)
1037// ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
1038// VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
1039// stats__ga_LL_adds++;
1040// }
1041//}
1042//
1043///* This queries the table (ga_to_lastlock) made by
1044// record_last_lock_lossage, when constructing error messages. It
1045// attempts to find the ExeContext of the allocation or initialisation
1046// point for the lossage lock associated with 'ga'. */
1047//
1048//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
1049//{
1050// ExeContext* ec_hint = NULL;
1051// if (ga_to_lastlock != NULL
1052// && VG_(lookupFM)(ga_to_lastlock,
1053// NULL, (Word*)&ec_hint, ga)) {
1054// tl_assert(ec_hint != NULL);
1055// return ec_hint;
1056// } else {
1057// return NULL;
1058// }
1059//}
sewardjb4112022007-11-09 22:49:28 +00001060
1061
sewardjb4112022007-11-09 22:49:28 +00001062/*----------------------------------------------------------------*/
1063/*--- Shadow value and address range handlers ---*/
1064/*----------------------------------------------------------------*/
1065
1066static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001067//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001068static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001069__attribute__((noinline))
1070static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001071
sewardjb4112022007-11-09 22:49:28 +00001072
1073/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +00001074/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1075 Is that a problem? (hence 'scopy' rather than 'ccopy') */
1076static void shadow_mem_scopy_range ( Thread* thr,
1077 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +00001078{
1079 Thr* hbthr = thr->hbthr;
1080 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001081 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +00001082}
1083
sewardj23f12002009-07-24 08:45:08 +00001084static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1085{
sewardjf98e1c02008-10-25 16:22:41 +00001086 Thr* hbthr = thr->hbthr;
1087 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001088 LIBHB_CREAD_N(hbthr, a, len);
1089}
1090
1091static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1092 Thr* hbthr = thr->hbthr;
1093 tl_assert(hbthr);
1094 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001095}
1096
/* Mark the shadow state for [a, a+len) as "New" for 'thr', via
   libhb. */
static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}
1101
/* Mark the shadow state for [aIN, aIN+len) as NoAccess for 'thr',
   via libhb. */
static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   /* disabled debug trace for large ranges; flip the 0 to enable */
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}
1108
/* Mark the shadow state for [aIN, aIN+len) as Untracked for 'thr',
   via libhb. */
static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   /* disabled debug trace for large ranges; flip the 0 to enable */
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}
1115
sewardjb4112022007-11-09 22:49:28 +00001116
1117/*----------------------------------------------------------------*/
1118/*--- Event handlers (evh__* functions) ---*/
1119/*--- plus helpers (evhH__* functions) ---*/
1120/*----------------------------------------------------------------*/
1121
1122/*--------- Event handler helpers (evhH__* functions) ---------*/
1123
1124/* Create a new segment for 'thr', making it depend (.prev) on its
1125 existing segment, bind together the SegmentID and Segment, and
1126 return both of them. Also update 'thr' so it references the new
1127 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001128//zz static
1129//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1130//zz /*OUT*/Segment** new_segP,
1131//zz Thread* thr )
1132//zz {
1133//zz Segment* cur_seg;
1134//zz tl_assert(new_segP);
1135//zz tl_assert(new_segidP);
1136//zz tl_assert(HG_(is_sane_Thread)(thr));
1137//zz cur_seg = map_segments_lookup( thr->csegid );
1138//zz tl_assert(cur_seg);
1139//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1140//zz at their owner thread. */
1141//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1142//zz *new_segidP = alloc_SegmentID();
1143//zz map_segments_add( *new_segidP, *new_segP );
1144//zz thr->csegid = *new_segidP;
1145//zz }
sewardjb4112022007-11-09 22:49:28 +00001146
1147
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks.  Control flow: the
   success paths 'goto noerror' (which updates the lock-order graph
   and the thread's locksets), while detected libpthread bugs are
   reported and 'goto error' (which skips those updates). */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1243
1244
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks.  Control flow
   mirrors evhH__post_thread_w_acquires_lock: success paths
   'goto noerror', detected libpthread bugs 'goto error'. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1318
1319
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks.  Unlike
   the acquire handlers, this runs BEFORE libpthread validates the
   unlock, so client bugs (unlocking an unheld or foreign-held lock)
   must be detected and reported here without corrupting our state. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* complain (but continue) if the wrapper-derived lock kind and the
      recorded lock kind disagree */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1458
1459
sewardj9f569b72008-11-13 13:33:09 +00001460/* ---------------------------------------------------------- */
1461/* -------- Event handlers proper (evh__* functions) -------- */
1462/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001463
1464/* What is the Thread* for the currently running thread? This is
1465 absolutely performance critical. We receive notifications from the
1466 core for client code starts/stops, and cache the looked-up result
1467 in 'current_Thread'. Hence, for the vast majority of requests,
1468 finding the current thread reduces to a read of a global variable,
1469 provided get_current_Thread_in_C_C is inlined.
1470
1471 Outside of client code, current_Thread is NULL, and presumably
1472 any uses of it will cause a segfault. Hence:
1473
1474 - for uses definitely within client code, use
1475 get_current_Thread_in_C_C.
1476
1477 - for all other uses, use get_current_Thread.
1478*/
1479
/* Cache of the Thread* currently running client code; NULL whenever
   we are outside client code (see evh__start_client_code /
   evh__stop_client_code below).  current_Thread_prev remembers the
   last thread for which libhb_Thr_resumes was signalled, so it is
   only re-signalled on an actual thread switch. */
static Thread *current_Thread = NULL,
              *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001482
/* Core callback: thread 'tid' is about to run client code.  Caches
   its Thread* in current_Thread, and notifies libhb when the running
   thread has actually changed since the last resume. */
static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   /* disabled debug trace; flip the 0 to enable */
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   /* stop/start callbacks must alternate, so the cache is empty here */
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   /* only tell libhb about a resume when the thread really changed */
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
/* Core callback: thread 'tid' has stopped running client code.
   Clears the current_Thread cache and gives libhb a chance to
   garbage-collect. */
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   /* disabled debug trace; flip the 0 to enable */
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
/* Fast path: return the cached current Thread*.  Only valid while
   executing client code; outside client code this returns NULL (see
   the comment block above). */
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
1502static inline Thread* get_current_Thread ( void ) {
1503 ThreadId coretid;
1504 Thread* thr;
1505 thr = get_current_Thread_in_C_C();
1506 if (LIKELY(thr))
1507 return thr;
1508 /* evidently not in client code. Do it the slow way. */
1509 coretid = VG_(get_running_tid)();
1510 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001511 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001512 of initial memory layout) and VG_(get_running_tid)() returns
1513 VG_INVALID_THREADID at that point. */
1514 if (coretid == VG_INVALID_THREADID)
1515 coretid = 1; /* KLUDGE */
1516 thr = map_threads_lookup( coretid );
1517 return thr;
1518}
1519
1520static
1521void evh__new_mem ( Addr a, SizeT len ) {
1522 if (SHOW_EVENTS >= 2)
1523 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1524 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001525 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001526 all__sanity_check("evh__new_mem-post");
1527}
1528
1529static
sewardj1f77fec2010-04-12 19:51:04 +00001530void evh__new_mem_stack ( Addr a, SizeT len ) {
1531 if (SHOW_EVENTS >= 2)
1532 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1533 shadow_mem_make_New( get_current_Thread(),
1534 -VG_STACK_REDZONE_SZB + a, len );
1535 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1536 all__sanity_check("evh__new_mem_stack-post");
1537}
1538
1539static
sewardj7cf4e6b2008-05-01 20:24:26 +00001540void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1541 if (SHOW_EVENTS >= 2)
1542 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1543 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001544 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001545 all__sanity_check("evh__new_mem_w_tid-post");
1546}
1547
1548static
sewardjb4112022007-11-09 22:49:28 +00001549void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001550 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001551 if (SHOW_EVENTS >= 1)
1552 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1553 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1554 if (rr || ww || xx)
1555 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001556 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001557 all__sanity_check("evh__new_mem_w_perms-post");
1558}
1559
1560static
1561void evh__set_perms ( Addr a, SizeT len,
1562 Bool rr, Bool ww, Bool xx ) {
1563 if (SHOW_EVENTS >= 1)
1564 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1565 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1566 /* Hmm. What should we do here, that actually makes any sense?
1567 Let's say: if neither readable nor writable, then declare it
1568 NoAccess, else leave it alone. */
1569 if (!(rr || ww))
1570 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001571 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001572 all__sanity_check("evh__set_perms-post");
1573}
1574
1575static
1576void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001577 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001578 if (SHOW_EVENTS >= 2)
1579 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1580 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001581 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001582 all__sanity_check("evh__die_mem-post");
1583}
1584
1585static
sewardj406bac82010-03-03 23:03:40 +00001586void evh__untrack_mem ( Addr a, SizeT len ) {
1587 // whereas it doesn't ignore this
1588 if (SHOW_EVENTS >= 2)
1589 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1590 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1591 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1592 all__sanity_check("evh__untrack_mem-post");
1593}
1594
1595static
sewardj23f12002009-07-24 08:45:08 +00001596void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1597 if (SHOW_EVENTS >= 2)
1598 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1599 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1600 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1601 all__sanity_check("evh__copy_mem-post");
1602}
1603
/* Low-level (clone()-level) thread creation: 'parent' is about to
   create 'child'.  Build the Helgrind-side state for the child: a new
   libhb Thr derived from the parent's (which establishes the
   parent->child happens-before edge inside libhb), a new Thread
   record bound to it, and a map_threads entry.  Also snapshot the
   parent's stack as the child's creation point for error messages. */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   /* parent == VG_INVALID_THREADID is possible (e.g. for the initial
      thread); in that case there is nothing to link up and we only do
      the optional sanity check at the end. */
   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      /* The parent must already be known; the child slot must be free. */
      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      /* Derive the child's libhb thread from the parent's. */
      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1666
/* Low-level thread exit for 'quit_tid': report any still-held locks,
   tell libhb the thread is (asynchronously) gone, and free up the
   map_threads slot so the core can reuse the ThreadId. */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* nHeld is bounded by the number of locks, so the fixed-size
         buffer is ample for this format string. */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1719
sewardj61bc2c52011-02-09 10:34:00 +00001720/* This is called immediately after fork, for the child only. 'tid'
1721 is the only surviving thread (as per POSIX rules on fork() in
1722 threaded programs), so we have to clean up map_threads to remove
1723 entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid': in the child process only
      'tid' survives the fork, so every other Thread must be retired
      exactly as if it had exited. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}
1748
sewardjf98e1c02008-10-25 16:22:41 +00001749
/* A pthread_join by 'stay_tid' on 'quit_thr' has completed.  Create
   a quitter -> stayer happens-before dependence via a temporary SO,
   so accesses after the join do not race with the joined thread. */
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Both the send and the recv are strong.  NOTE(review): an earlier
      version of this comment said the send's last arg should be False
      (to avoid stack-snapshotting the no-longer-existing quitter),
      but the code passes True/*strong_send*/ -- confirm which is
      intended. */
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1805
1806static
1807void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1808 Addr a, SizeT size) {
1809 if (SHOW_EVENTS >= 2
1810 || (SHOW_EVENTS >= 1 && size != 1))
1811 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1812 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001813 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001814 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001815 all__sanity_check("evh__pre_mem_read-post");
1816}
1817
1818static
1819void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1820 Char* s, Addr a ) {
1821 Int len;
1822 if (SHOW_EVENTS >= 1)
1823 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1824 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001825 // Don't segfault if the string starts in an obviously stupid
1826 // place. Actually we should check the whole string, not just
1827 // the start address, but that's too much trouble. At least
1828 // checking the first byte is better than nothing. See #255009.
1829 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1830 return;
sewardjb4112022007-11-09 22:49:28 +00001831 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001832 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001833 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001834 all__sanity_check("evh__pre_mem_read_asciiz-post");
1835}
1836
1837static
1838void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1839 Addr a, SizeT size ) {
1840 if (SHOW_EVENTS >= 1)
1841 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1842 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001843 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001844 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001845 all__sanity_check("evh__pre_mem_write-post");
1846}
1847
1848static
1849void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1850 if (SHOW_EVENTS >= 1)
1851 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1852 (void*)a, len, (Int)is_inited );
1853 // FIXME: this is kinda stupid
1854 if (is_inited) {
1855 shadow_mem_make_New(get_current_Thread(), a, len);
1856 } else {
1857 shadow_mem_make_New(get_current_Thread(), a, len);
1858 }
sewardjf98e1c02008-10-25 16:22:41 +00001859 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001860 all__sanity_check("evh__pre_mem_read-post");
1861}
1862
1863static
1864void evh__die_mem_heap ( Addr a, SizeT len ) {
1865 if (SHOW_EVENTS >= 1)
1866 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1867 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001868 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001869 all__sanity_check("evh__pre_mem_read-post");
1870}
1871
sewardj23f12002009-07-24 08:45:08 +00001872/* --- Event handlers called from generated code --- */
1873
sewardjb4112022007-11-09 22:49:28 +00001874static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001875void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001876 Thread* thr = get_current_Thread_in_C_C();
1877 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001878 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001879}
sewardjf98e1c02008-10-25 16:22:41 +00001880
sewardjb4112022007-11-09 22:49:28 +00001881static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001882void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001883 Thread* thr = get_current_Thread_in_C_C();
1884 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001885 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001886}
sewardjf98e1c02008-10-25 16:22:41 +00001887
sewardjb4112022007-11-09 22:49:28 +00001888static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001889void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001890 Thread* thr = get_current_Thread_in_C_C();
1891 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001892 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001893}
sewardjf98e1c02008-10-25 16:22:41 +00001894
sewardjb4112022007-11-09 22:49:28 +00001895static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001896void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001897 Thread* thr = get_current_Thread_in_C_C();
1898 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001899 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001900}
sewardjf98e1c02008-10-25 16:22:41 +00001901
sewardjb4112022007-11-09 22:49:28 +00001902static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001903void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001904 Thread* thr = get_current_Thread_in_C_C();
1905 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001906 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001907}
1908
1909static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001910void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001911 Thread* thr = get_current_Thread_in_C_C();
1912 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001913 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001914}
sewardjf98e1c02008-10-25 16:22:41 +00001915
sewardjb4112022007-11-09 22:49:28 +00001916static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001917void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001918 Thread* thr = get_current_Thread_in_C_C();
1919 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001920 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001921}
sewardjf98e1c02008-10-25 16:22:41 +00001922
sewardjb4112022007-11-09 22:49:28 +00001923static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001924void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001925 Thread* thr = get_current_Thread_in_C_C();
1926 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001927 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001928}
sewardjf98e1c02008-10-25 16:22:41 +00001929
sewardjb4112022007-11-09 22:49:28 +00001930static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001931void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001932 Thread* thr = get_current_Thread_in_C_C();
1933 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001934 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001935}
sewardjf98e1c02008-10-25 16:22:41 +00001936
sewardjb4112022007-11-09 22:49:28 +00001937static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001938void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001939 Thread* thr = get_current_Thread_in_C_C();
1940 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001941 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001942}
1943
sewardjb4112022007-11-09 22:49:28 +00001944
sewardj9f569b72008-11-13 13:33:09 +00001945/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001946/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001947/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001948
1949/* EXPOSITION only: by intercepting lock init events we can show the
1950 user where the lock was initialised, rather than only being able to
1951 show where it was first locked. Intercepting lock initialisations
1952 is not necessary for the basic operation of the race checker. */
1953static
1954void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1955 void* mutex, Word mbRec )
1956{
1957 if (SHOW_EVENTS >= 1)
1958 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1959 (Int)tid, mbRec, (void*)mutex );
1960 tl_assert(mbRec == 0 || mbRec == 1);
1961 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1962 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001963 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001964 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1965}
1966
/* pthread_mutex_destroy is about to run on 'mutex'.  Validate the
   argument, complain if the mutex is still locked (and force-unlock
   it so our state stays consistent), then retire the Lock record. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Unknown address, or known but not a mutex-kind lock: report it,
      but still clean up below if a Lock record exists. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop the lock from the lock-order graph (if tracked), then
         from the address map, then free the record itself. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
2013
/* pthread_mutex_lock (or trylock, per 'isTryLock') is about to run.
   No state changes happen here (that's LOCK_POST); this only checks
   the argument and reports two error cases: locking an rwlock via the
   mutex API, and re-locking a non-recursive lock we already w-hold
   (an imminent self-deadlock). */
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Wrong API: this address is known to be a rwlock. */
   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   /* Self-deadlock check: only for real lock ops (not trylock), on
      non-recursive kinds, where we already hold the lock in w-mode. */
   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HChar* errstr = "Attempt to re-lock a "
                      "non-recursive lock I already hold";
      HChar* auxstr = "Lock was previously acquired";
      /* Attach the original acquisition site when we have it. */
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}
2056
2057static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2058{
2059 // only called if the real library call succeeded - so mutex is sane
2060 Thread* thr;
2061 if (SHOW_EVENTS >= 1)
2062 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2063 (Int)tid, (void*)mutex );
2064
2065 thr = map_threads_maybe_lookup( tid );
2066 tl_assert(thr); /* cannot fail - Thread* must already exist */
2067
2068 evhH__post_thread_w_acquires_lock(
2069 thr,
2070 LK_mbRec, /* if not known, create new lock with this LockKind */
2071 (Addr)mutex
2072 );
2073}
2074
2075static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2076{
2077 // 'mutex' may be invalid - not checked by wrapper
2078 Thread* thr;
2079 if (SHOW_EVENTS >= 1)
2080 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2081 (Int)tid, (void*)mutex );
2082
2083 thr = map_threads_maybe_lookup( tid );
2084 tl_assert(thr); /* cannot fail - Thread* must already exist */
2085
2086 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2087}
2088
2089static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2090{
2091 // only called if the real library call succeeded - so mutex is sane
2092 Thread* thr;
2093 if (SHOW_EVENTS >= 1)
2094 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2095 (Int)tid, (void*)mutex );
2096 thr = map_threads_maybe_lookup( tid );
2097 tl_assert(thr); /* cannot fail - Thread* must already exist */
2098
2099 // anything we should do here?
2100}
2101
2102
sewardj5a644da2009-08-11 10:35:58 +00002103/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002104/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002105/* ------------------------------------------------------- */
2106
2107/* All a bit of a kludge. Pretend we're really dealing with ordinary
2108 pthread_mutex_t's instead, for the most part. */
2109
2110static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2111 void* slock )
2112{
2113 Thread* thr;
2114 Lock* lk;
2115 /* In glibc's kludgey world, we're either initialising or unlocking
2116 it. Since this is the pre-routine, if it is locked, unlock it
2117 and take a dependence edge. Otherwise, do nothing. */
2118
2119 if (SHOW_EVENTS >= 1)
2120 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2121 "(ctid=%d, slock=%p)\n",
2122 (Int)tid, (void*)slock );
2123
2124 thr = map_threads_maybe_lookup( tid );
2125 /* cannot fail - Thread* must already exist */;
2126 tl_assert( HG_(is_sane_Thread)(thr) );
2127
2128 lk = map_locks_maybe_lookup( (Addr)slock );
2129 if (lk && lk->heldBy) {
2130 /* it's held. So do the normal pre-unlock actions, as copied
2131 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2132 duplicates the map_locks_maybe_lookup. */
2133 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2134 False/*!isRDWR*/ );
2135 }
2136}
2137
2138static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2139 void* slock )
2140{
2141 Lock* lk;
2142 /* More kludgery. If the lock has never been seen before, do
2143 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2144 nothing. */
2145
2146 if (SHOW_EVENTS >= 1)
2147 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2148 "(ctid=%d, slock=%p)\n",
2149 (Int)tid, (void*)slock );
2150
2151 lk = map_locks_maybe_lookup( (Addr)slock );
2152 if (!lk) {
2153 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2154 }
2155}
2156
/* pthread_spin_lock/trylock pre: spinlocks are modelled as ordinary
   mutexes, so just forward to the mutex pre-lock handler. */
static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}
2162
/* pthread_spin_lock post: forward to the mutex post-lock handler. */
static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}
2168
/* pthread_spin_destroy pre: forward to the mutex destroy handler. */
static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}
2174
2175
sewardj9f569b72008-11-13 13:33:09 +00002176/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002177/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002178/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002179
sewardj02114542009-07-28 20:52:36 +00002180/* A mapping from CV to (the SO associated with it, plus some
2181 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002182 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2183 wait on it completes, we do a 'recv' from the SO. This is believed
2184 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002185 signallings/broadcasts.
2186*/
2187
sewardj02114542009-07-28 20:52:36 +00002188/* .so is the SO for this CV.
2189 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002190
sewardj02114542009-07-28 20:52:36 +00002191 POSIX says effectively that the first pthread_cond_{timed}wait call
2192 causes a dynamic binding between the CV and the mutex, and that
2193 lasts until such time as the waiter count falls to zero. Hence
2194 need to keep track of the number of waiters in order to do
2195 consistency tracking. */
2196typedef
2197 struct {
2198 SO* so; /* libhb-allocated SO */
2199 void* mx_ga; /* addr of associated mutex, if any */
2200 UWord nWaiters; /* # threads waiting on the CV */
2201 }
2202 CVInfo;
2203
2204
/* pthread_cond_t* -> CVInfo*.  Lazily allocated; see
   map_cond_to_CVInfo_INIT(). */
static WordFM* map_cond_to_CVInfo = NULL;
2207
2208static void map_cond_to_CVInfo_INIT ( void ) {
2209 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2210 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2211 "hg.mctCI.1", HG_(free), NULL );
2212 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002213 }
2214}
2215
sewardj02114542009-07-28 20:52:36 +00002216static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002217 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002218 map_cond_to_CVInfo_INIT();
2219 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002220 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002221 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002222 } else {
sewardj02114542009-07-28 20:52:36 +00002223 SO* so = libhb_so_alloc();
2224 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2225 cvi->so = so;
2226 cvi->mx_ga = 0;
2227 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2228 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002229 }
2230}
2231
sewardj02114542009-07-28 20:52:36 +00002232static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002233 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002234 map_cond_to_CVInfo_INIT();
2235 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2236 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002237 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002238 tl_assert(cvi);
2239 tl_assert(cvi->so);
2240 libhb_so_dealloc(cvi->so);
2241 cvi->mx_ga = 0;
2242 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002243 }
2244}
2245
/* Handle pthread_cond_{signal,broadcast} (PRE): do a strong send on
   the SO bound to 'cond', so that later successful waiters acquire a
   happens-before edge from this point. */
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* lookup_or_alloc: signalling before any wait is legitimate, so a
      CVInfo may be created here. */
   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."
   //
   // For the moment, disable these checks.
   //lk = map_locks_maybe_lookup(cvi->mx_ga);
   //if (lk == NULL || cvi->mx_ga == 0) {
   //   HG_(record_error_Misc)( thr,
   //      "pthread_cond_{signal,broadcast}: "
   //      "no or invalid mutex associated with cond");
   //}
   ///* note: lk could be NULL.  Be careful. */
   //if (lk) {
   //   if (lk->kind == LK_rdwr) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
   //   }
   //   if (lk->heldBy == NULL) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by any thread");
   //   }
   //   if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by calling thread");
   //   }
   //}

   /* Record this thread's vector clock in the CV's SO (strong send
      ticks the sender's clock too). */
   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2303
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False.  Also establishes/checks the (CV,MX) binding
   and bumps the waiter count.  NB: at most one Misc error is
   reported for a bad mutex, and the checks run in a deliberate
   order (invalid -> rwlock -> unheld -> held-by-other). */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                   void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
         if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   /* Counted down again in the WAIT_POST handler; the binding lasts
      while nWaiters > 0. */
   cvi->nWaiters++;

   return lk_valid;
}
2372
2373static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2374 void* cond, void* mutex )
2375{
sewardjf98e1c02008-10-25 16:22:41 +00002376 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2377 the SO for this cond, and 'recv' from it so as to acquire a
2378 dependency edge back to the signaller/broadcaster. */
2379 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002380 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002381
2382 if (SHOW_EVENTS >= 1)
2383 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2384 "(ctid=%d, cond=%p, mutex=%p)\n",
2385 (Int)tid, (void*)cond, (void*)mutex );
2386
sewardjb4112022007-11-09 22:49:28 +00002387 thr = map_threads_maybe_lookup( tid );
2388 tl_assert(thr); /* cannot fail - Thread* must already exist */
2389
2390 // error-if: cond is also associated with a different mutex
2391
sewardj02114542009-07-28 20:52:36 +00002392 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2393 tl_assert(cvi);
2394 tl_assert(cvi->so);
2395 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002396
sewardj02114542009-07-28 20:52:36 +00002397 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002398 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2399 it? If this happened it would surely be a bug in the threads
2400 library. Or one of those fabled "spurious wakeups". */
2401 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2402 "succeeded on"
2403 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002404 }
sewardjf98e1c02008-10-25 16:22:41 +00002405
2406 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002407 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2408
2409 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002410}
2411
2412static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2413 void* cond )
2414{
2415 /* Deal with destroy events. The only purpose is to free storage
2416 associated with the CV, so as to avoid any possible resource
2417 leaks. */
2418 if (SHOW_EVENTS >= 1)
2419 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2420 "(ctid=%d, cond=%p)\n",
2421 (Int)tid, (void*)cond );
2422
sewardj02114542009-07-28 20:52:36 +00002423 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002424}
2425
2426
sewardj9f569b72008-11-13 13:33:09 +00002427/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002428/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002429/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002430
2431/* EXPOSITION only */
2432static
2433void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2434{
2435 if (SHOW_EVENTS >= 1)
2436 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2437 (Int)tid, (void*)rwl );
2438 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002439 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002440 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2441}
2442
/* Handle pthread_rwlock_destroy (PRE): complain about bogus or
   still-locked arguments, force-release the lock if held, and remove
   all tracking state for it. */
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         /* NOTE(review): message says "mutex" although this is a
            rwlock -- looks like a copy-paste from the mutex handler;
            confirm before changing, since tests may match on it. */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Lock-order graph must forget this lock before it is freed. */
      if (HG_(clo_track_lockorders))
         laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2489
2490static
sewardj789c3c52008-02-25 12:10:07 +00002491void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2492 void* rwl,
2493 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002494{
2495 /* Just check the rwl is sane; nothing else to do. */
2496 // 'rwl' may be invalid - not checked by wrapper
2497 Thread* thr;
2498 Lock* lk;
2499 if (SHOW_EVENTS >= 1)
2500 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2501 (Int)tid, (Int)isW, (void*)rwl );
2502
2503 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002504 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002505 thr = map_threads_maybe_lookup( tid );
2506 tl_assert(thr); /* cannot fail - Thread* must already exist */
2507
2508 lk = map_locks_maybe_lookup( (Addr)rwl );
2509 if ( lk
2510 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2511 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002512 HG_(record_error_Misc)(
2513 thr, "pthread_rwlock_{rd,rw}lock with a "
2514 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002515 }
2516}
2517
2518static
2519void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2520{
2521 // only called if the real library call succeeded - so mutex is sane
2522 Thread* thr;
2523 if (SHOW_EVENTS >= 1)
2524 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2525 (Int)tid, (Int)isW, (void*)rwl );
2526
2527 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2528 thr = map_threads_maybe_lookup( tid );
2529 tl_assert(thr); /* cannot fail - Thread* must already exist */
2530
2531 (isW ? evhH__post_thread_w_acquires_lock
2532 : evhH__post_thread_r_acquires_lock)(
2533 thr,
2534 LK_rdwr, /* if not known, create new lock with this LockKind */
2535 (Addr)rwl
2536 );
2537}
2538
2539static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2540{
2541 // 'rwl' may be invalid - not checked by wrapper
2542 Thread* thr;
2543 if (SHOW_EVENTS >= 1)
2544 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2545 (Int)tid, (void*)rwl );
2546
2547 thr = map_threads_maybe_lookup( tid );
2548 tl_assert(thr); /* cannot fail - Thread* must already exist */
2549
2550 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2551}
2552
2553static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2554{
2555 // only called if the real library call succeeded - so mutex is sane
2556 Thread* thr;
2557 if (SHOW_EVENTS >= 1)
2558 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2559 (Int)tid, (void*)rwl );
2560 thr = map_threads_maybe_lookup( tid );
2561 tl_assert(thr); /* cannot fail - Thread* must already exist */
2562
2563 // anything we should do here?
2564}
2565
2566
sewardj9f569b72008-11-13 13:33:09 +00002567/* ---------------------------------------------------------- */
2568/* -------------- events to do with semaphores -------------- */
2569/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002570
sewardj11e352f2007-11-30 11:11:02 +00002571/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002572 variables. */
2573
sewardjf98e1c02008-10-25 16:22:41 +00002574/* For each semaphore, we maintain a stack of SOs. When a 'post'
2575 operation is done on a semaphore (unlocking, essentially), a new SO
2576 is created for the posting thread, the posting thread does a strong
2577 send to it (which merely installs the posting thread's VC in the
2578 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002579
2580 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002581 semaphore, we pop a SO off the semaphore's stack (which should be
2582 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002583 dependencies between posters and waiters of the semaphore.
2584
sewardjf98e1c02008-10-25 16:22:41 +00002585 It may not be necessary to use a stack - perhaps a bag of SOs would
2586 do. But we do need to keep track of how many unused-up posts have
2587 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002588
sewardjf98e1c02008-10-25 16:22:41 +00002589 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002590 twice on S. T3 cannot complete its waits without both T1 and T2
2591 posting. The above mechanism will ensure that T3 acquires
2592 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002593
sewardjf98e1c02008-10-25 16:22:41 +00002594 When a semaphore is initialised with value N, we do as if we'd
2595 posted N times on the semaphore: basically create N SOs and do a
2596 strong send to all of them. This allows up to N waits on the
2597 semaphore to acquire a dependency on the initialisation point,
2598 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002599
2600 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2601 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002602*/
2603
/* sem_t* -> XArray* SO*.  For each semaphore, a stack of SOs: one SO
   is pushed per sem_post (push_SO_for_sem) and one popped per
   completed sem_wait (mb_pop_SO_for_sem). */
static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002606
sewardjf98e1c02008-10-25 16:22:41 +00002607static void map_sem_to_SO_stack_INIT ( void ) {
2608 if (map_sem_to_SO_stack == NULL) {
2609 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2610 HG_(free), NULL );
2611 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002612 }
2613}
2614
sewardjf98e1c02008-10-25 16:22:41 +00002615static void push_SO_for_sem ( void* sem, SO* so ) {
2616 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002617 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002618 tl_assert(so);
2619 map_sem_to_SO_stack_INIT();
2620 if (VG_(lookupFM)( map_sem_to_SO_stack,
2621 &keyW, (UWord*)&xa, (UWord)sem )) {
2622 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002623 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002624 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002625 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002626 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2627 VG_(addToXA)( xa, &so );
2628 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002629 }
2630}
2631
sewardjf98e1c02008-10-25 16:22:41 +00002632static SO* mb_pop_SO_for_sem ( void* sem ) {
2633 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002634 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002635 SO* so;
2636 map_sem_to_SO_stack_INIT();
2637 if (VG_(lookupFM)( map_sem_to_SO_stack,
2638 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002639 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002640 Word sz;
2641 tl_assert(keyW == (UWord)sem);
2642 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002643 tl_assert(sz >= 0);
2644 if (sz == 0)
2645 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002646 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2647 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002648 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002649 return so;
sewardjb4112022007-11-09 22:49:28 +00002650 } else {
2651 /* hmm, that's odd. No stack for this semaphore. */
2652 return NULL;
2653 }
2654}
2655
sewardj11e352f2007-11-30 11:11:02 +00002656static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002657{
sewardjf98e1c02008-10-25 16:22:41 +00002658 UWord keyW, valW;
2659 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002660
sewardjb4112022007-11-09 22:49:28 +00002661 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002662 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002663 (Int)tid, (void*)sem );
2664
sewardjf98e1c02008-10-25 16:22:41 +00002665 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002666
sewardjf98e1c02008-10-25 16:22:41 +00002667 /* Empty out the semaphore's SO stack. This way of doing it is
2668 stupid, but at least it's easy. */
2669 while (1) {
2670 so = mb_pop_SO_for_sem( sem );
2671 if (!so) break;
2672 libhb_so_dealloc(so);
2673 }
2674
2675 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2676 XArray* xa = (XArray*)valW;
2677 tl_assert(keyW == (UWord)sem);
2678 tl_assert(xa);
2679 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2680 VG_(deleteXA)(xa);
2681 }
sewardjb4112022007-11-09 22:49:28 +00002682}
2683
sewardj11e352f2007-11-30 11:11:02 +00002684static
2685void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2686{
sewardjf98e1c02008-10-25 16:22:41 +00002687 SO* so;
2688 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002689
2690 if (SHOW_EVENTS >= 1)
2691 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2692 (Int)tid, (void*)sem, value );
2693
sewardjf98e1c02008-10-25 16:22:41 +00002694 thr = map_threads_maybe_lookup( tid );
2695 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002696
sewardjf98e1c02008-10-25 16:22:41 +00002697 /* Empty out the semaphore's SO stack. This way of doing it is
2698 stupid, but at least it's easy. */
2699 while (1) {
2700 so = mb_pop_SO_for_sem( sem );
2701 if (!so) break;
2702 libhb_so_dealloc(so);
2703 }
sewardj11e352f2007-11-30 11:11:02 +00002704
sewardjf98e1c02008-10-25 16:22:41 +00002705 /* If we don't do this check, the following while loop runs us out
2706 of memory for stupid initial values of 'value'. */
2707 if (value > 10000) {
2708 HG_(record_error_Misc)(
2709 thr, "sem_init: initial value exceeds 10000; using 10000" );
2710 value = 10000;
2711 }
sewardj11e352f2007-11-30 11:11:02 +00002712
sewardjf98e1c02008-10-25 16:22:41 +00002713 /* Now create 'valid' new SOs for the thread, do a strong send to
2714 each of them, and push them all on the stack. */
2715 for (; value > 0; value--) {
2716 Thr* hbthr = thr->hbthr;
2717 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002718
sewardjf98e1c02008-10-25 16:22:41 +00002719 so = libhb_so_alloc();
2720 libhb_so_send( hbthr, so, True/*strong send*/ );
2721 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002722 }
2723}
2724
2725static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002726{
sewardjf98e1c02008-10-25 16:22:41 +00002727 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2728 it (iow, write our VC into it, then tick ours), and push the SO
2729 on on a stack of SOs associated with 'sem'. This is later used
2730 by other thread(s) which successfully exit from a sem_wait on
2731 the same sem; by doing a strong recv from SOs popped of the
2732 stack, they acquire dependencies on the posting thread
2733 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002734
sewardjf98e1c02008-10-25 16:22:41 +00002735 Thread* thr;
2736 SO* so;
2737 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002738
2739 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002740 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002741 (Int)tid, (void*)sem );
2742
2743 thr = map_threads_maybe_lookup( tid );
2744 tl_assert(thr); /* cannot fail - Thread* must already exist */
2745
2746 // error-if: sem is bogus
2747
sewardjf98e1c02008-10-25 16:22:41 +00002748 hbthr = thr->hbthr;
2749 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002750
sewardjf98e1c02008-10-25 16:22:41 +00002751 so = libhb_so_alloc();
2752 libhb_so_send( hbthr, so, True/*strong send*/ );
2753 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002754}
2755
sewardj11e352f2007-11-30 11:11:02 +00002756static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002757{
sewardjf98e1c02008-10-25 16:22:41 +00002758 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2759 the 'sem' from this semaphore's SO-stack, and do a strong recv
2760 from it. This creates a dependency back to one of the post-ers
2761 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002762
sewardjf98e1c02008-10-25 16:22:41 +00002763 Thread* thr;
2764 SO* so;
2765 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002766
2767 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002768 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002769 (Int)tid, (void*)sem );
2770
2771 thr = map_threads_maybe_lookup( tid );
2772 tl_assert(thr); /* cannot fail - Thread* must already exist */
2773
2774 // error-if: sem is bogus
2775
sewardjf98e1c02008-10-25 16:22:41 +00002776 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002777
sewardjf98e1c02008-10-25 16:22:41 +00002778 if (so) {
2779 hbthr = thr->hbthr;
2780 tl_assert(hbthr);
2781
2782 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2783 libhb_so_dealloc(so);
2784 } else {
2785 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2786 If this happened it would surely be a bug in the threads
2787 library. */
2788 HG_(record_error_Misc)(
2789 thr, "Bug in libpthread: sem_wait succeeded on"
2790 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002791 }
2792}
2793
2794
sewardj9f569b72008-11-13 13:33:09 +00002795/* -------------------------------------------------------- */
2796/* -------------- events to do with barriers -------------- */
2797/* -------------------------------------------------------- */
2798
2799typedef
2800 struct {
2801 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002802 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002803 UWord size; /* declared size */
2804 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2805 }
2806 Bar;
2807
2808static Bar* new_Bar ( void ) {
2809 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2810 tl_assert(bar);
2811 /* all fields are zero */
2812 tl_assert(bar->initted == False);
2813 return bar;
2814}
2815
2816static void delete_Bar ( Bar* bar ) {
2817 tl_assert(bar);
2818 if (bar->waiting)
2819 VG_(deleteXA)(bar->waiting);
2820 HG_(free)(bar);
2821}
2822
2823/* A mapping which stores auxiliary data for barriers. */
2824
/* pthread_barrier_t* -> Bar*.  Lazily allocated; see
   map_barrier_to_Bar_INIT(). */
static WordFM* map_barrier_to_Bar = NULL;
2827
2828static void map_barrier_to_Bar_INIT ( void ) {
2829 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2830 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2831 "hg.mbtBI.1", HG_(free), NULL );
2832 tl_assert(map_barrier_to_Bar != NULL);
2833 }
2834}
2835
2836static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2837 UWord key, val;
2838 map_barrier_to_Bar_INIT();
2839 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2840 tl_assert(key == (UWord)barrier);
2841 return (Bar*)val;
2842 } else {
2843 Bar* bar = new_Bar();
2844 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2845 return bar;
2846 }
2847}
2848
2849static void map_barrier_to_Bar_delete ( void* barrier ) {
2850 UWord keyW, valW;
2851 map_barrier_to_Bar_INIT();
2852 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2853 Bar* bar = (Bar*)valW;
2854 tl_assert(keyW == (UWord)barrier);
2855 delete_Bar(bar);
2856 }
2857}
2858
2859
/* Handle pthread_barrier_init (PRE): validate arguments, complain
   about re-initialisation or stranded waiters, then (re)set the Bar
   record.  The diagnostics are advisory; initialisation always
   proceeds. */
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   /* The wrapper passes resizable as 0 or 1; anything else means the
      client-request arguments were malformed. */
   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      /* Discard the stranded waiters so the barrier restarts clean. */
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}
2915
2916
/* Handle pthread_barrier_destroy (PRE): emit diagnostics for
   never-initialised or still-in-use barriers, then free all storage
   for the barrier so nothing leaks. */
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* lookup_or_alloc: may create a Bar just to find it was never
      initted, which the next check then reports. */
   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
2959
2960
/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   /* Precondition: the barrier is exactly full. */
   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ...  NB: all sends must complete before any
      receive happens, hence the two separate passes. */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
2996
2997
/* Handle a client thread arriving at a pthread_barrier_wait.  Parks
   the thread's shadow Thread* in the barrier's waiting list; when the
   list reaches the barrier's capacity, performs the cross-thread
   vector-clock synchronisation and empties the list. */
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar* bar;
   UWord present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   /* Not the last arrival: nothing more to do yet. */
   if (present < bar->size)
      return;

   /* Last arrival: synchronise everybody and empty the waiting list. */
   do_barrier_cross_sync_and_empty(bar);
}
sewardj9f569b72008-11-13 13:33:09 +00003078
sewardj9f569b72008-11-13 13:33:09 +00003079
sewardj406bac82010-03-03 23:03:40 +00003080static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3081 void* barrier,
3082 UWord newcount )
3083{
3084 Thread* thr;
3085 Bar* bar;
3086 UWord present;
3087
3088 if (SHOW_EVENTS >= 1)
3089 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3090 "(tid=%d, barrier=%p, newcount=%lu)\n",
3091 (Int)tid, (void*)barrier, newcount );
3092
3093 thr = map_threads_maybe_lookup( tid );
3094 tl_assert(thr); /* cannot fail - Thread* must already exist */
3095
3096 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3097 tl_assert(bar);
3098
3099 if (!bar->initted) {
3100 HG_(record_error_Misc)(
3101 thr, "pthread_barrier_resize: barrier is uninitialised"
3102 );
3103 return; /* client is broken .. avoid assertions below */
3104 }
3105
3106 if (!bar->resizable) {
3107 HG_(record_error_Misc)(
3108 thr, "pthread_barrier_resize: barrier is may not be resized"
3109 );
3110 return; /* client is broken .. avoid assertions below */
3111 }
3112
3113 if (newcount == 0) {
3114 HG_(record_error_Misc)(
3115 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3116 );
3117 return; /* client is broken .. avoid assertions below */
3118 }
3119
3120 /* guaranteed by _INIT_PRE above */
3121 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003122 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003123 /* Guaranteed by this fn */
3124 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003125
sewardj406bac82010-03-03 23:03:40 +00003126 if (newcount >= bar->size) {
3127 /* Increasing the capacity. There's no possibility of threads
3128 moving on from the barrier in this situation, so just note
3129 the fact and do nothing more. */
3130 bar->size = newcount;
3131 } else {
3132 /* Decreasing the capacity. If we decrease it to be equal or
3133 below the number of waiting threads, they will now move past
3134 the barrier, so need to mess with dep edges in the same way
3135 as if the barrier had filled up normally. */
3136 present = VG_(sizeXA)(bar->waiting);
3137 tl_assert(present >= 0 && present <= bar->size);
3138 if (newcount <= present) {
3139 bar->size = present; /* keep the cross_sync call happy */
3140 do_barrier_cross_sync_and_empty(bar);
3141 }
3142 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003143 }
sewardj9f569b72008-11-13 13:33:09 +00003144}
3145
3146
/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */



/* UWord -> SO*.  Created lazily by map_usertag_to_SO_INIT. */
static WordFM* map_usertag_to_SO = NULL;
3159
3160static void map_usertag_to_SO_INIT ( void ) {
3161 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3162 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3163 "hg.mutS.1", HG_(free), NULL );
3164 tl_assert(map_usertag_to_SO != NULL);
3165 }
3166}
3167
3168static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3169 UWord key, val;
3170 map_usertag_to_SO_INIT();
3171 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3172 tl_assert(key == (UWord)usertag);
3173 return (SO*)val;
3174 } else {
3175 SO* so = libhb_so_alloc();
3176 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3177 return so;
3178 }
3179}
3180
3181// If it's ever needed (XXX check before use)
3182//static void map_usertag_to_SO_delete ( UWord usertag ) {
3183// UWord keyW, valW;
3184// map_usertag_to_SO_INIT();
3185// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3186// SO* so = (SO*)valW;
3187// tl_assert(keyW == usertag);
3188// tl_assert(so);
3189// libhb_so_dealloc(so);
3190// }
3191//}
3192
3193
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'strong send' on the SO.  This is later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on this signalling event. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}
3218
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
3246
3247
sewardjb4112022007-11-09 22:49:28 +00003248/*--------------------------------------------------------------*/
3249/*--- Lock acquisition order monitoring ---*/
3250/*--------------------------------------------------------------*/
3251
3252/* FIXME: here are some optimisations still to do in
3253 laog__pre_thread_acquires_lock.
3254
3255 The graph is structured so that if L1 --*--> L2 then L1 must be
3256 acquired before L2.
3257
3258 The common case is that some thread T holds (eg) L1 L2 and L3 and
3259 is repeatedly acquiring and releasing Ln, and there is no ordering
3260 error in what it is doing. Hence it repeatly:
3261
3262 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3263 produces the answer No (because there is no error).
3264
3265 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3266 (because they already got added the first time T acquired Ln).
3267
3268 Hence cache these two events:
3269
3270 (1) Cache result of the query from last time. Invalidate the cache
3271 any time any edges are added to or deleted from laog.
3272
3273 (2) Cache these add-edge requests and ignore them if said edges
3274 have already been added to laog. Invalidate the cache any time
3275 any edges are deleted from laog.
3276*/
3277
/* Per-lock node in the lock acquisition order graph: the sets of
   immediate predecessor and successor locks. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
3299
sewardj250ec2e2008-02-15 22:02:30 +00003300static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003301 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3302 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3303 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3304 if (llx1->src_ga < llx2->src_ga) return -1;
3305 if (llx1->src_ga > llx2->src_ga) return 1;
3306 if (llx1->dst_ga < llx2->dst_ga) return -1;
3307 if (llx1->dst_ga > llx2->dst_ga) return 1;
3308 return 0;
3309}
3310
/* Keys are LAOGLinkExposition*s; the mapped values are unused (NULL). */
static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
3313
3314
/* One-time creation of the lock-order graph and its exposition map.
   Must only be called once, and only when lock order tracking is
   enabled. */
__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}
3330
/* Debug aid: dump the entire lock-order graph (every node with its
   in- and out-edge sets) to the log.  WHO identifies the caller in
   the output. */
static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
3357
/* Add the edge src->dst to the lock-order graph, updating both src's
   out-set and dst's in-set (creating the nodes if needed), and, for a
   genuinely new edge, record in laog_exposition where the two locks
   were acquired so the ordering can later be shown to the user. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src has no node yet: create one with a single out-edge. */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* dst has no node yet: create one with a single in-edge. */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   /* Forwards and backwards views of the graph must agree. */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}
3437
/* Remove the edge src->dst from the lock-order graph: drop dst from
   src's out-set and src from dst's in-set.  Nodes absent from the
   graph are silently ignored. */
__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}
3460
3461__attribute__((noinline))
3462static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3463 Word keyW;
3464 LAOGLinks* links;
3465 keyW = 0;
3466 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003467 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003468 tl_assert(links);
3469 tl_assert(keyW == (Word)lk);
3470 return links->outs;
3471 } else {
3472 return HG_(emptyWS)( univ_laog );
3473 }
3474}
3475
3476__attribute__((noinline))
3477static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3478 Word keyW;
3479 LAOGLinks* links;
3480 keyW = 0;
3481 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003482 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003483 tl_assert(links);
3484 tl_assert(keyW == (Word)lk);
3485 return links->inns;
3486 } else {
3487 return HG_(emptyWS)( univ_laog );
3488 }
3489}
3490
/* Check the structural invariant of the lock-order graph: for every
   node, each of its in-edges is mirrored by an out-edge on the peer
   node and vice versa.  Aborts (with a dump of the graph) on any
   inconsistency.  WHO identifies the caller in failure output. */
__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* every in-edge must appear as an out-edge of its source */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      /* every out-edge must appear as an in-edge of its target */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3530
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst',
   return NULL.  Implemented as an iterative depth-first search with
   an explicit stack and a visited set. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      i, ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   Word      succs_size;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* stack exhausted: no destination is reachable */
      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* found a reachable destination */
      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3586
3587
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word   ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
            found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
            NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk)  |  old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3668
3669
/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

/* Remove lock LK from the graph: delete all its in- and out-edges,
   then reconnect each (pred, succ) pair directly so that orderings
   established transitively through LK are preserved. */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   /* detach lk from all its predecessors ... */
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   /* ... and from all its successors ... */
   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* ... then bridge every pred directly to every succ. */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}
3701
sewardj1cbc12f2008-11-10 16:16:46 +00003702//__attribute__((noinline))
3703//static void laog__handle_lock_deletions (
3704// WordSetID /* in univ_laog */ locksToDelete
3705// )
3706//{
3707// Word i, ws_size;
3708// UWord* ws_words;
3709//
sewardj1cbc12f2008-11-10 16:16:46 +00003710//
3711// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3712// for (i = 0; i < ws_size; i++)
3713// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3714//
3715// if (HG_(clo_sanity_flags) & SCE_LAOG)
3716// all__sanity_check("laog__handle_lock_deletions-post");
3717//}
sewardjb4112022007-11-09 22:49:28 +00003718
3719
3720/*--------------------------------------------------------------*/
3721/*--- Malloc/free replacements ---*/
3722/*--------------------------------------------------------------*/
3723
/* Shadow metadata for one client heap block. */
typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously).  Keyed by the block's payload address. */
static VgHashTable hg_mallocmeta_table = NULL;
3737
3738
3739static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003740 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003741 tl_assert(md);
3742 return md;
3743}
3744static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003745 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003746}
3747
3748
/* Allocate a client block and set up the metadata for it. */

/* Common worker for all the client allocator replacements below.
   Allocates SZB bytes with alignment ALIGNB, optionally zeroing,
   registers a MallocMeta for the block, and notifies the lower-level
   memory machinery.  Returns NULL if the client allocation fails. */
static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
3782
3783/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3784 Cast to a signed type to catch any unexpectedly negative args.
3785 We're assuming here that the size asked for is not greater than
3786 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3787 platforms). */
3788static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3789 if (((SSizeT)n) < 0) return NULL;
3790 return handle_alloc ( tid, n, VG_(clo_alignment),
3791 /*is_zeroed*/False );
3792}
3793static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3794 if (((SSizeT)n) < 0) return NULL;
3795 return handle_alloc ( tid, n, VG_(clo_alignment),
3796 /*is_zeroed*/False );
3797}
3798static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3799 if (((SSizeT)n) < 0) return NULL;
3800 return handle_alloc ( tid, n, VG_(clo_alignment),
3801 /*is_zeroed*/False );
3802}
3803static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3804 if (((SSizeT)n) < 0) return NULL;
3805 return handle_alloc ( tid, n, align,
3806 /*is_zeroed*/False );
3807}
3808static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3809 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3810 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3811 /*is_zeroed*/True );
3812}
3813
3814
/* Free a client block, including getting rid of the relevant
   metadata.  Unknown addresses are silently ignored (matches the
   lookup-failure policy of hg_cli__realloc). */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}
3844
/* free() interceptor; thin wrapper over handle_free. */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* operator delete interceptor; thin wrapper over handle_free. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* operator delete[] interceptor; thin wrapper over handle_free. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
3854
3855
3856static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3857{
3858 MallocMeta *md, *md_new, *md_tmp;
3859 SizeT i;
3860
3861 Addr payload = (Addr)payloadV;
3862
3863 if (((SSizeT)new_size) < 0) return NULL;
3864
3865 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3866 if (!md)
3867 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3868
3869 tl_assert(md->payload == payload);
3870
3871 if (md->szB == new_size) {
3872 /* size unchanged */
3873 md->where = VG_(record_ExeContext)(tid, 0);
3874 return payloadV;
3875 }
3876
3877 if (md->szB > new_size) {
3878 /* new size is smaller */
3879 md->szB = new_size;
3880 md->where = VG_(record_ExeContext)(tid, 0);
3881 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3882 return payloadV;
3883 }
3884
3885 /* else */ {
3886 /* new size is bigger */
3887 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3888
3889 /* First half kept and copied, second half new */
3890 // FIXME: shouldn't we use a copier which implements the
3891 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003892 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003893 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003894 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003895 /* FIXME: can anything funny happen here? specifically, if the
3896 old range contained a lock, then die_mem_heap will complain.
3897 Is that the correct behaviour? Not sure. */
3898 evh__die_mem_heap( payload, md->szB );
3899
3900 /* Copy from old to new */
3901 for (i = 0; i < md->szB; i++)
3902 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3903
3904 /* Because the metadata hash table is index by payload address,
3905 we have to get rid of the old hash table entry and make a new
3906 one. We can't just modify the existing metadata in place,
3907 because then it would (almost certainly) be in the wrong hash
3908 chain. */
3909 md_new = new_MallocMeta();
3910 *md_new = *md;
3911
3912 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3913 tl_assert(md_tmp);
3914 tl_assert(md_tmp == md);
3915
3916 VG_(cli_free)((void*)md->payload);
3917 delete_MallocMeta(md);
3918
3919 /* Update fields */
3920 md_new->where = VG_(record_ExeContext)( tid, 0 );
3921 md_new->szB = new_size;
3922 md_new->payload = p_new;
3923 md_new->thr = map_threads_lookup( tid );
3924
3925 /* and add */
3926 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3927
3928 return (void*)p_new;
3929 }
3930}
3931
njn8b140de2009-02-17 04:31:18 +00003932static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3933{
3934 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3935
3936 // There may be slop, but pretend there isn't because only the asked-for
3937 // area will have been shadowed properly.
3938 return ( md ? md->szB : 0 );
3939}
3940
sewardjb4112022007-11-09 22:49:28 +00003941
sewardj095d61e2010-03-11 13:43:18 +00003942/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003943 Slow linear search. With a bit of hash table help if 'data_addr'
3944 is either the start of a block or up to 15 word-sized steps along
3945 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003946
3947static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3948{
sewardjc8028ad2010-05-05 09:34:42 +00003949 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3950 right at it. */
3951 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3952 return True;
3953 /* else normal interval rules apply */
3954 if (LIKELY(a < mm->payload)) return False;
3955 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3956 return True;
sewardj095d61e2010-03-11 13:43:18 +00003957}
3958
sewardjc8028ad2010-05-05 09:34:42 +00003959Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003960 /*OUT*/Addr* payload,
3961 /*OUT*/SizeT* szB,
3962 Addr data_addr )
3963{
3964 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003965 Int i;
3966 const Int n_fast_check_words = 16;
3967
3968 /* First, do a few fast searches on the basis that data_addr might
3969 be exactly the start of a block or up to 15 words inside. This
3970 can happen commonly via the creq
3971 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3972 for (i = 0; i < n_fast_check_words; i++) {
3973 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3974 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3975 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3976 goto found;
3977 }
3978
sewardj095d61e2010-03-11 13:43:18 +00003979 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003980 some such, it's hard to see how to do better. We have to check
3981 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003982 VG_(HT_ResetIter)(hg_mallocmeta_table);
3983 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003984 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3985 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003986 }
sewardjc8028ad2010-05-05 09:34:42 +00003987
3988 /* Not found. Bah. */
3989 return False;
3990 /*NOTREACHED*/
3991
3992 found:
3993 tl_assert(mm);
3994 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3995 if (where) *where = mm->where;
3996 if (payload) *payload = mm->payload;
3997 if (szB) *szB = mm->szB;
3998 return True;
sewardj095d61e2010-03-11 13:43:18 +00003999}
4000
4001
sewardjb4112022007-11-09 22:49:28 +00004002/*--------------------------------------------------------------*/
4003/*--- Instrumentation ---*/
4004/*--------------------------------------------------------------*/
4005
4006static void instrument_mem_access ( IRSB* bbOut,
4007 IRExpr* addr,
4008 Int szB,
4009 Bool isStore,
4010 Int hWordTy_szB )
4011{
4012 IRType tyAddr = Ity_INVALID;
4013 HChar* hName = NULL;
4014 void* hAddr = NULL;
4015 Int regparms = 0;
4016 IRExpr** argv = NULL;
4017 IRDirty* di = NULL;
4018
4019 tl_assert(isIRAtom(addr));
4020 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4021
4022 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
4023 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4024
4025 /* So the effective address is in 'addr' now. */
4026 regparms = 1; // unless stated otherwise
4027 if (isStore) {
4028 switch (szB) {
4029 case 1:
sewardj23f12002009-07-24 08:45:08 +00004030 hName = "evh__mem_help_cwrite_1";
4031 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004032 argv = mkIRExprVec_1( addr );
4033 break;
4034 case 2:
sewardj23f12002009-07-24 08:45:08 +00004035 hName = "evh__mem_help_cwrite_2";
4036 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004037 argv = mkIRExprVec_1( addr );
4038 break;
4039 case 4:
sewardj23f12002009-07-24 08:45:08 +00004040 hName = "evh__mem_help_cwrite_4";
4041 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004042 argv = mkIRExprVec_1( addr );
4043 break;
4044 case 8:
sewardj23f12002009-07-24 08:45:08 +00004045 hName = "evh__mem_help_cwrite_8";
4046 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004047 argv = mkIRExprVec_1( addr );
4048 break;
4049 default:
4050 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4051 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004052 hName = "evh__mem_help_cwrite_N";
4053 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004054 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4055 break;
4056 }
4057 } else {
4058 switch (szB) {
4059 case 1:
sewardj23f12002009-07-24 08:45:08 +00004060 hName = "evh__mem_help_cread_1";
4061 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004062 argv = mkIRExprVec_1( addr );
4063 break;
4064 case 2:
sewardj23f12002009-07-24 08:45:08 +00004065 hName = "evh__mem_help_cread_2";
4066 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004067 argv = mkIRExprVec_1( addr );
4068 break;
4069 case 4:
sewardj23f12002009-07-24 08:45:08 +00004070 hName = "evh__mem_help_cread_4";
4071 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004072 argv = mkIRExprVec_1( addr );
4073 break;
4074 case 8:
sewardj23f12002009-07-24 08:45:08 +00004075 hName = "evh__mem_help_cread_8";
4076 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004077 argv = mkIRExprVec_1( addr );
4078 break;
4079 default:
4080 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4081 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004082 hName = "evh__mem_help_cread_N";
4083 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004084 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4085 break;
4086 }
4087 }
4088
4089 /* Add the helper. */
4090 tl_assert(hName);
4091 tl_assert(hAddr);
4092 tl_assert(argv);
4093 di = unsafeIRDirty_0_N( regparms,
4094 hName, VG_(fnptr_to_fnentry)( hAddr ),
4095 argv );
4096 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
4097}
4098
4099
sewardja0eee322009-07-31 08:46:35 +00004100/* Figure out if GA is a guest code address in the dynamic linker, and
4101 if so return True. Otherwise (and in case of any doubt) return
4102 False. (sidedly safe w/ False as the safe value) */
4103static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4104{
4105 DebugInfo* dinfo;
4106 const UChar* soname;
4107 if (0) return False;
4108
sewardje3f1e592009-07-31 09:41:29 +00004109 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004110 if (!dinfo) return False;
4111
sewardje3f1e592009-07-31 09:41:29 +00004112 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004113 tl_assert(soname);
4114 if (0) VG_(printf)("%s\n", soname);
4115
4116# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004117 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004118 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4119 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4120 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4121 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4122# elif defined(VGO_darwin)
4123 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4124# else
4125# error "Unsupported OS"
4126# endif
4127 return False;
4128}
4129
sewardjb4112022007-11-09 22:49:28 +00004130static
4131IRSB* hg_instrument ( VgCallbackClosure* closure,
4132 IRSB* bbIn,
4133 VexGuestLayout* layout,
4134 VexGuestExtents* vge,
4135 IRType gWordTy, IRType hWordTy )
4136{
sewardj1c0ce7a2009-07-01 08:10:49 +00004137 Int i;
4138 IRSB* bbOut;
4139 Addr64 cia; /* address of current insn */
4140 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004141 Bool inLDSO = False;
4142 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004143
4144 if (gWordTy != hWordTy) {
4145 /* We don't currently support this case. */
4146 VG_(tool_panic)("host/guest word size mismatch");
4147 }
4148
sewardja0eee322009-07-31 08:46:35 +00004149 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4150 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4151 }
4152
sewardjb4112022007-11-09 22:49:28 +00004153 /* Set up BB */
4154 bbOut = emptyIRSB();
4155 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4156 bbOut->next = deepCopyIRExpr(bbIn->next);
4157 bbOut->jumpkind = bbIn->jumpkind;
4158
4159 // Copy verbatim any IR preamble preceding the first IMark
4160 i = 0;
4161 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4162 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4163 i++;
4164 }
4165
sewardj1c0ce7a2009-07-01 08:10:49 +00004166 // Get the first statement, and initial cia from it
4167 tl_assert(bbIn->stmts_used > 0);
4168 tl_assert(i < bbIn->stmts_used);
4169 st = bbIn->stmts[i];
4170 tl_assert(Ist_IMark == st->tag);
4171 cia = st->Ist.IMark.addr;
4172 st = NULL;
4173
sewardjb4112022007-11-09 22:49:28 +00004174 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004175 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004176 tl_assert(st);
4177 tl_assert(isFlatIRStmt(st));
4178 switch (st->tag) {
4179 case Ist_NoOp:
4180 case Ist_AbiHint:
4181 case Ist_Put:
4182 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004183 case Ist_Exit:
4184 /* None of these can contain any memory references. */
4185 break;
4186
sewardj1c0ce7a2009-07-01 08:10:49 +00004187 case Ist_IMark:
4188 /* no mem refs, but note the insn address. */
4189 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004190 /* Don't instrument the dynamic linker. It generates a
4191 lot of races which we just expensively suppress, so
4192 it's pointless.
4193
4194 Avoid flooding is_in_dynamic_linker_shared_object with
4195 requests by only checking at transitions between 4K
4196 pages. */
4197 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4198 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4199 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4200 inLDSO = is_in_dynamic_linker_shared_object(cia);
4201 } else {
4202 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4203 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004204 break;
4205
sewardjb4112022007-11-09 22:49:28 +00004206 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004207 switch (st->Ist.MBE.event) {
4208 case Imbe_Fence:
4209 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004210 default:
4211 goto unhandled;
4212 }
sewardjb4112022007-11-09 22:49:28 +00004213 break;
4214
sewardj1c0ce7a2009-07-01 08:10:49 +00004215 case Ist_CAS: {
4216 /* Atomic read-modify-write cycle. Just pretend it's a
4217 read. */
4218 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004219 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4220 if (isDCAS) {
4221 tl_assert(cas->expdHi);
4222 tl_assert(cas->dataHi);
4223 } else {
4224 tl_assert(!cas->expdHi);
4225 tl_assert(!cas->dataHi);
4226 }
4227 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004228 if (!inLDSO) {
4229 instrument_mem_access(
4230 bbOut,
4231 cas->addr,
4232 (isDCAS ? 2 : 1)
4233 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4234 False/*!isStore*/,
4235 sizeofIRType(hWordTy)
4236 );
4237 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004238 break;
4239 }
4240
sewardjdb5907d2009-11-26 17:20:21 +00004241 case Ist_LLSC: {
4242 /* We pretend store-conditionals don't exist, viz, ignore
4243 them. Whereas load-linked's are treated the same as
4244 normal loads. */
4245 IRType dataTy;
4246 if (st->Ist.LLSC.storedata == NULL) {
4247 /* LL */
4248 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004249 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004250 instrument_mem_access(
4251 bbOut,
4252 st->Ist.LLSC.addr,
4253 sizeofIRType(dataTy),
4254 False/*!isStore*/,
sewardja0eee322009-07-31 08:46:35 +00004255 sizeofIRType(hWordTy)
4256 );
4257 }
sewardjdb5907d2009-11-26 17:20:21 +00004258 } else {
4259 /* SC */
4260 /*ignore */
4261 }
4262 break;
4263 }
4264
4265 case Ist_Store:
4266 /* It seems we pretend that store-conditionals don't
4267 exist, viz, just ignore them ... */
4268 if (!inLDSO) {
4269 instrument_mem_access(
4270 bbOut,
4271 st->Ist.Store.addr,
4272 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4273 True/*isStore*/,
4274 sizeofIRType(hWordTy)
4275 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004276 }
njnb83caf22009-05-25 01:47:56 +00004277 break;
sewardjb4112022007-11-09 22:49:28 +00004278
4279 case Ist_WrTmp: {
sewardj1c0ce7a2009-07-01 08:10:49 +00004280 /* ... whereas here we don't care whether a load is a
4281 vanilla one or a load-linked. */
sewardjb4112022007-11-09 22:49:28 +00004282 IRExpr* data = st->Ist.WrTmp.data;
4283 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004284 if (!inLDSO) {
4285 instrument_mem_access(
4286 bbOut,
4287 data->Iex.Load.addr,
4288 sizeofIRType(data->Iex.Load.ty),
4289 False/*!isStore*/,
4290 sizeofIRType(hWordTy)
4291 );
4292 }
sewardjb4112022007-11-09 22:49:28 +00004293 }
4294 break;
4295 }
4296
4297 case Ist_Dirty: {
4298 Int dataSize;
4299 IRDirty* d = st->Ist.Dirty.details;
4300 if (d->mFx != Ifx_None) {
4301 /* This dirty helper accesses memory. Collect the
4302 details. */
4303 tl_assert(d->mAddr != NULL);
4304 tl_assert(d->mSize != 0);
4305 dataSize = d->mSize;
4306 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004307 if (!inLDSO) {
4308 instrument_mem_access(
4309 bbOut, d->mAddr, dataSize, False/*!isStore*/,
4310 sizeofIRType(hWordTy)
4311 );
4312 }
sewardjb4112022007-11-09 22:49:28 +00004313 }
4314 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004315 if (!inLDSO) {
4316 instrument_mem_access(
4317 bbOut, d->mAddr, dataSize, True/*isStore*/,
4318 sizeofIRType(hWordTy)
4319 );
4320 }
sewardjb4112022007-11-09 22:49:28 +00004321 }
4322 } else {
4323 tl_assert(d->mAddr == NULL);
4324 tl_assert(d->mSize == 0);
4325 }
4326 break;
4327 }
4328
4329 default:
sewardjf98e1c02008-10-25 16:22:41 +00004330 unhandled:
4331 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004332 tl_assert(0);
4333
4334 } /* switch (st->tag) */
4335
4336 addStmtToIRSB( bbOut, st );
4337 } /* iterate over bbIn->stmts */
4338
4339 return bbOut;
4340}
4341
4342
4343/*----------------------------------------------------------------*/
4344/*--- Client requests ---*/
4345/*----------------------------------------------------------------*/
4346
/* Sheesh.  Yet another goddam finite map.  Maps the pthread_t values
   that client threads report via SET_MY_PTHREAD_T to our Thread*
   records; lazily created by map_pthread_t_to_Thread_INIT. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4349
4350static void map_pthread_t_to_Thread_INIT ( void ) {
4351 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004352 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4353 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004354 tl_assert(map_pthread_t_to_Thread != NULL);
4355 }
4356}
4357
4358
/* Tool client-request handler.  Returns False for requests that are
   not ours ('H','G' tag); otherwise dispatches, writes the result (if
   any) through *ret, and returns True.  Asserts on any 'H','G'
   request it does not recognise. */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* args[1] is an address inside a heap block; clean the whole
         block.  *ret is the block size, or -1 if no block found. */
      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar* who = (HChar*)args[1];
         HChar buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
4670
4671
4672/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004673/*--- Setup ---*/
4674/*----------------------------------------------------------------*/
4675
/* Parse one Helgrind-specific command line option.  Returns True if
   'arg' was accepted; False if it was recognised but malformed.
   Unrecognised options are handed on to the malloc-replacement
   option parser, whose verdict is returned. */
static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   /* NB: the VG_*_CLO macros expand to 'if (...)' heads, hence the
      unusual brace-less 'if VG_BOOL_CLO(...) {}' style below. */
   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   /* --history-level is a three-way exact-match option:
      0 = none, 1 = approx, 2 = full. */
   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      /* Digit j (leftmost = most significant) maps onto bit 5-j of
         the sanity-flags word; any non-0/1 character is rejected. */
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4723
4724static void hg_print_usage ( void )
4725{
4726 VG_(printf)(
sewardj849b0ed2008-12-21 10:43:10 +00004727" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00004728" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00004729" full: show both stack traces for a data race (can be very slow)\n"
4730" approx: full trace for one thread, approx for the other (faster)\n"
4731" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00004732" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjb4112022007-11-09 22:49:28 +00004733 );
sewardjb4112022007-11-09 22:49:28 +00004734}
4735
4736static void hg_print_debug_usage ( void )
4737{
sewardjb4112022007-11-09 22:49:28 +00004738 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
4739 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00004740 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00004741 " at events (X = 0|1) [000000]\n");
4742 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00004743 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00004744 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00004745 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
4746 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00004747 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00004748 VG_(printf)(" 000010 at lock/unlock events\n");
4749 VG_(printf)(" 000001 at thread create/join events\n");
sewardjb4112022007-11-09 22:49:28 +00004750}
4751
sewardjb4112022007-11-09 22:49:28 +00004752static void hg_fini ( Int exitcode )
4753{
sewardj2d9e8742009-08-07 15:46:56 +00004754 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
4755 VG_(message)(Vg_UserMsg,
4756 "For counts of detected and suppressed errors, "
4757 "rerun with: -v\n");
4758 }
4759
4760 if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
4761 && HG_(clo_history_level) >= 2) {
4762 VG_(umsg)(
4763 "Use --history-level=approx or =none to gain increased speed, at\n" );
4764 VG_(umsg)(
4765 "the cost of reduced accuracy of conflicting-access information\n");
4766 }
4767
sewardjb4112022007-11-09 22:49:28 +00004768 if (SHOW_DATA_STRUCTURES)
4769 pp_everything( PP_ALL, "SK_(fini)" );
sewardjf98e1c02008-10-25 16:22:41 +00004770 if (HG_(clo_sanity_flags))
sewardjb4112022007-11-09 22:49:28 +00004771 all__sanity_check("SK_(fini)");
4772
sewardj2d9e8742009-08-07 15:46:56 +00004773 if (VG_(clo_stats)) {
sewardjb4112022007-11-09 22:49:28 +00004774
4775 if (1) {
4776 VG_(printf)("\n");
4777 HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
4778 VG_(printf)("\n");
4779 HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
sewardjc1fb9d22011-02-28 09:03:44 +00004780 if (HG_(clo_track_lockorders)) {
4781 VG_(printf)("\n");
4782 HG_(ppWSUstats)( univ_laog, "univ_laog" );
4783 }
sewardjb4112022007-11-09 22:49:28 +00004784 }
4785
sewardjf98e1c02008-10-25 16:22:41 +00004786 //zz VG_(printf)("\n");
4787 //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
4788 //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
4789 //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
4790 //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
4791 //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
4792 //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
4793 //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
4794 //zz stats__hbefore_stk_hwm);
4795 //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
4796 //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);
sewardjb4112022007-11-09 22:49:28 +00004797
4798 VG_(printf)("\n");
barta0b6b2c2008-07-07 06:49:24 +00004799 VG_(printf)(" locksets: %'8d unique lock sets\n",
sewardjb4112022007-11-09 22:49:28 +00004800 (Int)HG_(cardinalityWSU)( univ_lsets ));
barta0b6b2c2008-07-07 06:49:24 +00004801 VG_(printf)(" threadsets: %'8d unique thread sets\n",
sewardjb4112022007-11-09 22:49:28 +00004802 (Int)HG_(cardinalityWSU)( univ_tsets ));
sewardjc1fb9d22011-02-28 09:03:44 +00004803 if (HG_(clo_track_lockorders)) {
4804 VG_(printf)(" univ_laog: %'8d unique lock sets\n",
4805 (Int)HG_(cardinalityWSU)( univ_laog ));
4806 }
sewardjb4112022007-11-09 22:49:28 +00004807
sewardjd52392d2008-11-08 20:36:26 +00004808 //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
4809 // stats__ga_LL_adds,
4810 // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );
sewardjb4112022007-11-09 22:49:28 +00004811
sewardjf98e1c02008-10-25 16:22:41 +00004812 VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
4813 HG_(stats__LockN_to_P_queries),
4814 HG_(stats__LockN_to_P_get_map_size)() );
sewardjb4112022007-11-09 22:49:28 +00004815
sewardjf98e1c02008-10-25 16:22:41 +00004816 VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
4817 HG_(stats__string_table_queries),
4818 HG_(stats__string_table_get_map_size)() );
sewardjc1fb9d22011-02-28 09:03:44 +00004819 if (HG_(clo_track_lockorders)) {
4820 VG_(printf)(" LAOG: %'8d map size\n",
4821 (Int)(laog ? VG_(sizeFM)( laog ) : 0));
4822 VG_(printf)(" LAOG exposition: %'8d map size\n",
4823 (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
4824 }
4825
barta0b6b2c2008-07-07 06:49:24 +00004826 VG_(printf)(" locks: %'8lu acquires, "
4827 "%'lu releases\n",
sewardjb4112022007-11-09 22:49:28 +00004828 stats__lockN_acquires,
4829 stats__lockN_releases
4830 );
barta0b6b2c2008-07-07 06:49:24 +00004831 VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);
sewardjb4112022007-11-09 22:49:28 +00004832
4833 VG_(printf)("\n");
sewardjf98e1c02008-10-25 16:22:41 +00004834 libhb_shutdown(True);
sewardjb4112022007-11-09 22:49:28 +00004835 }
4836}
4837
sewardjf98e1c02008-10-25 16:22:41 +00004838/* FIXME: move these somewhere sane */
4839
4840static
4841void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4842{
4843 Thread* thr;
4844 ThreadId tid;
4845 UWord nActual;
4846 tl_assert(hbt);
4847 thr = libhb_get_Thr_opaque( hbt );
4848 tl_assert(thr);
4849 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4850 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4851 NULL, NULL, 0 );
4852 tl_assert(nActual <= nRequest);
4853 for (; nActual < nRequest; nActual++)
4854 frames[nActual] = 0;
4855}
4856
4857static
sewardj23f12002009-07-24 08:45:08 +00004858ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00004859{
4860 Thread* thr;
4861 ThreadId tid;
4862 ExeContext* ec;
4863 tl_assert(hbt);
4864 thr = libhb_get_Thr_opaque( hbt );
4865 tl_assert(thr);
4866 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00004867 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00004868 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00004869 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00004870}
4871
4872
sewardjc1fb9d22011-02-28 09:03:44 +00004873static void hg_post_clo_init ( void )
sewardjb4112022007-11-09 22:49:28 +00004874{
sewardjf98e1c02008-10-25 16:22:41 +00004875 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00004876
sewardjc1fb9d22011-02-28 09:03:44 +00004877 /////////////////////////////////////////////
4878 hbthr_root = libhb_init( for_libhb__get_stacktrace,
4879 for_libhb__get_EC );
4880 /////////////////////////////////////////////
4881
4882
4883 if (HG_(clo_track_lockorders))
4884 laog__init();
4885
4886 initialise_data_structures(hbthr_root);
4887}
4888
/* Pre-command-line-option initialisation: registers the tool's
   identity, callbacks and event trackers with the Valgrind core.
   Invoked via VG_DETERMINE_INTERFACE_VERSION below. */
static void hg_pre_clo_init ( void )
{
   /* Tool identity, shown in the startup banner. */
   VG_(details_name) ("Helgrind");
   VG_(details_version) (NULL);
   VG_(details_description) ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to) (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   /* Core entry points: post-option init, instrumenter, shutdown. */
   VG_(basic_tool_funcs) (hg_post_clo_init,
                          hg_instrument,
                          hg_fini);

   /* Error management and suppression callbacks. */
   VG_(needs_core_errors) ();
   VG_(needs_tool_errors) (HG_(eq_Error),
                           HG_(before_pp_Error),
                           HG_(pp_Error),
                           False,/*show TIDs for errors*/
                           HG_(update_extra),
                           HG_(recognised_suppression),
                           HG_(read_extra_suppression_info),
                           HG_(error_matches_suppression),
                           HG_(get_error_name),
                           HG_(get_extra_suppression_info));

   VG_(needs_xml_output) ();

   /* Option parsing and client-request handling (see the big switch
      in hg_handle_client_request above). */
   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests) (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
   // hg_expensive_sanity_check);

   /* Intercept all client heap operations so allocations can be
      tracked in hg_mallocmeta_table. */
   VG_(needs_malloc_replacement) (hg_cli__malloc,
                                  hg_cli____builtin_new,
                                  hg_cli____builtin_vec_new,
                                  hg_cli__memalign,
                                  hg_cli__calloc,
                                  hg_cli__free,
                                  hg_cli____builtin_delete,
                                  hg_cli____builtin_vec_delete,
                                  hg_cli__realloc,
                                  hg_cli_malloc_usable_size,
                                  HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results. The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   /* Memory life-cycle events: creation... */
   VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack) ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap) ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   /* ...and destruction: all four routes funnel into evh__die_mem. */
   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk) ( evh__die_mem );
   VG_(track_die_mem_munmap) ( evh__die_mem );
   VG_(track_die_mem_stack) ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack) (NULL);

   /* Pre/post access hooks used for syscall-argument checking etc. */
   VG_(track_pre_mem_read) ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write) ( evh__pre_mem_write );
   VG_(track_post_mem_write) (NULL);

   /////////////////

   /* Low-level thread lifetime and client-code scheduling events. */
   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met. Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}
4987
4988VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
4989
4990/*--------------------------------------------------------------------*/
4991/*--- end hg_main.c ---*/
4992/*--------------------------------------------------------------------*/