
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
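
/* (Illustrative note) A typical case of the above: lookups such as
      VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   below cast between Word and pointer types, which is what falls
   foul of the strict-aliasing rules at -O2. */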

// FIXME catch sync signals (SEGV, basically) and unlock BHL,
// if held.  Otherwise a LOCK-prefixed insn which segfaults
// gets Helgrind into a total muddle as the BHL will not be
// released after the insn.

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// this is:
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
#define SHOW_EXPENSIVE_STUFF 0

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin linked list of Locks */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* Never changed; we only care about its address.  It is treated as
   if it were a standard userspace lock.  Also we have a Lock*
   describing it so it can participate in lock sets in the usual
   way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

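/* Make a new Thread shell wrapping the libhb thread 'hbthr': empty
   locksets, no core ThreadId bound yet (callers set .coretid), and
   linked onto the admin_threads list.  Each Thread gets a unique
   errmsg_index, used to number threads in error messages. */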
static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   admin_threads        = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock        = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin       = admin_locks;
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks       = lock;
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

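/* Update 'lk' to reflect that 'thr' now has a read-acquisition of
   it.  Only a reader-writer lock can be read-acquired, and it must
   be either unheld or already held in read mode. */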
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

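/* For every thread currently holding 'lk', remove 'lk' from that
   thread's locksetA, and from its locksetW too if 'lk' is held in
   write mode. */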
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   if (HG_(clo_track_lockorders)) {
      univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                    HG_(free), 24/*cacheSize*/ );
      tl_assert(univ_laog != NULL);
   }

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

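/* Clear the map_threads entry for 'coretid'.  The entry must
   currently be occupied (asserted), and the invalid-threadid slot
   (0) may never be cleared. */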
static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

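/* Delete the binding for guest address 'ga' from map_locks.  The
   binding must exist; asserts if it does not. */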
static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/

/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


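/* Top-level sanity checkers, combining the per-structure checks
   above.  The lock-order (laog) check is done only when lock-order
   tracking is enabled. */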
static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   if (HG_(clo_track_lockorders))
      laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}


///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a
//   good chance that it is indeed the lock that the programmer
//   intended to use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This is in the case where there are many
//   transitions ShR -> ShR, all with an empty lockset, and only later
//   is there a transition to ShM.  So what we want to do is note the
//   lossage lock at the point where a ShR -> ShR transition empties out
//   the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates
//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
//   we do not record the Lock* directly as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
//
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
//
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP   = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   if (HG_(clo_track_lockorders)) {
      /* check lock order acquisition graph, and update.  This has to
         happen before the lock is added to the thread's locksetA/W. */
      laog__pre_thread_acquires_lock( thr, lk );
   }
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


1303/* The lock at 'lock_ga' is just about to be unlocked. Make all
1304 necessary updates, and also do all possible error checks. */
1305static
1306void evhH__pre_thread_releases_lock ( Thread* thr,
1307 Addr lock_ga, Bool isRDWR )
1308{
1309 Lock* lock;
1310 Word n;
sewardjf98e1c02008-10-25 16:22:41 +00001311 Bool was_heldW;
sewardjb4112022007-11-09 22:49:28 +00001312
1313 /* This routine is called prior to a lock release, before
1314 libpthread has had a chance to validate the call. Hence we need
1315 to detect and reject any attempts to move the lock into an
1316 invalid state. Such attempts are bugs in the client.
1317
1318 isRDWR is True if we know from the wrapper context that lock_ga
1319 should refer to a reader-writer lock, and is False if [ditto]
1320 lock_ga should refer to a standard mutex. */
1321
sewardjf98e1c02008-10-25 16:22:41 +00001322 tl_assert(HG_(is_sane_Thread)(thr));
sewardjb4112022007-11-09 22:49:28 +00001323 lock = map_locks_maybe_lookup( lock_ga );
1324
1325 if (!lock) {
1326 /* We know nothing about a lock at 'lock_ga'. Nevertheless
1327 the client is trying to unlock it. So complain, then ignore
1328 the attempt. */
sewardjf98e1c02008-10-25 16:22:41 +00001329 HG_(record_error_UnlockBogus)( thr, lock_ga );
sewardjb4112022007-11-09 22:49:28 +00001330 return;
1331 }
1332
1333 tl_assert(lock->guestaddr == lock_ga);
sewardjf98e1c02008-10-25 16:22:41 +00001334 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001335
1336 if (isRDWR && lock->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001337 HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
1338 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001339 }
1340 if ((!isRDWR) && lock->kind == LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00001341 HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
1342 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001343 }
1344
1345 if (!lock->heldBy) {
1346 /* The lock is not held. This indicates a serious bug in the
1347 client. */
1348 tl_assert(!lock->heldW);
sewardjf98e1c02008-10-25 16:22:41 +00001349 HG_(record_error_UnlockUnlocked)( thr, lock );
sewardjb4112022007-11-09 22:49:28 +00001350 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1351 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1352 goto error;
1353 }
1354
sewardjf98e1c02008-10-25 16:22:41 +00001355 /* test just above dominates */
1356 tl_assert(lock->heldBy);
1357 was_heldW = lock->heldW;
1358
sewardjb4112022007-11-09 22:49:28 +00001359 /* The lock is held. Is this thread one of the holders? If not,
1360 report a bug in the client. */
sewardj896f6f92008-08-19 08:38:52 +00001361 n = VG_(elemBag)( lock->heldBy, (Word)thr );
sewardjb4112022007-11-09 22:49:28 +00001362 tl_assert(n >= 0);
1363 if (n == 0) {
1364 /* We are not a current holder of the lock. This is a bug in
1365 the guest, and (per POSIX pthread rules) the unlock
1366 attempt will fail. So just complain and do nothing
1367 else. */
sewardj896f6f92008-08-19 08:38:52 +00001368 Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001369 tl_assert(HG_(is_sane_Thread)(realOwner));
sewardjb4112022007-11-09 22:49:28 +00001370 tl_assert(realOwner != thr);
1371 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1372 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
sewardjf98e1c02008-10-25 16:22:41 +00001373 HG_(record_error_UnlockForeign)( thr, realOwner, lock );
sewardjb4112022007-11-09 22:49:28 +00001374 goto error;
1375 }
1376
1377 /* Ok, we hold the lock 'n' times. */
1378 tl_assert(n >= 1);
1379
1380 lockN_release( lock, thr );
1381
1382 n--;
1383 tl_assert(n >= 0);
1384
1385 if (n > 0) {
1386 tl_assert(lock->heldBy);
sewardj896f6f92008-08-19 08:38:52 +00001387 tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
sewardjb4112022007-11-09 22:49:28 +00001388 /* We still hold the lock. So either it's a recursive lock
1389 or a rwlock which is currently r-held. */
1390 tl_assert(lock->kind == LK_mbRec
1391 || (lock->kind == LK_rdwr && !lock->heldW));
1392 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
1393 if (lock->heldW)
1394 tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1395 else
1396 tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
1397 } else {
sewardj983f3022009-05-21 14:49:55 +00001398 /* n is zero. This means we don't hold the lock any more. But
1399 if it's a rwlock held in r-mode, someone else could still
1400 hold it. Just do whatever sanity checks we can. */
1401 if (lock->kind == LK_rdwr && lock->heldBy) {
1402 /* It's a rwlock. We no longer hold it but we used to;
1403 nevertheless it still appears to be held by someone else.
1404 The implication is that, prior to this release, it must
1405         have been shared by us and whoever else is holding it;
1406 which in turn implies it must be r-held, since a lock
1407 can't be w-held by more than one thread. */
1408 /* The lock is now R-held by somebody else: */
1409 tl_assert(lock->heldW == False);
1410 } else {
1411 /* Normal case. It's either not a rwlock, or it's a rwlock
1412 that we used to hold in w-mode (which is pretty much the
1413 same thing as a non-rwlock.) Since this transaction is
1414 atomic (V does not allow multiple threads to run
1415 simultaneously), it must mean the lock is now not held by
1416 anybody. Hence assert for it. */
1417 /* The lock is now not held by anybody: */
1418 tl_assert(!lock->heldBy);
1419 tl_assert(lock->heldW == False);
1420 }
sewardjf98e1c02008-10-25 16:22:41 +00001421 //if (lock->heldBy) {
1422 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1423 //}
sewardjb4112022007-11-09 22:49:28 +00001424 /* update this thread's lockset accordingly. */
1425 thr->locksetA
1426 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1427 thr->locksetW
1428 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001429 /* push our VC into the lock */
1430 tl_assert(thr->hbthr);
1431 tl_assert(lock->hbso);
1432 /* If the lock was previously W-held, then we want to do a
1433 strong send, and if previously R-held, then a weak send. */
1434 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001435 }
1436 /* fall through */
1437
1438 error:
sewardjf98e1c02008-10-25 16:22:41 +00001439 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001440}
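/* Worked example of the strong/weak send distinction above (a
   sketch of the intended semantics, not a definitive statement of
   libhb's internals): releasing a w-held lock does a strong send,
   so the lock's SO carries exactly the releaser's VC and the next
   acquirer depends on everything the releaser did.  Releasing an
   r-held rwlock does a weak send, which merges rather than
   overwrites, so the SO accumulates contributions from all of the
   readers. */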
1441
1442
sewardj9f569b72008-11-13 13:33:09 +00001443/* ---------------------------------------------------------- */
1444/* -------- Event handlers proper (evh__* functions) -------- */
1445/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001446
1447/* What is the Thread* for the currently running thread? This is
1448 absolutely performance critical. We receive notifications from the
1449 core for client code starts/stops, and cache the looked-up result
1450 in 'current_Thread'. Hence, for the vast majority of requests,
1451 finding the current thread reduces to a read of a global variable,
1452 provided get_current_Thread_in_C_C is inlined.
1453
1454 Outside of client code, current_Thread is NULL, and presumably
1455 any uses of it will cause a segfault. Hence:
1456
1457 - for uses definitely within client code, use
1458 get_current_Thread_in_C_C.
1459
1460 - for all other uses, use get_current_Thread.
1461*/
1462
sewardj23f12002009-07-24 08:45:08 +00001463static Thread *current_Thread = NULL,
1464 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001465
1466static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1467 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1468 tl_assert(current_Thread == NULL);
1469 current_Thread = map_threads_lookup( tid );
1470 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001471 if (current_Thread != current_Thread_prev) {
1472 libhb_Thr_resumes( current_Thread->hbthr );
1473 current_Thread_prev = current_Thread;
1474 }
sewardjb4112022007-11-09 22:49:28 +00001475}
1476static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1477 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1478 tl_assert(current_Thread != NULL);
1479 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001480 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001481}
1482static inline Thread* get_current_Thread_in_C_C ( void ) {
1483 return current_Thread;
1484}
1485static inline Thread* get_current_Thread ( void ) {
1486 ThreadId coretid;
1487 Thread* thr;
1488 thr = get_current_Thread_in_C_C();
1489 if (LIKELY(thr))
1490 return thr;
1491 /* evidently not in client code. Do it the slow way. */
1492 coretid = VG_(get_running_tid)();
1493 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001494 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001495 of initial memory layout) and VG_(get_running_tid)() returns
1496 VG_INVALID_THREADID at that point. */
1497 if (coretid == VG_INVALID_THREADID)
1498 coretid = 1; /* KLUDGE */
1499 thr = map_threads_lookup( coretid );
1500 return thr;
1501}
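/* Protocol recap (illustrative): the core brackets every run of
   client code with evh__start_client_code(tid) and
   evh__stop_client_code(tid) (see above), so between those calls
   current_Thread is non-NULL and get_current_Thread_in_C_C is just a
   global read; outside them only get_current_Thread, with its slow
   path, is safe to call. */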
1502
1503static
1504void evh__new_mem ( Addr a, SizeT len ) {
1505 if (SHOW_EVENTS >= 2)
1506 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1507 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001508 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001509 all__sanity_check("evh__new_mem-post");
1510}
1511
1512static
sewardj1f77fec2010-04-12 19:51:04 +00001513void evh__new_mem_stack ( Addr a, SizeT len ) {
1514 if (SHOW_EVENTS >= 2)
1515 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1516 shadow_mem_make_New( get_current_Thread(),
1517 -VG_STACK_REDZONE_SZB + a, len );
1518 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1519 all__sanity_check("evh__new_mem_stack-post");
1520}
1521
1522static
sewardj7cf4e6b2008-05-01 20:24:26 +00001523void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1524 if (SHOW_EVENTS >= 2)
1525 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1526 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001527 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001528 all__sanity_check("evh__new_mem_w_tid-post");
1529}
1530
1531static
sewardjb4112022007-11-09 22:49:28 +00001532void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001533 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001534 if (SHOW_EVENTS >= 1)
1535 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1536 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1537 if (rr || ww || xx)
1538 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001539 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001540 all__sanity_check("evh__new_mem_w_perms-post");
1541}
1542
1543static
1544void evh__set_perms ( Addr a, SizeT len,
1545 Bool rr, Bool ww, Bool xx ) {
1546 if (SHOW_EVENTS >= 1)
1547 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1548 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1549 /* Hmm. What should we do here, that actually makes any sense?
1550 Let's say: if neither readable nor writable, then declare it
1551 NoAccess, else leave it alone. */
1552 if (!(rr || ww))
1553 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001554 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001555 all__sanity_check("evh__set_perms-post");
1556}
1557
1558static
1559void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001560 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001561 if (SHOW_EVENTS >= 2)
1562 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1563 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001564 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001565 all__sanity_check("evh__die_mem-post");
1566}
1567
1568static
sewardj406bac82010-03-03 23:03:40 +00001569void evh__untrack_mem ( Addr a, SizeT len ) {
1570 // whereas it doesn't ignore this
1571 if (SHOW_EVENTS >= 2)
1572 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1573 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1574 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1575 all__sanity_check("evh__untrack_mem-post");
1576}
1577
1578static
sewardj23f12002009-07-24 08:45:08 +00001579void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1580 if (SHOW_EVENTS >= 2)
1581 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1582 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1583 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1584 all__sanity_check("evh__copy_mem-post");
1585}
1586
1587static
sewardjb4112022007-11-09 22:49:28 +00001588void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1589{
1590 if (SHOW_EVENTS >= 1)
1591 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1592 (Int)parent, (Int)child );
1593
1594 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001595 Thread* thr_p;
1596 Thread* thr_c;
1597 Thr* hbthr_p;
1598 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001599
sewardjf98e1c02008-10-25 16:22:41 +00001600 tl_assert(HG_(is_sane_ThreadId)(parent));
1601 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001602 tl_assert(parent != child);
1603
1604 thr_p = map_threads_maybe_lookup( parent );
1605 thr_c = map_threads_maybe_lookup( child );
1606
1607 tl_assert(thr_p != NULL);
1608 tl_assert(thr_c == NULL);
1609
sewardjf98e1c02008-10-25 16:22:41 +00001610 hbthr_p = thr_p->hbthr;
1611 tl_assert(hbthr_p != NULL);
1612 tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001613
sewardjf98e1c02008-10-25 16:22:41 +00001614 hbthr_c = libhb_create ( hbthr_p );
1615
1616 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001617 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001618 thr_c = mk_Thread( hbthr_c );
1619 tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
1620 libhb_set_Thr_opaque(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001621
1622 /* and bind it in the thread-map table */
1623 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001624 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1625 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001626
1627 /* Record where the parent is so we can later refer to this in
1628 error messages.
1629
1630 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1631 The stack snapshot is taken immediately after the parent has
1632 returned from its sys_clone call. Unfortunately there is no
1633 unwind info for the insn following "syscall" - reading the
1634 glibc sources confirms this. So we ask for a snapshot to be
1635 taken as if RIP was 3 bytes earlier, in a place where there
1636 is unwind info. Sigh.
1637 */
1638 { Word first_ip_delta = 0;
1639# if defined(VGP_amd64_linux)
1640 first_ip_delta = -3;
1641# endif
1642 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1643 }
sewardjb4112022007-11-09 22:49:28 +00001644 }
1645
sewardjf98e1c02008-10-25 16:22:41 +00001646 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001647 all__sanity_check("evh__pre_thread_create-post");
1648}
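/* Illustrative client-side consequence (hypothetical code): because
   the child's Thr is derived from the parent's via libhb_create,
   everything the parent did before pthread_create happens-before
   everything the child does. */
#if 0
static int x;                      /* shared */
static void* child_fn ( void* v ) {
   return (void*)(long)x;          /* race-free read: see above */
}
static void parent_fn ( void ) {
   pthread_t t;
   x = 42;                         /* happens-before the child runs */
   pthread_create(&t, NULL, child_fn, NULL);
}
#endif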
1649
1650static
1651void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1652{
1653 Int nHeld;
1654 Thread* thr_q;
1655 if (SHOW_EVENTS >= 1)
1656 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1657 (Int)quit_tid );
1658
1659 /* quit_tid has disappeared without joining to any other thread.
1660 Therefore there is no synchronisation event associated with its
1661 exit and so we have to pretty much treat it as if it was still
1662 alive but mysteriously making no progress. That is because, if
1663 we don't know when it really exited, then we can never say there
1664 is a point in time when we're sure the thread really has
1665 finished, and so we need to consider the possibility that it
1666 lingers indefinitely and continues to interact with other
1667 threads. */
1668 /* However, it might have rendezvous'd with a thread that called
1669 pthread_join with this one as arg, prior to this point (that's
1670 how NPTL works). In which case there has already been a prior
1671 sync event. So in any case, just let the thread exit. On NPTL,
1672 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001673 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001674 thr_q = map_threads_maybe_lookup( quit_tid );
1675 tl_assert(thr_q != NULL);
1676
1677 /* Complain if this thread holds any locks. */
1678 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1679 tl_assert(nHeld >= 0);
1680 if (nHeld > 0) {
1681 HChar buf[80];
1682 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1683 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001684 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001685 }
1686
sewardj23f12002009-07-24 08:45:08 +00001687 /* Not much to do here:
1688 - tell libhb the thread is gone
1689 - clear the map_threads entry, in order that the Valgrind core
1690 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001691 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1692 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001693 tl_assert(thr_q->hbthr);
1694 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001695 tl_assert(thr_q->coretid == quit_tid);
1696 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001697 map_threads_delete( quit_tid );
1698
sewardjf98e1c02008-10-25 16:22:41 +00001699 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001700 all__sanity_check("evh__pre_thread_ll_exit-post");
1701}
1702
sewardj61bc2c52011-02-09 10:34:00 +00001703/* This is called immediately after fork, for the child only. 'tid'
1704 is the only surviving thread (as per POSIX rules on fork() in
1705 threaded programs), so we have to clean up map_threads to remove
1706 entries for any other threads. */
1707static
1708void evh__atfork_child ( ThreadId tid )
1709{
1710 UInt i;
1711 Thread* thr;
1712 /* Slot 0 should never be used. */
1713 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1714 tl_assert(!thr);
1715 /* Clean up all other slots except 'tid'. */
1716 for (i = 1; i < VG_N_THREADS; i++) {
1717 if (i == tid)
1718 continue;
1719 thr = map_threads_maybe_lookup(i);
1720 if (!thr)
1721 continue;
1722 /* Cleanup actions (next 5 lines) copied from end of
1723 evh__pre_thread_ll_exit; keep in sync. */
1724 tl_assert(thr->hbthr);
1725 libhb_async_exit(thr->hbthr);
1726 tl_assert(thr->coretid == i);
1727 thr->coretid = VG_INVALID_THREADID;
1728 map_threads_delete(i);
1729 }
1730}
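/* This handler is expected to be registered as the post-fork child
   action via VG_(atfork) during tool initialisation; nothing here
   depends on how the registration is done. */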
1731
sewardjf98e1c02008-10-25 16:22:41 +00001732
sewardjb4112022007-11-09 22:49:28 +00001733static
1734void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1735{
sewardjb4112022007-11-09 22:49:28 +00001736 Thread* thr_s;
1737 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001738 Thr* hbthr_s;
1739 Thr* hbthr_q;
1740 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001741
1742 if (SHOW_EVENTS >= 1)
1743 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1744 (Int)stay_tid, quit_thr );
1745
sewardjf98e1c02008-10-25 16:22:41 +00001746 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001747
1748 thr_s = map_threads_maybe_lookup( stay_tid );
1749 thr_q = quit_thr;
1750 tl_assert(thr_s != NULL);
1751 tl_assert(thr_q != NULL);
1752 tl_assert(thr_s != thr_q);
1753
sewardjf98e1c02008-10-25 16:22:41 +00001754 hbthr_s = thr_s->hbthr;
1755 hbthr_q = thr_q->hbthr;
1756 tl_assert(hbthr_s != hbthr_q);
1757 tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
1758 tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001759
sewardjf98e1c02008-10-25 16:22:41 +00001760 /* Allocate a temporary synchronisation object and use it to send
1761 an imaginary message from the quitter to the stayer, the purpose
1762 being to generate a dependence from the quitter to the
1763 stayer. */
1764 so = libhb_so_alloc();
1765 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001766   /* Do a strong send, so that the stayer picks up a dependency
1767      on everything the quitter did before exiting (the quitter
1768      itself no longer exists, so this is its final contribution). */
sewardjf98e1c02008-10-25 16:22:41 +00001769 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1770 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1771 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001772
sewardjf98e1c02008-10-25 16:22:41 +00001773 /* evh__pre_thread_ll_exit issues an error message if the exiting
1774 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001775
1776 /* This holds because, at least when using NPTL as the thread
1777 library, we should be notified the low level thread exit before
1778 we hear of any join event on it. The low level exit
1779 notification feeds through into evh__pre_thread_ll_exit,
1780 which should clear the map_threads entry for it. Hence we
1781 expect there to be no map_threads entry at this point. */
1782 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1783 == VG_INVALID_THREADID);
1784
sewardjf98e1c02008-10-25 16:22:41 +00001785 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001786 all__sanity_check("evh__post_thread_join-post");
1787}
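/* Illustrative client pattern (hypothetical code; compute() and
   use() are placeholders): the send/recv pair above is what makes
   the read of 'result' after pthread_join race-free. */
#if 0
static int result;
static void* worker ( void* v ) {
   result = compute();             /* written before the thread exits */
   return NULL;
}
static void caller ( void ) {
   pthread_t t;
   pthread_create(&t, NULL, worker, NULL);
   pthread_join(t, NULL);          /* 'recv' from the quitter's SO */
   use(result);                    /* race-free, thanks to that edge */
}
#endif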
1788
1789static
1790void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1791 Addr a, SizeT size) {
1792 if (SHOW_EVENTS >= 2
1793 || (SHOW_EVENTS >= 1 && size != 1))
1794 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1795 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001796 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001797 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001798 all__sanity_check("evh__pre_mem_read-post");
1799}
1800
1801static
1802void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1803 Char* s, Addr a ) {
1804 Int len;
1805 if (SHOW_EVENTS >= 1)
1806 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1807 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001808 // Don't segfault if the string starts in an obviously stupid
1809 // place. Actually we should check the whole string, not just
1810 // the start address, but that's too much trouble. At least
1811 // checking the first byte is better than nothing. See #255009.
1812 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1813 return;
sewardjb4112022007-11-09 22:49:28 +00001814 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001815 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001816 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001817 all__sanity_check("evh__pre_mem_read_asciiz-post");
1818}
1819
1820static
1821void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1822 Addr a, SizeT size ) {
1823 if (SHOW_EVENTS >= 1)
1824 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1825 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001826 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001827 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001828 all__sanity_check("evh__pre_mem_write-post");
1829}
1830
1831static
1832void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1833 if (SHOW_EVENTS >= 1)
1834 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1835 (void*)a, len, (Int)is_inited );
1836    // FIXME: this is kinda stupid; is_inited currently makes no
1837    // difference, so inited and uninited heap memory get identical
1838    // shadow state.
1839    shadow_mem_make_New(get_current_Thread(), a, len);
sewardjf98e1c02008-10-25 16:22:41 +00001842 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001843      all__sanity_check("evh__new_mem_heap-post");
1844}
1845
1846static
1847void evh__die_mem_heap ( Addr a, SizeT len ) {
1848 if (SHOW_EVENTS >= 1)
1849 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1850 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001851 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001852      all__sanity_check("evh__die_mem_heap-post");
1853}
1854
sewardj23f12002009-07-24 08:45:08 +00001855/* --- Event handlers called from generated code --- */
1856
sewardjb4112022007-11-09 22:49:28 +00001857static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001858void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001859 Thread* thr = get_current_Thread_in_C_C();
1860 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001861 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001862}
sewardjf98e1c02008-10-25 16:22:41 +00001863
sewardjb4112022007-11-09 22:49:28 +00001864static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001865void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001866 Thread* thr = get_current_Thread_in_C_C();
1867 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001868 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001869}
sewardjf98e1c02008-10-25 16:22:41 +00001870
sewardjb4112022007-11-09 22:49:28 +00001871static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001872void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001873 Thread* thr = get_current_Thread_in_C_C();
1874 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001875 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001876}
sewardjf98e1c02008-10-25 16:22:41 +00001877
sewardjb4112022007-11-09 22:49:28 +00001878static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001879void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001880 Thread* thr = get_current_Thread_in_C_C();
1881 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001882 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001883}
sewardjf98e1c02008-10-25 16:22:41 +00001884
sewardjb4112022007-11-09 22:49:28 +00001885static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001886void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001887 Thread* thr = get_current_Thread_in_C_C();
1888 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001889 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001890}
1891
1892static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001893void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001894 Thread* thr = get_current_Thread_in_C_C();
1895 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001896 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001897}
sewardjf98e1c02008-10-25 16:22:41 +00001898
sewardjb4112022007-11-09 22:49:28 +00001899static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001900void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001901 Thread* thr = get_current_Thread_in_C_C();
1902 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001903 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001904}
sewardjf98e1c02008-10-25 16:22:41 +00001905
sewardjb4112022007-11-09 22:49:28 +00001906static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001907void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001908 Thread* thr = get_current_Thread_in_C_C();
1909 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001910 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001911}
sewardjf98e1c02008-10-25 16:22:41 +00001912
sewardjb4112022007-11-09 22:49:28 +00001913static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001914void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001915 Thread* thr = get_current_Thread_in_C_C();
1916 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001917 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001918}
sewardjf98e1c02008-10-25 16:22:41 +00001919
sewardjb4112022007-11-09 22:49:28 +00001920static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001921void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001922 Thread* thr = get_current_Thread_in_C_C();
1923 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001924 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001925}
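/* These ten helpers are the tool's fast path: the instrumenter
   plants one call per guest memory access, choosing the specialised
   1/2/4/8-byte variant where the size is known and the _N variant
   (which takes the size as an extra argument) otherwise.  Hence they
   do the bare minimum: fetch the cached Thread* and hand straight
   off to libhb. */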
1926
sewardjb4112022007-11-09 22:49:28 +00001927
sewardj9f569b72008-11-13 13:33:09 +00001928/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001929/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001930/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001931
1932/* EXPOSITION only: by intercepting lock init events we can show the
1933 user where the lock was initialised, rather than only being able to
1934 show where it was first locked. Intercepting lock initialisations
1935 is not necessary for the basic operation of the race checker. */
1936static
1937void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1938 void* mutex, Word mbRec )
1939{
1940 if (SHOW_EVENTS >= 1)
1941 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1942 (Int)tid, mbRec, (void*)mutex );
1943 tl_assert(mbRec == 0 || mbRec == 1);
1944 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1945 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001946 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001947 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1948}
1949
1950static
1951void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1952{
1953 Thread* thr;
1954 Lock* lk;
1955 if (SHOW_EVENTS >= 1)
1956 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1957 (Int)tid, (void*)mutex );
1958
1959 thr = map_threads_maybe_lookup( tid );
1960 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001961 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001962
1963 lk = map_locks_maybe_lookup( (Addr)mutex );
1964
1965 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001966 HG_(record_error_Misc)(
1967 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001968 }
1969
1970 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001971 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001972 tl_assert( lk->guestaddr == (Addr)mutex );
1973 if (lk->heldBy) {
1974 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001975 HG_(record_error_Misc)(
1976 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001977 /* remove lock from locksets of all owning threads */
1978 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001979 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001980 lk->heldBy = NULL;
1981 lk->heldW = False;
1982 lk->acquired_at = NULL;
1983 }
1984 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001985 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00001986
1987 if (HG_(clo_track_lockorders))
1988 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001989 map_locks_delete( lk->guestaddr );
1990 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001991 }
1992
sewardjf98e1c02008-10-25 16:22:41 +00001993 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001994 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1995}
1996
1997static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1998 void* mutex, Word isTryLock )
1999{
2000 /* Just check the mutex is sane; nothing else to do. */
2001 // 'mutex' may be invalid - not checked by wrapper
2002 Thread* thr;
2003 Lock* lk;
2004 if (SHOW_EVENTS >= 1)
2005 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
2006 (Int)tid, (void*)mutex );
2007
2008 tl_assert(isTryLock == 0 || isTryLock == 1);
2009 thr = map_threads_maybe_lookup( tid );
2010 tl_assert(thr); /* cannot fail - Thread* must already exist */
2011
2012 lk = map_locks_maybe_lookup( (Addr)mutex );
2013
2014 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00002015 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2016 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002017 }
2018
2019 if ( lk
2020 && isTryLock == 0
2021 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2022 && lk->heldBy
2023 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00002024 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00002025 /* uh, it's a non-recursive lock and we already w-hold it, and
2026 this is a real lock operation (not a speculative "tryLock"
2027 kind of thing). Duh. Deadlock coming up; but at least
2028 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00002029 HChar* errstr = "Attempt to re-lock a "
2030 "non-recursive lock I already hold";
2031 HChar* auxstr = "Lock was previously acquired";
2032 if (lk->acquired_at) {
2033 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2034 } else {
2035 HG_(record_error_Misc)( thr, errstr );
2036 }
sewardjb4112022007-11-09 22:49:28 +00002037 }
2038}
2039
2040static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2041{
2042 // only called if the real library call succeeded - so mutex is sane
2043 Thread* thr;
2044 if (SHOW_EVENTS >= 1)
2045 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2046 (Int)tid, (void*)mutex );
2047
2048 thr = map_threads_maybe_lookup( tid );
2049 tl_assert(thr); /* cannot fail - Thread* must already exist */
2050
2051 evhH__post_thread_w_acquires_lock(
2052 thr,
2053 LK_mbRec, /* if not known, create new lock with this LockKind */
2054 (Addr)mutex
2055 );
2056}
2057
2058static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2059{
2060 // 'mutex' may be invalid - not checked by wrapper
2061 Thread* thr;
2062 if (SHOW_EVENTS >= 1)
2063 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2064 (Int)tid, (void*)mutex );
2065
2066 thr = map_threads_maybe_lookup( tid );
2067 tl_assert(thr); /* cannot fail - Thread* must already exist */
2068
2069 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2070}
2071
2072static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2073{
2074 // only called if the real library call succeeded - so mutex is sane
2075 Thread* thr;
2076 if (SHOW_EVENTS >= 1)
2077 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2078 (Int)tid, (void*)mutex );
2079 thr = map_threads_maybe_lookup( tid );
2080 tl_assert(thr); /* cannot fail - Thread* must already exist */
2081
2082 // anything we should do here?
2083}
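/* For orientation, a successful client pthread_mutex_lock/unlock
   pair produces roughly this handler sequence (a sketch):

      evh__HG_PTHREAD_MUTEX_LOCK_PRE     error checks only
      evh__HG_PTHREAD_MUTEX_LOCK_POST    lockset update, lock-order
                                         check, 'recv' from lock's SO
      evh__HG_PTHREAD_MUTEX_UNLOCK_PRE   lockset update, 'send' into
                                         lock's SO
      evh__HG_PTHREAD_MUTEX_UNLOCK_POST  currently nothing

   with the POST handlers running only if the real call succeeded. */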
2084
2085
sewardj5a644da2009-08-11 10:35:58 +00002086/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002087/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002088/* ------------------------------------------------------- */
2089
2090/* All a bit of a kludge. Pretend we're really dealing with ordinary
2091 pthread_mutex_t's instead, for the most part. */
2092
2093static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2094 void* slock )
2095{
2096 Thread* thr;
2097 Lock* lk;
2098 /* In glibc's kludgey world, we're either initialising or unlocking
2099 it. Since this is the pre-routine, if it is locked, unlock it
2100 and take a dependence edge. Otherwise, do nothing. */
2101
2102 if (SHOW_EVENTS >= 1)
2103 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2104 "(ctid=%d, slock=%p)\n",
2105 (Int)tid, (void*)slock );
2106
2107 thr = map_threads_maybe_lookup( tid );
2108    /* cannot fail - Thread* must already exist */
2109 tl_assert( HG_(is_sane_Thread)(thr) );
2110
2111 lk = map_locks_maybe_lookup( (Addr)slock );
2112 if (lk && lk->heldBy) {
2113 /* it's held. So do the normal pre-unlock actions, as copied
2114 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2115 duplicates the map_locks_maybe_lookup. */
2116 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2117 False/*!isRDWR*/ );
2118 }
2119}
2120
2121static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2122 void* slock )
2123{
2124 Lock* lk;
2125 /* More kludgery. If the lock has never been seen before, do
2126 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2127 nothing. */
2128
2129 if (SHOW_EVENTS >= 1)
2130 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2131 "(ctid=%d, slock=%p)\n",
2132 (Int)tid, (void*)slock );
2133
2134 lk = map_locks_maybe_lookup( (Addr)slock );
2135 if (!lk) {
2136 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2137 }
2138}
2139
2140static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2141 void* slock, Word isTryLock )
2142{
2143 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2144}
2145
2146static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2147 void* slock )
2148{
2149 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2150}
2151
2152static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2153 void* slock )
2154{
2155 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2156}
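/* Why INIT and UNLOCK share one wrapper: in glibc a spinlock is (in
   effect) both initialised and unlocked by the same store of zero to
   the lock word, so the intercept cannot tell the two apart; the
   pre/post pair above reconstructs the intent from whether the lock
   is currently held.  (An assumption about glibc's implementation,
   not something checked here.) */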
2157
2158
sewardj9f569b72008-11-13 13:33:09 +00002159/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002160/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002161/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002162
sewardj02114542009-07-28 20:52:36 +00002163/* A mapping from CV to (the SO associated with it, plus some
2164 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002165 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2166 wait on it completes, we do a 'recv' from the SO. This is believed
2167 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002168 signallings/broadcasts.
2169*/
2170
sewardj02114542009-07-28 20:52:36 +00002171/* .so is the SO for this CV.
2172 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002173
sewardj02114542009-07-28 20:52:36 +00002174 POSIX says effectively that the first pthread_cond_{timed}wait call
2175 causes a dynamic binding between the CV and the mutex, and that
2176 lasts until such time as the waiter count falls to zero. Hence
2177 need to keep track of the number of waiters in order to do
2178 consistency tracking. */
2179typedef
2180 struct {
2181 SO* so; /* libhb-allocated SO */
2182 void* mx_ga; /* addr of associated mutex, if any */
2183 UWord nWaiters; /* # threads waiting on the CV */
2184 }
2185 CVInfo;
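/* Example of the binding rule (illustrative): if T1 and then T2 wait
   on 'cond' with mutex 'mx', nWaiters goes 0 -> 1 -> 2 and mx_ga is
   fixed at T1's wait; a wait on the same cond with a different mutex
   while nWaiters > 0 draws an error (see
   evh__HG_PTHREAD_COND_WAIT_PRE below); once nWaiters falls back to
   0, a fresh binding can be formed. */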
2186
2187
2188/* pthread_cond_t* -> CVInfo* */
2189static WordFM* map_cond_to_CVInfo = NULL;
2190
2191static void map_cond_to_CVInfo_INIT ( void ) {
2192 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2193 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2194 "hg.mctCI.1", HG_(free), NULL );
2195 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002196 }
2197}
2198
sewardj02114542009-07-28 20:52:36 +00002199static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002200 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002201 map_cond_to_CVInfo_INIT();
2202 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002203 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002204 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002205 } else {
sewardj02114542009-07-28 20:52:36 +00002206 SO* so = libhb_so_alloc();
2207 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2208 cvi->so = so;
2209 cvi->mx_ga = 0;
2210 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2211 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002212 }
2213}
2214
sewardj02114542009-07-28 20:52:36 +00002215static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002216 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002217 map_cond_to_CVInfo_INIT();
2218 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2219 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002220 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002221 tl_assert(cvi);
2222 tl_assert(cvi->so);
2223 libhb_so_dealloc(cvi->so);
2224 cvi->mx_ga = 0;
2225 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002226 }
2227}
2228
2229static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2230{
sewardjf98e1c02008-10-25 16:22:41 +00002231 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2232 cond to a SO if it is not already so bound, and 'send' on the
2233 SO. This is later used by other thread(s) which successfully
2234 exit from a pthread_cond_wait on the same cv; then they 'recv'
2235 from the SO, thereby acquiring a dependency on this signalling
2236 event. */
sewardjb4112022007-11-09 22:49:28 +00002237 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002238 CVInfo* cvi;
2239 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002240
2241 if (SHOW_EVENTS >= 1)
2242 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2243 (Int)tid, (void*)cond );
2244
sewardjb4112022007-11-09 22:49:28 +00002245 thr = map_threads_maybe_lookup( tid );
2246 tl_assert(thr); /* cannot fail - Thread* must already exist */
2247
sewardj02114542009-07-28 20:52:36 +00002248 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2249 tl_assert(cvi);
2250 tl_assert(cvi->so);
2251
sewardjb4112022007-11-09 22:49:28 +00002252 // error-if: mutex is bogus
2253 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002254 // Hmm. POSIX doesn't actually say that it's an error to call
2255 // pthread_cond_signal with the associated mutex being unlocked.
2256   // Although it does say the mutex should be held "if consistent
2257   // scheduling is desired."
2258 //
2259 // For the moment, disable these checks.
2260 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2261 //if (lk == NULL || cvi->mx_ga == 0) {
2262 // HG_(record_error_Misc)( thr,
2263 // "pthread_cond_{signal,broadcast}: "
2264 // "no or invalid mutex associated with cond");
2265 //}
2266 ///* note: lk could be NULL. Be careful. */
2267 //if (lk) {
2268 // if (lk->kind == LK_rdwr) {
2269 // HG_(record_error_Misc)(thr,
2270 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2271 // }
2272 // if (lk->heldBy == NULL) {
2273 // HG_(record_error_Misc)(thr,
2274 // "pthread_cond_{signal,broadcast}: "
2275 // "associated lock is not held by any thread");
2276 // }
2277 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2278 // HG_(record_error_Misc)(thr,
2279 // "pthread_cond_{signal,broadcast}: "
2280 // "associated lock is not held by calling thread");
2281 // }
2282 //}
sewardjb4112022007-11-09 22:49:28 +00002283
sewardj02114542009-07-28 20:52:36 +00002284 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002285}
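/* Illustrative client pattern (hypothetical code) showing the edge
   this creates: the producer's writes happen-before the consumer's
   reads, via the 'send' above and the 'recv' in
   evh__HG_PTHREAD_COND_WAIT_POST. */
#if 0
static pthread_mutex_t mx   = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  cond = PTHREAD_COND_INITIALIZER;
static int ready, data;
static void* producer ( void* v ) {
   pthread_mutex_lock(&mx);
   data  = 99;
   ready = 1;
   pthread_cond_signal(&cond);       /* 'send' on cond's SO */
   pthread_mutex_unlock(&mx);
   return NULL;
}
static void* consumer ( void* v ) {
   pthread_mutex_lock(&mx);
   while (!ready)
      pthread_cond_wait(&cond, &mx); /* 'recv' on completion */
   /* use of 'data' here is race-free */
   pthread_mutex_unlock(&mx);
   return NULL;
}
#endif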
2286
2287/* returns True if it reckons 'mutex' is valid and held by this
2288 thread, else False */
2289static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2290 void* cond, void* mutex )
2291{
2292 Thread* thr;
2293 Lock* lk;
2294 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002295 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002296
2297 if (SHOW_EVENTS >= 1)
2298 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2299 "(ctid=%d, cond=%p, mutex=%p)\n",
2300 (Int)tid, (void*)cond, (void*)mutex );
2301
sewardjb4112022007-11-09 22:49:28 +00002302 thr = map_threads_maybe_lookup( tid );
2303 tl_assert(thr); /* cannot fail - Thread* must already exist */
2304
2305 lk = map_locks_maybe_lookup( (Addr)mutex );
2306
2307 /* Check for stupid mutex arguments. There are various ways to be
2308 a bozo. Only complain once, though, even if more than one thing
2309 is wrong. */
2310 if (lk == NULL) {
2311 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002312 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002313 thr,
2314 "pthread_cond_{timed}wait called with invalid mutex" );
2315 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002316 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002317 if (lk->kind == LK_rdwr) {
2318 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002319 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002320 thr, "pthread_cond_{timed}wait called with mutex "
2321 "of type pthread_rwlock_t*" );
2322 } else
2323 if (lk->heldBy == NULL) {
2324 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002325 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002326 thr, "pthread_cond_{timed}wait called with un-held mutex");
2327 } else
2328 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002329 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002330 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002331 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002332 thr, "pthread_cond_{timed}wait called with mutex "
2333 "held by a different thread" );
2334 }
2335 }
2336
2337 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002338 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2339 tl_assert(cvi);
2340 tl_assert(cvi->so);
2341 if (cvi->nWaiters == 0) {
2342 /* form initial (CV,MX) binding */
2343 cvi->mx_ga = mutex;
2344 }
2345 else /* check existing (CV,MX) binding */
2346 if (cvi->mx_ga != mutex) {
2347 HG_(record_error_Misc)(
2348 thr, "pthread_cond_{timed}wait: cond is associated "
2349 "with a different mutex");
2350 }
2351 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002352
2353 return lk_valid;
2354}
2355
2356static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2357 void* cond, void* mutex )
2358{
sewardjf98e1c02008-10-25 16:22:41 +00002359 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2360 the SO for this cond, and 'recv' from it so as to acquire a
2361 dependency edge back to the signaller/broadcaster. */
2362 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002363 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002364
2365 if (SHOW_EVENTS >= 1)
2366 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2367 "(ctid=%d, cond=%p, mutex=%p)\n",
2368 (Int)tid, (void*)cond, (void*)mutex );
2369
sewardjb4112022007-11-09 22:49:28 +00002370 thr = map_threads_maybe_lookup( tid );
2371 tl_assert(thr); /* cannot fail - Thread* must already exist */
2372
2373 // error-if: cond is also associated with a different mutex
2374
sewardj02114542009-07-28 20:52:36 +00002375 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2376 tl_assert(cvi);
2377 tl_assert(cvi->so);
2378 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002379
sewardj02114542009-07-28 20:52:36 +00002380 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002381 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2382 it? If this happened it would surely be a bug in the threads
2383 library. Or one of those fabled "spurious wakeups". */
2384 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2385                                    "succeeded"
2386                                    " without prior pthread_cond_{signal,broadcast}");
sewardjb4112022007-11-09 22:49:28 +00002387 }
sewardjf98e1c02008-10-25 16:22:41 +00002388
2389 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002390 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2391
2392 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002393}
2394
2395static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2396 void* cond )
2397{
2398 /* Deal with destroy events. The only purpose is to free storage
2399 associated with the CV, so as to avoid any possible resource
2400 leaks. */
2401 if (SHOW_EVENTS >= 1)
2402 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2403 "(ctid=%d, cond=%p)\n",
2404 (Int)tid, (void*)cond );
2405
sewardj02114542009-07-28 20:52:36 +00002406 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002407}
2408
2409
sewardj9f569b72008-11-13 13:33:09 +00002410/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002411/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002412/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002413
2414/* EXPOSITION only */
2415static
2416void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2417{
2418 if (SHOW_EVENTS >= 1)
2419 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2420 (Int)tid, (void*)rwl );
2421 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002422 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002423 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2424}
2425
2426static
2427void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2428{
2429 Thread* thr;
2430 Lock* lk;
2431 if (SHOW_EVENTS >= 1)
2432 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2433 (Int)tid, (void*)rwl );
2434
2435 thr = map_threads_maybe_lookup( tid );
2436 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002437 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002438
2439 lk = map_locks_maybe_lookup( (Addr)rwl );
2440
2441 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002442 HG_(record_error_Misc)(
2443 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002444 }
2445
2446 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002447 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002448 tl_assert( lk->guestaddr == (Addr)rwl );
2449 if (lk->heldBy) {
2450 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002451 HG_(record_error_Misc)(
2452 thr, "pthread_rwlock_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00002453 /* remove lock from locksets of all owning threads */
2454 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002455 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002456 lk->heldBy = NULL;
2457 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002458 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002459 }
2460 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002461 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjc1fb9d22011-02-28 09:03:44 +00002462
2463 if (HG_(clo_track_lockorders))
2464 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002465 map_locks_delete( lk->guestaddr );
2466 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002467 }
2468
sewardjf98e1c02008-10-25 16:22:41 +00002469 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002470 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2471}
2472
2473static
sewardj789c3c52008-02-25 12:10:07 +00002474void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2475 void* rwl,
2476 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002477{
2478 /* Just check the rwl is sane; nothing else to do. */
2479 // 'rwl' may be invalid - not checked by wrapper
2480 Thread* thr;
2481 Lock* lk;
2482 if (SHOW_EVENTS >= 1)
2483 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2484 (Int)tid, (Int)isW, (void*)rwl );
2485
2486 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002487 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002488 thr = map_threads_maybe_lookup( tid );
2489 tl_assert(thr); /* cannot fail - Thread* must already exist */
2490
2491 lk = map_locks_maybe_lookup( (Addr)rwl );
2492 if ( lk
2493 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2494 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002495 HG_(record_error_Misc)(
2496 thr, "pthread_rwlock_{rd,rw}lock with a "
2497 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002498 }
2499}
2500
2501static
2502void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2503{
2504 // only called if the real library call succeeded - so mutex is sane
2505 Thread* thr;
2506 if (SHOW_EVENTS >= 1)
2507 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2508 (Int)tid, (Int)isW, (void*)rwl );
2509
2510 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2511 thr = map_threads_maybe_lookup( tid );
2512 tl_assert(thr); /* cannot fail - Thread* must already exist */
2513
2514 (isW ? evhH__post_thread_w_acquires_lock
2515 : evhH__post_thread_r_acquires_lock)(
2516 thr,
2517 LK_rdwr, /* if not known, create new lock with this LockKind */
2518 (Addr)rwl
2519 );
2520}
2521
2522static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2523{
2524 // 'rwl' may be invalid - not checked by wrapper
2525 Thread* thr;
2526 if (SHOW_EVENTS >= 1)
2527 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2528 (Int)tid, (void*)rwl );
2529
2530 thr = map_threads_maybe_lookup( tid );
2531 tl_assert(thr); /* cannot fail - Thread* must already exist */
2532
2533 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2534}
2535
2536static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2537{
2538 // only called if the real library call succeeded - so mutex is sane
2539 Thread* thr;
2540 if (SHOW_EVENTS >= 1)
2541 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2542 (Int)tid, (void*)rwl );
2543 thr = map_threads_maybe_lookup( tid );
2544 tl_assert(thr); /* cannot fail - Thread* must already exist */
2545
2546 // anything we should do here?
2547}
2548
2549
sewardj9f569b72008-11-13 13:33:09 +00002550/* ---------------------------------------------------------- */
2551/* -------------- events to do with semaphores -------------- */
2552/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002553
sewardj11e352f2007-11-30 11:11:02 +00002554/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002555 variables. */
2556
sewardjf98e1c02008-10-25 16:22:41 +00002557/* For each semaphore, we maintain a stack of SOs. When a 'post'
2558 operation is done on a semaphore (unlocking, essentially), a new SO
2559 is created for the posting thread, the posting thread does a strong
2560 send to it (which merely installs the posting thread's VC in the
2561 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002562
2563 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002564 semaphore, we pop a SO off the semaphore's stack (which should be
2565 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002566 dependencies between posters and waiters of the semaphore.
2567
sewardjf98e1c02008-10-25 16:22:41 +00002568 It may not be necessary to use a stack - perhaps a bag of SOs would
2569   do.  But we do need to keep track of how many not-yet-consumed
2570   posts have happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002571
sewardjf98e1c02008-10-25 16:22:41 +00002572 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002573 twice on S. T3 cannot complete its waits without both T1 and T2
2574 posting. The above mechanism will ensure that T3 acquires
2575 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002576
sewardjf98e1c02008-10-25 16:22:41 +00002577 When a semaphore is initialised with value N, we do as if we'd
2578 posted N times on the semaphore: basically create N SOs and do a
2579 strong send to all of then. This allows up to N waits on the
2580   strong send to all of them.  This allows up to N waits on the
2581 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002582
2583 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2584 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002585*/
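/* Worked trace of the T1/T2/T3 example above (illustrative):

      T1: sem_post(&s)  ->  new SO1, strong send, stack = [SO1]
      T2: sem_post(&s)  ->  new SO2, strong send, stack = [SO1,SO2]
      T3: sem_wait(&s)  ->  pops SO2, strong recv: dependency on T2
      T3: sem_wait(&s)  ->  pops SO1, strong recv: dependency on T1

   leaving T3 ordered after both posters, as required.  (The LIFO
   order of the pops is immaterial here; what matters is that each
   post is consumed exactly once.) */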

/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
      tl_assert(map_sem_to_SO_stack != NULL);
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd.  No stack for this semaphore. */
      return NULL;
   }
}

static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
{
   UWord keyW, valW;
   SO*   so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   map_sem_to_SO_stack_INIT();

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
      XArray* xa = (XArray*)valW;
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
      VG_(deleteXA)(xa);
   }
}

static
void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
{
   SO*     so;
   Thread* thr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
                  (Int)tid, (void*)sem, value );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* Empty out the semaphore's SO stack.  This way of doing it is
      stupid, but at least it's easy. */
   while (1) {
      so = mb_pop_SO_for_sem( sem );
      if (!so) break;
      libhb_so_dealloc(so);
   }

   /* If we don't do this check, the following while loop runs us out
      of memory for stupid initial values of 'value'. */
   if (value > 10000) {
      HG_(record_error_Misc)(
         thr, "sem_init: initial value exceeds 10000; using 10000" );
      value = 10000;
   }

   /* Now create 'value' new SOs for the thread, do a strong send to
      each of them, and push them all on the stack. */
   for (; value > 0; value--) {
      Thr* hbthr = thr->hbthr;
      tl_assert(hbthr);

      so = libhb_so_alloc();
      libhb_so_send( hbthr, so, True/*strong send*/ );
      push_SO_for_sem( sem, so );
   }
}

static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on a stack of SOs associated with 'sem'.  This is later used by
      other thread(s) which successfully exit from a sem_wait on the
      same sem; by doing a strong recv from SOs popped off the stack,
      they acquire dependencies on the posting thread's
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}

static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
{
   /* A sem_wait(sem) completed successfully.  Pop the posting-SO for
      the 'sem' from this semaphore's SO-stack, and do a strong recv
      from it.  This creates a dependency back to one of the post-ers
      for the semaphore. */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   so = mb_pop_SO_for_sem( sem );

   if (so) {
      hbthr = thr->hbthr;
      tl_assert(hbthr);

      libhb_so_recv( hbthr, so, True/*strong recv*/ );
      libhb_so_dealloc(so);
   } else {
      /* Hmm.  How can a wait on 'sem' succeed if nobody posted to it?
         If this happened it would surely be a bug in the threads
         library. */
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: sem_wait succeeded on"
              " semaphore without prior sem_post");
   }
}


/* -------------------------------------------------------- */
/* -------------- events to do with barriers -------------- */
/* -------------------------------------------------------- */

typedef
   struct {
      Bool    initted;   /* has it yet been initted by guest? */
      Bool    resizable; /* is resizing allowed? */
      UWord   size;      /* declared size */
      XArray* waiting;   /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;

static Bar* new_Bar ( void ) {
   Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
   tl_assert(bar);
   /* all fields are zero */
   tl_assert(bar->initted == False);
   return bar;
}

static void delete_Bar ( Bar* bar ) {
   tl_assert(bar);
   if (bar->waiting)
      VG_(deleteXA)(bar->waiting);
   HG_(free)(bar);
}

/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
static WordFM* map_barrier_to_Bar = NULL;

static void map_barrier_to_Bar_INIT ( void ) {
   if (UNLIKELY(map_barrier_to_Bar == NULL)) {
      map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
                                       "hg.mbtBI.1", HG_(free), NULL );
      tl_assert(map_barrier_to_Bar != NULL);
   }
}

static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
   UWord key, val;
   map_barrier_to_Bar_INIT();
   if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
      tl_assert(key == (UWord)barrier);
      return (Bar*)val;
   } else {
      Bar* bar = new_Bar();
      VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
      return bar;
   }
}

static void map_barrier_to_Bar_delete ( void* barrier ) {
   UWord keyW, valW;
   map_barrier_to_Bar_INIT();
   if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
      Bar* bar = (Bar*)valW;
      tl_assert(keyW == (UWord)barrier);
      delete_Bar(bar);
   }
}


static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count,
                                               UWord resizable )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
                  (Int)tid, (void*)barrier, count, resizable );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   if (resizable != 0 && resizable != 1) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: invalid 'resizable' argument"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted   = True;
   bar->resizable = resizable == 1 ? True : False;
   bar->size      = count;
}


static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}


/* All the threads have arrived.  Now do the Interesting Bit.  Get a
   new synchronisation object and do a weak send to it from all the
   participating threads.  This makes its vector clocks be the join of
   all the individual threads' vector clocks.  Then do a strong
   receive from it back to all threads, so that their VCs are a copy
   of it (hence are all equal to the join of their original VCs.) */
static void do_barrier_cross_sync_and_empty ( Bar* bar )
{
   /* XXX check bar->waiting has no duplicates */
   UWord i;
   SO*   so = libhb_so_alloc();

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
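
/* Worked example of the join-then-distribute step above, with
   hypothetical vector clocks (an illustration only, not Helgrind
   output).  Suppose three threads arrive at the barrier with

      T1: [5,0,0]    T2: [0,3,0]    T3: [0,0,7]

   The three weak sends leave the SO holding the join [5,3,7].  The
   three strong receives then fold that join back into each thread, so
   every thread leaves with a VC that dominates [5,3,7].  Hence every
   access made before the barrier by any thread happens-before every
   access made after the barrier by any thread, which is exactly the
   semantics pthread_barrier_wait is supposed to provide. */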


static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
   /* This function gets called after a client thread calls
      pthread_barrier_wait but before it arrives at the real
      pthread_barrier_wait.

      Why is the following correct?  It's a bit subtle.

      If this is not the last thread arriving at the barrier, we simply
      note its presence and return.  Because valgrind (at least as of
      Nov 08) is single threaded, we are guaranteed safe from any race
      conditions when in this function -- no other client threads are
      running.

      If this is the last thread, then we are again the only running
      thread.  All the other threads will have either arrived at the
      real pthread_barrier_wait or are on their way to it, but in any
      case are guaranteed not to be able to move past it, because this
      thread is currently in this function and so has not yet arrived
      at the real pthread_barrier_wait.  That means that:

      1. While we are in this function, none of the other threads
         waiting at the barrier can move past it.

      2. When this function returns (and simulated execution resumes),
         this thread and all other waiting threads will be able to move
         past the real barrier.

      Because of this, it is now safe to update the vector clocks of
      all threads, to represent the fact that they all arrived at the
      barrier and have all moved on.  There is no danger of any
      complications to do with some threads leaving the barrier and
      racing back round to the front, whilst others are still leaving
      (which is the primary source of complication in correct handling/
      implementation of barriers).  That can't happen because we update
      here our data structures so as to indicate that the threads have
      passed the barrier, even though, as per (2) above, they are
      guaranteed not to pass the barrier until we return.

      This relies crucially on Valgrind being single threaded.  If that
      changes, this will need to be reconsidered.
   */
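
   /* A concrete (hypothetical) timeline for a count-3 barrier, to make
      the argument above easier to follow:

         T1 calls wait -> this hook runs, waiting = {T1},    return
         T2 calls wait -> this hook runs, waiting = {T1,T2}, return
         T3 calls wait -> this hook runs, sees present == size, does
                          the cross-sync for {T1,T2,T3}, empties
                          'waiting', then returns

      Only when T3's hook returns can any of the three threads move
      past the real pthread_barrier_wait, so the VC updates are always
      complete before any thread resumes. */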
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   if (present < bar->size)
      return;

   do_barrier_cross_sync_and_empty(bar);
}


static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
                                                 void* barrier,
                                                 UWord newcount )
{
   Thread* thr;
   Bar*    bar;
   UWord   present;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
                  "(tid=%d, barrier=%p, newcount=%lu)\n",
                  (Int)tid, (void*)barrier, newcount );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (!bar->resizable) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: barrier may not be resized"
      );
      return; /* client is broken .. avoid assertions below */
   }

   if (newcount == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_resize: 'newcount' argument is zero"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);
   /* Guaranteed by this fn */
   tl_assert(newcount > 0);

   if (newcount >= bar->size) {
      /* Increasing the capacity.  There's no possibility of threads
         moving on from the barrier in this situation, so just note
         the fact and do nothing more. */
      bar->size = newcount;
   } else {
      /* Decreasing the capacity.  If we decrease it to be equal or
         below the number of waiting threads, they will now move past
         the barrier, so we need to mess with dep edges in the same way
         as if the barrier had filled up normally. */
      present = VG_(sizeXA)(bar->waiting);
      tl_assert(present <= bar->size);
      if (newcount <= present) {
         bar->size = present; /* keep the cross_sync call happy */
         do_barrier_cross_sync_and_empty(bar);
      }
      bar->size = newcount;
   }
}


/* ----------------------------------------------------- */
/* ----- events to do with user-specified HB edges ----- */
/* ----------------------------------------------------- */

/* A mapping from arbitrary UWord tag to the SO associated with it.
   The UWord tags are meaningless to us, interpreted only by the
   user. */
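
/* Illustrative client usage (hypothetical code; the ANNOTATE_* macro
   names follow the client-request interface declared in helgrind.h,
   and this is only a sketch of how these events get triggered):

      // Producer thread:
      fill_buffer(buf);
      ANNOTATE_HAPPENS_BEFORE(&flag);   // -> evh__HG_USERSO_SEND_PRE
      flag = 1;

      // Consumer thread:
      while (!flag) ;                   // spin until published
      ANNOTATE_HAPPENS_AFTER(&flag);    // -> evh__HG_USERSO_RECV_POST
      consume_buffer(buf);              // ordered after fill_buffer

   Both annotations use &flag as the usertag, so they share one SO and
   hence create the required happens-before edge. */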



/* UWord -> SO* */
static WordFM* map_usertag_to_SO = NULL;

static void map_usertag_to_SO_INIT ( void ) {
   if (UNLIKELY(map_usertag_to_SO == NULL)) {
      map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
                                      "hg.mutS.1", HG_(free), NULL );
      tl_assert(map_usertag_to_SO != NULL);
   }
}

static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
   UWord key, val;
   map_usertag_to_SO_INIT();
   if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
      tl_assert(key == (UWord)usertag);
      return (SO*)val;
   } else {
      SO* so = libhb_so_alloc();
      VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
      return so;
   }
}

// If it's ever needed (XXX check before use)
//static void map_usertag_to_SO_delete ( UWord usertag ) {
//   UWord keyW, valW;
//   map_usertag_to_SO_INIT();
//   if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
//      SO* so = (SO*)valW;
//      tl_assert(keyW == usertag);
//      tl_assert(so);
//      libhb_so_dealloc(so);
//   }
//}


static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'strong send' on the SO.  This is later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on this signalling event. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}

static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}


/*--------------------------------------------------------------*/
/*--- Lock acquisition order monitoring                      ---*/
/*--------------------------------------------------------------*/

/* FIXME: here are some optimisations still to do in
   laog__pre_thread_acquires_lock.

   The graph is structured so that if L1 --*--> L2 then L1 must be
   acquired before L2.

   The common case is that some thread T holds (eg) L1 L2 and L3 and
   is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing.  Hence it repeatedly:

   (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
       produces the answer No (because there is no error).

   (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
       (because they already got added the first time T acquired Ln).

   Hence cache these two events:

   (1) Cache result of the query from last time.  Invalidate the cache
       any time any edges are added to or deleted from laog.

   (2) Cache these add-edge requests and ignore them if said edges
       have already been added to laog.  Invalidate the cache any time
       any edges are deleted from laog.
*/
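
/* One possible shape for the caches described above (a sketch only;
   not implemented, and the names are hypothetical):

      typedef
         struct {
            Lock*     last_lk;      // lock queried last time round
            WordSetID last_lockset; // thr->locksetA for that query
            Bool      last_answer;  // result of that query
         }
         LaogQueryCache;

   together with a small fixed-size array of recently added (src,dst)
   edge pairs for (2).  Both would be flushed on any laog modification,
   as stated above. */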

typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;

/* lock order acquisition graph */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */

/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;

static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
   /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
   LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
   LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
   if (llx1->src_ga < llx2->src_ga) return -1;
   if (llx1->src_ga > llx2->src_ga) return  1;
   if (llx1->dst_ga < llx2->dst_ga) return -1;
   if (llx1->dst_ga > llx2->dst_ga) return  1;
   return 0;
}

static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */


__attribute__((noinline))
static void laog__init ( void )
{
   tl_assert(!laog);
   tl_assert(!laog_exposition);
   tl_assert(HG_(clo_track_lockorders));

   laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
                      HG_(free), NULL/*unboxedcmp*/ );

   laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
                                 cmp_LAOGLinkExposition );
   tl_assert(laog);
   tl_assert(laog_exposition);
}

static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}

__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   Bool       presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}

__attribute__((noinline))
static void laog__del_edge ( Lock* src, Lock* dst ) {
   Word       keyW;
   LAOGLinks* links;
   if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->outs;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
   Word       keyW;
   LAOGLinks* links;
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
      tl_assert(links);
      tl_assert(keyW == (Word)lk);
      return links->inns;
   } else {
      return HG_(emptyWS)( univ_laog );
   }
}

__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}

/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exists from 'src' to any element in 'dst', return
   NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock*     ret;
   Word      i, ssz;
   XArray*   stack;   /* of Lock* */
   WordFM*   visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock*     here;
   WordSetID succs;
   Word      succs_size;
   UWord*    succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret     = NULL;
   stack   = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      if (ssz == 0) { ret = NULL; break; }

      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}


/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word   ls_size, i;
   Lock*  other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
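
   /* Concrete example (hypothetical locks, for illustration only):
      suppose some thread once did

         lock(L1); lock(L2);    // adds edge L1 --> L2 to laog

      and a thread now holds L2 and is acquiring L1.  Then the DFS
      below, from L1 to {L2}, finds the path L1 --*--> L2, i.e.
      evidence that L1 was previously acquired before L2, and a lock
      order violation is reported. */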
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}


/* Delete from 'laog' any pair mentioning a lock in locksToDelete */

__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}

//__attribute__((noinline))
//static void laog__handle_lock_deletions (
//               WordSetID /* in univ_laog */ locksToDelete
//            )
//{
//   Word   i, ws_size;
//   UWord* ws_words;
//
//   HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
//   for (i = 0; i < ws_size; i++)
//      laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
//
//   if (HG_(clo_sanity_flags) & SCE_LAOG)
//      all__sanity_check("laog__handle_lock_deletions-post");
//}


/*--------------------------------------------------------------*/
/*--- Malloc/free replacements                               ---*/
/*--------------------------------------------------------------*/

typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;

/* A hash table of MallocMetas, used to track malloc'd blocks
   (obviously). */
static VgHashTable hg_mallocmeta_table = NULL;


static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}


/* Allocate a client block and set up the metadata for it. */

static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr        p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}

/* Re the checks for less-than-zero (also in hg_cli__realloc below):
   Cast to a signed type to catch any unexpectedly negative args.
   We're assuming here that the size asked for is not greater than
   2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
   platforms). */
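
/* Worked instance of the cast trick, for a 32-bit platform (an
   illustration only): if a caller computes a size that wraps round to
   n == 0xFFFFFFF0 (i.e. (SizeT)-16), then (SSizeT)n == -16 < 0 and
   the request is rejected, rather than being passed to the allocator
   as an absurdly large unsigned value. */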
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
   if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
   return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
                         /*is_zeroed*/True );
}


/* Free a client block, including getting rid of the relevant
   metadata. */

static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT      szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}

static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}


static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
{
   MallocMeta *md, *md_new, *md_tmp;
   SizeT      i;

   Addr payload = (Addr)payloadV;

   if (((SSizeT)new_size) < 0) return NULL;

   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
   if (!md)
      return NULL; /* apparently realloc-ing a bogus address.  Oh well. */

   tl_assert(md->payload == payload);

   if (md->szB == new_size) {
      /* size unchanged */
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   if (md->szB > new_size) {
      /* new size is smaller.  Tell the lower levels about the dying
         tail while md->szB still holds the old size; doing it the
         other way round would compute a zero-length range. */
      evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
      md->szB   = new_size;
      md->where = VG_(record_ExeContext)(tid, 0);
      return payloadV;
   }

   /* else */ {
      /* new size is bigger */
      Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      // FIXME: shouldn't we use a copier which implements the
      // memory state machine?
      evh__copy_mem( payload, p_new, md->szB );
      evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
                          /*inited*/False );
      /* FIXME: can anything funny happen here?  specifically, if the
         old range contained a lock, then die_mem_heap will complain.
         Is that the correct behaviour?  Not sure. */
      evh__die_mem_heap( payload, md->szB );

      /* Copy from old to new */
      for (i = 0; i < md->szB; i++)
         ((UChar*)p_new)[i] = ((UChar*)payload)[i];

      /* Because the metadata hash table is indexed by payload address,
         we have to get rid of the old hash table entry and make a new
         one.  We can't just modify the existing metadata in place,
         because then it would (almost certainly) be in the wrong hash
         chain. */
      md_new = new_MallocMeta();
      *md_new = *md;

      md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
      tl_assert(md_tmp);
      tl_assert(md_tmp == md);

      VG_(cli_free)((void*)md->payload);
      delete_MallocMeta(md);

      /* Update fields */
      md_new->where   = VG_(record_ExeContext)( tid, 0 );
      md_new->szB     = new_size;
      md_new->payload = p_new;
      md_new->thr     = map_threads_lookup( tid );

      /* and add */
      VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );

      return (void*)p_new;
   }
}

static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
{
   MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );

   // There may be slop, but pretend there isn't because only the asked-for
   // area will have been shadowed properly.
   return ( md ? md->szB : 0 );
}


/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
   Slow linear search, with a bit of hash table help if 'data_addr'
   is either the start of a block or up to 15 word-sized steps along
   from the start of a block. */
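
/* Example of the fast path (illustrative): if 'data_addr' is
   payload + 3 * sizeof(UWord) for some block, then the probe with
   i == 3 below computes exactly that block's payload address as the
   hash key, the lookup hits, and the full linear scan is skipped. */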

static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
{
   /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
      right at it. */
   if (UNLIKELY(mm->szB == 0 && a == mm->payload))
      return True;
   /* else normal interval rules apply */
   if (LIKELY(a < mm->payload)) return False;
   if (LIKELY(a >= mm->payload + mm->szB)) return False;
   return True;
}

Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
                                    /*OUT*/Addr*        payload,
                                    /*OUT*/SizeT*       szB,
                                    Addr                data_addr )
{
   MallocMeta* mm;
   Int i;
   const Int n_fast_check_words = 16;

   /* First, do a few fast searches on the basis that data_addr might
      be exactly the start of a block or up to 15 words inside.  This
      can happen commonly via the creq
      _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
   for (i = 0; i < n_fast_check_words; i++) {
      mm = VG_(HT_lookup)( hg_mallocmeta_table,
                           data_addr - (UWord)(UInt)i * sizeof(UWord) );
      if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Well, this totally sucks.  But without using an interval tree or
      some such, it's hard to see how to do better.  We have to check
      every block in the entire table. */
   VG_(HT_ResetIter)(hg_mallocmeta_table);
   while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
      if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
         goto found;
   }

   /* Not found.  Bah. */
   return False;
   /*NOTREACHED*/

  found:
   tl_assert(mm);
   tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
   if (where)   *where   = mm->where;
   if (payload) *payload = mm->payload;
   if (szB)     *szB     = mm->szB;
   return True;
}


/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

static void instrument_mem_access ( IRSB*   bbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}


/* Figure out if GA is a guest code address in the dynamic linker, and
   if so return True.  Otherwise (and in case of any doubt) return
   False.  (Errs on the side of caution: False is the safe answer.) */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const UChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}
4112
sewardjb4112022007-11-09 22:49:28 +00004113static
4114IRSB* hg_instrument ( VgCallbackClosure* closure,
4115 IRSB* bbIn,
4116 VexGuestLayout* layout,
4117 VexGuestExtents* vge,
4118 IRType gWordTy, IRType hWordTy )
4119{
sewardj1c0ce7a2009-07-01 08:10:49 +00004120 Int i;
4121 IRSB* bbOut;
4122 Addr64 cia; /* address of current insn */
4123 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004124 Bool inLDSO = False;
4125 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004126
4127 if (gWordTy != hWordTy) {
4128 /* We don't currently support this case. */
4129 VG_(tool_panic)("host/guest word size mismatch");
4130 }
4131
sewardja0eee322009-07-31 08:46:35 +00004132 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4133 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4134 }
4135
sewardjb4112022007-11-09 22:49:28 +00004136 /* Set up BB */
4137 bbOut = emptyIRSB();
4138 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4139 bbOut->next = deepCopyIRExpr(bbIn->next);
4140 bbOut->jumpkind = bbIn->jumpkind;
4141
4142 // Copy verbatim any IR preamble preceding the first IMark
4143 i = 0;
4144 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4145 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4146 i++;
4147 }
4148
sewardj1c0ce7a2009-07-01 08:10:49 +00004149 // Get the first statement, and initial cia from it
4150 tl_assert(bbIn->stmts_used > 0);
4151 tl_assert(i < bbIn->stmts_used);
4152 st = bbIn->stmts[i];
4153 tl_assert(Ist_IMark == st->tag);
4154 cia = st->Ist.IMark.addr;
4155 st = NULL;
4156
sewardjb4112022007-11-09 22:49:28 +00004157 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004158 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004159 tl_assert(st);
4160 tl_assert(isFlatIRStmt(st));
4161 switch (st->tag) {
4162 case Ist_NoOp:
4163 case Ist_AbiHint:
4164 case Ist_Put:
4165 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004166 case Ist_Exit:
4167 /* None of these can contain any memory references. */
4168 break;
4169
sewardj1c0ce7a2009-07-01 08:10:49 +00004170 case Ist_IMark:
4171 /* no mem refs, but note the insn address. */
4172 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004173 /* Don't instrument the dynamic linker. It generates a
4174 lot of races which we just expensively suppress, so
4175 it's pointless.
4176
4177 Avoid flooding is_in_dynamic_linker_shared_object with
4178 requests by only checking at transitions between 4K
4179 pages. */
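            /* E.g. with cia == 0x40001234, cia & ~(Addr64)0xFFF ==
               0x40001000; while successive IMarks stay inside that
               4K page the cached inLDSO answer is reused, and the
               soname lookup below is redone only on a page
               transition. */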
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
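            /* (For a double CAS the two elements are adjacent in
               memory, so the whole transaction is reported below as
               a single read of twice the element size at cas->addr;
               e.g. a DCAS on two 32-bit words counts as one 8-byte
               read.) */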
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Load-linkeds, on the other hand, are treated
               the same as normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }
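         /* (So, e.g., on an ARM guest a ldrex of a 32-bit word is
            instrumented above as a plain 4-byte read, while the
            matching strex produces no instrumentation at all.) */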

         case Ist_Store:
            /* A plain store; instrument it as a write.
               (Store-conditionals never show up here: they are
               ignored via the Ist_LLSC case above.) */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* A vanilla load; instrument it as a read.
               (Load-linkeds are handled in the Ist_LLSC case
               above.) */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
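      /* (The case above is reached from client code via the
         VALGRIND_HG_CLEAN_MEMORY macro in helgrind.h, roughly:

            VALGRIND_HG_CLEAN_MEMORY(buf, sizeof buf);

         after which the range is treated as freshly allocated,
         exclusively-owned memory.) */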

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB    = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
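   /* (Worked example: "--hg-sanity-flags=010000" sets only bit
      1 << (6-1-1) == 0x10, which enables sanity checks after changes
      to the lock-order-acquisition-graph; see hg_print_debug_usage
      below for the meaning of each position.) */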

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               " at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         if (HG_(clo_track_lockorders)) {
            VG_(printf)("\n");
            HG_(ppWSUstats)( univ_laog,  "univ_laog" );
         }
      }

      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz             stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                     (Int)HG_(cardinalityWSU)( univ_laog ));
      }

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      if (HG_(clo_track_lockorders)) {
         VG_(printf)("            LAOG: %'8d map size\n",
                     (Int)(laog ? VG_(sizeFM)( laog ) : 0));
         VG_(printf)(" LAOG exposition: %'8d map size\n",
                     (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      }

      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
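/* (E.g. if the unwinder can produce only 3 frames when 8 were
   requested, frames[3..7] are zero-filled above, so libhb always
   sees a fully initialised, zero-padded array of nRequest
   entries.) */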

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_post_clo_init ( void )
{
   Thr* hbthr_root;

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////


   if (HG_(clo_track_lockorders))
      laog__init();

   initialise_data_structures(hbthr_root);
}

static void hg_pre_clo_init ( void )
{
   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/