
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
      info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)
#include "pub_tool_aspacemgr.h"  // VG_(am_is_valid_for_client)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid.

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
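
/* A minimal illustration of the kind of cast in question (a sketch
   only; 'fm', 'ga' and 'lk' here are hypothetical locals):

      Lock* lk    = NULL;
      Bool  found = VG_(lookupFM)( fm, NULL, (Word*)&lk, (Word)ga );

   Writing through the (Word*)&lk lvalue stores a Word into an object
   declared as Lock*, which is what falls foul of the strict-aliasing
   rules at -O2. */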
sewardjb4112022007-11-09 22:49:28 +000083
sewardjefd3b4d2007-12-02 02:05:23 +000084// FIXME catch sync signals (SEGV, basically) and unlock BHL,
85// if held. Otherwise a LOCK-prefixed insn which segfaults
86// gets Helgrind into a total muddle as the BHL will not be
87// released after the insn.
88
sewardjb4112022007-11-09 22:49:28 +000089// FIXME what is supposed to happen to locks in memory which
90// is relocated as a result of client realloc?
91
sewardjb4112022007-11-09 22:49:28 +000092// FIXME put referencing ThreadId into Thread and get
93// rid of the slow reverse mapping function.
94
95// FIXME accesses to NoAccess areas: change state to Excl?
96
97// FIXME report errors for accesses of NoAccess memory?
98
99// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
100// the thread still holds the lock.
101
/* ------------ Debug/trace options ------------ */

// this is:
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
#define SHOW_EXPENSIVE_STUFF 0

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin linked list of Locks */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* Never changed; we only care about its address.  It is treated as
   if it were a standard userspace lock.  Also we have a Lock*
   describing it so it can participate in lock sets in the usual
   way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;

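/* How these structures tie together (a sketch; the real checks live
   in the sanity-check section further down):

      tl_assert( map_threads[tid]->coretid == tid );
      tl_assert( lk == map_locks_maybe_lookup(lk->guestaddr) );

   i.e. map_threads and map_locks must agree with the back-references
   stored in each Thread and Lock respectively. */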

/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin = admin_locks;
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks = lock;
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
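
/* Client-side sketch (hedged; assumes POSIX mutex/rwlock semantics)
   of the cases the switch above distinguishes:

      pthread_mutex_lock(&m);      // LK_nonRec: heldBy NULL -> { thr }
      pthread_mutex_lock(&rm);     // LK_mbRec, first lock: as nonRec
      pthread_mutex_lock(&rm);     // LK_mbRec, again: thr's count -> 2
      pthread_rwlock_wrlock(&rw);  // LK_rdwr: only allowed when unheld

   'heldBy' is a bag (multiset), so a recursive write-acquisition by
   the owning thread just bumps that thread's multiplicity in it. */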

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
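
/* Sketch of how acquire/release multiplicities pair up for a
   recursive (LK_mbRec) lock -- an illustrative trace, not real code:

      lockN_acquire_writer(lk, thr);  // heldBy = { thr }
      lockN_acquire_writer(lk, thr);  // heldBy = { thr, thr }
      lockN_release(lk, thr);         // heldBy = { thr }
      lockN_release(lk, thr);         // bag empties: heldBy freed, NULL

   Only the release that empties the bag resets heldW and
   acquired_at. */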

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
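
/* Typical use from the lock-acquisition handlers further down (a
   sketch; it mirrors evhH__post_thread_w_acquires_lock):

      Lock* lk = map_locks_lookup_or_create(
                    lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
      lockN_acquire_writer( lk, thr );

   i.e. a guest address is first canonicalised to a Lock*, and all
   subsequent state updates go through the lockN_* functions. */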



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}


///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a
//   good chance that it is indeed the lock that the programmer
//   intended to use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This is in the case where there are many
//   transitions ShR -> ShR, all with an empty lockset, and only later
//   is there a transition to ShM.  So what we want to do is note the
//   lossage lock at the point where a ShR -> ShR transition empties out
//   the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates
//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
//   we do not record the Lock* directly as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
//
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
//
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}

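/* Sketch of how a moving client realloc could use the helpers above
   (hedged; 'thr', 'old', 'new' and 'len' are hypothetical here):

      shadow_mem_scopy_range( thr, old, new, len ); // copy shadow state
      shadow_mem_make_NoAccess( thr, old, len );    // old block is dead

   Per the FIXME above, the copy moves shadow memory verbatim rather
   than pushing the copied range through the MSM. */
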

/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
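
/* The happens-before protocol used above, in one place (a sketch;
   the send side appears in evhH__pre_thread_releases_lock below,
   and 'thrU'/'thrL' are hypothetical unlocker/locker threads):

      unlocker:     libhb_so_send( thrU->hbthr, lk->hbso, True );
      next locker:  libhb_so_recv( thrL->hbthr, lk->hbso, True );

   Each Lock carries an SO (synchronisation object): the unlocking
   thread pushes its vector clock into it, and the next acquirer pulls
   it out, creating the ordering edge the race detector relies on. */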


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
sewardj983f3022009-05-21 14:49:55 +00001391 /* n is zero. This means we don't hold the lock any more. But
1392 if it's a rwlock held in r-mode, someone else could still
1393 hold it. Just do whatever sanity checks we can. */
1394 if (lock->kind == LK_rdwr && lock->heldBy) {
1395 /* It's a rwlock. We no longer hold it but we used to;
1396 nevertheless it still appears to be held by someone else.
1397 The implication is that, prior to this release, it must
 1398          have been shared by us and whoever else is holding it;
1399 which in turn implies it must be r-held, since a lock
1400 can't be w-held by more than one thread. */
1401 /* The lock is now R-held by somebody else: */
1402 tl_assert(lock->heldW == False);
1403 } else {
1404 /* Normal case. It's either not a rwlock, or it's a rwlock
1405 that we used to hold in w-mode (which is pretty much the
1406 same thing as a non-rwlock.) Since this transaction is
1407 atomic (V does not allow multiple threads to run
1408 simultaneously), it must mean the lock is now not held by
1409 anybody. Hence assert for it. */
1410 /* The lock is now not held by anybody: */
1411 tl_assert(!lock->heldBy);
1412 tl_assert(lock->heldW == False);
1413 }
sewardjf98e1c02008-10-25 16:22:41 +00001414 //if (lock->heldBy) {
1415 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1416 //}
sewardjb4112022007-11-09 22:49:28 +00001417 /* update this thread's lockset accordingly. */
1418 thr->locksetA
1419 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1420 thr->locksetW
1421 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001422 /* push our VC into the lock */
1423 tl_assert(thr->hbthr);
1424 tl_assert(lock->hbso);
1425 /* If the lock was previously W-held, then we want to do a
1426 strong send, and if previously R-held, then a weak send. */
1427 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001428 }
1429 /* fall through */
1430
1431 error:
sewardjf98e1c02008-10-25 16:22:41 +00001432 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001433}
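
/* Illustrative client fragment (not Helgrind code) that reaches the
   UnlockForeign case above: thread B releases a mutex that thread A
   acquired.

      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      void* a_fn ( void* v ) { pthread_mutex_lock(&mx);   return NULL; }
      void* b_fn ( void* v ) { pthread_mutex_unlock(&mx); return NULL; }

   POSIX makes b_fn's unlock of a mutex it does not hold undefined (or
   failing), so we complain but leave our recorded lock state alone. */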
1434
1435
sewardj9f569b72008-11-13 13:33:09 +00001436/* ---------------------------------------------------------- */
1437/* -------- Event handlers proper (evh__* functions) -------- */
1438/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001439
1440/* What is the Thread* for the currently running thread? This is
1441 absolutely performance critical. We receive notifications from the
1442 core for client code starts/stops, and cache the looked-up result
1443 in 'current_Thread'. Hence, for the vast majority of requests,
1444 finding the current thread reduces to a read of a global variable,
1445 provided get_current_Thread_in_C_C is inlined.
1446
1447 Outside of client code, current_Thread is NULL, and presumably
1448 any uses of it will cause a segfault. Hence:
1449
1450 - for uses definitely within client code, use
1451 get_current_Thread_in_C_C.
1452
1453 - for all other uses, use get_current_Thread.
1454*/
1455
sewardj23f12002009-07-24 08:45:08 +00001456static Thread *current_Thread = NULL,
1457 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001458
1459static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1460 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1461 tl_assert(current_Thread == NULL);
1462 current_Thread = map_threads_lookup( tid );
1463 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001464 if (current_Thread != current_Thread_prev) {
1465 libhb_Thr_resumes( current_Thread->hbthr );
1466 current_Thread_prev = current_Thread;
1467 }
sewardjb4112022007-11-09 22:49:28 +00001468}
1469static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1470 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1471 tl_assert(current_Thread != NULL);
1472 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001473 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001474}
1475static inline Thread* get_current_Thread_in_C_C ( void ) {
1476 return current_Thread;
1477}
1478static inline Thread* get_current_Thread ( void ) {
1479 ThreadId coretid;
1480 Thread* thr;
1481 thr = get_current_Thread_in_C_C();
1482 if (LIKELY(thr))
1483 return thr;
1484 /* evidently not in client code. Do it the slow way. */
1485 coretid = VG_(get_running_tid)();
1486 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001487 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001488 of initial memory layout) and VG_(get_running_tid)() returns
1489 VG_INVALID_THREADID at that point. */
1490 if (coretid == VG_INVALID_THREADID)
1491 coretid = 1; /* KLUDGE */
1492 thr = map_threads_lookup( coretid );
1493 return thr;
1494}
1495
1496static
1497void evh__new_mem ( Addr a, SizeT len ) {
1498 if (SHOW_EVENTS >= 2)
1499 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1500 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001501 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001502 all__sanity_check("evh__new_mem-post");
1503}
1504
1505static
sewardj1f77fec2010-04-12 19:51:04 +00001506void evh__new_mem_stack ( Addr a, SizeT len ) {
1507 if (SHOW_EVENTS >= 2)
1508 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1509 shadow_mem_make_New( get_current_Thread(),
1510 -VG_STACK_REDZONE_SZB + a, len );
1511 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1512 all__sanity_check("evh__new_mem_stack-post");
1513}
1514
1515static
sewardj7cf4e6b2008-05-01 20:24:26 +00001516void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1517 if (SHOW_EVENTS >= 2)
1518 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1519 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001520 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001521 all__sanity_check("evh__new_mem_w_tid-post");
1522}
1523
1524static
sewardjb4112022007-11-09 22:49:28 +00001525void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001526 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001527 if (SHOW_EVENTS >= 1)
1528 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1529 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1530 if (rr || ww || xx)
1531 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001532 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001533 all__sanity_check("evh__new_mem_w_perms-post");
1534}
1535
1536static
1537void evh__set_perms ( Addr a, SizeT len,
1538 Bool rr, Bool ww, Bool xx ) {
1539 if (SHOW_EVENTS >= 1)
1540 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1541 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1542 /* Hmm. What should we do here, that actually makes any sense?
1543 Let's say: if neither readable nor writable, then declare it
1544 NoAccess, else leave it alone. */
1545 if (!(rr || ww))
1546 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001547 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001548 all__sanity_check("evh__set_perms-post");
1549}
1550
1551static
1552void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001553 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001554 if (SHOW_EVENTS >= 2)
1555 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1556 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001557 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001558 all__sanity_check("evh__die_mem-post");
1559}
1560
1561static
sewardj406bac82010-03-03 23:03:40 +00001562void evh__untrack_mem ( Addr a, SizeT len ) {
1563 // whereas it doesn't ignore this
1564 if (SHOW_EVENTS >= 2)
1565 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1566 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1567 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1568 all__sanity_check("evh__untrack_mem-post");
1569}
1570
1571static
sewardj23f12002009-07-24 08:45:08 +00001572void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1573 if (SHOW_EVENTS >= 2)
1574 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1575 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1576 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1577 all__sanity_check("evh__copy_mem-post");
1578}
1579
1580static
sewardjb4112022007-11-09 22:49:28 +00001581void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1582{
1583 if (SHOW_EVENTS >= 1)
1584 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1585 (Int)parent, (Int)child );
1586
1587 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001588 Thread* thr_p;
1589 Thread* thr_c;
1590 Thr* hbthr_p;
1591 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001592
sewardjf98e1c02008-10-25 16:22:41 +00001593 tl_assert(HG_(is_sane_ThreadId)(parent));
1594 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001595 tl_assert(parent != child);
1596
1597 thr_p = map_threads_maybe_lookup( parent );
1598 thr_c = map_threads_maybe_lookup( child );
1599
1600 tl_assert(thr_p != NULL);
1601 tl_assert(thr_c == NULL);
1602
sewardjf98e1c02008-10-25 16:22:41 +00001603 hbthr_p = thr_p->hbthr;
1604 tl_assert(hbthr_p != NULL);
1605 tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001606
sewardjf98e1c02008-10-25 16:22:41 +00001607 hbthr_c = libhb_create ( hbthr_p );
1608
1609 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001610 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001611 thr_c = mk_Thread( hbthr_c );
1612 tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
1613 libhb_set_Thr_opaque(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001614
1615 /* and bind it in the thread-map table */
1616 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001617 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1618 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001619
1620 /* Record where the parent is so we can later refer to this in
1621 error messages.
1622
1623 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1624 The stack snapshot is taken immediately after the parent has
1625 returned from its sys_clone call. Unfortunately there is no
1626 unwind info for the insn following "syscall" - reading the
1627 glibc sources confirms this. So we ask for a snapshot to be
1628 taken as if RIP was 3 bytes earlier, in a place where there
1629 is unwind info. Sigh.
1630 */
1631 { Word first_ip_delta = 0;
1632# if defined(VGP_amd64_linux)
1633 first_ip_delta = -3;
1634# endif
1635 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1636 }
sewardjb4112022007-11-09 22:49:28 +00001637 }
1638
sewardjf98e1c02008-10-25 16:22:41 +00001639 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001640 all__sanity_check("evh__pre_thread_create-post");
1641}
1642
1643static
1644void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1645{
1646 Int nHeld;
1647 Thread* thr_q;
1648 if (SHOW_EVENTS >= 1)
1649 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1650 (Int)quit_tid );
1651
1652 /* quit_tid has disappeared without joining to any other thread.
1653 Therefore there is no synchronisation event associated with its
1654 exit and so we have to pretty much treat it as if it was still
1655 alive but mysteriously making no progress. That is because, if
1656 we don't know when it really exited, then we can never say there
1657 is a point in time when we're sure the thread really has
1658 finished, and so we need to consider the possibility that it
1659 lingers indefinitely and continues to interact with other
1660 threads. */
1661 /* However, it might have rendezvous'd with a thread that called
1662 pthread_join with this one as arg, prior to this point (that's
1663 how NPTL works). In which case there has already been a prior
1664 sync event. So in any case, just let the thread exit. On NPTL,
1665 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001666 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001667 thr_q = map_threads_maybe_lookup( quit_tid );
1668 tl_assert(thr_q != NULL);
1669
1670 /* Complain if this thread holds any locks. */
1671 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1672 tl_assert(nHeld >= 0);
1673 if (nHeld > 0) {
1674 HChar buf[80];
1675 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1676 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001677 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001678 }
1679
sewardj23f12002009-07-24 08:45:08 +00001680 /* Not much to do here:
1681 - tell libhb the thread is gone
1682 - clear the map_threads entry, in order that the Valgrind core
1683 can re-use it. */
sewardj61bc2c52011-02-09 10:34:00 +00001684 /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
1685 in sync. */
sewardj23f12002009-07-24 08:45:08 +00001686 tl_assert(thr_q->hbthr);
1687 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001688 tl_assert(thr_q->coretid == quit_tid);
1689 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001690 map_threads_delete( quit_tid );
1691
sewardjf98e1c02008-10-25 16:22:41 +00001692 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001693 all__sanity_check("evh__pre_thread_ll_exit-post");
1694}
1695
sewardj61bc2c52011-02-09 10:34:00 +00001696/* This is called immediately after fork, for the child only. 'tid'
1697 is the only surviving thread (as per POSIX rules on fork() in
1698 threaded programs), so we have to clean up map_threads to remove
1699 entries for any other threads. */
1700static
1701void evh__atfork_child ( ThreadId tid )
1702{
1703 UInt i;
1704 Thread* thr;
1705 /* Slot 0 should never be used. */
1706 thr = map_threads_maybe_lookup( 0/*INVALID*/ );
1707 tl_assert(!thr);
1708 /* Clean up all other slots except 'tid'. */
1709 for (i = 1; i < VG_N_THREADS; i++) {
1710 if (i == tid)
1711 continue;
1712 thr = map_threads_maybe_lookup(i);
1713 if (!thr)
1714 continue;
1715 /* Cleanup actions (next 5 lines) copied from end of
1716 evh__pre_thread_ll_exit; keep in sync. */
1717 tl_assert(thr->hbthr);
1718 libhb_async_exit(thr->hbthr);
1719 tl_assert(thr->coretid == i);
1720 thr->coretid = VG_INVALID_THREADID;
1721 map_threads_delete(i);
1722 }
1723}
1724
sewardjf98e1c02008-10-25 16:22:41 +00001725
sewardjb4112022007-11-09 22:49:28 +00001726static
1727void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1728{
sewardjb4112022007-11-09 22:49:28 +00001729 Thread* thr_s;
1730 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001731 Thr* hbthr_s;
1732 Thr* hbthr_q;
1733 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001734
1735 if (SHOW_EVENTS >= 1)
1736 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1737 (Int)stay_tid, quit_thr );
1738
sewardjf98e1c02008-10-25 16:22:41 +00001739 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001740
1741 thr_s = map_threads_maybe_lookup( stay_tid );
1742 thr_q = quit_thr;
1743 tl_assert(thr_s != NULL);
1744 tl_assert(thr_q != NULL);
1745 tl_assert(thr_s != thr_q);
1746
sewardjf98e1c02008-10-25 16:22:41 +00001747 hbthr_s = thr_s->hbthr;
1748 hbthr_q = thr_q->hbthr;
1749 tl_assert(hbthr_s != hbthr_q);
1750 tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
1751 tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001752
sewardjf98e1c02008-10-25 16:22:41 +00001753 /* Allocate a temporary synchronisation object and use it to send
1754 an imaginary message from the quitter to the stayer, the purpose
1755 being to generate a dependence from the quitter to the
1756 stayer. */
1757 so = libhb_so_alloc();
1758 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001759    /* Do a strong send: the join must transfer the quitter's entire
 1760       vector clock to the stayer, so that everything the quitter did
 1761       happens-before everything the stayer does after the join. */
sewardjf98e1c02008-10-25 16:22:41 +00001762 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1763 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1764 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001765
sewardjf98e1c02008-10-25 16:22:41 +00001766 /* evh__pre_thread_ll_exit issues an error message if the exiting
1767 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001768
1769 /* This holds because, at least when using NPTL as the thread
1770 library, we should be notified the low level thread exit before
1771 we hear of any join event on it. The low level exit
1772 notification feeds through into evh__pre_thread_ll_exit,
1773 which should clear the map_threads entry for it. Hence we
1774 expect there to be no map_threads entry at this point. */
1775 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1776 == VG_INVALID_THREADID);
1777
sewardjf98e1c02008-10-25 16:22:41 +00001778 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001779 all__sanity_check("evh__post_thread_join-post");
1780}
1781
1782static
1783void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1784 Addr a, SizeT size) {
1785 if (SHOW_EVENTS >= 2
1786 || (SHOW_EVENTS >= 1 && size != 1))
1787 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1788 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001789 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001790 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001791 all__sanity_check("evh__pre_mem_read-post");
1792}
1793
1794static
1795void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1796 Char* s, Addr a ) {
1797 Int len;
1798 if (SHOW_EVENTS >= 1)
1799 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1800 (Int)tid, s, (void*)a );
sewardj234e5582011-02-09 12:47:23 +00001801 // Don't segfault if the string starts in an obviously stupid
1802 // place. Actually we should check the whole string, not just
1803 // the start address, but that's too much trouble. At least
1804 // checking the first byte is better than nothing. See #255009.
1805 if (!VG_(am_is_valid_for_client) (a, 1, VKI_PROT_READ))
1806 return;
sewardjb4112022007-11-09 22:49:28 +00001807 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001808 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001809 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001810 all__sanity_check("evh__pre_mem_read_asciiz-post");
1811}
1812
1813static
1814void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1815 Addr a, SizeT size ) {
1816 if (SHOW_EVENTS >= 1)
1817 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1818 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001819 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001820 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001821 all__sanity_check("evh__pre_mem_write-post");
1822}
1823
1824static
1825void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1826 if (SHOW_EVENTS >= 1)
1827 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1828 (void*)a, len, (Int)is_inited );
 1829    // FIXME: this makes no distinction between initialised and
 1830    // uninitialised heap memory; is_inited is currently ignored.
 1831    shadow_mem_make_New(get_current_Thread(), a, len);
sewardjf98e1c02008-10-25 16:22:41 +00001835 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001836       all__sanity_check("evh__new_mem_heap-post");
1837}
1838
1839static
1840void evh__die_mem_heap ( Addr a, SizeT len ) {
1841 if (SHOW_EVENTS >= 1)
1842 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1843 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001844 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001845       all__sanity_check("evh__die_mem_heap-post");
1846}
1847
sewardj23f12002009-07-24 08:45:08 +00001848/* --- Event handlers called from generated code --- */
1849
sewardjb4112022007-11-09 22:49:28 +00001850static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001851void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001852 Thread* thr = get_current_Thread_in_C_C();
1853 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001854 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001855}
sewardjf98e1c02008-10-25 16:22:41 +00001856
sewardjb4112022007-11-09 22:49:28 +00001857static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001858void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001859 Thread* thr = get_current_Thread_in_C_C();
1860 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001861 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001862}
sewardjf98e1c02008-10-25 16:22:41 +00001863
sewardjb4112022007-11-09 22:49:28 +00001864static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001865void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001866 Thread* thr = get_current_Thread_in_C_C();
1867 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001868 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001869}
sewardjf98e1c02008-10-25 16:22:41 +00001870
sewardjb4112022007-11-09 22:49:28 +00001871static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001872void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001873 Thread* thr = get_current_Thread_in_C_C();
1874 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001875 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001876}
sewardjf98e1c02008-10-25 16:22:41 +00001877
sewardjb4112022007-11-09 22:49:28 +00001878static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001879void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001880 Thread* thr = get_current_Thread_in_C_C();
1881 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001882 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001883}
1884
1885static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001886void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001887 Thread* thr = get_current_Thread_in_C_C();
1888 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001889 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001890}
sewardjf98e1c02008-10-25 16:22:41 +00001891
sewardjb4112022007-11-09 22:49:28 +00001892static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001893void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001894 Thread* thr = get_current_Thread_in_C_C();
1895 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001896 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001897}
sewardjf98e1c02008-10-25 16:22:41 +00001898
sewardjb4112022007-11-09 22:49:28 +00001899static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001900void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001901 Thread* thr = get_current_Thread_in_C_C();
1902 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001903 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001904}
sewardjf98e1c02008-10-25 16:22:41 +00001905
sewardjb4112022007-11-09 22:49:28 +00001906static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001907void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001908 Thread* thr = get_current_Thread_in_C_C();
1909 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001910 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001911}
sewardjf98e1c02008-10-25 16:22:41 +00001912
sewardjb4112022007-11-09 22:49:28 +00001913static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001914void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001915 Thread* thr = get_current_Thread_in_C_C();
1916 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001917 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001918}
1919
sewardjb4112022007-11-09 22:49:28 +00001920
sewardj9f569b72008-11-13 13:33:09 +00001921/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001922/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001923/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001924
1925/* EXPOSITION only: by intercepting lock init events we can show the
1926 user where the lock was initialised, rather than only being able to
1927 show where it was first locked. Intercepting lock initialisations
1928 is not necessary for the basic operation of the race checker. */
1929static
1930void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1931 void* mutex, Word mbRec )
1932{
1933 if (SHOW_EVENTS >= 1)
1934 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1935 (Int)tid, mbRec, (void*)mutex );
1936 tl_assert(mbRec == 0 || mbRec == 1);
1937 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1938 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001939 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001940 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1941}
1942
1943static
1944void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1945{
1946 Thread* thr;
1947 Lock* lk;
1948 if (SHOW_EVENTS >= 1)
1949 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1950 (Int)tid, (void*)mutex );
1951
1952 thr = map_threads_maybe_lookup( tid );
1953 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001954 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001955
1956 lk = map_locks_maybe_lookup( (Addr)mutex );
1957
1958 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001959 HG_(record_error_Misc)(
1960 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001961 }
1962
1963 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001964 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001965 tl_assert( lk->guestaddr == (Addr)mutex );
1966 if (lk->heldBy) {
1967 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001968 HG_(record_error_Misc)(
1969 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001970 /* remove lock from locksets of all owning threads */
1971 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001972 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001973 lk->heldBy = NULL;
1974 lk->heldW = False;
1975 lk->acquired_at = NULL;
1976 }
1977 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001978 tl_assert( HG_(is_sane_LockN)(lk) );
1979
sewardj1cbc12f2008-11-10 16:16:46 +00001980 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001981 map_locks_delete( lk->guestaddr );
1982 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001983 }
1984
sewardjf98e1c02008-10-25 16:22:41 +00001985 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001986 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1987}
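
/* Illustrative trigger (client code, not Helgrind's) for the
   "destroy of a locked mutex" complaint above:

      pthread_mutex_lock(&mx);
      pthread_mutex_destroy(&mx);   // undefined behaviour per POSIX

   Treating the destroy as an implicit unlock keeps the locksets and
   the lock-order graph consistent afterwards. */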
1988
1989static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1990 void* mutex, Word isTryLock )
1991{
1992 /* Just check the mutex is sane; nothing else to do. */
1993 // 'mutex' may be invalid - not checked by wrapper
1994 Thread* thr;
1995 Lock* lk;
1996 if (SHOW_EVENTS >= 1)
1997 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1998 (Int)tid, (void*)mutex );
1999
2000 tl_assert(isTryLock == 0 || isTryLock == 1);
2001 thr = map_threads_maybe_lookup( tid );
2002 tl_assert(thr); /* cannot fail - Thread* must already exist */
2003
2004 lk = map_locks_maybe_lookup( (Addr)mutex );
2005
2006 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00002007 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
2008 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002009 }
2010
2011 if ( lk
2012 && isTryLock == 0
2013 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
2014 && lk->heldBy
2015 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00002016 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00002017 /* uh, it's a non-recursive lock and we already w-hold it, and
2018 this is a real lock operation (not a speculative "tryLock"
2019 kind of thing). Duh. Deadlock coming up; but at least
2020 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00002021 HChar* errstr = "Attempt to re-lock a "
2022 "non-recursive lock I already hold";
2023 HChar* auxstr = "Lock was previously acquired";
2024 if (lk->acquired_at) {
2025 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
2026 } else {
2027 HG_(record_error_Misc)( thr, errstr );
2028 }
sewardjb4112022007-11-09 22:49:28 +00002029 }
2030}
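
/* Illustrative client fragment (not Helgrind code) that draws the
   re-lock complaint above, and then really does deadlock:

      static pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // non-recursive, already w-held

   A pthread_mutex_trylock in the second position would merely fail
   with EBUSY, which is why isTryLock suppresses the complaint. */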
2031
2032static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
2033{
2034 // only called if the real library call succeeded - so mutex is sane
2035 Thread* thr;
2036 if (SHOW_EVENTS >= 1)
2037 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2038 (Int)tid, (void*)mutex );
2039
2040 thr = map_threads_maybe_lookup( tid );
2041 tl_assert(thr); /* cannot fail - Thread* must already exist */
2042
2043 evhH__post_thread_w_acquires_lock(
2044 thr,
2045 LK_mbRec, /* if not known, create new lock with this LockKind */
2046 (Addr)mutex
2047 );
2048}
2049
2050static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2051{
2052 // 'mutex' may be invalid - not checked by wrapper
2053 Thread* thr;
2054 if (SHOW_EVENTS >= 1)
2055 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2056 (Int)tid, (void*)mutex );
2057
2058 thr = map_threads_maybe_lookup( tid );
2059 tl_assert(thr); /* cannot fail - Thread* must already exist */
2060
2061 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2062}
2063
2064static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2065{
2066 // only called if the real library call succeeded - so mutex is sane
2067 Thread* thr;
2068 if (SHOW_EVENTS >= 1)
2069 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2070 (Int)tid, (void*)mutex );
2071 thr = map_threads_maybe_lookup( tid );
2072 tl_assert(thr); /* cannot fail - Thread* must already exist */
2073
2074 // anything we should do here?
2075}
2076
2077
sewardj5a644da2009-08-11 10:35:58 +00002078/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002079/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002080/* ------------------------------------------------------- */
2081
2082/* All a bit of a kludge. Pretend we're really dealing with ordinary
2083 pthread_mutex_t's instead, for the most part. */
2084
2085static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2086 void* slock )
2087{
2088 Thread* thr;
2089 Lock* lk;
2090 /* In glibc's kludgey world, we're either initialising or unlocking
2091 it. Since this is the pre-routine, if it is locked, unlock it
2092 and take a dependence edge. Otherwise, do nothing. */
2093
2094 if (SHOW_EVENTS >= 1)
2095 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2096 "(ctid=%d, slock=%p)\n",
2097 (Int)tid, (void*)slock );
2098
2099 thr = map_threads_maybe_lookup( tid );
 2100    /* cannot fail - Thread* must already exist */
2101 tl_assert( HG_(is_sane_Thread)(thr) );
2102
2103 lk = map_locks_maybe_lookup( (Addr)slock );
2104 if (lk && lk->heldBy) {
2105 /* it's held. So do the normal pre-unlock actions, as copied
2106 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2107 duplicates the map_locks_maybe_lookup. */
2108 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2109 False/*!isRDWR*/ );
2110 }
2111}
2112
2113static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2114 void* slock )
2115{
2116 Lock* lk;
2117 /* More kludgery. If the lock has never been seen before, do
2118 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2119 nothing. */
2120
2121 if (SHOW_EVENTS >= 1)
2122 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2123 "(ctid=%d, slock=%p)\n",
2124 (Int)tid, (void*)slock );
2125
2126 lk = map_locks_maybe_lookup( (Addr)slock );
2127 if (!lk) {
2128 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2129 }
2130}
2131
2132static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2133 void* slock, Word isTryLock )
2134{
2135 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2136}
2137
2138static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2139 void* slock )
2140{
2141 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2142}
2143
2144static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2145 void* slock )
2146{
2147 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2148}
2149
2150
sewardj9f569b72008-11-13 13:33:09 +00002151/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002152/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002153/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002154
sewardj02114542009-07-28 20:52:36 +00002155/* A mapping from CV to (the SO associated with it, plus some
2156 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002157 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2158 wait on it completes, we do a 'recv' from the SO. This is believed
2159 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002160 signallings/broadcasts.
2161*/
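
/* For instance (client code, illustrative only; the usual predicate
   loop around the wait is omitted for brevity):

      T1:  pthread_mutex_lock(&mx);
           data = 42;                      // (A)
           pthread_cond_signal(&cv);       // 'send' into the CV's SO
           pthread_mutex_unlock(&mx);

      T2:  pthread_mutex_lock(&mx);
           pthread_cond_wait(&cv, &mx);    // on success, 'recv' from SO
           use(data);                      // ordered after (A)
           pthread_mutex_unlock(&mx);

   The recv gives T2 a dependency on everything T1 did before the
   signal, so the write (A) happens-before the read of 'data' in T2. */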
2162
sewardj02114542009-07-28 20:52:36 +00002163/* .so is the SO for this CV.
2164 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002165
sewardj02114542009-07-28 20:52:36 +00002166 POSIX says effectively that the first pthread_cond_{timed}wait call
2167 causes a dynamic binding between the CV and the mutex, and that
2168 lasts until such time as the waiter count falls to zero. Hence
2169 need to keep track of the number of waiters in order to do
2170 consistency tracking. */
2171typedef
2172 struct {
2173 SO* so; /* libhb-allocated SO */
2174 void* mx_ga; /* addr of associated mutex, if any */
2175 UWord nWaiters; /* # threads waiting on the CV */
2176 }
2177 CVInfo;
2178
2179
2180/* pthread_cond_t* -> CVInfo* */
2181static WordFM* map_cond_to_CVInfo = NULL;
2182
2183static void map_cond_to_CVInfo_INIT ( void ) {
2184 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2185 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2186 "hg.mctCI.1", HG_(free), NULL );
2187 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002188 }
2189}
2190
sewardj02114542009-07-28 20:52:36 +00002191static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002192 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002193 map_cond_to_CVInfo_INIT();
2194 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002195 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002196 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002197 } else {
sewardj02114542009-07-28 20:52:36 +00002198 SO* so = libhb_so_alloc();
2199 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2200 cvi->so = so;
2201 cvi->mx_ga = 0;
2202 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2203 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002204 }
2205}
2206
sewardj02114542009-07-28 20:52:36 +00002207static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002208 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002209 map_cond_to_CVInfo_INIT();
2210 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2211 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002212 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002213 tl_assert(cvi);
2214 tl_assert(cvi->so);
2215 libhb_so_dealloc(cvi->so);
2216 cvi->mx_ga = 0;
2217 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002218 }
2219}
2220
2221static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2222{
sewardjf98e1c02008-10-25 16:22:41 +00002223 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2224 cond to a SO if it is not already so bound, and 'send' on the
2225 SO. This is later used by other thread(s) which successfully
2226 exit from a pthread_cond_wait on the same cv; then they 'recv'
2227 from the SO, thereby acquiring a dependency on this signalling
2228 event. */
sewardjb4112022007-11-09 22:49:28 +00002229 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002230 CVInfo* cvi;
2231 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002232
2233 if (SHOW_EVENTS >= 1)
2234 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2235 (Int)tid, (void*)cond );
2236
sewardjb4112022007-11-09 22:49:28 +00002237 thr = map_threads_maybe_lookup( tid );
2238 tl_assert(thr); /* cannot fail - Thread* must already exist */
2239
sewardj02114542009-07-28 20:52:36 +00002240 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2241 tl_assert(cvi);
2242 tl_assert(cvi->so);
2243
sewardjb4112022007-11-09 22:49:28 +00002244 // error-if: mutex is bogus
2245 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002246 // Hmm. POSIX doesn't actually say that it's an error to call
2247 // pthread_cond_signal with the associated mutex being unlocked.
2248 // Although it does say that it should be "if consistent scheduling
2249 // is desired."
2250 //
2251 // For the moment, disable these checks.
2252 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2253 //if (lk == NULL || cvi->mx_ga == 0) {
2254 // HG_(record_error_Misc)( thr,
2255 // "pthread_cond_{signal,broadcast}: "
2256 // "no or invalid mutex associated with cond");
2257 //}
2258 ///* note: lk could be NULL. Be careful. */
2259 //if (lk) {
2260 // if (lk->kind == LK_rdwr) {
2261 // HG_(record_error_Misc)(thr,
2262 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2263 // }
2264 // if (lk->heldBy == NULL) {
2265 // HG_(record_error_Misc)(thr,
2266 // "pthread_cond_{signal,broadcast}: "
2267 // "associated lock is not held by any thread");
2268 // }
2269 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2270 // HG_(record_error_Misc)(thr,
2271 // "pthread_cond_{signal,broadcast}: "
2272 // "associated lock is not held by calling thread");
2273 // }
2274 //}
sewardjb4112022007-11-09 22:49:28 +00002275
sewardj02114542009-07-28 20:52:36 +00002276 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002277}
2278
2279/* returns True if it reckons 'mutex' is valid and held by this
2280 thread, else False */
2281static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2282 void* cond, void* mutex )
2283{
2284 Thread* thr;
2285 Lock* lk;
2286 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002287 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002288
2289 if (SHOW_EVENTS >= 1)
2290 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2291 "(ctid=%d, cond=%p, mutex=%p)\n",
2292 (Int)tid, (void*)cond, (void*)mutex );
2293
sewardjb4112022007-11-09 22:49:28 +00002294 thr = map_threads_maybe_lookup( tid );
2295 tl_assert(thr); /* cannot fail - Thread* must already exist */
2296
2297 lk = map_locks_maybe_lookup( (Addr)mutex );
2298
2299 /* Check for stupid mutex arguments. There are various ways to be
2300 a bozo. Only complain once, though, even if more than one thing
2301 is wrong. */
2302 if (lk == NULL) {
2303 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002304 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002305 thr,
2306 "pthread_cond_{timed}wait called with invalid mutex" );
2307 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002308 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002309 if (lk->kind == LK_rdwr) {
2310 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002311 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002312 thr, "pthread_cond_{timed}wait called with mutex "
2313 "of type pthread_rwlock_t*" );
2314 } else
2315 if (lk->heldBy == NULL) {
2316 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002317 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002318 thr, "pthread_cond_{timed}wait called with un-held mutex");
2319 } else
2320 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002321 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002322 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002323 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002324 thr, "pthread_cond_{timed}wait called with mutex "
2325 "held by a different thread" );
2326 }
2327 }
2328
2329 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002330 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2331 tl_assert(cvi);
2332 tl_assert(cvi->so);
2333 if (cvi->nWaiters == 0) {
2334 /* form initial (CV,MX) binding */
2335 cvi->mx_ga = mutex;
2336 }
2337 else /* check existing (CV,MX) binding */
2338 if (cvi->mx_ga != mutex) {
2339 HG_(record_error_Misc)(
2340 thr, "pthread_cond_{timed}wait: cond is associated "
2341 "with a different mutex");
2342 }
2343 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002344
2345 return lk_valid;
2346}
2347
2348static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2349 void* cond, void* mutex )
2350{
sewardjf98e1c02008-10-25 16:22:41 +00002351 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2352 the SO for this cond, and 'recv' from it so as to acquire a
2353 dependency edge back to the signaller/broadcaster. */
2354 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002355 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002356
2357 if (SHOW_EVENTS >= 1)
2358 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2359 "(ctid=%d, cond=%p, mutex=%p)\n",
2360 (Int)tid, (void*)cond, (void*)mutex );
2361
sewardjb4112022007-11-09 22:49:28 +00002362 thr = map_threads_maybe_lookup( tid );
2363 tl_assert(thr); /* cannot fail - Thread* must already exist */
2364
2365 // error-if: cond is also associated with a different mutex
2366
sewardj02114542009-07-28 20:52:36 +00002367 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2368 tl_assert(cvi);
2369 tl_assert(cvi->so);
2370 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002371
sewardj02114542009-07-28 20:52:36 +00002372 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002373 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2374 it? If this happened it would surely be a bug in the threads
2375 library. Or one of those fabled "spurious wakeups". */
2376 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
 2377                                    "succeeded"
 2378                                    " without prior pthread_cond_signal");
sewardjb4112022007-11-09 22:49:28 +00002379 }
sewardjf98e1c02008-10-25 16:22:41 +00002380
2381 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002382 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2383
2384 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002385}
2386
2387static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2388 void* cond )
2389{
2390 /* Deal with destroy events. The only purpose is to free storage
2391 associated with the CV, so as to avoid any possible resource
2392 leaks. */
2393 if (SHOW_EVENTS >= 1)
2394 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2395 "(ctid=%d, cond=%p)\n",
2396 (Int)tid, (void*)cond );
2397
sewardj02114542009-07-28 20:52:36 +00002398 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002399}
2400
2401
sewardj9f569b72008-11-13 13:33:09 +00002402/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002403/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002404/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002405
2406/* EXPOSITION only */
2407static
2408void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2409{
2410 if (SHOW_EVENTS >= 1)
2411 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2412 (Int)tid, (void*)rwl );
2413 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002414 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002415 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2416}
2417
2418static
2419void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2420{
2421 Thread* thr;
2422 Lock* lk;
2423 if (SHOW_EVENTS >= 1)
2424 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2425 (Int)tid, (void*)rwl );
2426
2427 thr = map_threads_maybe_lookup( tid );
2428 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002429 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002430
2431 lk = map_locks_maybe_lookup( (Addr)rwl );
2432
2433 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002434 HG_(record_error_Misc)(
2435 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002436 }
2437
2438 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002439 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002440 tl_assert( lk->guestaddr == (Addr)rwl );
2441 if (lk->heldBy) {
2442 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002443 HG_(record_error_Misc)(
 2444             thr, "pthread_rwlock_destroy of a locked rwlock" );
sewardjb4112022007-11-09 22:49:28 +00002445 /* remove lock from locksets of all owning threads */
2446 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002447 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002448 lk->heldBy = NULL;
2449 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002450 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002451 }
2452 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002453 tl_assert( HG_(is_sane_LockN)(lk) );
2454
sewardj1cbc12f2008-11-10 16:16:46 +00002455 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002456 map_locks_delete( lk->guestaddr );
2457 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002458 }
2459
sewardjf98e1c02008-10-25 16:22:41 +00002460 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002461 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2462}
2463
2464static
sewardj789c3c52008-02-25 12:10:07 +00002465void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2466 void* rwl,
2467 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002468{
2469 /* Just check the rwl is sane; nothing else to do. */
2470 // 'rwl' may be invalid - not checked by wrapper
2471 Thread* thr;
2472 Lock* lk;
2473 if (SHOW_EVENTS >= 1)
2474 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2475 (Int)tid, (Int)isW, (void*)rwl );
2476
2477 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002478 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002479 thr = map_threads_maybe_lookup( tid );
2480 tl_assert(thr); /* cannot fail - Thread* must already exist */
2481
2482 lk = map_locks_maybe_lookup( (Addr)rwl );
2483 if ( lk
2484 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2485 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002486 HG_(record_error_Misc)(
2487 thr, "pthread_rwlock_{rd,rw}lock with a "
2488 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002489 }
2490}
2491
2492static
2493void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2494{
2495 // only called if the real library call succeeded - so mutex is sane
2496 Thread* thr;
2497 if (SHOW_EVENTS >= 1)
2498 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2499 (Int)tid, (Int)isW, (void*)rwl );
2500
2501 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2502 thr = map_threads_maybe_lookup( tid );
2503 tl_assert(thr); /* cannot fail - Thread* must already exist */
2504
2505 (isW ? evhH__post_thread_w_acquires_lock
2506 : evhH__post_thread_r_acquires_lock)(
2507 thr,
2508 LK_rdwr, /* if not known, create new lock with this LockKind */
2509 (Addr)rwl
2510 );
2511}
2512
2513static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2514{
2515 // 'rwl' may be invalid - not checked by wrapper
2516 Thread* thr;
2517 if (SHOW_EVENTS >= 1)
2518 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2519 (Int)tid, (void*)rwl );
2520
2521 thr = map_threads_maybe_lookup( tid );
2522 tl_assert(thr); /* cannot fail - Thread* must already exist */
2523
2524 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2525}
2526
2527static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2528{
2529 // only called if the real library call succeeded - so mutex is sane
2530 Thread* thr;
2531 if (SHOW_EVENTS >= 1)
2532 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2533 (Int)tid, (void*)rwl );
2534 thr = map_threads_maybe_lookup( tid );
2535 tl_assert(thr); /* cannot fail - Thread* must already exist */
2536
2537 // anything we should do here?
2538}
2539
2540
sewardj9f569b72008-11-13 13:33:09 +00002541/* ---------------------------------------------------------- */
2542/* -------------- events to do with semaphores -------------- */
2543/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002544
sewardj11e352f2007-11-30 11:11:02 +00002545/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002546 variables. */
2547
sewardjf98e1c02008-10-25 16:22:41 +00002548/* For each semaphore, we maintain a stack of SOs. When a 'post'
2549 operation is done on a semaphore (unlocking, essentially), a new SO
2550 is created for the posting thread, the posting thread does a strong
2551 send to it (which merely installs the posting thread's VC in the
2552 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002553
2554 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002555 semaphore, we pop a SO off the semaphore's stack (which should be
2556 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002557 dependencies between posters and waiters of the semaphore.
2558
sewardjf98e1c02008-10-25 16:22:41 +00002559 It may not be necessary to use a stack - perhaps a bag of SOs would
2560 do. But we do need to keep track of how many unused-up posts have
2561 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002562
sewardjf98e1c02008-10-25 16:22:41 +00002563 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002564 twice on S. T3 cannot complete its waits without both T1 and T2
2565 posting. The above mechanism will ensure that T3 acquires
2566 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002567
sewardjf98e1c02008-10-25 16:22:41 +00002568 When a semaphore is initialised with value N, we do as if we'd
2569 posted N times on the semaphore: basically create N SOs and do a
 2570    strong send to all of them. This allows up to N waits on the
2571 semaphore to acquire a dependency on the initialisation point,
2572 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002573
 2574    We don't emit an error for DESTROY_PRE on a semaphore we don't know
2575 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002576*/
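
/* Concrete instance of the T1/T2/T3 scenario above (client code,
   illustrative only):

      sem_t s;  sem_init(&s, 0, 0);
      T1:  ...work A...  sem_post(&s);   // push an SO, strong send
      T2:  ...work B...  sem_post(&s);   // push an SO, strong send
      T3:  sem_wait(&s);                 // pop an SO, strong recv
           sem_wait(&s);                 // pop the other, strong recv

   After both waits T3 has done a strong recv from both SOs, and so
   carries dependencies on both A and B. */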
2577
sewardjf98e1c02008-10-25 16:22:41 +00002578/* sem_t* -> XArray* SO* */
2579static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002580
sewardjf98e1c02008-10-25 16:22:41 +00002581static void map_sem_to_SO_stack_INIT ( void ) {
2582 if (map_sem_to_SO_stack == NULL) {
2583 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2584 HG_(free), NULL );
2585 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002586 }
2587}
2588
sewardjf98e1c02008-10-25 16:22:41 +00002589static void push_SO_for_sem ( void* sem, SO* so ) {
2590 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002591 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002592 tl_assert(so);
2593 map_sem_to_SO_stack_INIT();
2594 if (VG_(lookupFM)( map_sem_to_SO_stack,
2595 &keyW, (UWord*)&xa, (UWord)sem )) {
2596 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002597 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002598 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002599 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002600 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2601 VG_(addToXA)( xa, &so );
2602 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002603 }
2604}
2605
sewardjf98e1c02008-10-25 16:22:41 +00002606static SO* mb_pop_SO_for_sem ( void* sem ) {
2607 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002608 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002609 SO* so;
2610 map_sem_to_SO_stack_INIT();
2611 if (VG_(lookupFM)( map_sem_to_SO_stack,
2612 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002613 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002614 Word sz;
2615 tl_assert(keyW == (UWord)sem);
2616 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002617 tl_assert(sz >= 0);
2618 if (sz == 0)
2619 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002620 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2621 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002622 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002623 return so;
sewardjb4112022007-11-09 22:49:28 +00002624 } else {
2625 /* hmm, that's odd. No stack for this semaphore. */
2626 return NULL;
2627 }
2628}
2629
sewardj11e352f2007-11-30 11:11:02 +00002630static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002631{
sewardjf98e1c02008-10-25 16:22:41 +00002632 UWord keyW, valW;
2633 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002634
sewardjb4112022007-11-09 22:49:28 +00002635 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002636 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002637 (Int)tid, (void*)sem );
2638
sewardjf98e1c02008-10-25 16:22:41 +00002639 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002640
sewardjf98e1c02008-10-25 16:22:41 +00002641 /* Empty out the semaphore's SO stack. This way of doing it is
2642 stupid, but at least it's easy. */
2643 while (1) {
2644 so = mb_pop_SO_for_sem( sem );
2645 if (!so) break;
2646 libhb_so_dealloc(so);
2647 }
2648
2649 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2650 XArray* xa = (XArray*)valW;
2651 tl_assert(keyW == (UWord)sem);
2652 tl_assert(xa);
2653 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2654 VG_(deleteXA)(xa);
2655 }
sewardjb4112022007-11-09 22:49:28 +00002656}
2657
sewardj11e352f2007-11-30 11:11:02 +00002658static
2659void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2660{
sewardjf98e1c02008-10-25 16:22:41 +00002661 SO* so;
2662 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002663
2664 if (SHOW_EVENTS >= 1)
2665 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2666 (Int)tid, (void*)sem, value );
2667
sewardjf98e1c02008-10-25 16:22:41 +00002668 thr = map_threads_maybe_lookup( tid );
2669 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002670
sewardjf98e1c02008-10-25 16:22:41 +00002671 /* Empty out the semaphore's SO stack. This way of doing it is
2672 stupid, but at least it's easy. */
2673 while (1) {
2674 so = mb_pop_SO_for_sem( sem );
2675 if (!so) break;
2676 libhb_so_dealloc(so);
2677 }
sewardj11e352f2007-11-30 11:11:02 +00002678
sewardjf98e1c02008-10-25 16:22:41 +00002679 /* If we don't do this check, the following loop runs us out
2680 of memory for stupid initial values of 'value'. */
2681 if (value > 10000) {
2682 HG_(record_error_Misc)(
2683 thr, "sem_init: initial value exceeds 10000; using 10000" );
2684 value = 10000;
2685 }
sewardj11e352f2007-11-30 11:11:02 +00002686
sewardjf98e1c02008-10-25 16:22:41 +00002687 /* Now create 'valid' new SOs for the thread, do a strong send to
2688 each of them, and push them all on the stack. */
2689 for (; value > 0; value--) {
2690 Thr* hbthr = thr->hbthr;
2691 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002692
sewardjf98e1c02008-10-25 16:22:41 +00002693 so = libhb_so_alloc();
2694 libhb_so_send( hbthr, so, True/*strong send*/ );
2695 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002696 }
2697}
2698
2699static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002700{
sewardjf98e1c02008-10-25 16:22:41 +00002701 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2702 it (iow, write our VC into it, then tick ours), and push the SO
2703 on a stack of SOs associated with 'sem'. This is later used
2704 by other thread(s) which successfully exit from a sem_wait on
2705 the same sem; by doing a strong recv from SOs popped off the
2706 stack, they acquire dependencies on the posting thread
2707 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002708
sewardjf98e1c02008-10-25 16:22:41 +00002709 Thread* thr;
2710 SO* so;
2711 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002712
2713 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002714 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002715 (Int)tid, (void*)sem );
2716
2717 thr = map_threads_maybe_lookup( tid );
2718 tl_assert(thr); /* cannot fail - Thread* must already exist */
2719
2720 // error-if: sem is bogus
2721
sewardjf98e1c02008-10-25 16:22:41 +00002722 hbthr = thr->hbthr;
2723 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002724
sewardjf98e1c02008-10-25 16:22:41 +00002725 so = libhb_so_alloc();
2726 libhb_so_send( hbthr, so, True/*strong send*/ );
2727 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002728}
2729
sewardj11e352f2007-11-30 11:11:02 +00002730static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002731{
sewardjf98e1c02008-10-25 16:22:41 +00002732 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2733 the 'sem' from this semaphore's SO-stack, and do a strong recv
2734 from it. This creates a dependency back to one of the post-ers
2735 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002736
sewardjf98e1c02008-10-25 16:22:41 +00002737 Thread* thr;
2738 SO* so;
2739 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002740
2741 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002742 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002743 (Int)tid, (void*)sem );
2744
2745 thr = map_threads_maybe_lookup( tid );
2746 tl_assert(thr); /* cannot fail - Thread* must already exist */
2747
2748 // error-if: sem is bogus
2749
sewardjf98e1c02008-10-25 16:22:41 +00002750 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002751
sewardjf98e1c02008-10-25 16:22:41 +00002752 if (so) {
2753 hbthr = thr->hbthr;
2754 tl_assert(hbthr);
2755
2756 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2757 libhb_so_dealloc(so);
2758 } else {
2759 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2760 If this happened it would surely be a bug in the threads
2761 library. */
2762 HG_(record_error_Misc)(
2763 thr, "Bug in libpthread: sem_wait succeeded on"
2764 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002765 }
2766}
2767
2768
sewardj9f569b72008-11-13 13:33:09 +00002769/* -------------------------------------------------------- */
2770/* -------------- events to do with barriers -------------- */
2771/* -------------------------------------------------------- */
2772
2773typedef
2774 struct {
2775 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002776 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002777 UWord size; /* declared size */
2778 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2779 }
2780 Bar;
2781
2782static Bar* new_Bar ( void ) {
2783 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2784 tl_assert(bar);
2785 /* all fields are zero */
2786 tl_assert(bar->initted == False);
2787 return bar;
2788}
2789
2790static void delete_Bar ( Bar* bar ) {
2791 tl_assert(bar);
2792 if (bar->waiting)
2793 VG_(deleteXA)(bar->waiting);
2794 HG_(free)(bar);
2795}
2796
2797/* A mapping which stores auxiliary data for barriers. */
2798
2799/* pthread_barrier_t* -> Bar* */
2800static WordFM* map_barrier_to_Bar = NULL;
2801
2802static void map_barrier_to_Bar_INIT ( void ) {
2803 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2804 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2805 "hg.mbtBI.1", HG_(free), NULL );
2806 tl_assert(map_barrier_to_Bar != NULL);
2807 }
2808}
2809
2810static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2811 UWord key, val;
2812 map_barrier_to_Bar_INIT();
2813 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2814 tl_assert(key == (UWord)barrier);
2815 return (Bar*)val;
2816 } else {
2817 Bar* bar = new_Bar();
2818 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2819 return bar;
2820 }
2821}
2822
2823static void map_barrier_to_Bar_delete ( void* barrier ) {
2824 UWord keyW, valW;
2825 map_barrier_to_Bar_INIT();
2826 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2827 Bar* bar = (Bar*)valW;
2828 tl_assert(keyW == (UWord)barrier);
2829 delete_Bar(bar);
2830 }
2831}
2832
2833
2834static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2835 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002836 UWord count,
2837 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002838{
2839 Thread* thr;
2840 Bar* bar;
2841
2842 if (SHOW_EVENTS >= 1)
2843 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002844 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2845 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002846
2847 thr = map_threads_maybe_lookup( tid );
2848 tl_assert(thr); /* cannot fail - Thread* must already exist */
2849
2850 if (count == 0) {
2851 HG_(record_error_Misc)(
2852 thr, "pthread_barrier_init: 'count' argument is zero"
2853 );
2854 }
2855
sewardj406bac82010-03-03 23:03:40 +00002856 if (resizable != 0 && resizable != 1) {
2857 HG_(record_error_Misc)(
2858 thr, "pthread_barrier_init: invalid 'resizable' argument"
2859 );
2860 }
2861
sewardj9f569b72008-11-13 13:33:09 +00002862 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2863 tl_assert(bar);
2864
2865 if (bar->initted) {
2866 HG_(record_error_Misc)(
2867 thr, "pthread_barrier_init: barrier is already initialised"
2868 );
2869 }
2870
2871 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2872 tl_assert(bar->initted);
2873 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002874 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002875 );
2876 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2877 }
2878 if (!bar->waiting) {
2879 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2880 sizeof(Thread*) );
2881 }
2882
2883 tl_assert(bar->waiting);
2884 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002885 bar->initted = True;
2886 bar->resizable = (resizable == 1);
2887 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002888}
2889
2890
2891static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2892 void* barrier )
2893{
sewardj553655c2008-11-14 19:41:19 +00002894 Thread* thr;
2895 Bar* bar;
2896
sewardj9f569b72008-11-13 13:33:09 +00002897 /* Deal with destroy events. The only purpose is to free storage
2898 associated with the barrier, so as to avoid any possible
2899 resource leaks. */
2900 if (SHOW_EVENTS >= 1)
2901 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2902 "(tid=%d, barrier=%p)\n",
2903 (Int)tid, (void*)barrier );
2904
sewardj553655c2008-11-14 19:41:19 +00002905 thr = map_threads_maybe_lookup( tid );
2906 tl_assert(thr); /* cannot fail - Thread* must already exist */
2907
2908 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2909 tl_assert(bar);
2910
2911 if (!bar->initted) {
2912 HG_(record_error_Misc)(
2913 thr, "pthread_barrier_destroy: barrier was never initialised"
2914 );
2915 }
2916
2917 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2918 HG_(record_error_Misc)(
2919 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2920 );
2921 }
2922
sewardj9f569b72008-11-13 13:33:09 +00002923 /* Maybe we shouldn't do this; just let it persist, so that when it
2924 is reinitialised we don't need to do any dynamic memory
2925 allocation? The downside is a potentially unlimited space leak,
2926 if the client creates (in turn) a large number of barriers all
2927 at different locations. Note that if we do later move to the
2928 don't-delete-it scheme, we need to mark the barrier as
2929 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002930 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002931 map_barrier_to_Bar_delete( barrier );
2932}
2933
2934
sewardj406bac82010-03-03 23:03:40 +00002935/* All the threads have arrived. Now do the Interesting Bit. Get a
2936 new synchronisation object and do a weak send to it from all the
2937 participating threads. This makes its vector clocks be the join of
2938 all the individual threads' vector clocks. Then do a strong
2939 receive from it back to all threads, so that their VCs are a copy
2940 of it (hence are all equal to the join of their original VCs.) */
2941static void do_barrier_cross_sync_and_empty ( Bar* bar )
2942{
2943 /* XXX check bar->waiting has no duplicates */
2944 UWord i;
2945 SO* so = libhb_so_alloc();
2946
2947 tl_assert(bar->waiting);
2948 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2949
2950 /* compute the join ... */
2951 for (i = 0; i < bar->size; i++) {
2952 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2953 Thr* hbthr = t->hbthr;
2954 libhb_so_send( hbthr, so, False/*weak send*/ );
2955 }
2956 /* ... and distribute to all threads */
2957 for (i = 0; i < bar->size; i++) {
2958 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2959 Thr* hbthr = t->hbthr;
2960 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2961 }
2962
2963 /* finally, we must empty out the waiting vector */
2964 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2965
2966 /* and we don't need this any more. Perhaps a stack-allocated
2967 SO would be better? */
2968 libhb_so_dealloc(so);
2969}
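/* Illustrative sketch, using a toy fixed-width vector clock rather
   than libhb's real (opaque) representation: why a weak send from
   every participant followed by a strong recv to every participant
   leaves all of them holding the join of the original clocks. */
#if 0
#define TOY_NTHR 3
typedef struct { unsigned long ts[TOY_NTHR]; } ToyVC;

/* weak send: SO's clock becomes the pointwise max (join) of itself
   and the sender's clock */
static void toy_weak_send ( ToyVC* so, const ToyVC* sender ) {
   int i;
   for (i = 0; i < TOY_NTHR; i++)
      if (sender->ts[i] > so->ts[i]) so->ts[i] = sender->ts[i];
}

/* strong recv, modelled here simply as: receiver's clock becomes a
   copy of the SO's clock (the real operation also advances the
   receiver's own component) */
static void toy_strong_recv ( ToyVC* recvr, const ToyVC* so ) {
   *recvr = *so;
}
/* After toy_weak_send(so, vc[i]) for all i, so->ts holds the join of
   all participants' clocks; after toy_strong_recv(vc[i], so) for all
   i, every participant holds that join -- exactly the property the
   barrier semantics require. */
#endif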
2970
2971
sewardj9f569b72008-11-13 13:33:09 +00002972static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2973 void* barrier )
2974{
sewardj1c466b72008-11-19 11:52:14 +00002975 /* This function gets called after a client thread calls
2976 pthread_barrier_wait but before it arrives at the real
2977 pthread_barrier_wait.
2978
2979 Why is the following correct? It's a bit subtle.
2980
2981 If this is not the last thread arriving at the barrier, we simply
2982 note its presence and return. Because valgrind (at least as of
2983 Nov 08) is single threaded, we are guaranteed safe from any race
2984 conditions when in this function -- no other client threads are
2985 running.
2986
2987 If this is the last thread, then we are again the only running
2988 thread. All the other threads will have either arrived at the
2989 real pthread_barrier_wait or are on their way to it, but in any
2990 case are guaranteed not to be able to move past it, because this
2991 thread is currently in this function and so has not yet arrived
2992 at the real pthread_barrier_wait. That means that:
2993
2994 1. While we are in this function, none of the other threads
2995 waiting at the barrier can move past it.
2996
2997 2. When this function returns (and simulated execution resumes),
2998 this thread and all other waiting threads will be able to move
2999 past the real barrier.
3000
3001 Because of this, it is now safe to update the vector clocks of
3002 all threads, to represent the fact that they all arrived at the
3003 barrier and have all moved on. There is no danger of any
3004 complications to do with some threads leaving the barrier and
3005 racing back round to the front, whilst others are still leaving
3006 (which is the primary source of complication in correct handling/
3007 implementation of barriers). That can't happen because we update
3008 here our data structures so as to indicate that the threads have
3009 passed the barrier, even though, as per (2) above, they are
3010 guaranteed not to pass the barrier until we return.
3011
3012 This relies crucially on Valgrind being single threaded. If that
3013 changes, this will need to be reconsidered.
3014 */
sewardj9f569b72008-11-13 13:33:09 +00003015 Thread* thr;
3016 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003017 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003018
3019 if (SHOW_EVENTS >= 1)
3020 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3021 "(tid=%d, barrier=%p)\n",
3022 (Int)tid, (void*)barrier );
3023
3024 thr = map_threads_maybe_lookup( tid );
3025 tl_assert(thr); /* cannot fail - Thread* must already exist */
3026
3027 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3028 tl_assert(bar);
3029
3030 if (!bar->initted) {
3031 HG_(record_error_Misc)(
3032 thr, "pthread_barrier_wait: barrier is uninitialised"
3033 );
3034 return; /* client is broken .. avoid assertions below */
3035 }
3036
3037 /* guaranteed by _INIT_PRE above */
3038 tl_assert(bar->size > 0);
3039 tl_assert(bar->waiting);
3040
3041 VG_(addToXA)( bar->waiting, &thr );
3042
3043 /* guaranteed by this function */
3044 present = VG_(sizeXA)(bar->waiting);
3045 tl_assert(present > 0 && present <= bar->size);
3046
3047 if (present < bar->size)
3048 return;
3049
sewardj406bac82010-03-03 23:03:40 +00003050 do_barrier_cross_sync_and_empty(bar);
3051}
sewardj9f569b72008-11-13 13:33:09 +00003052
sewardj9f569b72008-11-13 13:33:09 +00003053
sewardj406bac82010-03-03 23:03:40 +00003054static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3055 void* barrier,
3056 UWord newcount )
3057{
3058 Thread* thr;
3059 Bar* bar;
3060 UWord present;
3061
3062 if (SHOW_EVENTS >= 1)
3063 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3064 "(tid=%d, barrier=%p, newcount=%lu)\n",
3065 (Int)tid, (void*)barrier, newcount );
3066
3067 thr = map_threads_maybe_lookup( tid );
3068 tl_assert(thr); /* cannot fail - Thread* must already exist */
3069
3070 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3071 tl_assert(bar);
3072
3073 if (!bar->initted) {
3074 HG_(record_error_Misc)(
3075 thr, "pthread_barrier_resize: barrier is uninitialised"
3076 );
3077 return; /* client is broken .. avoid assertions below */
3078 }
3079
3080 if (!bar->resizable) {
3081 HG_(record_error_Misc)(
3082 thr, "pthread_barrier_resize: barrier may not be resized"
3083 );
3084 return; /* client is broken .. avoid assertions below */
3085 }
3086
3087 if (newcount == 0) {
3088 HG_(record_error_Misc)(
3089 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3090 );
3091 return; /* client is broken .. avoid assertions below */
3092 }
3093
3094 /* guaranteed by _INIT_PRE above */
3095 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003096 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003097 /* Guaranteed by this fn */
3098 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003099
sewardj406bac82010-03-03 23:03:40 +00003100 if (newcount >= bar->size) {
3101 /* Increasing the capacity. There's no possibility of threads
3102 moving on from the barrier in this situation, so just note
3103 the fact and do nothing more. */
3104 bar->size = newcount;
3105 } else {
3106 /* Decreasing the capacity. If we decrease it to be equal or
3107 below the number of waiting threads, they will now move past
3108 the barrier, so we need to mess with dep edges in the same way
3109 as if the barrier had filled up normally. */
3110 present = VG_(sizeXA)(bar->waiting);
3111 tl_assert(present <= bar->size); /* 'present' is unsigned, so >= 0 trivially */
3112 if (newcount <= present) {
3113 bar->size = present; /* keep the cross_sync call happy */
3114 do_barrier_cross_sync_and_empty(bar);
3115 }
3116 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003117 }
sewardj9f569b72008-11-13 13:33:09 +00003118}
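/* Worked example: a barrier created with count 4 currently has 3
   waiters.  A resize to newcount == 3 (or less) means those waiters
   can now proceed: the code above momentarily sets size = present so
   that do_barrier_cross_sync_and_empty's size check passes, performs
   the cross-sync, and only then installs newcount as the size. */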
3119
3120
sewardjed2e72e2009-08-14 11:08:24 +00003121/* ----------------------------------------------------- */
3122/* ----- events to do with user-specified HB edges ----- */
3123/* ----------------------------------------------------- */
3124
3125/* A mapping from arbitrary UWord tag to the SO associated with it.
3126 The UWord tags are meaningless to us, interpreted only by the
3127 user. */
3128
3129
3130
3131/* UWord -> SO* */
3132static WordFM* map_usertag_to_SO = NULL;
3133
3134static void map_usertag_to_SO_INIT ( void ) {
3135 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3136 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3137 "hg.mutS.1", HG_(free), NULL );
3138 tl_assert(map_usertag_to_SO != NULL);
3139 }
3140}
3141
3142static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3143 UWord key, val;
3144 map_usertag_to_SO_INIT();
3145 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3146 tl_assert(key == (UWord)usertag);
3147 return (SO*)val;
3148 } else {
3149 SO* so = libhb_so_alloc();
3150 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3151 return so;
3152 }
3153}
3154
3155// If it's ever needed (XXX check before use)
3156//static void map_usertag_to_SO_delete ( UWord usertag ) {
3157// UWord keyW, valW;
3158// map_usertag_to_SO_INIT();
3159// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3160// SO* so = (SO*)valW;
3161// tl_assert(keyW == usertag);
3162// tl_assert(so);
3163// libhb_so_dealloc(so);
3164// }
3165//}
3166
3167
3168static
3169void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3170{
3171 /* TID is just about to notionally send a message on a notional
3172 abstract synchronisation object whose identity is given by
3173 USERTAG. Bind USERTAG to a real SO if it is not already so
3174 bound, and do a 'strong send' on the SO. This is later used by
3175 other thread(s) which successfully 'receive' from the SO,
3176 thereby acquiring a dependency on this signalling event. */
3177 Thread* thr;
3178 SO* so;
3179
3180 if (SHOW_EVENTS >= 1)
3181 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3182 (Int)tid, usertag );
3183
3184 thr = map_threads_maybe_lookup( tid );
3185 tl_assert(thr); /* cannot fail - Thread* must already exist */
3186
3187 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3188 tl_assert(so);
3189
3190 libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
3191}
3192
3193static
3194void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3195{
3196 /* TID has just notionally received a message from a notional
3197 abstract synchronisation object whose identity is given by
3198 USERTAG. Bind USERTAG to a real SO if it is not already so
3199 bound. If the SO has at some point in the past been 'sent' on,
3200 do a 'strong receive' on it, thereby acquiring a dependency on
3201 the sender. */
3202 Thread* thr;
3203 SO* so;
3204
3205 if (SHOW_EVENTS >= 1)
3206 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3207 (Int)tid, usertag );
3208
3209 thr = map_threads_maybe_lookup( tid );
3210 tl_assert(thr); /* cannot fail - Thread* must already exist */
3211
3212 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3213 tl_assert(so);
3214
3215 /* Acquire a dependency on it. If the SO has never so far been
3216 sent on, then libhb_so_recv will do nothing. So we're safe
3217 regardless of SO's history. */
3218 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3219}
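/* Illustrative sketch of the client side, assuming the
   ANNOTATE_HAPPENS_BEFORE / ANNOTATE_HAPPENS_AFTER macros from
   helgrind.h as the route by which an arbitrary tag -- here the
   address of 'msg' -- reaches the two handlers above. */
#if 0
#include "helgrind.h"

static int msg;   /* handed off through some lock-free mechanism */

/* producer thread */
static void produce ( void ) {
   msg = 123;
   ANNOTATE_HAPPENS_BEFORE(&msg);  /* strong send on the tag's SO */
   /* ... make the hand-off visible to the consumer ... */
}

/* consumer thread */
static void consume ( void ) {
   int v;
   /* ... observe that the hand-off has happened ... */
   ANNOTATE_HAPPENS_AFTER(&msg);   /* strong recv from the tag's SO */
   v = msg;   /* now ordered after the producer's write */
   (void)v;
}
#endif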
3220
3221
sewardjb4112022007-11-09 22:49:28 +00003222/*--------------------------------------------------------------*/
3223/*--- Lock acquisition order monitoring ---*/
3224/*--------------------------------------------------------------*/
3225
3226/* FIXME: here are some optimisations still to do in
3227 laog__pre_thread_acquires_lock.
3228
3229 The graph is structured so that if L1 --*--> L2 then L1 must be
3230 acquired before L2.
3231
3232 The common case is that some thread T holds (eg) L1 L2 and L3 and
3233 is repeatedly acquiring and releasing Ln, and there is no ordering
3234 error in what it is doing. Hence it repeatedly:
3235
3236 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3237 produces the answer No (because there is no error).
3238
3239 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3240 (because they already got added the first time T acquired Ln).
3241
3242 Hence cache these two events:
3243
3244 (1) Cache result of the query from last time. Invalidate the cache
3245 any time any edges are added to or deleted from laog.
3246
3247 (2) Cache these add-edge requests and ignore them if said edges
3248 have already been added to laog. Invalidate the cache any time
3249 any edges are deleted from laog.
3250*/
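/* Illustrative sketch of one possible shape for the two caches
   proposed above; this is an assumed design, not something
   implemented in this file. */
#if 0
/* (1) memoised result of the most recent reachability query;
   invalidate whenever any edge is added to or deleted from laog */
static struct {
   Bool      valid;
   Lock*     src;      /* query: src --*--> some member of dsts? */
   WordSetID dsts;     /* in univ_lsets */
   Lock*     result;   /* cached answer, NULL if unreachable */
} laog_query_cache;

/* (2) edges already known to be present, so that repeated add-edge
   requests can be skipped; invalidate on any edge deletion */
static WordFM* laog_present_edges;  /* set of (src,dst) pairs */
#endif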
3251
3252typedef
3253 struct {
3254 WordSetID inns; /* in univ_laog */
3255 WordSetID outs; /* in univ_laog */
3256 }
3257 LAOGLinks;
3258
3259/* lock order acquisition graph */
3260static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3261
3262/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3263 where that edge was created, so that we can show the user later if
3264 we need to. */
3265typedef
3266 struct {
3267 Addr src_ga; /* Lock guest addresses for */
3268 Addr dst_ga; /* src/dst of the edge */
3269 ExeContext* src_ec; /* And corresponding places where that */
3270 ExeContext* dst_ec; /* ordering was established */
3271 }
3272 LAOGLinkExposition;
3273
sewardj250ec2e2008-02-15 22:02:30 +00003274static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003275 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3276 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3277 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3278 if (llx1->src_ga < llx2->src_ga) return -1;
3279 if (llx1->src_ga > llx2->src_ga) return 1;
3280 if (llx1->dst_ga < llx2->dst_ga) return -1;
3281 if (llx1->dst_ga > llx2->dst_ga) return 1;
3282 return 0;
3283}
3284
3285static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3286/* end EXPOSITION ONLY */
3287
3288
sewardja65db102009-01-26 10:45:16 +00003289__attribute__((noinline))
3290static void laog__init ( void )
3291{
3292 tl_assert(!laog);
3293 tl_assert(!laog_exposition);
3294
3295 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3296 HG_(free), NULL/*unboxedcmp*/ );
3297
3298 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3299 cmp_LAOGLinkExposition );
3300 tl_assert(laog);
3301 tl_assert(laog_exposition);
3302}
3303
sewardjb4112022007-11-09 22:49:28 +00003304static void laog__show ( Char* who ) {
3305 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003306 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003307 Lock* me;
3308 LAOGLinks* links;
3309 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003310 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003311 me = NULL;
3312 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003313 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003314 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003315 tl_assert(me);
3316 tl_assert(links);
3317 VG_(printf)(" node %p:\n", me);
3318 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3319 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003320 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003321 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3322 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003323 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003324 me = NULL;
3325 links = NULL;
3326 }
sewardj896f6f92008-08-19 08:38:52 +00003327 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003328 VG_(printf)("}\n");
3329}
3330
3331__attribute__((noinline))
3332static void laog__add_edge ( Lock* src, Lock* dst ) {
3333 Word keyW;
3334 LAOGLinks* links;
3335 Bool presentF, presentR;
3336 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3337
3338 /* Take the opportunity to sanity check the graph. Record in
3339 presentF if there is already a src->dst mapping in this node's
3340 forwards links, and presentR if there is already a src->dst
3341 mapping in this node's backwards links. They should agree!
3342 Also, we need to know whether the edge was already present so as
3343 to decide whether or not to update the link details mapping. We
3344 can compute presentF and presentR essentially for free, so may
3345 as well do this always. */
3346 presentF = presentR = False;
3347
3348 /* Update the out edges for src */
3349 keyW = 0;
3350 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003351 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003352 WordSetID outs_new;
3353 tl_assert(links);
3354 tl_assert(keyW == (Word)src);
3355 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3356 presentF = outs_new == links->outs;
3357 links->outs = outs_new;
3358 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003359 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003360 links->inns = HG_(emptyWS)( univ_laog );
3361 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003362 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003363 }
3364 /* Update the in edges for dst */
3365 keyW = 0;
3366 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003367 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003368 WordSetID inns_new;
3369 tl_assert(links);
3370 tl_assert(keyW == (Word)dst);
3371 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3372 presentR = inns_new == links->inns;
3373 links->inns = inns_new;
3374 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003375 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003376 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3377 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003378 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003379 }
3380
3381 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3382
3383 if (!presentF && src->acquired_at && dst->acquired_at) {
3384 LAOGLinkExposition expo;
3385 /* If this edge is entering the graph, and we have acquired_at
3386 information for both src and dst, record those acquisition
3387 points. Hence, if there is later a violation of this
3388 ordering, we can show the user the two places in which the
3389 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003390 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003391 src->guestaddr, dst->guestaddr);
3392 expo.src_ga = src->guestaddr;
3393 expo.dst_ga = dst->guestaddr;
3394 expo.src_ec = NULL;
3395 expo.dst_ec = NULL;
3396 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003397 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003398 /* we already have it; do nothing */
3399 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003400 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3401 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003402 expo2->src_ga = src->guestaddr;
3403 expo2->dst_ga = dst->guestaddr;
3404 expo2->src_ec = src->acquired_at;
3405 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003406 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003407 }
3408 }
3409}
3410
3411__attribute__((noinline))
3412static void laog__del_edge ( Lock* src, Lock* dst ) {
3413 Word keyW;
3414 LAOGLinks* links;
3415 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3416 /* Update the out edges for src */
3417 keyW = 0;
3418 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003419 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003420 tl_assert(links);
3421 tl_assert(keyW == (Word)src);
3422 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3423 }
3424 /* Update the in edges for dst */
3425 keyW = 0;
3426 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003427 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003428 tl_assert(links);
3429 tl_assert(keyW == (Word)dst);
3430 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3431 }
3432}
3433
3434__attribute__((noinline))
3435static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3436 Word keyW;
3437 LAOGLinks* links;
3438 keyW = 0;
3439 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003440 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003441 tl_assert(links);
3442 tl_assert(keyW == (Word)lk);
3443 return links->outs;
3444 } else {
3445 return HG_(emptyWS)( univ_laog );
3446 }
3447}
3448
3449__attribute__((noinline))
3450static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3451 Word keyW;
3452 LAOGLinks* links;
3453 keyW = 0;
3454 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003455 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003456 tl_assert(links);
3457 tl_assert(keyW == (Word)lk);
3458 return links->inns;
3459 } else {
3460 return HG_(emptyWS)( univ_laog );
3461 }
3462}
3463
3464__attribute__((noinline))
3465static void laog__sanity_check ( Char* who ) {
3466 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003467 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003468 Lock* me;
3469 LAOGLinks* links;
sewardja65db102009-01-26 10:45:16 +00003470 if (UNLIKELY(!laog || !laog_exposition))
3471 laog__init();
sewardj896f6f92008-08-19 08:38:52 +00003472 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003473 me = NULL;
3474 links = NULL;
3475 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003476 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003477 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003478 tl_assert(me);
3479 tl_assert(links);
3480 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3481 for (i = 0; i < ws_size; i++) {
3482 if ( ! HG_(elemWS)( univ_laog,
3483 laog__succs( (Lock*)ws_words[i] ),
3484 (Word)me ))
3485 goto bad;
3486 }
3487 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3488 for (i = 0; i < ws_size; i++) {
3489 if ( ! HG_(elemWS)( univ_laog,
3490 laog__preds( (Lock*)ws_words[i] ),
3491 (Word)me ))
3492 goto bad;
3493 }
3494 me = NULL;
3495 links = NULL;
3496 }
sewardj896f6f92008-08-19 08:38:52 +00003497 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003498 return;
3499
3500 bad:
3501 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3502 laog__show(who);
3503 tl_assert(0);
3504}
3505
3506/* If there is a path in laog from 'src' to any of the elements in
3507 'dst', return an arbitrarily chosen element of 'dst' reachable from
3508 'src'. If no path exists from 'src' to any element in 'dst', return
3509 NULL. */
3510__attribute__((noinline))
3511static
3512Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3513{
3514 Lock* ret;
3515 Word i, ssz;
3516 XArray* stack; /* of Lock* */
3517 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3518 Lock* here;
3519 WordSetID succs;
3520 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003521 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003522 //laog__sanity_check();
3523
3524 /* If the destination set is empty, we can never get there from
3525 'src' :-), so don't bother to try */
3526 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3527 return NULL;
3528
3529 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003530 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3531 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003532
3533 (void) VG_(addToXA)( stack, &src );
3534
3535 while (True) {
3536
3537 ssz = VG_(sizeXA)( stack );
3538
3539 if (ssz == 0) { ret = NULL; break; }
3540
3541 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3542 VG_(dropTailXA)( stack, 1 );
3543
3544 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3545
sewardj896f6f92008-08-19 08:38:52 +00003546 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003547 continue;
3548
sewardj896f6f92008-08-19 08:38:52 +00003549 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003550
3551 succs = laog__succs( here );
3552 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3553 for (i = 0; i < succs_size; i++)
3554 (void) VG_(addToXA)( stack, &succs_words[i] );
3555 }
3556
sewardj896f6f92008-08-19 08:38:52 +00003557 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003558 VG_(deleteXA)( stack );
3559 return ret;
3560}
3561
3562
3563/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3564 between 'lk' and the locks already held by 'thr' and issue a
3565 complaint if so. Also, update the ordering graph appropriately.
3566*/
3567__attribute__((noinline))
3568static void laog__pre_thread_acquires_lock (
3569 Thread* thr, /* NB: BEFORE lock is added */
3570 Lock* lk
3571 )
3572{
sewardj250ec2e2008-02-15 22:02:30 +00003573 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003574 Word ls_size, i;
3575 Lock* other;
3576
3577 /* It may be that 'thr' already holds 'lk' and is recursively
3578 relocking it. In this case we just ignore the call. */
3579 /* NB: univ_lsets really is correct here */
3580 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3581 return;
3582
sewardja65db102009-01-26 10:45:16 +00003583 if (UNLIKELY(!laog || !laog_exposition))
3584 laog__init();
sewardjb4112022007-11-09 22:49:28 +00003585
3586 /* First, the check. Complain if there is any path in laog from lk
3587 to any of the locks already held by thr, since if any such path
3588 existed, it would mean that previously lk was acquired before
3589 (rather than after, as we are doing here) at least one of those
3590 locks.
3591 */
3592 other = laog__do_dfs_from_to(lk, thr->locksetA);
3593 if (other) {
3594 LAOGLinkExposition key, *found;
3595 /* So we managed to find a path lk --*--> other in the graph,
3596 which implies that 'lk' should have been acquired before
3597 'other' but is in fact being acquired afterwards. We present
3598 the lk/other arguments to record_error_LockOrder in the order
3599 in which they should have been acquired. */
3600 /* Go look in the laog_exposition mapping, to find the
3601 acquisition points for this edge, so we can show the user. */
3602 key.src_ga = lk->guestaddr;
3603 key.dst_ga = other->guestaddr;
3604 key.src_ec = NULL;
3605 key.dst_ec = NULL;
3606 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003607 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003608 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003609 tl_assert(found != &key);
3610 tl_assert(found->src_ga == key.src_ga);
3611 tl_assert(found->dst_ga == key.dst_ga);
3612 tl_assert(found->src_ec);
3613 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003614 HG_(record_error_LockOrder)(
3615 thr, lk->guestaddr, other->guestaddr,
3616 found->src_ec, found->dst_ec );
sewardjb4112022007-11-09 22:49:28 +00003617 } else {
3618 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003619 HG_(record_error_LockOrder)(
3620 thr, lk->guestaddr, other->guestaddr,
3621 NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003622 }
3623 }
3624
3625 /* Second, add to laog the pairs
3626 (old, lk) | old <- locks already held by thr
3627 Since both old and lk are currently held by thr, their acquired_at
3628 fields must be non-NULL.
3629 */
3630 tl_assert(lk->acquired_at);
3631 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3632 for (i = 0; i < ls_size; i++) {
3633 Lock* old = (Lock*)ls_words[i];
3634 tl_assert(old->acquired_at);
3635 laog__add_edge( old, lk );
3636 }
3637
3638 /* Why "except_Locks" ? We're here because a lock is being
3639 acquired by a thread, and we're in an inconsistent state here.
3640 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3641 When called in this inconsistent state, locks__sanity_check duly
3642 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003643 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003644 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3645}
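/* Illustrative sketch of the client-side pattern the check above
   reports.  Thread A's nested acquisition records the edge m1 -> m2
   in laog; when thread B then asks for m1 while holding m2, the DFS
   finds the path m1 --*--> m2 into B's lockset and a lock order
   error is emitted. */
#if 0
#include <pthread.h>

static pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

static void* thread_A ( void* v ) {
   pthread_mutex_lock(&m1);
   pthread_mutex_lock(&m2);      /* establishes edge m1 -> m2 */
   pthread_mutex_unlock(&m2);
   pthread_mutex_unlock(&m1);
   return NULL;
}

static void* thread_B ( void* v ) {
   pthread_mutex_lock(&m2);
   pthread_mutex_lock(&m1);      /* m1 --*--> m2 exists: reported */
   pthread_mutex_unlock(&m1);
   pthread_mutex_unlock(&m2);
   return NULL;
}
#endif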
3646
3647
3648/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3649
3650__attribute__((noinline))
3651static void laog__handle_one_lock_deletion ( Lock* lk )
3652{
3653 WordSetID preds, succs;
3654 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003655 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003656
sewardja65db102009-01-26 10:45:16 +00003657 if (UNLIKELY(!laog || !laog_exposition))
3658 laog__init();
3659
sewardjb4112022007-11-09 22:49:28 +00003660 preds = laog__preds( lk );
3661 succs = laog__succs( lk );
3662
3663 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3664 for (i = 0; i < preds_size; i++)
3665 laog__del_edge( (Lock*)preds_words[i], lk );
3666
3667 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3668 for (j = 0; j < succs_size; j++)
3669 laog__del_edge( lk, (Lock*)succs_words[j] );
3670
3671 for (i = 0; i < preds_size; i++) {
3672 for (j = 0; j < succs_size; j++) {
3673 if (preds_words[i] != succs_words[j]) {
3674 /* This can pass unlocked locks to laog__add_edge, since
3675 we're deleting stuff. So their acquired_at fields may
3676 be NULL. */
3677 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3678 }
3679 }
3680 }
3681}
3682
sewardj1cbc12f2008-11-10 16:16:46 +00003683//__attribute__((noinline))
3684//static void laog__handle_lock_deletions (
3685// WordSetID /* in univ_laog */ locksToDelete
3686// )
3687//{
3688// Word i, ws_size;
3689// UWord* ws_words;
3690//
sewardja65db102009-01-26 10:45:16 +00003691// if (UNLIKELY(!laog || !laog_exposition))
3692// laog__init();
sewardj1cbc12f2008-11-10 16:16:46 +00003693//
3694// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3695// for (i = 0; i < ws_size; i++)
3696// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3697//
3698// if (HG_(clo_sanity_flags) & SCE_LAOG)
3699// all__sanity_check("laog__handle_lock_deletions-post");
3700//}
sewardjb4112022007-11-09 22:49:28 +00003701
3702
3703/*--------------------------------------------------------------*/
3704/*--- Malloc/free replacements ---*/
3705/*--------------------------------------------------------------*/
3706
3707typedef
3708 struct {
3709 void* next; /* required by m_hashtable */
3710 Addr payload; /* ptr to actual block */
3711 SizeT szB; /* size requested */
3712 ExeContext* where; /* where it was allocated */
3713 Thread* thr; /* allocating thread */
3714 }
3715 MallocMeta;
3716
3717/* A hash table of MallocMetas, used to track malloc'd blocks
3718 (obviously). */
3719static VgHashTable hg_mallocmeta_table = NULL;
3720
3721
3722static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003723 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003724 tl_assert(md);
3725 return md;
3726}
3727static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003728 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003729}
3730
3731
3732/* Allocate a client block and set up the metadata for it. */
3733
3734static
3735void* handle_alloc ( ThreadId tid,
3736 SizeT szB, SizeT alignB, Bool is_zeroed )
3737{
3738 Addr p;
3739 MallocMeta* md;
3740
3741 tl_assert( ((SSizeT)szB) >= 0 );
3742 p = (Addr)VG_(cli_malloc)(alignB, szB);
3743 if (!p) {
3744 return NULL;
3745 }
3746 if (is_zeroed)
3747 VG_(memset)((void*)p, 0, szB);
3748
3749 /* Note that map_threads_lookup must succeed (cannot assert), since
3750 memory can only be allocated by currently alive threads, hence
3751 they must have an entry in map_threads. */
3752 md = new_MallocMeta();
3753 md->payload = p;
3754 md->szB = szB;
3755 md->where = VG_(record_ExeContext)( tid, 0 );
3756 md->thr = map_threads_lookup( tid );
3757
3758 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3759
3760 /* Tell the lower level memory wranglers. */
3761 evh__new_mem_heap( p, szB, is_zeroed );
3762
3763 return (void*)p;
3764}
3765
3766/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3767 Cast to a signed type to catch any unexpectedly negative args.
3768 We're assuming here that the size asked for is not greater than
3769 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3770 platforms). */
3771static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3772 if (((SSizeT)n) < 0) return NULL;
3773 return handle_alloc ( tid, n, VG_(clo_alignment),
3774 /*is_zeroed*/False );
3775}
3776static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3777 if (((SSizeT)n) < 0) return NULL;
3778 return handle_alloc ( tid, n, VG_(clo_alignment),
3779 /*is_zeroed*/False );
3780}
3781static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3782 if (((SSizeT)n) < 0) return NULL;
3783 return handle_alloc ( tid, n, VG_(clo_alignment),
3784 /*is_zeroed*/False );
3785}
3786static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3787 if (((SSizeT)n) < 0) return NULL;
3788 return handle_alloc ( tid, n, align,
3789 /*is_zeroed*/False );
3790}
3791static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3792 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3793 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3794 /*is_zeroed*/True );
3795}
3796
3797
3798/* Free a client block, including getting rid of the relevant
3799 metadata. */
3800
3801static void handle_free ( ThreadId tid, void* p )
3802{
3803 MallocMeta *md, *old_md;
3804 SizeT szB;
3805
3806 /* First see if we can find the metadata for 'p'. */
3807 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3808 if (!md)
3809 return; /* apparently freeing a bogus address. Oh well. */
3810
3811 tl_assert(md->payload == (Addr)p);
3812 szB = md->szB;
3813
3814 /* Nuke the metadata block */
3815 old_md = (MallocMeta*)
3816 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3817 tl_assert(old_md); /* it must be present - we just found it */
3818 tl_assert(old_md == md);
3819 tl_assert(old_md->payload == (Addr)p);
3820
3821 VG_(cli_free)((void*)old_md->payload);
3822 delete_MallocMeta(old_md);
3823
3824 /* Tell the lower level memory wranglers. */
3825 evh__die_mem_heap( (Addr)p, szB );
3826}
3827
3828static void hg_cli__free ( ThreadId tid, void* p ) {
3829 handle_free(tid, p);
3830}
3831static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3832 handle_free(tid, p);
3833}
3834static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3835 handle_free(tid, p);
3836}
3837
3838
3839static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3840{
3841 MallocMeta *md, *md_new, *md_tmp;
3842 SizeT i;
3843
3844 Addr payload = (Addr)payloadV;
3845
3846 if (((SSizeT)new_size) < 0) return NULL;
3847
3848 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3849 if (!md)
3850 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3851
3852 tl_assert(md->payload == payload);
3853
3854 if (md->szB == new_size) {
3855 /* size unchanged */
3856 md->where = VG_(record_ExeContext)(tid, 0);
3857 return payloadV;
3858 }
3859
3860 if (md->szB > new_size) {
3861 /* new size is smaller */
3862 md->szB = new_size;
3863 md->where = VG_(record_ExeContext)(tid, 0);
3864 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3865 return payloadV;
3866 }
3867
3868 /* else */ {
3869 /* new size is bigger */
3870 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3871
3872 /* First half kept and copied, second half new */
3873 // FIXME: shouldn't we use a copier which implements the
3874 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003875 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003876 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003877 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003878 /* FIXME: can anything funny happen here? specifically, if the
3879 old range contained a lock, then die_mem_heap will complain.
3880 Is that the correct behaviour? Not sure. */
3881 evh__die_mem_heap( payload, md->szB );
3882
3883 /* Copy from old to new */
3884 for (i = 0; i < md->szB; i++)
3885 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3886
3887 /* Because the metadata hash table is indexed by payload address,
3888 we have to get rid of the old hash table entry and make a new
3889 one. We can't just modify the existing metadata in place,
3890 because then it would (almost certainly) be in the wrong hash
3891 chain. */
3892 md_new = new_MallocMeta();
3893 *md_new = *md;
3894
3895 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3896 tl_assert(md_tmp);
3897 tl_assert(md_tmp == md);
3898
3899 VG_(cli_free)((void*)md->payload);
3900 delete_MallocMeta(md);
3901
3902 /* Update fields */
3903 md_new->where = VG_(record_ExeContext)( tid, 0 );
3904 md_new->szB = new_size;
3905 md_new->payload = p_new;
3906 md_new->thr = map_threads_lookup( tid );
3907
3908 /* and add */
3909 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3910
3911 return (void*)p_new;
3912 }
3913}
3914
njn8b140de2009-02-17 04:31:18 +00003915static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3916{
3917 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3918
3919 // There may be slop, but pretend there isn't because only the asked-for
3920 // area will have been shadowed properly.
3921 return ( md ? md->szB : 0 );
3922}
3923
sewardjb4112022007-11-09 22:49:28 +00003924
sewardj095d61e2010-03-11 13:43:18 +00003925/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003926 Slow linear search, with a bit of hash table help if 'data_addr'
3927 is either the start of a block or up to 15 word-sized steps along
3928 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003929
3930static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3931{
sewardjc8028ad2010-05-05 09:34:42 +00003932 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3933 right at it. */
3934 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3935 return True;
3936 /* else normal interval rules apply */
3937 if (LIKELY(a < mm->payload)) return False;
3938 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3939 return True;
sewardj095d61e2010-03-11 13:43:18 +00003940}
3941
sewardjc8028ad2010-05-05 09:34:42 +00003942Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003943 /*OUT*/Addr* payload,
3944 /*OUT*/SizeT* szB,
3945 Addr data_addr )
3946{
3947 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003948 Int i;
3949 const Int n_fast_check_words = 16;
3950
3951 /* First, do a few fast searches on the basis that data_addr might
3952 be exactly the start of a block or up to 15 words inside. This
3953 can happen commonly via the creq
3954 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3955 for (i = 0; i < n_fast_check_words; i++) {
3956 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3957 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3958 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3959 goto found;
3960 }
3961
sewardj095d61e2010-03-11 13:43:18 +00003962 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003963 some such, it's hard to see how to do better. We have to check
3964 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003965 VG_(HT_ResetIter)(hg_mallocmeta_table);
3966 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003967 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3968 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003969 }
sewardjc8028ad2010-05-05 09:34:42 +00003970
3971 /* Not found. Bah. */
3972 return False;
3973 /*NOTREACHED*/
3974
3975 found:
3976 tl_assert(mm);
3977 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3978 if (where) *where = mm->where;
3979 if (payload) *payload = mm->payload;
3980 if (szB) *szB = mm->szB;
3981 return True;
sewardj095d61e2010-03-11 13:43:18 +00003982}
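/* Worked example: for a block with payload 0x1000 and szB 0x100 on a
   64-bit host, queries at 0x1000, 0x1008, ..., 0x1078 (the start and
   up to 15 word-sized steps in) hit the hash table fast path above;
   any other address inside the block falls through to the full
   linear scan. */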
3983
3984
sewardjb4112022007-11-09 22:49:28 +00003985/*--------------------------------------------------------------*/
3986/*--- Instrumentation ---*/
3987/*--------------------------------------------------------------*/
3988
3989static void instrument_mem_access ( IRSB* bbOut,
3990 IRExpr* addr,
3991 Int szB,
3992 Bool isStore,
3993 Int hWordTy_szB )
3994{
3995 IRType tyAddr = Ity_INVALID;
3996 HChar* hName = NULL;
3997 void* hAddr = NULL;
3998 Int regparms = 0;
3999 IRExpr** argv = NULL;
4000 IRDirty* di = NULL;
4001
4002 tl_assert(isIRAtom(addr));
4003 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
4004
4005 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
4006 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4007
4008 /* So the effective address is in 'addr' now. */
4009 regparms = 1; // unless stated otherwise
4010 if (isStore) {
4011 switch (szB) {
4012 case 1:
sewardj23f12002009-07-24 08:45:08 +00004013 hName = "evh__mem_help_cwrite_1";
4014 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004015 argv = mkIRExprVec_1( addr );
4016 break;
4017 case 2:
sewardj23f12002009-07-24 08:45:08 +00004018 hName = "evh__mem_help_cwrite_2";
4019 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004020 argv = mkIRExprVec_1( addr );
4021 break;
4022 case 4:
sewardj23f12002009-07-24 08:45:08 +00004023 hName = "evh__mem_help_cwrite_4";
4024 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004025 argv = mkIRExprVec_1( addr );
4026 break;
4027 case 8:
sewardj23f12002009-07-24 08:45:08 +00004028 hName = "evh__mem_help_cwrite_8";
4029 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004030 argv = mkIRExprVec_1( addr );
4031 break;
4032 default:
4033 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4034 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004035 hName = "evh__mem_help_cwrite_N";
4036 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004037 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4038 break;
4039 }
4040 } else {
4041 switch (szB) {
4042 case 1:
sewardj23f12002009-07-24 08:45:08 +00004043 hName = "evh__mem_help_cread_1";
4044 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004045 argv = mkIRExprVec_1( addr );
4046 break;
4047 case 2:
sewardj23f12002009-07-24 08:45:08 +00004048 hName = "evh__mem_help_cread_2";
4049 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004050 argv = mkIRExprVec_1( addr );
4051 break;
4052 case 4:
sewardj23f12002009-07-24 08:45:08 +00004053 hName = "evh__mem_help_cread_4";
4054 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004055 argv = mkIRExprVec_1( addr );
4056 break;
4057 case 8:
sewardj23f12002009-07-24 08:45:08 +00004058 hName = "evh__mem_help_cread_8";
4059 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004060 argv = mkIRExprVec_1( addr );
4061 break;
4062 default:
4063 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4064 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004065 hName = "evh__mem_help_cread_N";
4066 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004067 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4068 break;
4069 }
4070 }
4071
4072 /* Add the helper. */
4073 tl_assert(hName);
4074 tl_assert(hAddr);
4075 tl_assert(argv);
4076 di = unsafeIRDirty_0_N( regparms,
4077 hName, VG_(fnptr_to_fnentry)( hAddr ),
4078 argv );
4079 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
4080}
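/* Illustrative sketch (schematic IR, not a literal dump): for a
   4-byte store the instrumentation above plants, just before the
   access itself (which the caller appends),

      DIRTY 1:::evh__mem_help_cwrite_4(t3)
      STle(t3) = t5

   i.e. a dirty helper call observing the effective address runs
   alongside every instrumented memory access. */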
4081
4082
sewardja0eee322009-07-31 08:46:35 +00004083/* Figure out if GA is a guest code address in the dynamic linker, and
4084 if so return True. Otherwise (and in case of any doubt) return
4085 False. (This errs on the safe side: False is the safe value.) */
4086static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4087{
4088 DebugInfo* dinfo;
4089 const UChar* soname;
4090 if (0) return False;
4091
sewardje3f1e592009-07-31 09:41:29 +00004092 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004093 if (!dinfo) return False;
4094
sewardje3f1e592009-07-31 09:41:29 +00004095 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004096 tl_assert(soname);
4097 if (0) VG_(printf)("%s\n", soname);
4098
4099# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004100 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004101 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4102 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4103 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4104 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4105# elif defined(VGO_darwin)
4106 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4107# else
4108# error "Unsupported OS"
4109# endif
4110 return False;
4111}

static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   Addr64  cia; /* address of current insn */
   IRStmt* st;
   Bool    inLDSO = False;
   Addr64  inLDSOmask4K = 1; /* mismatches on first check */

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
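            /* Worked example with invented addresses: if cia is
               0x40001A37 then cia & ~0xFFF is 0x40001000.  While
               successive IMarks remain inside 0x40001000..0x40001FFF
               the cached inLDSO verdict is reused, and the soname
               lookup is redone only when execution crosses into a
               different 4K-aligned region. */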
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
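            /* For a double CAS the transaction covers both the Lo and
               Hi halves, hence the (isDCAS ? 2 : 1) size factor below:
               e.g. a DCAS on two I32 halves is instrumented as a
               single 8-byte read at cas->addr. */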
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Whereas load-linked's are treated the same as
               normal loads. */
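            /* This mirrors the Ist_CAS policy above: atomic
               read-modify-write sequences are modelled as reads only,
               so the store half of an LL/SC pair adds no extra
               instrumentation of its own. */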
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }

         case Ist_Store:
            /* A plain store; instrument it.  (Store-conditionals are
               handled, i.e. ignored, in the Ist_LLSC case above.) */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* A plain load; instrument it.  (Load-linked is handled
               in the Ist_LLSC case above.) */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
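
      /* Client-side view (a sketch; helgrind.h has the authoritative
         macro): this request is normally issued as

            #include "helgrind.h"
            ...
            VALGRIND_HG_CLEAN_MEMORY(buf, sizeof buf);

         which delivers the start address in args[1] and the length
         in args[2]. */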
4371
sewardjc8028ad2010-05-05 09:34:42 +00004372 case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
4373 Addr payload = 0;
4374 SizeT pszB = 0;
4375 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
4376 args[1]);
4377 if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
4378 if (pszB > 0) {
4379 evh__die_mem(payload, pszB);
4380 evh__new_mem(payload, pszB);
4381 }
4382 *ret = pszB;
4383 } else {
4384 *ret = (UWord)-1;
4385 }
4386 break;
4387 }
4388
sewardj406bac82010-03-03 23:03:40 +00004389 case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
4390 if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
4391 args[1], args[2]);
4392 if (args[2] > 0) { /* length */
4393 evh__untrack_mem(args[1], args[2]);
4394 }
4395 break;
4396
4397 case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
4398 if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
4399 args[1], args[2]);
4400 if (args[2] > 0) { /* length */
4401 evh__new_mem(args[1], args[2]);
4402 }
4403 break;
4404
      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                                   args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;
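
      /* Client-side view (a sketch; helgrind.h has the authoritative
         macros): these two requests are the back end of
         ANNOTATE_HAPPENS_BEFORE(obj) and ANNOTATE_HAPPENS_AFTER(obj)
         respectively, e.g.

            ANNOTATE_HAPPENS_BEFORE(&flag);  // signalling thread
            ...
            ANNOTATE_HAPPENS_AFTER(&flag);   // receiving thread

         with the address of 'flag' serving as the arbitrary SO tag
         carried in args[1]. */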

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* Parse a 6-character string of 0s and 1s ("stuvwx") into the
      corresponding 6-bit mask. */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if      ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }
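
   /* Worked example: --hg-sanity-flags=000010 sets bit (6-1-4) == 1,
      so HG_(clo_sanity_flags) becomes 0x2, which enables checking at
      lock/unlock events (see hg_print_debug_usage below). */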

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_post_clo_init ( void )
{
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
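
/* Note that when fewer than nRequest frames are available, the tail
   of 'frames' is zero-filled above; callers in libhb presumably rely
   on unused slots holding zero rather than garbage. */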

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;

   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );
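
   /* Sketch of the "inheritance" requirement (hedged; the real
      contract is in pub_tool_hashtable.h): hash table nodes are
      expected to begin with a chain pointer and a UWord key, roughly

         struct _VgHashNode {
            struct _VgHashNode* next;
            UWord               key;
         };

      and MallocMeta (defined earlier in this file) must lay out its
      first two words compatibly, so a MallocMeta* can be handed to
      the generic hash table code.  Hence the size assertions above;
      the field names shown here are illustrative. */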

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/