blob: e6202623f4d013f0c27562651843fc7e7df78680 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
njn9f207462009-03-10 22:02:09 +000011 Copyright (C) 2007-2009 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
njnf76d27a2009-05-28 01:53:07 +000014 Copyright (C) 2007-2009 Apple, Inc.
15
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardjb4112022007-11-09 22:49:28 +000055
sewardjf98e1c02008-10-25 16:22:41 +000056#include "hg_basics.h"
57#include "hg_wordset.h"
58#include "hg_lock_n_thread.h"
59#include "hg_errors.h"
60
61#include "libhb.h"
62
sewardjb4112022007-11-09 22:49:28 +000063#include "helgrind.h"
64
sewardjf98e1c02008-10-25 16:22:41 +000065
66// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
67
68// FIXME: when client destroys a lock or a CV, remove these
69// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000070
71/*----------------------------------------------------------------*/
72/*--- ---*/
73/*----------------------------------------------------------------*/
74
sewardj11e352f2007-11-30 11:11:02 +000075/* Note this needs to be compiled with -fno-strict-aliasing, since it
76 contains a whole bunch of calls to lookupFM etc which cast between
77 Word and pointer types. gcc rightly complains this breaks ANSI C
78 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
79 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000080*/
sewardjb4112022007-11-09 22:49:28 +000081
sewardjefd3b4d2007-12-02 02:05:23 +000082// FIXME catch sync signals (SEGV, basically) and unlock BHL,
83// if held. Otherwise a LOCK-prefixed insn which segfaults
84// gets Helgrind into a total muddle as the BHL will not be
85// released after the insn.
86
sewardjb4112022007-11-09 22:49:28 +000087// FIXME what is supposed to happen to locks in memory which
88// is relocated as a result of client realloc?
89
sewardjb4112022007-11-09 22:49:28 +000090// FIXME put referencing ThreadId into Thread and get
91// rid of the slow reverse mapping function.
92
93// FIXME accesses to NoAccess areas: change state to Excl?
94
95// FIXME report errors for accesses of NoAccess memory?
96
97// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
98// the thread still holds the lock.
99
100/* ------------ Debug/trace options ------------ */
101
102// this is:
103// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
104// happens_before_wrk: 1000
105// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
106#define SHOW_EXPENSIVE_STUFF 0
107
108// 0 for silent, 1 for some stuff, 2 for lots of stuff
109#define SHOW_EVENTS 0
110
sewardjb4112022007-11-09 22:49:28 +0000111
112static void all__sanity_check ( Char* who ); /* fwds */
113
114#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
115
116// 0 for none, 1 for dump at end of run
117#define SHOW_DATA_STRUCTURES 0
118
119
sewardjb4112022007-11-09 22:49:28 +0000120/* ------------ Misc comments ------------ */
121
122// FIXME: don't hardwire initial entries for root thread.
123// Instead, let the pre_thread_ll_create handler do this.
124
sewardjb4112022007-11-09 22:49:28 +0000125
126/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000127/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000128/*----------------------------------------------------------------*/
129
sewardjb4112022007-11-09 22:49:28 +0000130/* Admin linked list of Threads */
131static Thread* admin_threads = NULL;
132
133/* Admin linked list of Locks */
134static Lock* admin_locks = NULL;
135
sewardjb4112022007-11-09 22:49:28 +0000136/* Mapping table for core ThreadIds to Thread* */
137static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
138
sewardjb4112022007-11-09 22:49:28 +0000139/* Mapping table for lock guest addresses to Lock* */
140static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
141
142/* The word-set universes for thread sets and lock sets. */
143static WordSetU* univ_tsets = NULL; /* sets of Thread* */
144static WordSetU* univ_lsets = NULL; /* sets of Lock* */
145static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
146
147/* never changed; we only care about its address. Is treated as if it
148 was a standard userspace lock. Also we have a Lock* describing it
149 so it can participate in lock sets in the usual way. */
150static Int __bus_lock = 0;
151static Lock* __bus_lock_Lock = NULL;
152
153
154/*----------------------------------------------------------------*/
155/*--- Simple helpers for the data structures ---*/
156/*----------------------------------------------------------------*/
157
158static UWord stats__lockN_acquires = 0;
159static UWord stats__lockN_releases = 0;
160
sewardjf98e1c02008-10-25 16:22:41 +0000161static
162ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000163
164/* --------- Constructors --------- */
165
sewardjf98e1c02008-10-25 16:22:41 +0000166static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000167 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000168 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000169 thread->locksetA = HG_(emptyWS)( univ_lsets );
170 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000171 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000172 thread->hbthr = hbthr;
173 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000174 thread->created_at = NULL;
175 thread->announced = False;
176 thread->errmsg_index = indx++;
177 thread->admin = admin_threads;
178 admin_threads = thread;
179 return thread;
180}
sewardjf98e1c02008-10-25 16:22:41 +0000181
sewardjb4112022007-11-09 22:49:28 +0000182// Make a new lock which is unlocked (hence ownerless)
183static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
184 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000185 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardjb4112022007-11-09 22:49:28 +0000186 lock->admin = admin_locks;
187 lock->unique = unique++;
188 lock->magic = LockN_MAGIC;
189 lock->appeared_at = NULL;
190 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000191 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000192 lock->guestaddr = guestaddr;
193 lock->kind = kind;
194 lock->heldW = False;
195 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000196 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000197 admin_locks = lock;
198 return lock;
199}
sewardjb4112022007-11-09 22:49:28 +0000200
201/* Release storage for a Lock. Also release storage in .heldBy, if
202 any. */
203static void del_LockN ( Lock* lk )
204{
sewardjf98e1c02008-10-25 16:22:41 +0000205 tl_assert(HG_(is_sane_LockN)(lk));
206 tl_assert(lk->hbso);
207 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000208 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000209 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000210 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000211 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000212}
213
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      /* lock is transitioning from unheld to held: snapshot the
         acquisition site */
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         /* first (and only permitted) w-acquisition: create the
            holder bag containing just 'thr' */
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         /* maybe-recursive mutex: first acquisition behaves like the
            non-recursive case */
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         /* rwlocks may not be w-locked recursively at all */
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
267
/* Update 'lk' to reflect that 'thr' now has a read-acquisition of
   it.  Only LK_rdwr locks can be read-acquired; the lock must be
   unheld or already read-held. */
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      /* transitioning unheld -> held: snapshot acquisition site */
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      /* already r-held: just bump this thread's count in the bag */
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      /* first reader: create the holder bag */
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
304
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      /* last holder is gone: put the lock back in the canonical
         unheld state */
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
331
/* Remove 'lk' from the A- (all) and, where relevant, W- (write)
   locksets of every thread that currently holds it.  A no-op if the
   lock is unheld. */
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         /* a write-held lock is also in the holder's W lockset */
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
357
sewardjb4112022007-11-09 22:49:28 +0000358
359/*----------------------------------------------------------------*/
360/*--- Print out the primary data structures ---*/
361/*----------------------------------------------------------------*/
362
sewardjd52392d2008-11-08 20:36:26 +0000363//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000364
365#define PP_THREADS (1<<1)
366#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000367#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000368
369
370static const Int sHOW_ADMIN = 0;
371
372static void space ( Int n )
373{
374 Int i;
375 Char spaces[128+1];
376 tl_assert(n >= 0 && n < 128);
377 if (n == 0)
378 return;
379 for (i = 0; i < n; i++)
380 spaces[i] = ' ';
381 spaces[i] = 0;
382 tl_assert(i < 128+1);
383 VG_(printf)("%s", spaces);
384}
385
/* Print one Thread record, indented by 'd' columns. */
static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      /* admin-list linkage and magic are only of interest when
         debugging these structures themselves */
      space(d+3); VG_(printf)("admin %p\n", t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}
397
398static void pp_admin_threads ( Int d )
399{
400 Int i, n;
401 Thread* t;
402 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
403 /* nothing */
404 }
405 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
406 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
407 if (0) {
408 space(n);
409 VG_(printf)("admin_threads record %d of %d:\n", i, n);
410 }
411 pp_Thread(d+3, t);
412 }
barta0b6b2c2008-07-07 06:49:24 +0000413 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000414}
415
/* Print the map_threads table (only the occupied slots), indented by
   'd' columns. */
static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   /* count occupied slots first so the header can show a total */
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}
433
434static const HChar* show_LockKind ( LockKind lkk ) {
435 switch (lkk) {
436 case LK_mbRec: return "mbRec";
437 case LK_nonRec: return "nonRec";
438 case LK_rdwr: return "rdwr";
439 default: tl_assert(0);
440 }
441}
442
/* Print one Lock record, indented by 'd' columns. */
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      /* admin-list linkage and magic are only of interest when
         debugging these structures themselves */
      space(d+3); VG_(printf)("admin %p\n", lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word count;
      VG_(printf)(" { ");
      /* list each holder with its (possibly recursive) hold count */
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
467
468static void pp_admin_locks ( Int d )
469{
470 Int i, n;
471 Lock* lk;
472 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin) {
473 /* nothing */
474 }
475 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
476 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin) {
477 if (0) {
478 space(n);
479 VG_(printf)("admin_locks record %d of %d:\n", i, n);
480 }
481 pp_Lock(d+3, lk);
482 }
barta0b6b2c2008-07-07 06:49:24 +0000483 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000484}
485
/* Print the guest-address -> Lock* mapping, indented by 'd'
   columns. */
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
501
/* Dump the primary data structures selected by 'flags' (a bitmask of
   PP_THREADS / PP_LOCKS).  'caller' is an arbitrary tag printed in
   the header so the dump site can be identified in the output. */
static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
524
525#undef SHOW_ADMIN
526
527
528/*----------------------------------------------------------------*/
529/*--- Initialise the primary data structures ---*/
530/*----------------------------------------------------------------*/
531
/* One-time setup of all the primary data structures, called at tool
   startup.  'hbthr_root' is the libhb state for the root thread; a
   Thread shadow is created for it and bound to core ThreadId 1. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   /* a Lock shadow for the global bus lock, registered under the
      address of the __bus_lock variable */
   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}
595
596
597/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000598/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000599/*----------------------------------------------------------------*/
600
601/* Doesn't assert if the relevant map_threads entry is NULL. */
602static Thread* map_threads_maybe_lookup ( ThreadId coretid )
603{
604 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000605 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000606 thr = map_threads[coretid];
607 return thr;
608}
609
610/* Asserts if the relevant map_threads entry is NULL. */
611static inline Thread* map_threads_lookup ( ThreadId coretid )
612{
613 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000614 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000615 thr = map_threads[coretid];
616 tl_assert(thr);
617 return thr;
618}
619
/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads.  (Despite the historical name, this is now cheap: it
   just reads the coretid cached in the Thread record.) */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}
633
/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   /* cross-check: the forward mapping must agree with the reverse */
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}
644
/* Clear the map_threads entry for 'coretid'.  Asserts if the slot is
   already empty, or if coretid is the (reserved) invalid ThreadId. */
static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}
654
655
656/*----------------------------------------------------------------*/
657/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
658/*----------------------------------------------------------------*/
659
/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock.
   'tid' is used only to record where a newly seen lock first
   appeared (for error messages). */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      /* not previously seen: create a fresh unheld Lock and record
         its first-appearance context */
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
685
686static Lock* map_locks_maybe_lookup ( Addr ga )
687{
688 Bool found;
689 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000690 found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000691 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000692 return lk;
693}
694
/* Delete the map_locks binding for guest address 'ga'.  Asserts that
   a (ga, Lock) binding actually existed.  Note: the Lock itself is
   NOT freed here; the caller retains ownership. */
static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
707
708
sewardjb4112022007-11-09 22:49:28 +0000709
710/*----------------------------------------------------------------*/
711/*--- Sanity checking the data structures ---*/
712/*----------------------------------------------------------------*/
713
714static UWord stats__sanity_checks = 0;
715
sewardjb4112022007-11-09 22:49:28 +0000716static void laog__sanity_check ( Char* who ); /* fwds */
717
718/* REQUIRED INVARIANTS:
719
720 Thread vs Segment/Lock/SecMaps
721
722 for each t in Threads {
723
724 // Thread.lockset: each element is really a valid Lock
725
726 // Thread.lockset: each Lock in set is actually held by that thread
727 for lk in Thread.lockset
728 lk == LockedBy(t)
729
730 // Thread.csegid is a valid SegmentID
731 // and the associated Segment has .thr == t
732
733 }
734
735 all thread Locksets are pairwise empty under intersection
736 (that is, no lock is claimed to be held by more than one thread)
737 -- this is guaranteed if all locks in locksets point back to their
738 owner threads
739
740 Lock vs Thread/Segment/SecMaps
741
742 for each entry (gla, la) in map_locks
743 gla == la->guest_addr
744
745 for each lk in Locks {
746
747 lk->tag is valid
748 lk->guest_addr does not have shadow state NoAccess
749 if lk == LockedBy(t), then t->lockset contains lk
750 if lk == UnlockedBy(segid) then segid is valid SegmentID
751 and can be mapped to a valid Segment(seg)
752 and seg->thr->lockset does not contain lk
753 if lk == UnlockedNew then (no lockset contains lk)
754
755 secmaps for lk has .mbHasLocks == True
756
757 }
758
759 Segment vs Thread/Lock/SecMaps
760
761 the Segment graph is a dag (no cycles)
762 all of the Segment graph must be reachable from the segids
763 mentioned in the Threads
764
765 for seg in Segments {
766
767 seg->thr is a sane Thread
768
769 }
770
771 SecMaps vs Segment/Thread/Lock
772
773 for sm in SecMaps {
774
775 sm properly aligned
776 if any shadow word is ShR or ShM then .mbHasShared == True
777
778 for each Excl(segid) state
779 map_segments_lookup maps to a sane Segment(seg)
780 for each ShM/ShR(tsetid,lsetid) state
781 each lk in lset is a valid Lock
782 each thr in tset is a valid thread, which is non-dead
783
784 }
785*/
786
787
788/* Return True iff 'thr' holds 'lk' in some mode. */
789static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
790{
791 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000792 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000793 else
794 return False;
795}
796
/* Sanity check Threads, as far as possible: every Thread on the
   admin list must be sane, its write-lockset must be a subset of its
   full lockset, and every Lock in its lockset must be sane and must
   actually record 'thr' as a holder.  Aborts with a diagnostic tag
   on the first violation found. */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
830
831
/* Sanity check Locks, as far as possible */
/* Checks the global lock structures: admin_locks and map_locks agree
   in size; each map_locks entry maps a guest address to a lock whose
   ->guestaddr points back at it; every lock is sane and reachable
   back through map_locks; and every thread recorded in a lock's
   .heldBy bag has that lock in its locksetA (and in its locksetW
   exactly when the lock is w-held).  On failure, prints 'who' plus a
   short numeric failure code and asserts. */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy, 
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk)) 
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW 
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk)) 
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk)) 
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
896
897
sewardjb4112022007-11-09 22:49:28 +0000898static void all_except_Locks__sanity_check ( Char* who ) {
899 stats__sanity_checks++;
900 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
901 threads__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000902 laog__sanity_check(who);
903}
/* Run every sanity check: threads, the lock-order graph, and the
   lock structures themselves. */
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}
908
909
910/*----------------------------------------------------------------*/
911/*--- the core memory state machine (msm__* functions) ---*/
912/*----------------------------------------------------------------*/
913
sewardjd52392d2008-11-08 20:36:26 +0000914//static WordSetID add_BHL ( WordSetID lockset ) {
915// return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
916//}
917//static WordSetID del_BHL ( WordSetID lockset ) {
918// return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
919//}
sewardjb4112022007-11-09 22:49:28 +0000920
921
sewardjd52392d2008-11-08 20:36:26 +0000922///* Last-lock-lossage records. This mechanism exists to help explain
923// to programmers why we are complaining about a race. The idea is to
924// monitor all lockset transitions. When a previously nonempty
925// lockset becomes empty, the lock(s) that just disappeared (the
926// "lossage") are the locks that have consistently protected the
927// location (ga_of_access) in question for the longest time. Most of
928// the time the lossage-set is a single lock. Because the
// lossage-lock is the one that has survived longest, there
// is a good chance that it is indeed the lock that the programmer
931// intended to use to protect the location.
932//
933// Note that we cannot in general just look at the lossage set when we
934// see a transition to ShM(...,empty-set), because a transition to an
935// empty lockset can happen arbitrarily far before the point where we
936// want to report an error. This is in the case where there are many
937// transitions ShR -> ShR, all with an empty lockset, and only later
938// is there a transition to ShM. So what we want to do is note the
939// lossage lock at the point where a ShR -> ShR transition empties out
940// the lockset, so we can present it later if there should be a
941// transition to ShM.
942//
943// So this function finds such transitions. For each, it associates
944// in ga_to_lastlock, the guest address and the lossage lock. In fact
945// we do not record the Lock* directly as that may disappear later,
946// but instead the ExeContext inside the Lock which says where it was
947// initialised or first locked. ExeContexts are permanent so keeping
948// them indefinitely is safe.
949//
950// A boring detail: the hardware bus lock is not interesting in this
951// respect, so we first remove that from the pre/post locksets.
952//*/
953//
954//static UWord stats__ga_LL_adds = 0;
955//
956//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
957//
958//static
959//void record_last_lock_lossage ( Addr ga_of_access,
960// WordSetID lset_old, WordSetID lset_new )
961//{
962// Lock* lk;
963// Int card_old, card_new;
964//
965// tl_assert(lset_old != lset_new);
966//
967// if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
968// (Int)lset_old,
969// HG_(cardinalityWS)(univ_lsets,lset_old),
970// (Int)lset_new,
971// HG_(cardinalityWS)(univ_lsets,lset_new),
972// ga_of_access );
973//
974// /* This is slow, but at least it's simple. The bus hardware lock
975// just confuses the logic, so remove it from the locksets we're
976// considering before doing anything else. */
977// lset_new = del_BHL( lset_new );
978//
979// if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
980// /* The post-transition lock set is not empty. So we are not
981// interested. We're only interested in spotting transitions
982// that make locksets become empty. */
983// return;
984// }
985//
986// /* lset_new is now empty */
987// card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
988// tl_assert(card_new == 0);
989//
990// lset_old = del_BHL( lset_old );
991// card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
992//
993// if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
994// (Int)lset_old, card_old, (Int)lset_new, card_new );
995//
996// if (card_old == 0) {
997// /* The old lockset was also empty. Not interesting. */
998// return;
999// }
1000//
1001// tl_assert(card_old > 0);
1002// tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
1003//
1004// /* Now we know we've got a transition from a nonempty lockset to an
1005// empty one. So lset_old must be the set of locks lost. Record
1006// some details. If there is more than one element in the lossage
1007// set, just choose one arbitrarily -- not the best, but at least
1008// it's simple. */
1009//
1010// lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
1011// if (0) VG_(printf)("lossage %ld %p\n",
1012// HG_(cardinalityWS)( univ_lsets, lset_old), lk );
1013// if (lk->appeared_at) {
1014// if (ga_to_lastlock == NULL)
1015// ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
1016// VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
1017// stats__ga_LL_adds++;
1018// }
1019//}
1020//
1021///* This queries the table (ga_to_lastlock) made by
1022// record_last_lock_lossage, when constructing error messages. It
1023// attempts to find the ExeContext of the allocation or initialisation
1024// point for the lossage lock associated with 'ga'. */
1025//
1026//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
1027//{
1028// ExeContext* ec_hint = NULL;
1029// if (ga_to_lastlock != NULL
1030// && VG_(lookupFM)(ga_to_lastlock,
1031// NULL, (Word*)&ec_hint, ga)) {
1032// tl_assert(ec_hint != NULL);
1033// return ec_hint;
1034// } else {
1035// return NULL;
1036// }
1037//}
sewardjb4112022007-11-09 22:49:28 +00001038
1039
sewardjb4112022007-11-09 22:49:28 +00001040/*----------------------------------------------------------------*/
1041/*--- Shadow value and address range handlers ---*/
1042/*----------------------------------------------------------------*/
1043
1044static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001045//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001046static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001047__attribute__((noinline))
1048static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001049
sewardjb4112022007-11-09 22:49:28 +00001050
1051/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +00001052/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1053 Is that a problem? (hence 'scopy' rather than 'ccopy') */
1054static void shadow_mem_scopy_range ( Thread* thr,
1055 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +00001056{
1057 Thr* hbthr = thr->hbthr;
1058 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001059 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +00001060}
1061
sewardj23f12002009-07-24 08:45:08 +00001062static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1063{
sewardjf98e1c02008-10-25 16:22:41 +00001064 Thr* hbthr = thr->hbthr;
1065 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001066 LIBHB_CREAD_N(hbthr, a, len);
1067}
1068
1069static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1070 Thr* hbthr = thr->hbthr;
1071 tl_assert(hbthr);
1072 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001073}
1074
1075static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1076{
sewardj23f12002009-07-24 08:45:08 +00001077 libhb_srange_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001078}
1079
sewardjb4112022007-11-09 22:49:28 +00001080static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
1081{
sewardjb4112022007-11-09 22:49:28 +00001082 if (0 && len > 500)
barta0b6b2c2008-07-07 06:49:24 +00001083 VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
sewardj23f12002009-07-24 08:45:08 +00001084 libhb_srange_noaccess( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001085}
1086
1087
1088/*----------------------------------------------------------------*/
1089/*--- Event handlers (evh__* functions) ---*/
1090/*--- plus helpers (evhH__* functions) ---*/
1091/*----------------------------------------------------------------*/
1092
1093/*--------- Event handler helpers (evhH__* functions) ---------*/
1094
1095/* Create a new segment for 'thr', making it depend (.prev) on its
1096 existing segment, bind together the SegmentID and Segment, and
1097 return both of them. Also update 'thr' so it references the new
1098 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001099//zz static
1100//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1101//zz /*OUT*/Segment** new_segP,
1102//zz Thread* thr )
1103//zz {
1104//zz Segment* cur_seg;
1105//zz tl_assert(new_segP);
1106//zz tl_assert(new_segidP);
1107//zz tl_assert(HG_(is_sane_Thread)(thr));
1108//zz cur_seg = map_segments_lookup( thr->csegid );
1109//zz tl_assert(cur_seg);
1110//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1111//zz at their owner thread. */
1112//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1113//zz *new_segidP = alloc_SegmentID();
1114//zz map_segments_add( *new_segidP, *new_segP );
1115//zz thr->csegid = *new_segidP;
1116//zz }
sewardjb4112022007-11-09 22:49:28 +00001117
1118
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
/* 'thr' is the acquiring thread; 'lkk' is the lock kind to use if a
   Lock record has to be created for 'lock_ga' on the fly.  Called
   only after the real (libpthread-level) acquisition succeeded. */
static 
void evhH__post_thread_w_acquires_lock ( Thread* thr, 
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk; 

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine. 

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs (vector clocks) */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1212
1213
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
/* 'thr' is the acquiring thread; 'lkk' must be LK_rdwr since only
   reader-writer locks can be read-locked.  Called only after the real
   (libpthread-level) acquisition succeeded. */
static 
void evhH__post_thread_r_acquires_lock ( Thread* thr, 
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk; 

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine. 

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1285
1286
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
/* Unlike the acquire handlers, this runs BEFORE libpthread has
   validated the unlock, so client errors (unlock of an unheld or
   foreign-held lock, or with the wrong lock type) are detected and
   reported here rather than asserted. */
static 
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word n;
   Bool was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr )); 
      /* We still hold the lock.  So either it's a recursive lock 
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1425
1426
sewardj9f569b72008-11-13 13:33:09 +00001427/* ---------------------------------------------------------- */
1428/* -------- Event handlers proper (evh__* functions) -------- */
1429/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001430
1431/* What is the Thread* for the currently running thread? This is
1432 absolutely performance critical. We receive notifications from the
1433 core for client code starts/stops, and cache the looked-up result
1434 in 'current_Thread'. Hence, for the vast majority of requests,
1435 finding the current thread reduces to a read of a global variable,
1436 provided get_current_Thread_in_C_C is inlined.
1437
1438 Outside of client code, current_Thread is NULL, and presumably
1439 any uses of it will cause a segfault. Hence:
1440
1441 - for uses definitely within client code, use
1442 get_current_Thread_in_C_C.
1443
1444 - for all other uses, use get_current_Thread.
1445*/
1446
sewardj23f12002009-07-24 08:45:08 +00001447static Thread *current_Thread = NULL,
1448 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001449
1450static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1451 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1452 tl_assert(current_Thread == NULL);
1453 current_Thread = map_threads_lookup( tid );
1454 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001455 if (current_Thread != current_Thread_prev) {
1456 libhb_Thr_resumes( current_Thread->hbthr );
1457 current_Thread_prev = current_Thread;
1458 }
sewardjb4112022007-11-09 22:49:28 +00001459}
/* Core notification: thread 'tid' has stopped running client code.
   Invalidate the current_Thread cache and give libhb a chance to
   garbage-collect. */
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
/* Fast path: return the cached current thread.  Valid (non-NULL)
   only while executing client code; see comment above. */
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
1469static inline Thread* get_current_Thread ( void ) {
1470 ThreadId coretid;
1471 Thread* thr;
1472 thr = get_current_Thread_in_C_C();
1473 if (LIKELY(thr))
1474 return thr;
1475 /* evidently not in client code. Do it the slow way. */
1476 coretid = VG_(get_running_tid)();
1477 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001478 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001479 of initial memory layout) and VG_(get_running_tid)() returns
1480 VG_INVALID_THREADID at that point. */
1481 if (coretid == VG_INVALID_THREADID)
1482 coretid = 1; /* KLUDGE */
1483 thr = map_threads_lookup( coretid );
1484 return thr;
1485}
1486
1487static
1488void evh__new_mem ( Addr a, SizeT len ) {
1489 if (SHOW_EVENTS >= 2)
1490 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1491 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001492 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001493 all__sanity_check("evh__new_mem-post");
1494}
1495
1496static
sewardj7cf4e6b2008-05-01 20:24:26 +00001497void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1498 if (SHOW_EVENTS >= 2)
1499 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1500 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001501 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001502 all__sanity_check("evh__new_mem_w_tid-post");
1503}
1504
1505static
sewardjb4112022007-11-09 22:49:28 +00001506void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001507 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001508 if (SHOW_EVENTS >= 1)
1509 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1510 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1511 if (rr || ww || xx)
1512 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001513 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001514 all__sanity_check("evh__new_mem_w_perms-post");
1515}
1516
1517static
1518void evh__set_perms ( Addr a, SizeT len,
1519 Bool rr, Bool ww, Bool xx ) {
1520 if (SHOW_EVENTS >= 1)
1521 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1522 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1523 /* Hmm. What should we do here, that actually makes any sense?
1524 Let's say: if neither readable nor writable, then declare it
1525 NoAccess, else leave it alone. */
1526 if (!(rr || ww))
1527 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001528 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001529 all__sanity_check("evh__set_perms-post");
1530}
1531
1532static
1533void evh__die_mem ( Addr a, SizeT len ) {
1534 if (SHOW_EVENTS >= 2)
1535 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1536 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001537 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001538 all__sanity_check("evh__die_mem-post");
1539}
1540
1541static
sewardj23f12002009-07-24 08:45:08 +00001542void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1543 if (SHOW_EVENTS >= 2)
1544 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1545 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1546 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1547 all__sanity_check("evh__copy_mem-post");
1548}
1549
/* The core is about to create thread 'child' on behalf of 'parent'.
   Create a libhb thread derived from the parent's (so the child
   happens-after the parent's current point), wrap it in a new Thread
   record, and bind that into map_threads under 'child'. */
static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   /* parent is VG_INVALID_THREADID only for the boot thread; in that
      case there is nothing to derive from, so do nothing. */
   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      /* the parent must already be registered; the child slot must
         still be free */
      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call.  Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this.  So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info.  Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}
1612
/* Low-level notification that thread 'quit_tid' is exiting.  Report
   still-held locks, tell libhb the thread is gone, and free up the
   map_threads slot for core re-use. */
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* fixed-format message; 80 bytes is comfortably enough */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                        nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1663
sewardjf98e1c02008-10-25 16:22:41 +00001664
/* A pthread_join by 'stay_tid' on already-exited thread 'quit_thr'
   has completed.  Manufacture a happens-before edge from the quitter
   to the stayer via a temporary SO. */
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* NOTE(review): an earlier comment here claimed the last arg of
      _so_send should be False, on the grounds that the sending thread
      no longer exists and so stack snapshots should not be taken of
      it -- yet the code passes True (strong_send).  Confirm which is
      intended. */
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1720
1721static
1722void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1723 Addr a, SizeT size) {
1724 if (SHOW_EVENTS >= 2
1725 || (SHOW_EVENTS >= 1 && size != 1))
1726 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1727 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001728 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001729 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001730 all__sanity_check("evh__pre_mem_read-post");
1731}
1732
1733static
1734void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1735 Char* s, Addr a ) {
1736 Int len;
1737 if (SHOW_EVENTS >= 1)
1738 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1739 (Int)tid, s, (void*)a );
1740 // FIXME: think of a less ugly hack
1741 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001742 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001743 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001744 all__sanity_check("evh__pre_mem_read_asciiz-post");
1745}
1746
1747static
1748void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1749 Addr a, SizeT size ) {
1750 if (SHOW_EVENTS >= 1)
1751 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1752 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001753 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001754 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001755 all__sanity_check("evh__pre_mem_write-post");
1756}
1757
1758static
1759void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1760 if (SHOW_EVENTS >= 1)
1761 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1762 (void*)a, len, (Int)is_inited );
1763 // FIXME: this is kinda stupid
1764 if (is_inited) {
1765 shadow_mem_make_New(get_current_Thread(), a, len);
1766 } else {
1767 shadow_mem_make_New(get_current_Thread(), a, len);
1768 }
sewardjf98e1c02008-10-25 16:22:41 +00001769 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001770 all__sanity_check("evh__pre_mem_read-post");
1771}
1772
1773static
1774void evh__die_mem_heap ( Addr a, SizeT len ) {
1775 if (SHOW_EVENTS >= 1)
1776 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1777 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001778 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001779 all__sanity_check("evh__pre_mem_read-post");
1780}
1781
sewardj23f12002009-07-24 08:45:08 +00001782/* --- Event handlers called from generated code --- */
1783
sewardjb4112022007-11-09 22:49:28 +00001784static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001785void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001786 Thread* thr = get_current_Thread_in_C_C();
1787 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001788 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001789}
sewardjf98e1c02008-10-25 16:22:41 +00001790
sewardjb4112022007-11-09 22:49:28 +00001791static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001792void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001793 Thread* thr = get_current_Thread_in_C_C();
1794 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001795 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001796}
sewardjf98e1c02008-10-25 16:22:41 +00001797
sewardjb4112022007-11-09 22:49:28 +00001798static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001799void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001800 Thread* thr = get_current_Thread_in_C_C();
1801 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001802 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001803}
sewardjf98e1c02008-10-25 16:22:41 +00001804
sewardjb4112022007-11-09 22:49:28 +00001805static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001806void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001807 Thread* thr = get_current_Thread_in_C_C();
1808 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001809 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001810}
sewardjf98e1c02008-10-25 16:22:41 +00001811
sewardjb4112022007-11-09 22:49:28 +00001812static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001813void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001814 Thread* thr = get_current_Thread_in_C_C();
1815 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001816 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001817}
1818
1819static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001820void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001821 Thread* thr = get_current_Thread_in_C_C();
1822 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001823 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001824}
sewardjf98e1c02008-10-25 16:22:41 +00001825
sewardjb4112022007-11-09 22:49:28 +00001826static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001827void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001828 Thread* thr = get_current_Thread_in_C_C();
1829 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001830 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001831}
sewardjf98e1c02008-10-25 16:22:41 +00001832
sewardjb4112022007-11-09 22:49:28 +00001833static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001834void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001835 Thread* thr = get_current_Thread_in_C_C();
1836 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001837 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001838}
sewardjf98e1c02008-10-25 16:22:41 +00001839
sewardjb4112022007-11-09 22:49:28 +00001840static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001841void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001842 Thread* thr = get_current_Thread_in_C_C();
1843 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001844 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001845}
sewardjf98e1c02008-10-25 16:22:41 +00001846
sewardjb4112022007-11-09 22:49:28 +00001847static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001848void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001849 Thread* thr = get_current_Thread_in_C_C();
1850 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001851 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001852}
1853
sewardjb4112022007-11-09 22:49:28 +00001854
sewardj9f569b72008-11-13 13:33:09 +00001855/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001856/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001857/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001858
1859/* EXPOSITION only: by intercepting lock init events we can show the
1860 user where the lock was initialised, rather than only being able to
1861 show where it was first locked. Intercepting lock initialisations
1862 is not necessary for the basic operation of the race checker. */
1863static
1864void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1865 void* mutex, Word mbRec )
1866{
1867 if (SHOW_EVENTS >= 1)
1868 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1869 (Int)tid, mbRec, (void*)mutex );
1870 tl_assert(mbRec == 0 || mbRec == 1);
1871 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1872 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001873 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001874 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1875}
1876
/* pthread_mutex_destroy is about to run on 'mutex'.  Validate the
   argument, force-release the lock if it is (erroneously) still held,
   and remove all tool-side state for it. */
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* destroying an unknown lock, or one registered as a rwlock or
      other non-mutex kind, is a client error */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* drop it from the lock-order graph, then from the lock map,
         then free the record itself -- order matters here */
      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1922
1923static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1924 void* mutex, Word isTryLock )
1925{
1926 /* Just check the mutex is sane; nothing else to do. */
1927 // 'mutex' may be invalid - not checked by wrapper
1928 Thread* thr;
1929 Lock* lk;
1930 if (SHOW_EVENTS >= 1)
1931 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1932 (Int)tid, (void*)mutex );
1933
1934 tl_assert(isTryLock == 0 || isTryLock == 1);
1935 thr = map_threads_maybe_lookup( tid );
1936 tl_assert(thr); /* cannot fail - Thread* must already exist */
1937
1938 lk = map_locks_maybe_lookup( (Addr)mutex );
1939
1940 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001941 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1942 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001943 }
1944
1945 if ( lk
1946 && isTryLock == 0
1947 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1948 && lk->heldBy
1949 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001950 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001951 /* uh, it's a non-recursive lock and we already w-hold it, and
1952 this is a real lock operation (not a speculative "tryLock"
1953 kind of thing). Duh. Deadlock coming up; but at least
1954 produce an error message. */
sewardjf98e1c02008-10-25 16:22:41 +00001955 HG_(record_error_Misc)( thr, "Attempt to re-lock a "
1956 "non-recursive lock I already hold" );
sewardjb4112022007-11-09 22:49:28 +00001957 }
1958}
1959
1960static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1961{
1962 // only called if the real library call succeeded - so mutex is sane
1963 Thread* thr;
1964 if (SHOW_EVENTS >= 1)
1965 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1966 (Int)tid, (void*)mutex );
1967
1968 thr = map_threads_maybe_lookup( tid );
1969 tl_assert(thr); /* cannot fail - Thread* must already exist */
1970
1971 evhH__post_thread_w_acquires_lock(
1972 thr,
1973 LK_mbRec, /* if not known, create new lock with this LockKind */
1974 (Addr)mutex
1975 );
1976}
1977
1978static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1979{
1980 // 'mutex' may be invalid - not checked by wrapper
1981 Thread* thr;
1982 if (SHOW_EVENTS >= 1)
1983 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1984 (Int)tid, (void*)mutex );
1985
1986 thr = map_threads_maybe_lookup( tid );
1987 tl_assert(thr); /* cannot fail - Thread* must already exist */
1988
1989 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1990}
1991
1992static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1993{
1994 // only called if the real library call succeeded - so mutex is sane
1995 Thread* thr;
1996 if (SHOW_EVENTS >= 1)
1997 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1998 (Int)tid, (void*)mutex );
1999 thr = map_threads_maybe_lookup( tid );
2000 tl_assert(thr); /* cannot fail - Thread* must already exist */
2001
2002 // anything we should do here?
2003}
2004
2005
sewardj9f569b72008-11-13 13:33:09 +00002006/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002007/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002008/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002009
sewardj02114542009-07-28 20:52:36 +00002010/* A mapping from CV to (the SO associated with it, plus some
2011 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002012 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2013 wait on it completes, we do a 'recv' from the SO. This is believed
2014 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002015 signallings/broadcasts.
2016*/
2017
sewardj02114542009-07-28 20:52:36 +00002018/* .so is the SO for this CV.
2019 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002020
sewardj02114542009-07-28 20:52:36 +00002021 POSIX says effectively that the first pthread_cond_{timed}wait call
2022 causes a dynamic binding between the CV and the mutex, and that
2023 lasts until such time as the waiter count falls to zero. Hence
2024 need to keep track of the number of waiters in order to do
2025 consistency tracking. */
/* Per-condition-variable auxiliary record: the SO through which
   happens-before edges flow, plus the (CV,mutex) binding state used
   for consistency checking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;


/* pthread_cond_t* -> CVInfo* */
/* Lazily initialised by map_cond_to_CVInfo_INIT. */
static WordFM* map_cond_to_CVInfo = NULL;
2037
2038static void map_cond_to_CVInfo_INIT ( void ) {
2039 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2040 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2041 "hg.mctCI.1", HG_(free), NULL );
2042 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002043 }
2044}
2045
sewardj02114542009-07-28 20:52:36 +00002046static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002047 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002048 map_cond_to_CVInfo_INIT();
2049 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002050 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002051 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002052 } else {
sewardj02114542009-07-28 20:52:36 +00002053 SO* so = libhb_so_alloc();
2054 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2055 cvi->so = so;
2056 cvi->mx_ga = 0;
2057 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2058 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002059 }
2060}
2061
sewardj02114542009-07-28 20:52:36 +00002062static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002063 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002064 map_cond_to_CVInfo_INIT();
2065 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2066 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002067 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002068 tl_assert(cvi);
2069 tl_assert(cvi->so);
2070 libhb_so_dealloc(cvi->so);
2071 cvi->mx_ga = 0;
2072 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002073 }
2074}
2075
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."
   //
   // For the moment, disable these checks.
   //lk = map_locks_maybe_lookup(cvi->mx_ga);
   //if (lk == NULL || cvi->mx_ga == 0) {
   //   HG_(record_error_Misc)( thr,
   //      "pthread_cond_{signal,broadcast}: "
   //      "no or invalid mutex associated with cond");
   //}
   ///* note: lk could be NULL.  Be careful. */
   //if (lk) {
   //   if (lk->kind == LK_rdwr) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
   //   }
   //   if (lk->heldBy == NULL) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by any thread");
   //   }
   //   if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by calling thread");
   //   }
   //}

   /* Generate the happens-before edge: send on the CV's SO. */
   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2133
/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         /* rwlocks cannot legally be used with condition variables */
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
         if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         /* held, but not by the waiting thread */
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   /* the binding persists while nWaiters > 0 (see comment above the
      CVInfo typedef) */
   cvi->nWaiters++;

   return lk_valid;
}
2202
2203static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2204 void* cond, void* mutex )
2205{
sewardjf98e1c02008-10-25 16:22:41 +00002206 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2207 the SO for this cond, and 'recv' from it so as to acquire a
2208 dependency edge back to the signaller/broadcaster. */
2209 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002210 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002211
2212 if (SHOW_EVENTS >= 1)
2213 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2214 "(ctid=%d, cond=%p, mutex=%p)\n",
2215 (Int)tid, (void*)cond, (void*)mutex );
2216
sewardjb4112022007-11-09 22:49:28 +00002217 thr = map_threads_maybe_lookup( tid );
2218 tl_assert(thr); /* cannot fail - Thread* must already exist */
2219
2220 // error-if: cond is also associated with a different mutex
2221
sewardj02114542009-07-28 20:52:36 +00002222 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2223 tl_assert(cvi);
2224 tl_assert(cvi->so);
2225 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002226
sewardj02114542009-07-28 20:52:36 +00002227 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002228 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2229 it? If this happened it would surely be a bug in the threads
2230 library. Or one of those fabled "spurious wakeups". */
2231 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2232 "succeeded on"
2233 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002234 }
sewardjf98e1c02008-10-25 16:22:41 +00002235
2236 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002237 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2238
2239 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002240}
2241
/* Handle pthread_cond_destroy(cond).  tid is the destroying thread,
   cond the guest address of the condition variable. */
static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events.  The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   /* Drop the cond -> CVInfo binding (frees the CVInfo and its SO). */
   map_cond_to_CVInfo_delete( cond );
}
2255
2256
sewardj9f569b72008-11-13 13:33:09 +00002257/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002258/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002259/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002260
/* EXPOSITION only */
/* Handle successful pthread_rwlock_init: record a Lock of kind
   LK_rdwr at guest address 'rwl', so later errors can cite the
   allocation point.  No happens-before state is created here. */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   /* optional expensive structural check, gated by --hg-sanity-flags */
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}
2272
/* Handle pthread_rwlock_destroy(rwl): complain if the argument is not
   a known rwlock or is still held, then forcibly release it and
   remove all shadow state (lock map entry, LAOG node, LockN). */
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* drop the lock's node (and incident edges) from the
         lock-order graph before freeing the Lock itself */
      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   /* optional expensive structural check, gated by --hg-sanity-flags */
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2318
/* Handle entry to pthread_rwlock_{rd,wr,tryrd,trywr}lock.  isW is 1
   for a write-lock attempt, 0 for read; isTryLock is 1 for the try
   variants.  Only sanity-checks the argument; the lockset update
   happens in the _POST handler once the real call has succeeded. */
static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock.  Duh.  The address was previously seen as
         a plain mutex, so the client is mixing up its lock types. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,rw}lock with a "
              "pthread_mutex_t* argument " );
   }
}
2346
/* Handle successful acquisition of a rwlock.  isW selects whether the
   write-acquire or read-acquire bookkeeping is applied. */
static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   /* dispatch to the writer/reader acquire handler via a function
      pointer selected by isW */
   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}
2367
2368static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2369{
2370 // 'rwl' may be invalid - not checked by wrapper
2371 Thread* thr;
2372 if (SHOW_EVENTS >= 1)
2373 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2374 (Int)tid, (void*)rwl );
2375
2376 thr = map_threads_maybe_lookup( tid );
2377 tl_assert(thr); /* cannot fail - Thread* must already exist */
2378
2379 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2380}
2381
2382static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2383{
2384 // only called if the real library call succeeded - so mutex is sane
2385 Thread* thr;
2386 if (SHOW_EVENTS >= 1)
2387 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2388 (Int)tid, (void*)rwl );
2389 thr = map_threads_maybe_lookup( tid );
2390 tl_assert(thr); /* cannot fail - Thread* must already exist */
2391
2392 // anything we should do here?
2393}
2394
2395
sewardj9f569b72008-11-13 13:33:09 +00002396/* ---------------------------------------------------------- */
2397/* -------------- events to do with semaphores -------------- */
2398/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002399
sewardj11e352f2007-11-30 11:11:02 +00002400/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002401 variables. */
2402
sewardjf98e1c02008-10-25 16:22:41 +00002403/* For each semaphore, we maintain a stack of SOs. When a 'post'
2404 operation is done on a semaphore (unlocking, essentially), a new SO
2405 is created for the posting thread, the posting thread does a strong
2406 send to it (which merely installs the posting thread's VC in the
2407 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002408
2409 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002410 semaphore, we pop a SO off the semaphore's stack (which should be
2411 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002412 dependencies between posters and waiters of the semaphore.
2413
sewardjf98e1c02008-10-25 16:22:41 +00002414 It may not be necessary to use a stack - perhaps a bag of SOs would
2415 do. But we do need to keep track of how many unused-up posts have
2416 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002417
sewardjf98e1c02008-10-25 16:22:41 +00002418 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002419 twice on S. T3 cannot complete its waits without both T1 and T2
2420 posting. The above mechanism will ensure that T3 acquires
2421 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002422
sewardjf98e1c02008-10-25 16:22:41 +00002423 When a semaphore is initialised with value N, we do as if we'd
2424 posted N times on the semaphore: basically create N SOs and do a
2425 strong send to all of then. This allows up to N waits on the
2426 semaphore to acquire a dependency on the initialisation point,
2427 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002428
2429 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2430 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002431*/
2432
sewardjf98e1c02008-10-25 16:22:41 +00002433/* sem_t* -> XArray* SO* */
2434static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002435
sewardjf98e1c02008-10-25 16:22:41 +00002436static void map_sem_to_SO_stack_INIT ( void ) {
2437 if (map_sem_to_SO_stack == NULL) {
2438 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2439 HG_(free), NULL );
2440 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002441 }
2442}
2443
sewardjf98e1c02008-10-25 16:22:41 +00002444static void push_SO_for_sem ( void* sem, SO* so ) {
2445 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002446 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002447 tl_assert(so);
2448 map_sem_to_SO_stack_INIT();
2449 if (VG_(lookupFM)( map_sem_to_SO_stack,
2450 &keyW, (UWord*)&xa, (UWord)sem )) {
2451 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002452 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002453 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002454 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002455 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2456 VG_(addToXA)( xa, &so );
2457 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002458 }
2459}
2460
sewardjf98e1c02008-10-25 16:22:41 +00002461static SO* mb_pop_SO_for_sem ( void* sem ) {
2462 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002463 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002464 SO* so;
2465 map_sem_to_SO_stack_INIT();
2466 if (VG_(lookupFM)( map_sem_to_SO_stack,
2467 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002468 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002469 Word sz;
2470 tl_assert(keyW == (UWord)sem);
2471 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002472 tl_assert(sz >= 0);
2473 if (sz == 0)
2474 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002475 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2476 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002477 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002478 return so;
sewardjb4112022007-11-09 22:49:28 +00002479 } else {
2480 /* hmm, that's odd. No stack for this semaphore. */
2481 return NULL;
2482 }
2483}
2484
sewardj11e352f2007-11-30 11:11:02 +00002485static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002486{
sewardjf98e1c02008-10-25 16:22:41 +00002487 UWord keyW, valW;
2488 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002489
sewardjb4112022007-11-09 22:49:28 +00002490 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002491 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002492 (Int)tid, (void*)sem );
2493
sewardjf98e1c02008-10-25 16:22:41 +00002494 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002495
sewardjf98e1c02008-10-25 16:22:41 +00002496 /* Empty out the semaphore's SO stack. This way of doing it is
2497 stupid, but at least it's easy. */
2498 while (1) {
2499 so = mb_pop_SO_for_sem( sem );
2500 if (!so) break;
2501 libhb_so_dealloc(so);
2502 }
2503
2504 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2505 XArray* xa = (XArray*)valW;
2506 tl_assert(keyW == (UWord)sem);
2507 tl_assert(xa);
2508 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2509 VG_(deleteXA)(xa);
2510 }
sewardjb4112022007-11-09 22:49:28 +00002511}
2512
sewardj11e352f2007-11-30 11:11:02 +00002513static
2514void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2515{
sewardjf98e1c02008-10-25 16:22:41 +00002516 SO* so;
2517 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002518
2519 if (SHOW_EVENTS >= 1)
2520 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2521 (Int)tid, (void*)sem, value );
2522
sewardjf98e1c02008-10-25 16:22:41 +00002523 thr = map_threads_maybe_lookup( tid );
2524 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002525
sewardjf98e1c02008-10-25 16:22:41 +00002526 /* Empty out the semaphore's SO stack. This way of doing it is
2527 stupid, but at least it's easy. */
2528 while (1) {
2529 so = mb_pop_SO_for_sem( sem );
2530 if (!so) break;
2531 libhb_so_dealloc(so);
2532 }
sewardj11e352f2007-11-30 11:11:02 +00002533
sewardjf98e1c02008-10-25 16:22:41 +00002534 /* If we don't do this check, the following while loop runs us out
2535 of memory for stupid initial values of 'value'. */
2536 if (value > 10000) {
2537 HG_(record_error_Misc)(
2538 thr, "sem_init: initial value exceeds 10000; using 10000" );
2539 value = 10000;
2540 }
sewardj11e352f2007-11-30 11:11:02 +00002541
sewardjf98e1c02008-10-25 16:22:41 +00002542 /* Now create 'valid' new SOs for the thread, do a strong send to
2543 each of them, and push them all on the stack. */
2544 for (; value > 0; value--) {
2545 Thr* hbthr = thr->hbthr;
2546 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002547
sewardjf98e1c02008-10-25 16:22:41 +00002548 so = libhb_so_alloc();
2549 libhb_so_send( hbthr, so, True/*strong send*/ );
2550 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002551 }
2552}
2553
static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
{
   /* 'tid' has posted on 'sem'.  Create a new SO, do a strong send to
      it (iow, write our VC into it, then tick ours), and push the SO
      on on a stack of SOs associated with 'sem'.  This is later used
      by other thread(s) which successfully exit from a sem_wait on
      the same sem; by doing a strong recv from SOs popped of the
      stack, they acquire dependencies on the posting thread
      segment(s). */

   Thread* thr;
   SO*     so;
   Thr*    hbthr;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
                  (Int)tid, (void*)sem );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: sem is bogus

   /* hbthr is this thread's happens-before (libhb) representation */
   hbthr = thr->hbthr;
   tl_assert(hbthr);

   so = libhb_so_alloc();
   libhb_so_send( hbthr, so, True/*strong send*/ );
   push_SO_for_sem( sem, so );
}
2584
sewardj11e352f2007-11-30 11:11:02 +00002585static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002586{
sewardjf98e1c02008-10-25 16:22:41 +00002587 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2588 the 'sem' from this semaphore's SO-stack, and do a strong recv
2589 from it. This creates a dependency back to one of the post-ers
2590 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002591
sewardjf98e1c02008-10-25 16:22:41 +00002592 Thread* thr;
2593 SO* so;
2594 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002595
2596 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002597 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002598 (Int)tid, (void*)sem );
2599
2600 thr = map_threads_maybe_lookup( tid );
2601 tl_assert(thr); /* cannot fail - Thread* must already exist */
2602
2603 // error-if: sem is bogus
2604
sewardjf98e1c02008-10-25 16:22:41 +00002605 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002606
sewardjf98e1c02008-10-25 16:22:41 +00002607 if (so) {
2608 hbthr = thr->hbthr;
2609 tl_assert(hbthr);
2610
2611 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2612 libhb_so_dealloc(so);
2613 } else {
2614 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2615 If this happened it would surely be a bug in the threads
2616 library. */
2617 HG_(record_error_Misc)(
2618 thr, "Bug in libpthread: sem_wait succeeded on"
2619 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002620 }
2621}
2622
2623
sewardj9f569b72008-11-13 13:33:09 +00002624/* -------------------------------------------------------- */
2625/* -------------- events to do with barriers -------------- */
2626/* -------------------------------------------------------- */
2627
/* Per-barrier shadow state.  One Bar exists for each
   pthread_barrier_t the client has touched. */
typedef
   struct {
      Bool    initted; /* has it yet been initted by guest? */
      UWord   size;    /* declared size */
      XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;
2635
2636static Bar* new_Bar ( void ) {
2637 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2638 tl_assert(bar);
2639 /* all fields are zero */
2640 tl_assert(bar->initted == False);
2641 return bar;
2642}
2643
2644static void delete_Bar ( Bar* bar ) {
2645 tl_assert(bar);
2646 if (bar->waiting)
2647 VG_(deleteXA)(bar->waiting);
2648 HG_(free)(bar);
2649}
2650
/* A mapping which stores auxiliary data for barriers. */

/* pthread_barrier_t* -> Bar* */
/* Lazily created by map_barrier_to_Bar_INIT(). */
static WordFM* map_barrier_to_Bar = NULL;
2655
2656static void map_barrier_to_Bar_INIT ( void ) {
2657 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2658 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2659 "hg.mbtBI.1", HG_(free), NULL );
2660 tl_assert(map_barrier_to_Bar != NULL);
2661 }
2662}
2663
2664static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2665 UWord key, val;
2666 map_barrier_to_Bar_INIT();
2667 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2668 tl_assert(key == (UWord)barrier);
2669 return (Bar*)val;
2670 } else {
2671 Bar* bar = new_Bar();
2672 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2673 return bar;
2674 }
2675}
2676
2677static void map_barrier_to_Bar_delete ( void* barrier ) {
2678 UWord keyW, valW;
2679 map_barrier_to_Bar_INIT();
2680 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2681 Bar* bar = (Bar*)valW;
2682 tl_assert(keyW == (UWord)barrier);
2683 delete_Bar(bar);
2684 }
2685}
2686
2687
/* Handle pthread_barrier_init(barrier, _, count): complain about a
   zero count or re-initialisation, then (re)set the Bar's declared
   size and empty waiting list. */
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count )
{
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu)\n",
                  (Int)tid, (void*)barrier, count );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      /* waiting can only be nonempty if the barrier was initted */
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      /* discard the stale waiters so the barrier restarts cleanly */
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   /* record the declared size; _WAIT_PRE releases everyone once this
      many threads have arrived */
   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted = True;
   bar->size    = count;
}
2735
2736
/* Handle pthread_barrier_destroy(barrier): report destroy-before-init
   and destroy-while-threads-waiting, then free the shadow state. */
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
2779
2780
/* Handle entry to pthread_barrier_wait(barrier).  Records the caller
   as a waiter; when the last expected waiter arrives, joins all
   waiters' vector clocks through a temporary SO.  See the long
   comment below for why doing this in the _PRE handler is correct. */
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
  /* This function gets called after a client thread calls
     pthread_barrier_wait but before it arrives at the real
     pthread_barrier_wait.

     Why is the following correct?  It's a bit subtle.

     If this is not the last thread arriving at the barrier, we simply
     note its presence and return.  Because valgrind (at least as of
     Nov 08) is single threaded, we are guaranteed safe from any race
     conditions when in this function -- no other client threads are
     running.

     If this is the last thread, then we are again the only running
     thread.  All the other threads will have either arrived at the
     real pthread_barrier_wait or are on their way to it, but in any
     case are guaranteed not to be able to move past it, because this
     thread is currently in this function and so has not yet arrived
     at the real pthread_barrier_wait.  That means that:

     1. While we are in this function, none of the other threads
        waiting at the barrier can move past it.

     2. When this function returns (and simulated execution resumes),
        this thread and all other waiting threads will be able to move
        past the real barrier.

     Because of this, it is now safe to update the vector clocks of
     all threads, to represent the fact that they all arrived at the
     barrier and have all moved on.  There is no danger of any
     complications to do with some threads leaving the barrier and
     racing back round to the front, whilst others are still leaving
     (which is the primary source of complication in correct handling/
     implementation of barriers).  That can't happen because we update
     here our data structures so as to indicate that the threads have
     passed the barrier, even though, as per (2) above, they are
     guaranteed not to pass the barrier until we return.

     This relies crucially on Valgrind being single threaded.  If that
     changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   SO*     so;
   UWord   present, i;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n",
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   /* not the last arriver: just record presence and return */
   if (present < bar->size)
      return;

   /* All the threads have arrived.  Now do the Interesting Bit.  Get
      a new synchronisation object and do a weak send to it from all
      the participating threads.  This makes its vector clocks be the
      join of all the individual threads' vector clocks.  Then do a
      strong receive from it back to all threads, so that their VCs
      are a copy of it (hence are all equal to the join of their
      original VCs.) */
   so = libhb_so_alloc();

   /* XXX check ->waiting has no duplicates */

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
2894
2895
sewardjb4112022007-11-09 22:49:28 +00002896/*--------------------------------------------------------------*/
2897/*--- Lock acquisition order monitoring ---*/
2898/*--------------------------------------------------------------*/
2899
2900/* FIXME: here are some optimisations still to do in
2901 laog__pre_thread_acquires_lock.
2902
2903 The graph is structured so that if L1 --*--> L2 then L1 must be
2904 acquired before L2.
2905
2906 The common case is that some thread T holds (eg) L1 L2 and L3 and
2907 is repeatedly acquiring and releasing Ln, and there is no ordering
2908 error in what it is doing. Hence it repeatly:
2909
2910 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
2911 produces the answer No (because there is no error).
2912
2913 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
2914 (because they already got added the first time T acquired Ln).
2915
2916 Hence cache these two events:
2917
2918 (1) Cache result of the query from last time. Invalidate the cache
2919 any time any edges are added to or deleted from laog.
2920
2921 (2) Cache these add-edge requests and ignore them if said edges
2922 have already been added to laog. Invalidate the cache any time
2923 any edges are deleted from laog.
2924*/
2925
/* Per-lock node in the lock-order graph: the sets of locks with
   edges into and out of this lock. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;
2932
/* lock order acquisition graph */
/* Lazily created by laog__init(); edge L1 --> L2 records that L1 must
   be acquired before L2. */
static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
2935
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
2947
sewardj250ec2e2008-02-15 22:02:30 +00002948static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00002949 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
2950 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
2951 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
2952 if (llx1->src_ga < llx2->src_ga) return -1;
2953 if (llx1->src_ga > llx2->src_ga) return 1;
2954 if (llx1->dst_ga < llx2->dst_ga) return -1;
2955 if (llx1->dst_ga > llx2->dst_ga) return 1;
2956 return 0;
2957}
2958
/* Keys are boxed LAOGLinkExposition*s compared by
   cmp_LAOGLinkExposition; values are unused (NULL). */
static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
/* end EXPOSITION ONLY */
2961
2962
sewardja65db102009-01-26 10:45:16 +00002963__attribute__((noinline))
2964static void laog__init ( void )
2965{
2966 tl_assert(!laog);
2967 tl_assert(!laog_exposition);
2968
2969 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
2970 HG_(free), NULL/*unboxedcmp*/ );
2971
2972 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
2973 cmp_LAOGLinkExposition );
2974 tl_assert(laog);
2975 tl_assert(laog_exposition);
2976}
2977
sewardjb4112022007-11-09 22:49:28 +00002978static void laog__show ( Char* who ) {
2979 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00002980 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00002981 Lock* me;
2982 LAOGLinks* links;
2983 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00002984 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00002985 me = NULL;
2986 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00002987 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00002988 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00002989 tl_assert(me);
2990 tl_assert(links);
2991 VG_(printf)(" node %p:\n", me);
2992 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
2993 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00002994 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00002995 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
2996 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00002997 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00002998 me = NULL;
2999 links = NULL;
3000 }
sewardj896f6f92008-08-19 08:38:52 +00003001 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003002 VG_(printf)("}\n");
3003}
3004
/* Ensure the edge src --> dst is present in laog, maintaining both
   the forward index (src's 'outs') and the reverse index (dst's
   'inns').  If the edge is genuinely new and both locks carry
   acquisition contexts, also record those contexts in
   laog_exposition for later error reporting.  Caller must have
   initialised laog/laog_exposition already. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      /* addToWS returns the same set iff dst was already a member. */
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src has no node yet: create one with outs = {dst}. */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW  = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* dst has no node yet: create one with inns = {src}. */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   /* Forward and reverse indices must agree on prior presence. */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      /* Stack-allocated lookup key; only the _ga fields matter to
         cmp_LAOGLinkExposition. */
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         /* Heap-allocate a permanent record carrying the contexts. */
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}
3084
3085__attribute__((noinline))
3086static void laog__del_edge ( Lock* src, Lock* dst ) {
3087 Word keyW;
3088 LAOGLinks* links;
3089 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3090 /* Update the out edges for src */
3091 keyW = 0;
3092 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003093 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003094 tl_assert(links);
3095 tl_assert(keyW == (Word)src);
3096 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3097 }
3098 /* Update the in edges for dst */
3099 keyW = 0;
3100 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003101 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003102 tl_assert(links);
3103 tl_assert(keyW == (Word)dst);
3104 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3105 }
3106}
3107
3108__attribute__((noinline))
3109static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3110 Word keyW;
3111 LAOGLinks* links;
3112 keyW = 0;
3113 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003114 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003115 tl_assert(links);
3116 tl_assert(keyW == (Word)lk);
3117 return links->outs;
3118 } else {
3119 return HG_(emptyWS)( univ_laog );
3120 }
3121}
3122
3123__attribute__((noinline))
3124static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3125 Word keyW;
3126 LAOGLinks* links;
3127 keyW = 0;
3128 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003129 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003130 tl_assert(links);
3131 tl_assert(keyW == (Word)lk);
3132 return links->inns;
3133 } else {
3134 return HG_(emptyWS)( univ_laog );
3135 }
3136}
3137
/* Consistency check: for every node, each predecessor must list the
   node among its successors and vice versa, i.e. the forward and
   reverse edge indices must be mirror images.  Aborts (tl_assert(0))
   after dumping the graph if not. */
__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* Every in-edge w->me must be mirrored as me in succs(w). */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      /* Every out-edge me->w must be mirrored as me in preds(w). */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3179
3180/* If there is a path in laog from 'src' to any of the elements in
3181 'dst', return an arbitrarily chosen element of 'dst' reachable from
3182 'src'. If no path exist from 'src' to any element in 'dst', return
3183 NULL. */
/* Iterative depth-first search over laog using an explicit stack
   (XArray of Lock*) and a visited set (WordFM used as a set).
   Returns the first member of 'dsts' reached from 'src', else NULL. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock* ret;
   Word i, ssz;
   XArray* stack; /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock* here;
   WordSetID succs;
   Word succs_size;
   UWord* succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret = NULL;
   stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   /* Seed the stack with the start node.  addToXA copies the
      pointed-to Lock* value. */
   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* Stack exhausted: no path to any member of dsts. */
      if (ssz == 0) { ret = NULL; break; }

      /* Pop the top of the stack. */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* Found a destination: done. */
      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      /* Skip nodes already expanded (the graph may contain cycles). */
      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      /* Push all successors; each succs_words[i] is a Lock* stored
         as a UWord, and addToXA copies through the pointer. */
      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3235
3236
3237/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3238 between 'lk' and the locks already held by 'thr' and issue a
3239 complaint if so. Also, update the ordering graph appropriately.
3240*/
/* Called just before 'lk' is added to thr's lockset.  Reports a
   lock-order violation if laog shows lk was previously acquired
   before any lock thr already holds, then records the new ordering
   edges (held-lock -> lk) in the graph. */
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word ls_size, i;
   Lock* other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         /* The map owns its records; 'found' must be a heap record,
            never our stack-allocated key. */
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3320
3321
3322/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3323
/* Remove 'lk' from the graph: delete all its in- and out-edges, then
   bridge each (pred, succ) pair so that orderings established
   through lk are not lost. */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk )
{
   WordSetID preds, succs;
   Word preds_size, succs_size, i, j;
   UWord *preds_words, *succs_words;

   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();

   preds = laog__preds( lk );
   succs = laog__succs( lk );

   /* NOTE(review): preds_words/succs_words point into the payloads of
      the wordsets fetched above; this relies on those payloads staying
      valid across the laog__del_edge/laog__add_edge calls below —
      presumably univ_laog wordsets are interned and immutable; verify
      against the WordSet implementation. */
   HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
   for (i = 0; i < preds_size; i++)
      laog__del_edge( (Lock*)preds_words[i], lk );

   HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
   for (j = 0; j < succs_size; j++)
      laog__del_edge( lk, (Lock*)succs_words[j] );

   /* Re-link every predecessor to every successor (skipping
      self-edges) so transitive ordering info survives lk's death. */
   for (i = 0; i < preds_size; i++) {
      for (j = 0; j < succs_size; j++) {
         if (preds_words[i] != succs_words[j]) {
            /* This can pass unlocked locks to laog__add_edge, since
               we're deleting stuff.  So their acquired_at fields may
               be NULL. */
            laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
         }
      }
   }
}
3356
sewardj1cbc12f2008-11-10 16:16:46 +00003357//__attribute__((noinline))
3358//static void laog__handle_lock_deletions (
3359// WordSetID /* in univ_laog */ locksToDelete
3360// )
3361//{
3362// Word i, ws_size;
3363// UWord* ws_words;
3364//
sewardja65db102009-01-26 10:45:16 +00003365// if (UNLIKELY(!laog || !laog_exposition))
3366// laog__init();
sewardj1cbc12f2008-11-10 16:16:46 +00003367//
3368// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3369// for (i = 0; i < ws_size; i++)
3370// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3371//
3372// if (HG_(clo_sanity_flags) & SCE_LAOG)
3373// all__sanity_check("laog__handle_lock_deletions-post");
3374//}
sewardjb4112022007-11-09 22:49:28 +00003375
3376
3377/*--------------------------------------------------------------*/
3378/*--- Malloc/free replacements ---*/
3379/*--------------------------------------------------------------*/
3380
/* Metadata describing one client heap block.  Records live in
   hg_mallocmeta_table, chained by 'next' and keyed on 'payload'. */
typedef
   struct {
      void*       next;    /* required by m_hashtable; must be the first field */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;
3390
3391/* A hash table of MallocMetas, used to track malloc'd blocks
3392 (obviously). */
3393static VgHashTable hg_mallocmeta_table = NULL;
3394
3395
/* Allocate a zero-initialised MallocMeta.  HG_(zalloc) is assumed to
   abort rather than return NULL, so the assert is belt-and-braces. */
static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
/* Release a MallocMeta previously obtained from new_MallocMeta. */
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}
3404
3405
3406/* Allocate a client block and set up the metadata for it. */
3407
/* Common worker for all the client allocation entry points: allocate
   szB bytes at alignment alignB, optionally zero them, register the
   block's metadata, and inform the lower memory layers.  Returns the
   payload address, or NULL if the underlying allocation fails. */
static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr p;
   MallocMeta* md;

   /* Callers have already screened for negative-looking sizes. */
   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB     = szB;
   md->where   = VG_(record_ExeContext)( tid, 0 );
   md->thr     = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
3439
3440/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3441 Cast to a signed type to catch any unexpectedly negative args.
3442 We're assuming here that the size asked for is not greater than
3443 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3444 platforms). */
/* Client malloc(): reject negative-looking sizes, then defer to
   handle_alloc with the default client alignment. */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client operator new: identical policy to hg_cli__malloc. */
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client operator new[]: identical policy to hg_cli__malloc. */
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client memalign(): like malloc but with a caller-supplied
   alignment. */
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
3465static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3466 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3467 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3468 /*is_zeroed*/True );
3469}
3470
3471
3472/* Free a client block, including getting rid of the relevant
3473 metadata. */
3474
/* Common worker for all the client free entry points: look up the
   block's metadata, remove it from the table, release the client
   memory, and inform the lower memory layers.  Unknown addresses are
   silently ignored (the client's libc will deal with the fallout). */
static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address.  Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}
3501
/* Client free(). */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete[]. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
3511
3512
3513static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3514{
3515 MallocMeta *md, *md_new, *md_tmp;
3516 SizeT i;
3517
3518 Addr payload = (Addr)payloadV;
3519
3520 if (((SSizeT)new_size) < 0) return NULL;
3521
3522 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3523 if (!md)
3524 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3525
3526 tl_assert(md->payload == payload);
3527
3528 if (md->szB == new_size) {
3529 /* size unchanged */
3530 md->where = VG_(record_ExeContext)(tid, 0);
3531 return payloadV;
3532 }
3533
3534 if (md->szB > new_size) {
3535 /* new size is smaller */
3536 md->szB = new_size;
3537 md->where = VG_(record_ExeContext)(tid, 0);
3538 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3539 return payloadV;
3540 }
3541
3542 /* else */ {
3543 /* new size is bigger */
3544 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3545
3546 /* First half kept and copied, second half new */
3547 // FIXME: shouldn't we use a copier which implements the
3548 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003549 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003550 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003551 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003552 /* FIXME: can anything funny happen here? specifically, if the
3553 old range contained a lock, then die_mem_heap will complain.
3554 Is that the correct behaviour? Not sure. */
3555 evh__die_mem_heap( payload, md->szB );
3556
3557 /* Copy from old to new */
3558 for (i = 0; i < md->szB; i++)
3559 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3560
3561 /* Because the metadata hash table is index by payload address,
3562 we have to get rid of the old hash table entry and make a new
3563 one. We can't just modify the existing metadata in place,
3564 because then it would (almost certainly) be in the wrong hash
3565 chain. */
3566 md_new = new_MallocMeta();
3567 *md_new = *md;
3568
3569 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3570 tl_assert(md_tmp);
3571 tl_assert(md_tmp == md);
3572
3573 VG_(cli_free)((void*)md->payload);
3574 delete_MallocMeta(md);
3575
3576 /* Update fields */
3577 md_new->where = VG_(record_ExeContext)( tid, 0 );
3578 md_new->szB = new_size;
3579 md_new->payload = p_new;
3580 md_new->thr = map_threads_lookup( tid );
3581
3582 /* and add */
3583 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3584
3585 return (void*)p_new;
3586 }
3587}
3588
njn8b140de2009-02-17 04:31:18 +00003589static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3590{
3591 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3592
3593 // There may be slop, but pretend there isn't because only the asked-for
3594 // area will have been shadowed properly.
3595 return ( md ? md->szB : 0 );
3596}
3597
sewardjb4112022007-11-09 22:49:28 +00003598
3599/*--------------------------------------------------------------*/
3600/*--- Instrumentation ---*/
3601/*--------------------------------------------------------------*/
3602
/* Append to 'bbOut' a dirty-helper call reporting one memory access:
   a read or write of 'szB' bytes at the (atomic) address expression
   'addr'.  Sizes 1/2/4/8 get dedicated one-argument helpers; any
   larger size uses the generic _N helper with the size as a second
   argument.  'hWordTy_szB' is the host word size (4 or 8) used only
   for sanity checking. */
static void instrument_mem_access ( IRSB*   bbOut,
                                    IRExpr* addr,
                                    Int     szB,
                                    Bool    isStore,
                                    Int     hWordTy_szB )
{
   IRType   tyAddr   = Ity_INVALID;
   HChar*   hName    = NULL;
   void*    hAddr    = NULL;
   Int      regparms = 0;
   IRExpr** argv     = NULL;
   IRDirty* di       = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      /* Select the write helper matching the access size. */
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      /* Select the read helper matching the access size. */
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
3695
3696
sewardja0eee322009-07-31 08:46:35 +00003697/* Figure out if GA is a guest code address in the dynamic linker, and
3698 if so return True. Otherwise (and in case of any doubt) return
3699 False. (sidedly safe w/ False as the safe value) */
/* Decide whether guest address 'ga' lies in the dynamic linker by
   matching the soname of the object containing it against the known
   per-OS linker sonames.  Any lookup failure answers False (the safe
   direction: the code will merely be instrumented unnecessarily). */
static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
{
   DebugInfo* dinfo;
   const UChar* soname;
   if (0) return False;

   dinfo = VG_(find_DebugInfo)( (Addr)ga );
   if (!dinfo) return False;

   soname = VG_(DebugInfo_get_soname)(dinfo);
   tl_assert(soname);
   if (0) VG_(printf)("%s\n", soname);

   /* Compare against the known dynamic-linker sonames for this OS. */
#  if defined(VGO_linux)
   if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
   if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
   if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
#  elif defined(VGO_darwin)
   if (VG_STREQ(soname, VG_U_DYLD)) return True;
#  else
#    error "Unsupported OS"
#  endif
   return False;
}
3725
sewardjb4112022007-11-09 22:49:28 +00003726static
3727IRSB* hg_instrument ( VgCallbackClosure* closure,
3728 IRSB* bbIn,
3729 VexGuestLayout* layout,
3730 VexGuestExtents* vge,
3731 IRType gWordTy, IRType hWordTy )
3732{
sewardj1c0ce7a2009-07-01 08:10:49 +00003733 Int i;
3734 IRSB* bbOut;
3735 Addr64 cia; /* address of current insn */
3736 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00003737 Bool inLDSO = False;
3738 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00003739
3740 if (gWordTy != hWordTy) {
3741 /* We don't currently support this case. */
3742 VG_(tool_panic)("host/guest word size mismatch");
3743 }
3744
sewardja0eee322009-07-31 08:46:35 +00003745 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
3746 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
3747 }
3748
sewardjb4112022007-11-09 22:49:28 +00003749 /* Set up BB */
3750 bbOut = emptyIRSB();
3751 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
3752 bbOut->next = deepCopyIRExpr(bbIn->next);
3753 bbOut->jumpkind = bbIn->jumpkind;
3754
3755 // Copy verbatim any IR preamble preceding the first IMark
3756 i = 0;
3757 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
3758 addStmtToIRSB( bbOut, bbIn->stmts[i] );
3759 i++;
3760 }
3761
sewardj1c0ce7a2009-07-01 08:10:49 +00003762 // Get the first statement, and initial cia from it
3763 tl_assert(bbIn->stmts_used > 0);
3764 tl_assert(i < bbIn->stmts_used);
3765 st = bbIn->stmts[i];
3766 tl_assert(Ist_IMark == st->tag);
3767 cia = st->Ist.IMark.addr;
3768 st = NULL;
3769
sewardjb4112022007-11-09 22:49:28 +00003770 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00003771 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00003772 tl_assert(st);
3773 tl_assert(isFlatIRStmt(st));
3774 switch (st->tag) {
3775 case Ist_NoOp:
3776 case Ist_AbiHint:
3777 case Ist_Put:
3778 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00003779 case Ist_Exit:
3780 /* None of these can contain any memory references. */
3781 break;
3782
sewardj1c0ce7a2009-07-01 08:10:49 +00003783 case Ist_IMark:
3784 /* no mem refs, but note the insn address. */
3785 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00003786 /* Don't instrument the dynamic linker. It generates a
3787 lot of races which we just expensively suppress, so
3788 it's pointless.
3789
3790 Avoid flooding is_in_dynamic_linker_shared_object with
3791 requests by only checking at transitions between 4K
3792 pages. */
3793 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
3794 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
3795 inLDSOmask4K = cia & ~(Addr64)0xFFF;
3796 inLDSO = is_in_dynamic_linker_shared_object(cia);
3797 } else {
3798 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
3799 }
sewardj1c0ce7a2009-07-01 08:10:49 +00003800 break;
3801
sewardjb4112022007-11-09 22:49:28 +00003802 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00003803 switch (st->Ist.MBE.event) {
3804 case Imbe_Fence:
3805 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00003806 default:
3807 goto unhandled;
3808 }
sewardjb4112022007-11-09 22:49:28 +00003809 break;
3810
sewardj1c0ce7a2009-07-01 08:10:49 +00003811 case Ist_CAS: {
3812 /* Atomic read-modify-write cycle. Just pretend it's a
3813 read. */
3814 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00003815 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
3816 if (isDCAS) {
3817 tl_assert(cas->expdHi);
3818 tl_assert(cas->dataHi);
3819 } else {
3820 tl_assert(!cas->expdHi);
3821 tl_assert(!cas->dataHi);
3822 }
3823 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00003824 if (!inLDSO) {
3825 instrument_mem_access(
3826 bbOut,
3827 cas->addr,
3828 (isDCAS ? 2 : 1)
3829 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
3830 False/*!isStore*/,
3831 sizeofIRType(hWordTy)
3832 );
3833 }
sewardj1c0ce7a2009-07-01 08:10:49 +00003834 break;
3835 }
3836
sewardjb4112022007-11-09 22:49:28 +00003837 case Ist_Store:
sewardj1c0ce7a2009-07-01 08:10:49 +00003838 /* It seems we pretend that store-conditionals don't
3839 exist, viz, just ignore them ... */
3840 if (st->Ist.Store.resSC == IRTemp_INVALID) {
sewardja0eee322009-07-31 08:46:35 +00003841 if (!inLDSO) {
3842 instrument_mem_access(
3843 bbOut,
3844 st->Ist.Store.addr,
3845 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
3846 True/*isStore*/,
3847 sizeofIRType(hWordTy)
3848 );
3849 }
sewardj1c0ce7a2009-07-01 08:10:49 +00003850 }
njnb83caf22009-05-25 01:47:56 +00003851 break;
sewardjb4112022007-11-09 22:49:28 +00003852
3853 case Ist_WrTmp: {
sewardj1c0ce7a2009-07-01 08:10:49 +00003854 /* ... whereas here we don't care whether a load is a
3855 vanilla one or a load-linked. */
sewardjb4112022007-11-09 22:49:28 +00003856 IRExpr* data = st->Ist.WrTmp.data;
3857 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00003858 if (!inLDSO) {
3859 instrument_mem_access(
3860 bbOut,
3861 data->Iex.Load.addr,
3862 sizeofIRType(data->Iex.Load.ty),
3863 False/*!isStore*/,
3864 sizeofIRType(hWordTy)
3865 );
3866 }
sewardjb4112022007-11-09 22:49:28 +00003867 }
3868 break;
3869 }
3870
3871 case Ist_Dirty: {
3872 Int dataSize;
3873 IRDirty* d = st->Ist.Dirty.details;
3874 if (d->mFx != Ifx_None) {
3875 /* This dirty helper accesses memory. Collect the
3876 details. */
3877 tl_assert(d->mAddr != NULL);
3878 tl_assert(d->mSize != 0);
3879 dataSize = d->mSize;
3880 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00003881 if (!inLDSO) {
3882 instrument_mem_access(
3883 bbOut, d->mAddr, dataSize, False/*!isStore*/,
3884 sizeofIRType(hWordTy)
3885 );
3886 }
sewardjb4112022007-11-09 22:49:28 +00003887 }
3888 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00003889 if (!inLDSO) {
3890 instrument_mem_access(
3891 bbOut, d->mAddr, dataSize, True/*isStore*/,
3892 sizeofIRType(hWordTy)
3893 );
3894 }
sewardjb4112022007-11-09 22:49:28 +00003895 }
3896 } else {
3897 tl_assert(d->mAddr == NULL);
3898 tl_assert(d->mSize == 0);
3899 }
3900 break;
3901 }
3902
3903 default:
sewardjf98e1c02008-10-25 16:22:41 +00003904 unhandled:
3905 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00003906 tl_assert(0);
3907
3908 } /* switch (st->tag) */
3909
3910 addStmtToIRSB( bbOut, st );
3911 } /* iterate over bbIn->stmts */
3912
3913 return bbOut;
3914}
3915
3916
3917/*----------------------------------------------------------------*/
3918/*--- Client requests ---*/
3919/*----------------------------------------------------------------*/
3920
/* Map from client pthread_t values to Helgrind Thread* structures,
   so a joiner can recover the quitting thread's Thread*. */
3922static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
3923
3924static void map_pthread_t_to_Thread_INIT ( void ) {
3925 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00003926 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
3927 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00003928 tl_assert(map_pthread_t_to_Thread != NULL);
3929 }
3930}
3931
3932
/* Handle a Helgrind client request issued by the client program (the
   _VG_USERREQ__HG_* values come from Helgrind's client-request
   header).  Returns True iff args[0] is a Helgrind request; *ret
   receives the value handed back to the client (0 unless a case sets
   it).  Most cases simply forward their arguments to the matching
   evh__* event handler. */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
         VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
         VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                     (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      /* The client-side pthreads wrappers detected a misuse of the
         pthreads API; report it against the calling thread.
         args[1]=function name, args[2]=errno-style code, args[3]=message. */
      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
         VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         /* Recover the quitter's Thread* from the binding lodged via
            SET_MY_PTHREAD_T. */
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
            VG_(printf)(".................... quitter Thread* = %p\n",
                        thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
4155
4156
4157/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004158/*--- Setup ---*/
4159/*----------------------------------------------------------------*/
4160
/* Process one Helgrind command-line option in 'arg'.  Returns True if
   the option was accepted (including acceptance by the replacement-
   malloc option processor), False on a malformed argument value.
   NOTE(review): the bracketless "if VG_*_CLO(...)" style presumably
   works because those macros expand to a complete parenthesised
   condition -- confirm against pub_tool_options.h. */
static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   /* --history-level: encoded as 0 (none), 1 (approx), 2 (full). */
   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/10mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 10*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      /* Pack the six '0'/'1' characters into the low 6 bits of
         clo_sanity_flags, leftmost character = highest bit. */
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      /* Not one of ours; give the malloc-replacement machinery a go. */
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4208
/* Print the user-visible options for --help.  Must stay in sync with
   the options accepted by hg_process_cmd_line_option. */
static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}
4220
/* Print the debugging-only options for --help-debug.  Must stay in
   sync with the options accepted by hg_process_cmd_line_option. */
static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   /* Meaning of each bit position in the 6-character flag string: */
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}
4236
/* Called by the core after command-line processing.  Helgrind has
   nothing to do at this point. */
static void hg_post_clo_init ( void )
{
}
4240
/* Tool finalisation, called once at client exit.  Prints advisory
   messages at verbosity 1 (non-XML), optionally dumps Helgrind's data
   structures and runs sanity checks, and with --stats prints
   workset-universe / map / lock statistics before shutting down
   libhb.  'exitcode' is the client's exit code (unused here). */
static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   /* history_level >= 2 means 'full' history, the slow mode; suggest
      the cheaper alternatives. */
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}
4319
sewardjf98e1c02008-10-25 16:22:41 +00004320/* FIXME: move these somewhere sane */
4321
4322static
4323void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4324{
4325 Thread* thr;
4326 ThreadId tid;
4327 UWord nActual;
4328 tl_assert(hbt);
4329 thr = libhb_get_Thr_opaque( hbt );
4330 tl_assert(thr);
4331 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4332 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4333 NULL, NULL, 0 );
4334 tl_assert(nActual <= nRequest);
4335 for (; nActual < nRequest; nActual++)
4336 frames[nActual] = 0;
4337}
4338
4339static
sewardj23f12002009-07-24 08:45:08 +00004340ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00004341{
4342 Thread* thr;
4343 ThreadId tid;
4344 ExeContext* ec;
4345 tl_assert(hbt);
4346 thr = libhb_get_Thr_opaque( hbt );
4347 tl_assert(thr);
4348 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00004349 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00004350 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00004351 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00004352}
4353
4354
/* Tool pre-initialisation, run by the core before command-line
   processing.  Registers Helgrind's details, core 'needs' (errors,
   options, client requests, malloc replacement), all memory/thread
   event-tracking callbacks, then initialises libhb and Helgrind's own
   data structures, including the malloc metadata hash table. */
static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;

   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2009, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(print_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)       (hg_cheap_sanity_check,
   //                                hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   /* Memory-state transition callbacks: new memory ... */
   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   /* ... and dying memory; all routes funnel into evh__die_mem. */
   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   /* Start libhb, handing it our stack-trace and ExeContext
      callbacks; it gives back the Thr* for the root thread. */
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

}
4460
4461VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
4462
4463/*--------------------------------------------------------------------*/
4464/*--- end hg_main.c ---*/
4465/*--------------------------------------------------------------------*/