/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2010 OpenWorks LLP
       info@open-works.co.uk

   Copyright (C) 2007-2010 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE
#include "pub_tool_libcproc.h"   // VG_(atfork)

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
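
/* A minimal sketch of the kind of cast the note above refers to
   ('ga' stands for some guest address; 'map_locks' is declared later
   in this file):

      Lock* lk = NULL;
      VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );

   The store through the Word* is a store through an incompatible
   lvalue type as far as C99 aliasing rules are concerned, so at -O2
   gcc may assume it cannot modify 'lk' -- hence the need for
   -fno-strict-aliasing. */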

// FIXME catch sync signals (SEGV, basically) and unlock BHL,
// if held.  Otherwise a LOCK-prefixed insn which segfaults
// gets Helgrind into a total muddle as the BHL will not be
// released after the insn.

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// this is:
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
#define SHOW_EXPENSIVE_STUFF 0

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin linked list of Locks */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* Never changed; we only care about its address.  It is treated as if
   it were a standard userspace lock.  Also we have a Lock* describing
   it so it can participate in lock sets in the usual way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread       = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA     = HG_(emptyWS)( univ_lsets );
   thread->locksetW     = HG_(emptyWS)( univ_lsets );
   thread->magic        = Thread_MAGIC;
   thread->hbthr        = hbthr;
   thread->coretid      = VG_INVALID_THREADID;
   thread->created_at   = NULL;
   thread->announced    = False;
   thread->errmsg_index = indx++;
   thread->admin        = admin_threads;
   admin_threads        = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock        = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin       = admin_locks;
   lock->unique      = unique++;
   lock->magic       = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso        = libhb_so_alloc();
   lock->guestaddr   = guestaddr;
   lock->kind        = kind;
   lock->heldW       = False;
   lock->heldBy      = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks       = lock;
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                                      (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
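
/* A sketch of how the dumpers above are intended to be driven
   (hypothetical call site; the real one lives in the tool's
   end-of-run handling):

      if (SHOW_DATA_STRUCTURES)
         pp_everything( PP_ALL, "SK_(fini)" );
*/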


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found ? lk != NULL : lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
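
/* Taken together, these helpers give the whole registry lifecycle for
   a lock.  A hedged sketch ('ga' and 'tid' are assumed to come from a
   client request or wrapper):

      Lock* lk = map_locks_lookup_or_create( LK_nonRec, ga, tid );
      tl_assert( map_locks_maybe_lookup(ga) == lk );
      ...
      map_locks_delete(ga);  // asserts the (ga, Lock) pair existed
*/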


/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
             and can be mapped to a valid Segment(seg)
             and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}

/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}

///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a good
//   chance that it is indeed the lock that the programmer intended to
//   use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This happens in the case where there are
//   many transitions ShR -> ShR, all with an empty lockset, and only
//   later is there a transition to ShM.  So what we want to do is note
//   the lossage lock at the point where a ShR -> ShR transition empties
//   out the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates
//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
//   we do not record the Lock* directly as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
//
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
//
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}
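
/* A hedged sketch of how a realloc handler might drive these wrappers,
   per the 'Block-copy states' comment above (hypothetical variable
   names; the real handler lives with the malloc replacements):

      shadow_mem_make_New     ( thr, new_payload, new_size );
      shadow_mem_scopy_range  ( thr, old_payload, new_payload, old_size );
      shadow_mem_make_NoAccess( thr, old_payload, old_size );
*/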


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }

/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
1293
1294
/* The lock at 'lock_ga' is just about to be unlocked. Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call. Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state. Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'. Nevertheless
         the client is trying to unlock it. So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held. This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held. Is this thread one of the holders? If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock. This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail. So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock. So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero. This means we don't hold the lock any more. But
         if it's a rwlock held in r-mode, someone else could still
         hold it. Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock. We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case. It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.) Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody. Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}


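/* Illustrative sketch only (hypothetical client code, not part of
   Helgrind): the error paths above fire on client fragments like the
   following.

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      // Thread T1: pthread_mutex_lock(&mx);
      // Thread T2: pthread_mutex_unlock(&mx); // UnlockForeign: T2 not a holder
      // Thread T1: pthread_mutex_unlock(&mx); // ok
      // Thread T1: pthread_mutex_unlock(&mx); // UnlockUnlocked: not held
*/
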
/* ---------------------------------------------------------- */
/* -------- Event handlers proper (evh__* functions) -------- */
/* ---------------------------------------------------------- */

/* What is the Thread* for the currently running thread? This is
   absolutely performance critical. We receive notifications from the
   core for client code starts/stops, and cache the looked-up result
   in 'current_Thread'. Hence, for the vast majority of requests,
   finding the current thread reduces to a read of a global variable,
   provided get_current_Thread_in_C_C is inlined.

   Outside of client code, current_Thread is NULL, and presumably
   any uses of it will cause a segfault. Hence:

   - for uses definitely within client code, use
     get_current_Thread_in_C_C.

   - for all other uses, use get_current_Thread.
*/

static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;

static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread == NULL);
   current_Thread = map_threads_lookup( tid );
   tl_assert(current_Thread != NULL);
   if (current_Thread != current_Thread_prev) {
      libhb_Thr_resumes( current_Thread->hbthr );
      current_Thread_prev = current_Thread;
   }
}
static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
   if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
   tl_assert(current_Thread != NULL);
   current_Thread = NULL;
   libhb_maybe_GC();
}
static inline Thread* get_current_Thread_in_C_C ( void ) {
   return current_Thread;
}
static inline Thread* get_current_Thread ( void ) {
   ThreadId coretid;
   Thread*  thr;
   thr = get_current_Thread_in_C_C();
   if (LIKELY(thr))
      return thr;
   /* evidently not in client code. Do it the slow way. */
   coretid = VG_(get_running_tid)();
   /* FIXME: get rid of the following kludge. It exists because
      evh__new_mem is called during initialisation (as notification
      of initial memory layout) and VG_(get_running_tid)() returns
      VG_INVALID_THREADID at that point. */
   if (coretid == VG_INVALID_THREADID)
      coretid = 1; /* KLUDGE */
   thr = map_threads_lookup( coretid );
   return thr;
}

static
void evh__new_mem ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem-post");
}

static
void evh__new_mem_stack ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(),
                        -VG_STACK_REDZONE_SZB + a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_stack-post");
}

static
void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_tid-post");
}

static
void evh__new_mem_w_perms ( Addr a, SizeT len,
                            Bool rr, Bool ww, Bool xx, ULong di_handle ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   if (rr || ww || xx)
      shadow_mem_make_New( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_w_perms-post");
}

static
void evh__set_perms ( Addr a, SizeT len,
                      Bool rr, Bool ww, Bool xx ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
                  (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
   /* Hmm. What should we do here, that actually makes any sense?
      Let's say: if neither readable nor writable, then declare it
      NoAccess, else leave it alone. */
   if (!(rr || ww))
      shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__set_perms-post");
}

static
void evh__die_mem ( Addr a, SizeT len ) {
   // urr, libhb ignores this.
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem-post");
}

static
void evh__untrack_mem ( Addr a, SizeT len ) {
   // whereas it doesn't ignore this
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_Untracked( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__untrack_mem-post");
}

static
void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
   if (SHOW_EVENTS >= 2)
      VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
   shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__copy_mem-post");
}

static
void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
                  (Int)parent, (Int)child );

   if (parent != VG_INVALID_THREADID) {
      Thread* thr_p;
      Thread* thr_c;
      Thr*    hbthr_p;
      Thr*    hbthr_c;

      tl_assert(HG_(is_sane_ThreadId)(parent));
      tl_assert(HG_(is_sane_ThreadId)(child));
      tl_assert(parent != child);

      thr_p = map_threads_maybe_lookup( parent );
      thr_c = map_threads_maybe_lookup( child );

      tl_assert(thr_p != NULL);
      tl_assert(thr_c == NULL);

      hbthr_p = thr_p->hbthr;
      tl_assert(hbthr_p != NULL);
      tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );

      hbthr_c = libhb_create ( hbthr_p );

      /* Create a new thread record for the child. */
      /* a Thread for the new thread ... */
      thr_c = mk_Thread( hbthr_c );
      tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
      libhb_set_Thr_opaque(hbthr_c, thr_c);

      /* and bind it in the thread-map table */
      map_threads[child] = thr_c;
      tl_assert(thr_c->coretid == VG_INVALID_THREADID);
      thr_c->coretid = child;

      /* Record where the parent is so we can later refer to this in
         error messages.

         On amd64-linux, this entails a nasty glibc-2.5 specific hack.
         The stack snapshot is taken immediately after the parent has
         returned from its sys_clone call. Unfortunately there is no
         unwind info for the insn following "syscall" - reading the
         glibc sources confirms this. So we ask for a snapshot to be
         taken as if RIP was 3 bytes earlier, in a place where there
         is unwind info. Sigh.
      */
      { Word first_ip_delta = 0;
#       if defined(VGP_amd64_linux)
        first_ip_delta = -3;
#       endif
        thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
      }
   }

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_create-post");
}

static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress. That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works). In which case there has already been a prior
      sync event. So in any case, just let the thread exit. On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                   nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   /* Cleanup actions (next 5 lines) copied in evh__atfork_child; keep
      in sync. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}

/* This is called immediately after fork, for the child only. 'tid'
   is the only surviving thread (as per POSIX rules on fork() in
   threaded programs), so we have to clean up map_threads to remove
   entries for any other threads. */
static
void evh__atfork_child ( ThreadId tid )
{
   UInt    i;
   Thread* thr;
   /* Slot 0 should never be used. */
   thr = map_threads_maybe_lookup( 0/*INVALID*/ );
   tl_assert(!thr);
   /* Clean up all other slots except 'tid'. */
   for (i = 1; i < VG_N_THREADS; i++) {
      if (i == tid)
         continue;
      thr = map_threads_maybe_lookup(i);
      if (!thr)
         continue;
      /* Cleanup actions (next 5 lines) copied from end of
         evh__pre_thread_ll_exit; keep in sync. */
      tl_assert(thr->hbthr);
      libhb_async_exit(thr->hbthr);
      tl_assert(thr->coretid == i);
      thr->coretid = VG_INVALID_THREADID;
      map_threads_delete(i);
   }
}


static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* Do a strong send, so that the stayer inherits the quitter's
      entire vector clock and hence a dependence on everything the
      quitter did before exiting. */
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks. No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified of the low level thread exit
      before we hear of any join event on it. The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it. Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}

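/* Illustrative sketch only (hypothetical client code): the SO send/recv
   pair above is what makes the following everyday pattern race-free.
   The quitter's write happens-before the stayer's read, via the join.

      int x;                                  // shared
      // Quitter: x = 42; return NULL;        // thread function ends
      // Stayer:  pthread_join(quitter, NULL);
      //          printf("%d\n", x);          // ordered after the write
*/
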
static
void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
                         Addr a, SizeT size) {
   if (SHOW_EVENTS >= 2
       || (SHOW_EVENTS >= 1 && size != 1))
      VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cread_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read-post");
}

static
void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
                                Char* s, Addr a ) {
   Int len;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_read_asciiz(ctid=%d, \"%s\", %p)\n",
                  (Int)tid, s, (void*)a );
   // FIXME: think of a less ugly hack
   len = VG_(strlen)( (Char*) a );
   shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_read_asciiz-post");
}

static
void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
                          Addr a, SizeT size ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
                  (Int)tid, s, (void*)a, size );
   shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
   if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__pre_mem_write-post");
}

static
void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
                  (void*)a, len, (Int)is_inited );
   // FIXME: this is kinda stupid: both branches are identical.
   if (is_inited) {
      shadow_mem_make_New(get_current_Thread(), a, len);
   } else {
      shadow_mem_make_New(get_current_Thread(), a, len);
   }
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__new_mem_heap-post");
}

static
void evh__die_mem_heap ( Addr a, SizeT len ) {
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
   shadow_mem_make_NoAccess( get_current_Thread(), a, len );
   if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
      all__sanity_check("evh__die_mem_heap-post");
}

/* --- Event handlers called from generated code --- */

static VG_REGPARM(1)
void evh__mem_help_cread_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cread_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cread_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CREAD_N(hbthr, a, size);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_1(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_1(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_2(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_2(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_4(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_4(hbthr, a);
}

static VG_REGPARM(1)
void evh__mem_help_cwrite_8(Addr a) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_8(hbthr, a);
}

static VG_REGPARM(2)
void evh__mem_help_cwrite_N(Addr a, SizeT size) {
   Thread* thr   = get_current_Thread_in_C_C();
   Thr*    hbthr = thr->hbthr;
   LIBHB_CWRITE_N(hbthr, a, size);
}


/* ------------------------------------------------------- */
/* -------------- events to do with mutexes -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only: by intercepting lock init events we can show the
   user where the lock was initialised, rather than only being able to
   show where it was first locked. Intercepting lock initialisations
   is not necessary for the basic operation of the race checker. */
static
void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
                                      void* mutex, Word mbRec )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
                  (Int)tid, mbRec, (void*)mutex );
   tl_assert(mbRec == 0 || mbRec == 1);
   map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
                               (Addr)mutex, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
}

static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}

static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing). Duh. Deadlock coming up; but at least
         produce an error message. */
      HChar* errstr = "Attempt to re-lock a "
                      "non-recursive lock I already hold";
      HChar* auxstr = "Lock was previously acquired";
      if (lk->acquired_at) {
         HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
      } else {
         HG_(record_error_Misc)( thr, errstr );
      }
   }
}

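/* Illustrative sketch only (hypothetical client code): the re-lock
   check above catches the classic self-deadlock on a default
   (non-recursive) mutex.

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);  // reported: "Attempt to re-lock a
                                // non-recursive lock I already hold"
*/
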
static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__post_thread_w_acquires_lock(
      thr,
      LK_mbRec, /* if not known, create new lock with this LockKind */
      (Addr)mutex
   );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
{
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
}

static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
{
   // only called if the real library call succeeded - so mutex is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


/* ------------------------------------------------------- */
/* -------------- events to do with spinlocks ------------ */
/* ------------------------------------------------------- */

/* All a bit of a kludge. Pretend we're really dealing with ordinary
   pthread_mutex_t's instead, for the most part. */

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   Thread* thr;
   Lock*   lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it. Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge. Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held. So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                      False/*!isRDWR*/ );
   }
}

static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
                                                      void* slock )
{
   Lock* lk;
   /* More kludgery. If the lock has never been seen before, do
      actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
      nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (!lk) {
      map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
   }
}

static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}

static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}

static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}


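/* Illustrative sketch only (hypothetical client code): the net effect
   of the mappings above is that a spinlock lifecycle is modelled
   exactly like a non-recursive mutex.

      pthread_spinlock_t sl;
      pthread_spin_init(&sl, PTHREAD_PROCESS_PRIVATE); // INIT_OR_UNLOCK_{PRE,POST}
      pthread_spin_lock(&sl);                          // SPIN_LOCK_{PRE,POST}
      pthread_spin_unlock(&sl);                        // INIT_OR_UNLOCK_{PRE,POST}
      pthread_spin_destroy(&sl);                       // SPIN_DESTROY_PRE
*/
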
/* ----------------------------------------------------- */
/* --------------- events to do with CVs --------------- */
/* ----------------------------------------------------- */

/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking). When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO. This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero. Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;


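/* Illustrative sketch only (hypothetical client code): the SO bound to
   a CV carries the signaller's vector clock to the waiter, so the
   waiter observes everything the signaller did before signalling.

      // Signaller: pthread_mutex_lock(&mx);
      //            ready = 1;                      // write
      //            pthread_cond_signal(&cv);       // 'send' into the CV's SO
      //            pthread_mutex_unlock(&mx);
      // Waiter:    pthread_mutex_lock(&mx);
      //            while (!ready)
      //               pthread_cond_wait(&cv, &mx); // 'recv' from the SO
      //            pthread_mutex_unlock(&mx);      // read of 'ready' is ordered
*/
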
/* pthread_cond_t* -> CVInfo* */
static WordFM* map_cond_to_CVInfo = NULL;

static void map_cond_to_CVInfo_INIT ( void ) {
   if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
      map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
                                       "hg.mctCI.1", HG_(free), NULL );
      tl_assert(map_cond_to_CVInfo != NULL);
   }
}

static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
   UWord key, val;
   map_cond_to_CVInfo_INIT();
   if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
      tl_assert(key == (UWord)cond);
      return (CVInfo*)val;
   } else {
      SO*     so  = libhb_so_alloc();
      CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
      cvi->so    = so;
      cvi->mx_ga = 0;
      VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
      return cvi;
   }
}

static void map_cond_to_CVInfo_delete ( void* cond ) {
   UWord keyW, valW;
   map_cond_to_CVInfo_INIT();
   if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
      CVInfo* cvi = (CVInfo*)valW;
      tl_assert(keyW == (UWord)cond);
      tl_assert(cvi);
      tl_assert(cvi->so);
      libhb_so_dealloc(cvi->so);
      cvi->mx_ga = 0;
      HG_(free)(cvi);
   }
}

static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'. As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO. This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock*   lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm. POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."
   //
   // For the moment, disable these checks.
   //lk = map_locks_maybe_lookup(cvi->mx_ga);
   //if (lk == NULL || cvi->mx_ga == 0) {
   //   HG_(record_error_Misc)( thr,
   //      "pthread_cond_{signal,broadcast}: "
   //      "no or invalid mutex associated with cond");
   //}
   ///* note: lk could be NULL. Be careful. */
   //if (lk) {
   //   if (lk->kind == LK_rdwr) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
   //   }
   //   if (lk->heldBy == NULL) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by any thread");
   //   }
   //   if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by calling thread");
   //   }
   //}

   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}

/* returns True if it reckons 'mutex' is valid and held by this
   thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments. There are various ways to be
      a bozo. Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      lk_valid = False;
      HG_(record_error_Misc)(
         thr,
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         lk_valid = False;
         HG_(record_error_Misc)(
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}

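/* Illustrative sketch only (hypothetical client code): two of the
   "bozo" cases rejected above.

      pthread_cond_t  cv = PTHREAD_COND_INITIALIZER;
      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      pthread_cond_wait(&cv, &mx);      // error: mutex not held
      // ... later, with a second mutex mx2 locked by the caller:
      // pthread_cond_wait(&cv, &mx2);  // error: cond already bound to mx
*/
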
static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
                                             void* cond, void* mutex )
{
   /* A pthread_cond_wait(cond, mutex) completed successfully. Find
      the SO for this cond, and 'recv' from it so as to acquire a
      dependency edge back to the signaller/broadcaster. */
   Thread* thr;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
                  "(ctid=%d, cond=%p, mutex=%p)\n",
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // error-if: cond is also associated with a different mutex

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);
   tl_assert(cvi->nWaiters > 0);

   if (!libhb_so_everSent(cvi->so)) {
      /* Hmm. How can a wait on 'cond' succeed if nobody signalled
         it? If this happened it would surely be a bug in the threads
         library. Or one of those fabled "spurious wakeups". */
      HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
                                   "succeeded without prior "
                                   "pthread_cond_signal" );
   }

   /* anyway, acquire a dependency on it. */
   libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );

   cvi->nWaiters--;
}

static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
                                               void* cond )
{
   /* Deal with destroy events. The only purpose is to free storage
      associated with the CV, so as to avoid any possible resource
      leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
                  "(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   map_cond_to_CVInfo_delete( cond );
}


/* ------------------------------------------------------- */
/* -------------- events to do with rwlocks -------------- */
/* ------------------------------------------------------- */

/* EXPOSITION only */
static
void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
{
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );
   map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
}

static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked rwlock" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
                                       void* rwl,
                                       Word isW, Word isTryLock )
{
   /* Just check the rwl is sane; nothing else to do. */
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)rwl );
   if ( lk
        && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
      /* Wrong kind of lock. Duh. */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_{rd,wr}lock with a "
              "pthread_mutex_t* argument " );
   }
}

static
void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
{
   // only called if the real library call succeeded - so rwlock is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
                  (Int)tid, (Int)isW, (void*)rwl );

   tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   (isW ? evhH__post_thread_w_acquires_lock
        : evhH__post_thread_r_acquires_lock)(
      thr,
      LK_rdwr, /* if not known, create new lock with this LockKind */
      (Addr)rwl
   );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
{
   // 'rwl' may be invalid - not checked by wrapper
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
}

static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
{
   // only called if the real library call succeeded - so rwlock is sane
   Thread* thr;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
                  (Int)tid, (void*)rwl );
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   // anything we should do here?
}


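/* Illustrative sketch only (hypothetical client code): two readers may
   hold a pthread_rwlock_t concurrently (both end up in locksetA but
   not locksetW), while a writer's unlock does a strong send of its
   vector clock into the lock's SO, per evhH__pre_thread_releases_lock.

      pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
      // T1: pthread_rwlock_rdlock(&rw);  // r-held, heldW == False
      // T2: pthread_rwlock_rdlock(&rw);  // also r-held; no error
      // T1: pthread_rwlock_unlock(&rw);  // weak send (was r-held)
      // T2: pthread_rwlock_unlock(&rw);
      // T3: pthread_rwlock_wrlock(&rw);  // w-held, exclusive
      // T3: pthread_rwlock_unlock(&rw);  // strong send (was w-held)
*/
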
/* ---------------------------------------------------------- */
/* -------------- events to do with semaphores -------------- */
/* ---------------------------------------------------------- */

/* This is similar to but not identical to the handling for condition
   variables. */

/* For each semaphore, we maintain a stack of SOs. When a 'post'
   operation is done on a semaphore (unlocking, essentially), a new SO
   is created for the posting thread, the posting thread does a strong
   send to it (which merely installs the posting thread's VC in the
   SO), and the SO is pushed on the semaphore's stack.

   Later, when a (probably different) thread completes 'wait' on the
   semaphore, we pop a SO off the semaphore's stack (which should be
   nonempty), and do a strong recv from it. This mechanism creates
   dependencies between posters and waiters of the semaphore.

   It may not be necessary to use a stack - perhaps a bag of SOs would
   do. But we do need to keep track of how many unused-up posts have
   happened for the semaphore.

   Imagine T1 and T2 both post once on a semaphore S, and T3 waits
   twice on S. T3 cannot complete its waits without both T1 and T2
   posting. The above mechanism will ensure that T3 acquires
   dependencies on both T1 and T2.

   When a semaphore is initialised with value N, we do as if we'd
   posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them. This allows up to N waits on the
   semaphore to acquire a dependency on the initialisation point,
   which AFAICS is the correct behaviour.

   We don't emit an error for DESTROY_PRE on a semaphore we don't know
   about. We should.
*/

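/* Illustrative sketch only (hypothetical client code): the T1/T2/T3
   scenario described above, which the SO stack handles correctly.

      sem_t s;
      sem_init(&s, 0, 0);
      // T1: x = ...; sem_post(&s);    // pushes an SO carrying T1's VC
      // T2: y = ...; sem_post(&s);    // pushes an SO carrying T2's VC
      // T3: sem_wait(&s); sem_wait(&s);
      //     // T3 popped both SOs: it now depends on T1 and T2,
      //     // so reading x and y here is race-free.
*/
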
/* sem_t* -> XArray* SO* */
static WordFM* map_sem_to_SO_stack = NULL;

static void map_sem_to_SO_stack_INIT ( void ) {
   if (map_sem_to_SO_stack == NULL) {
      map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
                                        HG_(free), NULL );
      tl_assert(map_sem_to_SO_stack != NULL);
   }
}

static void push_SO_for_sem ( void* sem, SO* so ) {
   UWord   keyW;
   XArray* xa;
   tl_assert(so);
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      tl_assert(keyW == (UWord)sem);
      tl_assert(xa);
      VG_(addToXA)( xa, &so );
   } else {
      xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
      VG_(addToXA)( xa, &so );
      VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
   }
}

static SO* mb_pop_SO_for_sem ( void* sem ) {
   UWord   keyW;
   XArray* xa;
   SO*     so;
   map_sem_to_SO_stack_INIT();
   if (VG_(lookupFM)( map_sem_to_SO_stack,
                      &keyW, (UWord*)&xa, (UWord)sem )) {
      /* xa is the stack for this semaphore. */
      Word sz;
      tl_assert(keyW == (UWord)sem);
      sz = VG_(sizeXA)( xa );
      tl_assert(sz >= 0);
      if (sz == 0)
         return NULL; /* odd, the stack is empty */
      so = *(SO**)VG_(indexXA)( xa, sz-1 );
      tl_assert(so);
      VG_(dropTailXA)( xa, 1 );
      return so;
   } else {
      /* hmm, that's odd. No stack for this semaphore. */
      return NULL;
   }
}

sewardj11e352f2007-11-30 11:11:02 +00002624static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002625{
sewardjf98e1c02008-10-25 16:22:41 +00002626 UWord keyW, valW;
2627 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002628
sewardjb4112022007-11-09 22:49:28 +00002629 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002630 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002631 (Int)tid, (void*)sem );
2632
sewardjf98e1c02008-10-25 16:22:41 +00002633 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002634
sewardjf98e1c02008-10-25 16:22:41 +00002635 /* Empty out the semaphore's SO stack. This way of doing it is
2636 stupid, but at least it's easy. */
2637 while (1) {
2638 so = mb_pop_SO_for_sem( sem );
2639 if (!so) break;
2640 libhb_so_dealloc(so);
2641 }
2642
2643 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2644 XArray* xa = (XArray*)valW;
2645 tl_assert(keyW == (UWord)sem);
2646 tl_assert(xa);
2647 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2648 VG_(deleteXA)(xa);
2649 }
sewardjb4112022007-11-09 22:49:28 +00002650}
2651
sewardj11e352f2007-11-30 11:11:02 +00002652static
2653void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2654{
sewardjf98e1c02008-10-25 16:22:41 +00002655 SO* so;
2656 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002657
2658 if (SHOW_EVENTS >= 1)
2659 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2660 (Int)tid, (void*)sem, value );
2661
sewardjf98e1c02008-10-25 16:22:41 +00002662 thr = map_threads_maybe_lookup( tid );
2663 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002664
sewardjf98e1c02008-10-25 16:22:41 +00002665 /* Empty out the semaphore's SO stack. This way of doing it is
2666 stupid, but at least it's easy. */
2667 while (1) {
2668 so = mb_pop_SO_for_sem( sem );
2669 if (!so) break;
2670 libhb_so_dealloc(so);
2671 }
sewardj11e352f2007-11-30 11:11:02 +00002672
sewardjf98e1c02008-10-25 16:22:41 +00002673 /* If we don't do this check, the following while loop runs us out
2674 of memory for stupid initial values of 'value'. */
2675 if (value > 10000) {
2676 HG_(record_error_Misc)(
2677 thr, "sem_init: initial value exceeds 10000; using 10000" );
2678 value = 10000;
2679 }
sewardj11e352f2007-11-30 11:11:02 +00002680
sewardjf98e1c02008-10-25 16:22:41 +00002681 /* Now create 'valid' new SOs for the thread, do a strong send to
2682 each of them, and push them all on the stack. */
2683 for (; value > 0; value--) {
2684 Thr* hbthr = thr->hbthr;
2685 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002686
sewardjf98e1c02008-10-25 16:22:41 +00002687 so = libhb_so_alloc();
2688 libhb_so_send( hbthr, so, True/*strong send*/ );
2689 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002690 }
2691}
2692
2693static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002694{
sewardjf98e1c02008-10-25 16:22:41 +00002695 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2696 it (iow, write our VC into it, then tick ours), and push the SO
2697 on a stack of SOs associated with 'sem'. This is later used
2698 by other thread(s) which successfully exit from a sem_wait on
2699 the same sem; by doing a strong recv from SOs popped off the
2700 stack, they acquire dependencies on the posting thread
2701 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002702
sewardjf98e1c02008-10-25 16:22:41 +00002703 Thread* thr;
2704 SO* so;
2705 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002706
2707 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002708 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002709 (Int)tid, (void*)sem );
2710
2711 thr = map_threads_maybe_lookup( tid );
2712 tl_assert(thr); /* cannot fail - Thread* must already exist */
2713
2714 // error-if: sem is bogus
2715
sewardjf98e1c02008-10-25 16:22:41 +00002716 hbthr = thr->hbthr;
2717 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002718
sewardjf98e1c02008-10-25 16:22:41 +00002719 so = libhb_so_alloc();
2720 libhb_so_send( hbthr, so, True/*strong send*/ );
2721 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002722}
2723
sewardj11e352f2007-11-30 11:11:02 +00002724static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002725{
sewardjf98e1c02008-10-25 16:22:41 +00002726 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2727 the 'sem' from this semaphore's SO-stack, and do a strong recv
2728 from it. This creates a dependency back to one of the post-ers
2729 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002730
sewardjf98e1c02008-10-25 16:22:41 +00002731 Thread* thr;
2732 SO* so;
2733 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002734
2735 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002736 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002737 (Int)tid, (void*)sem );
2738
2739 thr = map_threads_maybe_lookup( tid );
2740 tl_assert(thr); /* cannot fail - Thread* must already exist */
2741
2742 // error-if: sem is bogus
2743
sewardjf98e1c02008-10-25 16:22:41 +00002744 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002745
sewardjf98e1c02008-10-25 16:22:41 +00002746 if (so) {
2747 hbthr = thr->hbthr;
2748 tl_assert(hbthr);
2749
2750 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2751 libhb_so_dealloc(so);
2752 } else {
2753 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2754 If this happened it would surely be a bug in the threads
2755 library. */
2756 HG_(record_error_Misc)(
2757 thr, "Bug in libpthread: sem_wait succeeded on"
2758 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002759 }
2760}
2761
2762
sewardj9f569b72008-11-13 13:33:09 +00002763/* -------------------------------------------------------- */
2764/* -------------- events to do with barriers -------------- */
2765/* -------------------------------------------------------- */
2766
2767typedef
2768 struct {
2769 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002770 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002771 UWord size; /* declared size */
2772 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2773 }
2774 Bar;
2775
2776static Bar* new_Bar ( void ) {
2777 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2778 tl_assert(bar);
2779 /* all fields are zero */
2780 tl_assert(bar->initted == False);
2781 return bar;
2782}
2783
2784static void delete_Bar ( Bar* bar ) {
2785 tl_assert(bar);
2786 if (bar->waiting)
2787 VG_(deleteXA)(bar->waiting);
2788 HG_(free)(bar);
2789}
2790
2791/* A mapping which stores auxiliary data for barriers. */
2792
2793/* pthread_barrier_t* -> Bar* */
2794static WordFM* map_barrier_to_Bar = NULL;
2795
2796static void map_barrier_to_Bar_INIT ( void ) {
2797 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2798 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2799 "hg.mbtBI.1", HG_(free), NULL );
2800 tl_assert(map_barrier_to_Bar != NULL);
2801 }
2802}
2803
2804static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2805 UWord key, val;
2806 map_barrier_to_Bar_INIT();
2807 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2808 tl_assert(key == (UWord)barrier);
2809 return (Bar*)val;
2810 } else {
2811 Bar* bar = new_Bar();
2812 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2813 return bar;
2814 }
2815}
2816
2817static void map_barrier_to_Bar_delete ( void* barrier ) {
2818 UWord keyW, valW;
2819 map_barrier_to_Bar_INIT();
2820 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2821 Bar* bar = (Bar*)valW;
2822 tl_assert(keyW == (UWord)barrier);
2823 delete_Bar(bar);
2824 }
2825}
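
/* The lazy-init plus lookup-or-alloc pattern above, in isolation: a
   toy sketch in which a linear-scan array stands in for the WordFM.
   Every name here is illustrative, not part of the real tool. */
#include <assert.h>
#include <stdlib.h>

typedef struct { int dummy; } ToyBar;
typedef struct { void* key; ToyBar* val; } ToyEntry;
static ToyEntry toy_bar_map[256];
static int toy_bar_map_n = 0;

static ToyBar* toy_lookup_or_alloc ( void* key )
{
   int i;
   ToyBar* bar;
   for (i = 0; i < toy_bar_map_n; i++)
      if (toy_bar_map[i].key == key)
         return toy_bar_map[i].val;        /* hit: reuse the binding */
   bar = calloc(1, sizeof(ToyBar));        /* miss: create and bind */
   assert(bar && toy_bar_map_n < 256);
   toy_bar_map[toy_bar_map_n].key = key;
   toy_bar_map[toy_bar_map_n].val = bar;
   toy_bar_map_n++;
   return bar;
}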
2826
2827
2828static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2829 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002830 UWord count,
2831 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002832{
2833 Thread* thr;
2834 Bar* bar;
2835
2836 if (SHOW_EVENTS >= 1)
2837 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002838 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2839 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002840
2841 thr = map_threads_maybe_lookup( tid );
2842 tl_assert(thr); /* cannot fail - Thread* must already exist */
2843
2844 if (count == 0) {
2845 HG_(record_error_Misc)(
2846 thr, "pthread_barrier_init: 'count' argument is zero"
2847 );
2848 }
2849
sewardj406bac82010-03-03 23:03:40 +00002850 if (resizable != 0 && resizable != 1) {
2851 HG_(record_error_Misc)(
2852 thr, "pthread_barrier_init: invalid 'resizable' argument"
2853 );
2854 }
2855
sewardj9f569b72008-11-13 13:33:09 +00002856 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2857 tl_assert(bar);
2858
2859 if (bar->initted) {
2860 HG_(record_error_Misc)(
2861 thr, "pthread_barrier_init: barrier is already initialised"
2862 );
2863 }
2864
2865 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2866 tl_assert(bar->initted);
2867 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002868 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002869 );
2870 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2871 }
2872 if (!bar->waiting) {
2873 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2874 sizeof(Thread*) );
2875 }
2876
2877 tl_assert(bar->waiting);
2878 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002879 bar->initted = True;
2880 bar->resizable = resizable == 1 ? True : False;
2881 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002882}
2883
2884
2885static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2886 void* barrier )
2887{
sewardj553655c2008-11-14 19:41:19 +00002888 Thread* thr;
2889 Bar* bar;
2890
sewardj9f569b72008-11-13 13:33:09 +00002891 /* Deal with destroy events. The only purpose is to free storage
2892 associated with the barrier, so as to avoid any possible
2893 resource leaks. */
2894 if (SHOW_EVENTS >= 1)
2895 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2896 "(tid=%d, barrier=%p)\n",
2897 (Int)tid, (void*)barrier );
2898
sewardj553655c2008-11-14 19:41:19 +00002899 thr = map_threads_maybe_lookup( tid );
2900 tl_assert(thr); /* cannot fail - Thread* must already exist */
2901
2902 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2903 tl_assert(bar);
2904
2905 if (!bar->initted) {
2906 HG_(record_error_Misc)(
2907 thr, "pthread_barrier_destroy: barrier was never initialised"
2908 );
2909 }
2910
2911 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2912 HG_(record_error_Misc)(
2913 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2914 );
2915 }
2916
sewardj9f569b72008-11-13 13:33:09 +00002917 /* Maybe we shouldn't do this; just let it persist, so that when it
2918 is reinitialised we don't need to do any dynamic memory
2919 allocation? The downside is a potentially unlimited space leak,
2920 if the client creates (in turn) a large number of barriers all
2921 at different locations. Note that if we do later move to the
2922 don't-delete-it scheme, we need to mark the barrier as
2923 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002924 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002925 map_barrier_to_Bar_delete( barrier );
2926}
2927
2928
sewardj406bac82010-03-03 23:03:40 +00002929/* All the threads have arrived. Now do the Interesting Bit. Get a
2930 new synchronisation object and do a weak send to it from all the
2931 participating threads. This makes its vector clocks be the join of
2932 all the individual threads' vector clocks. Then do a strong
2933 receive from it back to all threads, so that their VCs are a copy
2934 of it (hence are all equal to the join of their original VCs.) */
2935static void do_barrier_cross_sync_and_empty ( Bar* bar )
2936{
2937 /* XXX check bar->waiting has no duplicates */
2938 UWord i;
2939 SO* so = libhb_so_alloc();
2940
2941 tl_assert(bar->waiting);
2942 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2943
2944 /* compute the join ... */
2945 for (i = 0; i < bar->size; i++) {
2946 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2947 Thr* hbthr = t->hbthr;
2948 libhb_so_send( hbthr, so, False/*weak send*/ );
2949 }
2950 /* ... and distribute to all threads */
2951 for (i = 0; i < bar->size; i++) {
2952 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2953 Thr* hbthr = t->hbthr;
2954 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2955 }
2956
2957 /* finally, we must empty out the waiting vector */
2958 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2959
2960 /* and we don't need this any more. Perhaps a stack-allocated
2961 SO would be better? */
2962 libhb_so_dealloc(so);
2963}
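
/* The join-then-distribute scheme above, reduced to a sketch with
   scalar clocks (the real code joins vector clocks; with a scalar the
   join degenerates to max). Hypothetical names throughout. After the
   call, every waiter's clock equals the join of all of them, which is
   exactly the property the weak-send / strong-recv pair establishes. */
static void toy_barrier_cross_sync ( int* clocks, int n )
{
   int i, join = 0;
   for (i = 0; i < n; i++)            /* "weak sends": accumulate join */
      if (clocks[i] > join) join = clocks[i];
   for (i = 0; i < n; i++)            /* "strong recvs": distribute it */
      clocks[i] = join;
}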
2964
2965
sewardj9f569b72008-11-13 13:33:09 +00002966static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2967 void* barrier )
2968{
sewardj1c466b72008-11-19 11:52:14 +00002969 /* This function gets called after a client thread calls
2970 pthread_barrier_wait but before it arrives at the real
2971 pthread_barrier_wait.
2972
2973 Why is the following correct? It's a bit subtle.
2974
2975 If this is not the last thread arriving at the barrier, we simply
2976 note its presence and return. Because valgrind (at least as of
2977 Nov 08) is single threaded, we are guaranteed safe from any race
2978 conditions when in this function -- no other client threads are
2979 running.
2980
2981 If this is the last thread, then we are again the only running
2982 thread. All the other threads will have either arrived at the
2983 real pthread_barrier_wait or are on their way to it, but in any
2984 case are guaranteed not to be able to move past it, because this
2985 thread is currently in this function and so has not yet arrived
2986 at the real pthread_barrier_wait. That means that:
2987
2988 1. While we are in this function, none of the other threads
2989 waiting at the barrier can move past it.
2990
2991 2. When this function returns (and simulated execution resumes),
2992 this thread and all other waiting threads will be able to move
2993 past the real barrier.
2994
2995 Because of this, it is now safe to update the vector clocks of
2996 all threads, to represent the fact that they all arrived at the
2997 barrier and have all moved on. There is no danger of any
2998 complications to do with some threads leaving the barrier and
2999 racing back round to the front, whilst others are still leaving
3000 (which is the primary source of complication in correct handling/
3001 implementation of barriers). That can't happen because we update
3002 here our data structures so as to indicate that the threads have
3003 passed the barrier, even though, as per (2) above, they are
3004 guaranteed not to pass the barrier until we return.
3005
3006 This relies crucially on Valgrind being single threaded. If that
3007 changes, this will need to be reconsidered.
3008 */
sewardj9f569b72008-11-13 13:33:09 +00003009 Thread* thr;
3010 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00003011 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00003012
3013 if (SHOW_EVENTS >= 1)
3014 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
3015 "(tid=%d, barrier=%p)\n",
3016 (Int)tid, (void*)barrier );
3017
3018 thr = map_threads_maybe_lookup( tid );
3019 tl_assert(thr); /* cannot fail - Thread* must already exist */
3020
3021 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3022 tl_assert(bar);
3023
3024 if (!bar->initted) {
3025 HG_(record_error_Misc)(
3026 thr, "pthread_barrier_wait: barrier is uninitialised"
3027 );
3028 return; /* client is broken .. avoid assertions below */
3029 }
3030
3031 /* guaranteed by _INIT_PRE above */
3032 tl_assert(bar->size > 0);
3033 tl_assert(bar->waiting);
3034
3035 VG_(addToXA)( bar->waiting, &thr );
3036
3037 /* guaranteed by this function */
3038 present = VG_(sizeXA)(bar->waiting);
3039 tl_assert(present > 0 && present <= bar->size);
3040
3041 if (present < bar->size)
3042 return;
3043
sewardj406bac82010-03-03 23:03:40 +00003044 do_barrier_cross_sync_and_empty(bar);
3045}
sewardj9f569b72008-11-13 13:33:09 +00003046
sewardj9f569b72008-11-13 13:33:09 +00003047
sewardj406bac82010-03-03 23:03:40 +00003048static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3049 void* barrier,
3050 UWord newcount )
3051{
3052 Thread* thr;
3053 Bar* bar;
3054 UWord present;
3055
3056 if (SHOW_EVENTS >= 1)
3057 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3058 "(tid=%d, barrier=%p, newcount=%lu)\n",
3059 (Int)tid, (void*)barrier, newcount );
3060
3061 thr = map_threads_maybe_lookup( tid );
3062 tl_assert(thr); /* cannot fail - Thread* must already exist */
3063
3064 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3065 tl_assert(bar);
3066
3067 if (!bar->initted) {
3068 HG_(record_error_Misc)(
3069 thr, "pthread_barrier_resize: barrier is uninitialised"
3070 );
3071 return; /* client is broken .. avoid assertions below */
3072 }
3073
3074 if (!bar->resizable) {
3075 HG_(record_error_Misc)(
3076 thr, "pthread_barrier_resize: barrier is may not be resized"
3077 );
3078 return; /* client is broken .. avoid assertions below */
3079 }
3080
3081 if (newcount == 0) {
3082 HG_(record_error_Misc)(
3083 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3084 );
3085 return; /* client is broken .. avoid assertions below */
3086 }
3087
3088 /* guaranteed by _INIT_PRE above */
3089 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003090 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003091 /* Guaranteed by this fn */
3092 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003093
sewardj406bac82010-03-03 23:03:40 +00003094 if (newcount >= bar->size) {
3095 /* Increasing the capacity. There's no possibility of threads
3096 moving on from the barrier in this situation, so just note
3097 the fact and do nothing more. */
3098 bar->size = newcount;
3099 } else {
3100 /* Decreasing the capacity. If we decrease it to be equal or
3101 below the number of waiting threads, they will now move past
3102 the barrier, so need to mess with dep edges in the same way
3103 as if the barrier had filled up normally. */
3104 present = VG_(sizeXA)(bar->waiting);
3105 tl_assert(present >= 0 && present <= bar->size);
3106 if (newcount <= present) {
3107 bar->size = present; /* keep the cross_sync call happy */
3108 do_barrier_cross_sync_and_empty(bar);
3109 }
3110 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003111 }
sewardj9f569b72008-11-13 13:33:09 +00003112}
3113
3114
sewardjed2e72e2009-08-14 11:08:24 +00003115/* ----------------------------------------------------- */
3116/* ----- events to do with user-specified HB edges ----- */
3117/* ----------------------------------------------------- */
3118
3119/* A mapping from arbitrary UWord tag to the SO associated with it.
3120 The UWord tags are meaningless to us, interpreted only by the
3121 user. */
3122
3123
3124
3125/* UWord -> SO* */
3126static WordFM* map_usertag_to_SO = NULL;
3127
3128static void map_usertag_to_SO_INIT ( void ) {
3129 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3130 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3131 "hg.mutS.1", HG_(free), NULL );
3132 tl_assert(map_usertag_to_SO != NULL);
3133 }
3134}
3135
3136static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3137 UWord key, val;
3138 map_usertag_to_SO_INIT();
3139 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3140 tl_assert(key == (UWord)usertag);
3141 return (SO*)val;
3142 } else {
3143 SO* so = libhb_so_alloc();
3144 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3145 return so;
3146 }
3147}
3148
3149// If it's ever needed (XXX check before use)
3150//static void map_usertag_to_SO_delete ( UWord usertag ) {
3151// UWord keyW, valW;
3152// map_usertag_to_SO_INIT();
3153// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3154// SO* so = (SO*)valW;
3155// tl_assert(keyW == usertag);
3156// tl_assert(so);
3157// libhb_so_dealloc(so);
3158// }
3159//}
3160
3161
3162static
3163void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3164{
3165 /* TID is just about to notionally send a message on a notional
3166 abstract synchronisation object whose identity is given by
3167 USERTAG. Bind USERTAG to a real SO if it is not already so
3168 bound, and do a 'strong send' on the SO. This is later used by
3169 other thread(s) which successfully 'receive' from the SO,
3170 thereby acquiring a dependency on this signalling event. */
3171 Thread* thr;
3172 SO* so;
3173
3174 if (SHOW_EVENTS >= 1)
3175 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3176 (Int)tid, usertag );
3177
3178 thr = map_threads_maybe_lookup( tid );
3179 tl_assert(thr); /* cannot fail - Thread* must already exist */
3180
3181 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3182 tl_assert(so);
3183
3184 libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
3185}
3186
3187static
3188void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3189{
3190 /* TID has just notionally received a message from a notional
3191 abstract synchronisation object whose identity is given by
3192 USERTAG. Bind USERTAG to a real SO if it is not already so
3193 bound. If the SO has at some point in the past been 'sent' on,
3194 to a 'strong receive' on it, thereby acquiring a dependency on
3195 the sender. */
3196 Thread* thr;
3197 SO* so;
3198
3199 if (SHOW_EVENTS >= 1)
3200 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3201 (Int)tid, usertag );
3202
3203 thr = map_threads_maybe_lookup( tid );
3204 tl_assert(thr); /* cannot fail - Thread* must already exist */
3205
3206 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3207 tl_assert(so);
3208
3209 /* Acquire a dependency on it. If the SO has never so far been
3210 sent on, then libhb_so_recv will do nothing. So we're safe
3211 regardless of SO's history. */
3212 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3213}
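
/* For reference, the client-side view of the two events above -- a
   sketch assuming the ANNOTATE_HAPPENS_BEFORE/AFTER macros from
   helgrind.h, which map onto USERSO_SEND_PRE / USERSO_RECV_POST with
   the annotated address as the usertag. (Commented out: this is
   client code, not tool code. Note that the flag variable itself may
   still be reported racy unless separately annotated.) */
//#include <pthread.h>
//#include "helgrind.h"
//
//static int payload;
//static volatile int ready = 0;
//
//static void* producer ( void* v ) {
//   payload = 42;
//   ANNOTATE_HAPPENS_BEFORE(&ready); /* strong send on SO for &ready */
//   ready = 1;
//   return NULL;
//}
//
//static void* consumer ( void* v ) {
//   while (!ready) ;                 /* spin; illustration only */
//   ANNOTATE_HAPPENS_AFTER(&ready);  /* strong recv: acquire dependency */
//   return (void*)(long)payload;     /* hence no race on 'payload' */
//}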
3214
3215
sewardjb4112022007-11-09 22:49:28 +00003216/*--------------------------------------------------------------*/
3217/*--- Lock acquisition order monitoring ---*/
3218/*--------------------------------------------------------------*/
3219
3220/* FIXME: here are some optimisations still to do in
3221 laog__pre_thread_acquires_lock.
3222
3223 The graph is structured so that if L1 --*--> L2 then L1 must be
3224 acquired before L2.
3225
3226 The common case is that some thread T holds (eg) L1 L2 and L3 and
3227 is repeatedly acquiring and releasing Ln, and there is no ordering
3228 error in what it is doing. Hence it repeatedly:
3229
3230 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3231 produces the answer No (because there is no error).
3232
3233 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3234 (because they already got added the first time T acquired Ln).
3235
3236 Hence cache these two events:
3237
3238 (1) Cache result of the query from last time. Invalidate the cache
3239 any time any edges are added to or deleted from laog.
3240
3241 (2) Cache these add-edge requests and ignore them if said edges
3242 have already been added to laog. Invalidate the cache any time
3243 any edges are deleted from laog.
3244*/
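
/* One possible shape for cache (1) above -- a sketch only, not
   implemented: a single-entry memo of the last DFS query, flushed
   whenever any edge is added to or deleted from laog. */
//typedef
//   struct {
//      Bool valid;
//      Lock* lk; /* lock being acquired in the memoised query */
//      WordSetID held; /* thr->locksetA at the time of the query */
//      Lock* result; /* what laog__do_dfs_from_to returned */
//   }
//   LaogQueryCache;
//static LaogQueryCache laog_query_cache = { False, NULL, 0, NULL };
///* laog__add_edge and laog__del_edge would set .valid = False;
//   laog__pre_thread_acquires_lock would consult the memo before
//   doing the DFS, and refill it afterwards. */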
3245
3246typedef
3247 struct {
3248 WordSetID inns; /* in univ_laog */
3249 WordSetID outs; /* in univ_laog */
3250 }
3251 LAOGLinks;
3252
3253/* lock order acquisition graph */
3254static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3255
3256/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3257 where that edge was created, so that we can show the user later if
3258 we need to. */
3259typedef
3260 struct {
3261 Addr src_ga; /* Lock guest addresses for */
3262 Addr dst_ga; /* src/dst of the edge */
3263 ExeContext* src_ec; /* And corresponding places where that */
3264 ExeContext* dst_ec; /* ordering was established */
3265 }
3266 LAOGLinkExposition;
3267
sewardj250ec2e2008-02-15 22:02:30 +00003268static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003269 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3270 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3271 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3272 if (llx1->src_ga < llx2->src_ga) return -1;
3273 if (llx1->src_ga > llx2->src_ga) return 1;
3274 if (llx1->dst_ga < llx2->dst_ga) return -1;
3275 if (llx1->dst_ga > llx2->dst_ga) return 1;
3276 return 0;
3277}
3278
3279static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3280/* end EXPOSITION ONLY */
3281
3282
sewardja65db102009-01-26 10:45:16 +00003283__attribute__((noinline))
3284static void laog__init ( void )
3285{
3286 tl_assert(!laog);
3287 tl_assert(!laog_exposition);
3288
3289 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3290 HG_(free), NULL/*unboxedcmp*/ );
3291
3292 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3293 cmp_LAOGLinkExposition );
3294 tl_assert(laog);
3295 tl_assert(laog_exposition);
3296}
3297
sewardjb4112022007-11-09 22:49:28 +00003298static void laog__show ( Char* who ) {
3299 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003300 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003301 Lock* me;
3302 LAOGLinks* links;
3303 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003304 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003305 me = NULL;
3306 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003307 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003308 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003309 tl_assert(me);
3310 tl_assert(links);
3311 VG_(printf)(" node %p:\n", me);
3312 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3313 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003314 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003315 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3316 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003317 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003318 me = NULL;
3319 links = NULL;
3320 }
sewardj896f6f92008-08-19 08:38:52 +00003321 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003322 VG_(printf)("}\n");
3323}
3324
3325__attribute__((noinline))
3326static void laog__add_edge ( Lock* src, Lock* dst ) {
3327 Word keyW;
3328 LAOGLinks* links;
3329 Bool presentF, presentR;
3330 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3331
3332 /* Take the opportunity to sanity check the graph. Record in
3333 presentF if there is already a src->dst mapping in this node's
3334 forwards links, and presentR if there is already a src->dst
3335 mapping in this node's backwards links. They should agree!
3336 Also, we need to know whether the edge was already present so as
3337 to decide whether or not to update the link details mapping. We
3338 can compute presentF and presentR essentially for free, so may
3339 as well do this always. */
3340 presentF = presentR = False;
3341
3342 /* Update the out edges for src */
3343 keyW = 0;
3344 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003345 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003346 WordSetID outs_new;
3347 tl_assert(links);
3348 tl_assert(keyW == (Word)src);
3349 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3350 presentF = outs_new == links->outs;
3351 links->outs = outs_new;
3352 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003353 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003354 links->inns = HG_(emptyWS)( univ_laog );
3355 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003356 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003357 }
3358 /* Update the in edges for dst */
3359 keyW = 0;
3360 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003361 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003362 WordSetID inns_new;
3363 tl_assert(links);
3364 tl_assert(keyW == (Word)dst);
3365 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3366 presentR = inns_new == links->inns;
3367 links->inns = inns_new;
3368 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003369 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003370 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3371 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003372 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003373 }
3374
3375 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3376
3377 if (!presentF && src->acquired_at && dst->acquired_at) {
3378 LAOGLinkExposition expo;
3379 /* If this edge is entering the graph, and we have acquired_at
3380 information for both src and dst, record those acquisition
3381 points. Hence, if there is later a violation of this
3382 ordering, we can show the user the two places in which the
3383 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003384 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003385 src->guestaddr, dst->guestaddr);
3386 expo.src_ga = src->guestaddr;
3387 expo.dst_ga = dst->guestaddr;
3388 expo.src_ec = NULL;
3389 expo.dst_ec = NULL;
3390 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003391 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003392 /* we already have it; do nothing */
3393 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003394 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3395 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003396 expo2->src_ga = src->guestaddr;
3397 expo2->dst_ga = dst->guestaddr;
3398 expo2->src_ec = src->acquired_at;
3399 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003400 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003401 }
3402 }
3403}
3404
3405__attribute__((noinline))
3406static void laog__del_edge ( Lock* src, Lock* dst ) {
3407 Word keyW;
3408 LAOGLinks* links;
3409 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3410 /* Update the out edges for src */
3411 keyW = 0;
3412 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003413 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003414 tl_assert(links);
3415 tl_assert(keyW == (Word)src);
3416 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3417 }
3418 /* Update the in edges for dst */
3419 keyW = 0;
3420 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003421 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003422 tl_assert(links);
3423 tl_assert(keyW == (Word)dst);
3424 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3425 }
3426}
3427
3428__attribute__((noinline))
3429static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3430 Word keyW;
3431 LAOGLinks* links;
3432 keyW = 0;
3433 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003434 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003435 tl_assert(links);
3436 tl_assert(keyW == (Word)lk);
3437 return links->outs;
3438 } else {
3439 return HG_(emptyWS)( univ_laog );
3440 }
3441}
3442
3443__attribute__((noinline))
3444static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3445 Word keyW;
3446 LAOGLinks* links;
3447 keyW = 0;
3448 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003449 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003450 tl_assert(links);
3451 tl_assert(keyW == (Word)lk);
3452 return links->inns;
3453 } else {
3454 return HG_(emptyWS)( univ_laog );
3455 }
3456}
3457
3458__attribute__((noinline))
3459static void laog__sanity_check ( Char* who ) {
3460 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003461 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003462 Lock* me;
3463 LAOGLinks* links;
sewardja65db102009-01-26 10:45:16 +00003464 if (UNLIKELY(!laog || !laog_exposition))
3465 laog__init();
sewardj896f6f92008-08-19 08:38:52 +00003466 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003467 me = NULL;
3468 links = NULL;
3469 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003470 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003471 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003472 tl_assert(me);
3473 tl_assert(links);
3474 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3475 for (i = 0; i < ws_size; i++) {
3476 if ( ! HG_(elemWS)( univ_laog,
3477 laog__succs( (Lock*)ws_words[i] ),
3478 (Word)me ))
3479 goto bad;
3480 }
3481 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3482 for (i = 0; i < ws_size; i++) {
3483 if ( ! HG_(elemWS)( univ_laog,
3484 laog__preds( (Lock*)ws_words[i] ),
3485 (Word)me ))
3486 goto bad;
3487 }
3488 me = NULL;
3489 links = NULL;
3490 }
sewardj896f6f92008-08-19 08:38:52 +00003491 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003492 return;
3493
3494 bad:
3495 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3496 laog__show(who);
3497 tl_assert(0);
3498}
3499
3500/* If there is a path in laog from 'src' to any of the elements in
3501 'dst', return an arbitrarily chosen element of 'dst' reachable from
3502 'src'. If no path exists from 'src' to any element in 'dst', return
3503 NULL. */
3504__attribute__((noinline))
3505static
3506Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3507{
3508 Lock* ret;
3509 Word i, ssz;
3510 XArray* stack; /* of Lock* */
3511 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3512 Lock* here;
3513 WordSetID succs;
3514 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003515 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003516 //laog__sanity_check();
3517
3518 /* If the destination set is empty, we can never get there from
3519 'src' :-), so don't bother to try */
3520 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3521 return NULL;
3522
3523 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003524 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3525 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003526
3527 (void) VG_(addToXA)( stack, &src );
3528
3529 while (True) {
3530
3531 ssz = VG_(sizeXA)( stack );
3532
3533 if (ssz == 0) { ret = NULL; break; }
3534
3535 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3536 VG_(dropTailXA)( stack, 1 );
3537
3538 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3539
sewardj896f6f92008-08-19 08:38:52 +00003540 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003541 continue;
3542
sewardj896f6f92008-08-19 08:38:52 +00003543 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003544
3545 succs = laog__succs( here );
3546 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3547 for (i = 0; i < succs_size; i++)
3548 (void) VG_(addToXA)( stack, &succs_words[i] );
3549 }
3550
sewardj896f6f92008-08-19 08:38:52 +00003551 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003552 VG_(deleteXA)( stack );
3553 return ret;
3554}
3555
3556
3557/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3558 between 'lk' and the locks already held by 'thr' and issue a
3559 complaint if so. Also, update the ordering graph appropriately.
3560*/
3561__attribute__((noinline))
3562static void laog__pre_thread_acquires_lock (
3563 Thread* thr, /* NB: BEFORE lock is added */
3564 Lock* lk
3565 )
3566{
sewardj250ec2e2008-02-15 22:02:30 +00003567 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003568 Word ls_size, i;
3569 Lock* other;
3570
3571 /* It may be that 'thr' already holds 'lk' and is recursively
3572 relocking it. In this case we just ignore the call. */
3573 /* NB: univ_lsets really is correct here */
3574 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3575 return;
3576
sewardja65db102009-01-26 10:45:16 +00003577 if (UNLIKELY(!laog || !laog_exposition))
3578 laog__init();
sewardjb4112022007-11-09 22:49:28 +00003579
3580 /* First, the check. Complain if there is any path in laog from lk
3581 to any of the locks already held by thr, since if any such path
3582 existed, it would mean that previously lk was acquired before
3583 (rather than after, as we are doing here) at least one of those
3584 locks.
3585 */
3586 other = laog__do_dfs_from_to(lk, thr->locksetA);
3587 if (other) {
3588 LAOGLinkExposition key, *found;
3589 /* So we managed to find a path lk --*--> other in the graph,
3590 which implies that 'lk' should have been acquired before
3591 'other' but is in fact being acquired afterwards. We present
3592 the lk/other arguments to record_error_LockOrder in the order
3593 in which they should have been acquired. */
3594 /* Go look in the laog_exposition mapping, to find the allocation
3595 points for this edge, so we can show the user. */
3596 key.src_ga = lk->guestaddr;
3597 key.dst_ga = other->guestaddr;
3598 key.src_ec = NULL;
3599 key.dst_ec = NULL;
3600 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003601 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003602 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003603 tl_assert(found != &key);
3604 tl_assert(found->src_ga == key.src_ga);
3605 tl_assert(found->dst_ga == key.dst_ga);
3606 tl_assert(found->src_ec);
3607 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003608 HG_(record_error_LockOrder)(
3609 thr, lk->guestaddr, other->guestaddr,
3610 found->src_ec, found->dst_ec );
sewardjb4112022007-11-09 22:49:28 +00003611 } else {
3612 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003613 HG_(record_error_LockOrder)(
3614 thr, lk->guestaddr, other->guestaddr,
3615 NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003616 }
3617 }
3618
3619 /* Second, add to laog the pairs
3620 (old, lk) | old <- locks already held by thr
3621 Since both old and lk are currently held by thr, their acquired_at
3622 fields must be non-NULL.
3623 */
3624 tl_assert(lk->acquired_at);
3625 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3626 for (i = 0; i < ls_size; i++) {
3627 Lock* old = (Lock*)ls_words[i];
3628 tl_assert(old->acquired_at);
3629 laog__add_edge( old, lk );
3630 }
3631
3632 /* Why "except_Locks" ? We're here because a lock is being
3633 acquired by a thread, and we're in an inconsistent state here.
3634 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3635 When called in this inconsistent state, locks__sanity_check duly
3636 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003637 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003638 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3639}
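
/* For reference, the kind of client behaviour the check above
   catches -- a sketch of a classic lock-order inversion (commented
   out: client code, not tool code): */
//#include <pthread.h>
//static pthread_mutex_t M1 = PTHREAD_MUTEX_INITIALIZER;
//static pthread_mutex_t M2 = PTHREAD_MUTEX_INITIALIZER;
//
//static void* t1_fn ( void* v ) { /* establishes laog edge M1 --> M2 */
//   pthread_mutex_lock(&M1); pthread_mutex_lock(&M2);
//   pthread_mutex_unlock(&M2); pthread_mutex_unlock(&M1);
//   return NULL;
//}
//static void* t2_fn ( void* v ) { /* acquires M1 whilst holding M2; the
//                                    DFS finds M1 --*--> M2, so Helgrind
//                                    reports a lock order violation */
//   pthread_mutex_lock(&M2); pthread_mutex_lock(&M1);
//   pthread_mutex_unlock(&M1); pthread_mutex_unlock(&M2);
//   return NULL;
//}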
3640
3641
3642/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3643
3644__attribute__((noinline))
3645static void laog__handle_one_lock_deletion ( Lock* lk )
3646{
3647 WordSetID preds, succs;
3648 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003649 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003650
sewardja65db102009-01-26 10:45:16 +00003651 if (UNLIKELY(!laog || !laog_exposition))
3652 laog__init();
3653
sewardjb4112022007-11-09 22:49:28 +00003654 preds = laog__preds( lk );
3655 succs = laog__succs( lk );
3656
3657 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3658 for (i = 0; i < preds_size; i++)
3659 laog__del_edge( (Lock*)preds_words[i], lk );
3660
3661 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3662 for (j = 0; j < succs_size; j++)
3663 laog__del_edge( lk, (Lock*)succs_words[j] );
3664
3665 for (i = 0; i < preds_size; i++) {
3666 for (j = 0; j < succs_size; j++) {
3667 if (preds_words[i] != succs_words[j]) {
3668 /* This can pass unlocked locks to laog__add_edge, since
3669 we're deleting stuff. So their acquired_at fields may
3670 be NULL. */
3671 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3672 }
3673 }
3674 }
3675}
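
/* The pred x succ bridging above preserves reachability: if
   P --*--> lk --*--> S held before the deletion, P --*--> S still
   holds afterwards. The same idea in miniature, with hypothetical
   names and adjacency kept as bitmasks over at most 8 node indices: */
static void toy_delete_node ( unsigned char out[8], int k, int n )
{
   int p, s;
   for (p = 0; p < n; p++) {
      if (p == k || !(out[p] & (1 << k)))
         continue;                            /* p is not a pred of k */
      out[p] &= (unsigned char)~(1 << k);     /* drop edge p --> k */
      for (s = 0; s < n; s++)
         if (s != k && s != p && (out[k] & (1 << s)))
            out[p] |= (unsigned char)(1 << s); /* bridge: p --> s */
   }
   out[k] = 0;                                /* drop all edges k --> * */
}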
3676
sewardj1cbc12f2008-11-10 16:16:46 +00003677//__attribute__((noinline))
3678//static void laog__handle_lock_deletions (
3679// WordSetID /* in univ_laog */ locksToDelete
3680// )
3681//{
3682// Word i, ws_size;
3683// UWord* ws_words;
3684//
sewardja65db102009-01-26 10:45:16 +00003685// if (UNLIKELY(!laog || !laog_exposition))
3686// laog__init();
sewardj1cbc12f2008-11-10 16:16:46 +00003687//
3688// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3689// for (i = 0; i < ws_size; i++)
3690// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3691//
3692// if (HG_(clo_sanity_flags) & SCE_LAOG)
3693// all__sanity_check("laog__handle_lock_deletions-post");
3694//}
sewardjb4112022007-11-09 22:49:28 +00003695
3696
3697/*--------------------------------------------------------------*/
3698/*--- Malloc/free replacements ---*/
3699/*--------------------------------------------------------------*/
3700
3701typedef
3702 struct {
3703 void* next; /* required by m_hashtable */
3704 Addr payload; /* ptr to actual block */
3705 SizeT szB; /* size requested */
3706 ExeContext* where; /* where it was allocated */
3707 Thread* thr; /* allocating thread */
3708 }
3709 MallocMeta;
3710
3711/* A hash table of MallocMetas, used to track malloc'd blocks
3712 (obviously). */
3713static VgHashTable hg_mallocmeta_table = NULL;
3714
3715
3716static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003717 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003718 tl_assert(md);
3719 return md;
3720}
3721static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003722 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003723}
3724
3725
3726/* Allocate a client block and set up the metadata for it. */
3727
3728static
3729void* handle_alloc ( ThreadId tid,
3730 SizeT szB, SizeT alignB, Bool is_zeroed )
3731{
3732 Addr p;
3733 MallocMeta* md;
3734
3735 tl_assert( ((SSizeT)szB) >= 0 );
3736 p = (Addr)VG_(cli_malloc)(alignB, szB);
3737 if (!p) {
3738 return NULL;
3739 }
3740 if (is_zeroed)
3741 VG_(memset)((void*)p, 0, szB);
3742
3743 /* Note that map_threads_lookup must succeed (cannot assert), since
3744 memory can only be allocated by currently alive threads, hence
3745 they must have an entry in map_threads. */
3746 md = new_MallocMeta();
3747 md->payload = p;
3748 md->szB = szB;
3749 md->where = VG_(record_ExeContext)( tid, 0 );
3750 md->thr = map_threads_lookup( tid );
3751
3752 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3753
3754 /* Tell the lower level memory wranglers. */
3755 evh__new_mem_heap( p, szB, is_zeroed );
3756
3757 return (void*)p;
3758}
3759
3760/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3761 Cast to a signed type to catch any unexpectedly negative args.
3762 We're assuming here that the size asked for is not greater than
3763 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3764 platforms). */
3765static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3766 if (((SSizeT)n) < 0) return NULL;
3767 return handle_alloc ( tid, n, VG_(clo_alignment),
3768 /*is_zeroed*/False );
3769}
3770static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3771 if (((SSizeT)n) < 0) return NULL;
3772 return handle_alloc ( tid, n, VG_(clo_alignment),
3773 /*is_zeroed*/False );
3774}
3775static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3776 if (((SSizeT)n) < 0) return NULL;
3777 return handle_alloc ( tid, n, VG_(clo_alignment),
3778 /*is_zeroed*/False );
3779}
3780static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3781 if (((SSizeT)n) < 0) return NULL;
3782 return handle_alloc ( tid, n, align,
3783 /*is_zeroed*/False );
3784}
3785static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3786 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3787 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3788 /*is_zeroed*/True );
3789}
3790
3791
3792/* Free a client block, including getting rid of the relevant
3793 metadata. */
3794
3795static void handle_free ( ThreadId tid, void* p )
3796{
3797 MallocMeta *md, *old_md;
3798 SizeT szB;
3799
3800 /* First see if we can find the metadata for 'p'. */
3801 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3802 if (!md)
3803 return; /* apparently freeing a bogus address. Oh well. */
3804
3805 tl_assert(md->payload == (Addr)p);
3806 szB = md->szB;
3807
3808 /* Nuke the metadata block */
3809 old_md = (MallocMeta*)
3810 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3811 tl_assert(old_md); /* it must be present - we just found it */
3812 tl_assert(old_md == md);
3813 tl_assert(old_md->payload == (Addr)p);
3814
3815 VG_(cli_free)((void*)old_md->payload);
3816 delete_MallocMeta(old_md);
3817
3818 /* Tell the lower level memory wranglers. */
3819 evh__die_mem_heap( (Addr)p, szB );
3820}
3821
3822static void hg_cli__free ( ThreadId tid, void* p ) {
3823 handle_free(tid, p);
3824}
3825static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3826 handle_free(tid, p);
3827}
3828static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3829 handle_free(tid, p);
3830}
3831
3832
3833static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3834{
3835 MallocMeta *md, *md_new, *md_tmp;
3836 SizeT i;
3837
3838 Addr payload = (Addr)payloadV;
3839
3840 if (((SSizeT)new_size) < 0) return NULL;
3841
3842 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3843 if (!md)
3844 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3845
3846 tl_assert(md->payload == payload);
3847
3848 if (md->szB == new_size) {
3849 /* size unchanged */
3850 md->where = VG_(record_ExeContext)(tid, 0);
3851 return payloadV;
3852 }
3853
3854 if (md->szB > new_size) {
3855 /* new size is smaller */
3856 md->szB = new_size;
3857 md->where = VG_(record_ExeContext)(tid, 0);
3858 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3859 return payloadV;
3860 }
3861
3862 /* else */ {
3863 /* new size is bigger */
3864 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3865
3866 /* First half kept and copied, second half new */
3867 // FIXME: shouldn't we use a copier which implements the
3868 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003869 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003870 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003871 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003872 /* FIXME: can anything funny happen here? specifically, if the
3873 old range contained a lock, then die_mem_heap will complain.
3874 Is that the correct behaviour? Not sure. */
3875 evh__die_mem_heap( payload, md->szB );
3876
3877 /* Copy from old to new */
3878 for (i = 0; i < md->szB; i++)
3879 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3880
3881 /* Because the metadata hash table is indexed by payload address,
3882 we have to get rid of the old hash table entry and make a new
3883 one. We can't just modify the existing metadata in place,
3884 because then it would (almost certainly) be in the wrong hash
3885 chain. */
3886 md_new = new_MallocMeta();
3887 *md_new = *md;
3888
3889 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3890 tl_assert(md_tmp);
3891 tl_assert(md_tmp == md);
3892
3893 VG_(cli_free)((void*)md->payload);
3894 delete_MallocMeta(md);
3895
3896 /* Update fields */
3897 md_new->where = VG_(record_ExeContext)( tid, 0 );
3898 md_new->szB = new_size;
3899 md_new->payload = p_new;
3900 md_new->thr = map_threads_lookup( tid );
3901
3902 /* and add */
3903 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3904
3905 return (void*)p_new;
3906 }
3907}
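
/* The remove-then-reinsert dance above is the general rule for any
   hash table keyed on a mutable field: rehash on key change, since
   in-place mutation would leave the node in the wrong chain. A toy
   illustration (hypothetical four-bucket chained table; 'nd' must
   already be present in the table): */
typedef struct ToyNode_ { struct ToyNode_* next; unsigned long key; }
        ToyNode;

static void toy_rekey ( ToyNode* bucket[4], ToyNode* nd,
                        unsigned long newkey )
{
   ToyNode** p = &bucket[nd->key % 4];
   while (*p != nd) p = &(*p)->next;   /* unlink from the old chain */
   *p = nd->next;
   nd->key = newkey;                   /* mutate the key ... */
   nd->next = bucket[newkey % 4];      /* ... and relink at the front of */
   bucket[newkey % 4] = nd;            /* the chain the new key hashes to */
}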
3908
njn8b140de2009-02-17 04:31:18 +00003909static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3910{
3911 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3912
3913 // There may be slop, but pretend there isn't because only the asked-for
3914 // area will have been shadowed properly.
3915 return ( md ? md->szB : 0 );
3916}
3917
sewardjb4112022007-11-09 22:49:28 +00003918
sewardj095d61e2010-03-11 13:43:18 +00003919/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003920 Slow linear search. With a bit of hash table help if 'data_addr'
3921 is either the start of a block or up to 15 word-sized steps along
3922 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003923
3924static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3925{
sewardjc8028ad2010-05-05 09:34:42 +00003926 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3927 right at it. */
3928 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3929 return True;
3930 /* else normal interval rules apply */
3931 if (LIKELY(a < mm->payload)) return False;
3932 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3933 return True;
sewardj095d61e2010-03-11 13:43:18 +00003934}
3935
sewardjc8028ad2010-05-05 09:34:42 +00003936Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003937 /*OUT*/Addr* payload,
3938 /*OUT*/SizeT* szB,
3939 Addr data_addr )
3940{
3941 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003942 Int i;
3943 const Int n_fast_check_words = 16;
3944
3945 /* First, do a few fast searches on the basis that data_addr might
3946 be exactly the start of a block or up to 15 words inside. This
3947 can happen commonly via the creq
3948 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3949 for (i = 0; i < n_fast_check_words; i++) {
3950 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3951 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3952 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3953 goto found;
3954 }
3955
sewardj095d61e2010-03-11 13:43:18 +00003956 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003957 some such, it's hard to see how to do better. We have to check
3958 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003959 VG_(HT_ResetIter)(hg_mallocmeta_table);
3960 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003961 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3962 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003963 }
sewardjc8028ad2010-05-05 09:34:42 +00003964
3965 /* Not found. Bah. */
3966 return False;
3967 /*NOTREACHED*/
3968
3969 found:
3970 tl_assert(mm);
3971 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3972 if (where) *where = mm->where;
3973 if (payload) *payload = mm->payload;
3974 if (szB) *szB = mm->szB;
3975 return True;
sewardj095d61e2010-03-11 13:43:18 +00003976}
3977
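/* The two-tier strategy above in miniature: an exact-key probe is
   cheap, a containment scan is expensive, so try a handful of probes
   at word-aligned offsets at or below 'a' before paying for the full
   scan. In this toy version a bsearch over a payload-sorted array
   plays the role of the hash lookup; the zero-sized-block special
   case is omitted, and all names are illustrative. */
#include <stdlib.h>

typedef struct { unsigned long payload; unsigned long szB; } ToyMM;

static int toy_cmp_payload ( const void* a, const void* b ) {
   unsigned long pa = ((const ToyMM*)a)->payload;
   unsigned long pb = ((const ToyMM*)b)->payload;
   return pa < pb ? -1 : pa > pb ? 1 : 0;
}

/* 'tab' must be sorted by payload. */
static ToyMM* toy_find_containing ( ToyMM* tab, size_t n, unsigned long a )
{
   int i;
   size_t j;
   for (i = 0; i < 16; i++) {          /* fast path: 16 exact probes */
      ToyMM key = { a - (unsigned long)i * sizeof(unsigned long), 0 };
      ToyMM* mm = bsearch(&key, tab, n, sizeof(ToyMM), toy_cmp_payload);
      if (mm && a >= mm->payload && a < mm->payload + mm->szB)
         return mm;
   }
   for (j = 0; j < n; j++)             /* slow path: check every block */
      if (a >= tab[j].payload && a < tab[j].payload + tab[j].szB)
         return &tab[j];
   return NULL;
}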
3978
sewardjb4112022007-11-09 22:49:28 +00003979/*--------------------------------------------------------------*/
3980/*--- Instrumentation ---*/
3981/*--------------------------------------------------------------*/
3982
3983static void instrument_mem_access ( IRSB* bbOut,
3984 IRExpr* addr,
3985 Int szB,
3986 Bool isStore,
3987 Int hWordTy_szB )
3988{
3989 IRType tyAddr = Ity_INVALID;
3990 HChar* hName = NULL;
3991 void* hAddr = NULL;
3992 Int regparms = 0;
3993 IRExpr** argv = NULL;
3994 IRDirty* di = NULL;
3995
3996 tl_assert(isIRAtom(addr));
3997 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3998
3999 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
4000 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
4001
4002 /* So the effective address is in 'addr' now. */
4003 regparms = 1; // unless stated otherwise
4004 if (isStore) {
4005 switch (szB) {
4006 case 1:
sewardj23f12002009-07-24 08:45:08 +00004007 hName = "evh__mem_help_cwrite_1";
4008 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00004009 argv = mkIRExprVec_1( addr );
4010 break;
4011 case 2:
sewardj23f12002009-07-24 08:45:08 +00004012 hName = "evh__mem_help_cwrite_2";
4013 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00004014 argv = mkIRExprVec_1( addr );
4015 break;
4016 case 4:
sewardj23f12002009-07-24 08:45:08 +00004017 hName = "evh__mem_help_cwrite_4";
4018 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00004019 argv = mkIRExprVec_1( addr );
4020 break;
4021 case 8:
sewardj23f12002009-07-24 08:45:08 +00004022 hName = "evh__mem_help_cwrite_8";
4023 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00004024 argv = mkIRExprVec_1( addr );
4025 break;
4026 default:
4027 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4028 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004029 hName = "evh__mem_help_cwrite_N";
4030 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00004031 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4032 break;
4033 }
4034 } else {
4035 switch (szB) {
4036 case 1:
sewardj23f12002009-07-24 08:45:08 +00004037 hName = "evh__mem_help_cread_1";
4038 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004039 argv = mkIRExprVec_1( addr );
4040 break;
4041 case 2:
sewardj23f12002009-07-24 08:45:08 +00004042 hName = "evh__mem_help_cread_2";
4043 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004044 argv = mkIRExprVec_1( addr );
4045 break;
4046 case 4:
sewardj23f12002009-07-24 08:45:08 +00004047 hName = "evh__mem_help_cread_4";
4048 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004049 argv = mkIRExprVec_1( addr );
4050 break;
4051 case 8:
sewardj23f12002009-07-24 08:45:08 +00004052 hName = "evh__mem_help_cread_8";
4053 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004054 argv = mkIRExprVec_1( addr );
4055 break;
4056 default:
4057 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4058 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004059 hName = "evh__mem_help_cread_N";
4060 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004061 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4062 break;
4063 }
4064 }
4065
4066 /* Add the helper. */
4067 tl_assert(hName);
4068 tl_assert(hAddr);
4069 tl_assert(argv);
4070 di = unsafeIRDirty_0_N( regparms,
4071 hName, VG_(fnptr_to_fnentry)( hAddr ),
4072 argv );
4073 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
4074}
4075
4076
sewardja0eee322009-07-31 08:46:35 +00004077/* Figure out if GA is a guest code address in the dynamic linker, and
4078 if so return True. Otherwise (and in case of any doubt) return
4079 False. (Fails on the safe side: False is the safe value.)
4080static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4081{
4082 DebugInfo* dinfo;
4083 const UChar* soname;
4084 if (0) return False;
4085
sewardje3f1e592009-07-31 09:41:29 +00004086 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004087 if (!dinfo) return False;
4088
sewardje3f1e592009-07-31 09:41:29 +00004089 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004090 tl_assert(soname);
4091 if (0) VG_(printf)("%s\n", soname);
4092
4093# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004094 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004095 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4096 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4097 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4098 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4099# elif defined(VGO_darwin)
4100 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4101# else
4102# error "Unsupported OS"
4103# endif
4104 return False;
4105}

static
IRSB* hg_instrument ( VgCallbackClosure* closure,
                      IRSB* bbIn,
                      VexGuestLayout* layout,
                      VexGuestExtents* vge,
                      IRType gWordTy, IRType hWordTy )
{
   Int     i;
   IRSB*   bbOut;
   Addr64  cia; /* address of current insn */
   IRStmt* st;
   Bool    inLDSO = False;
   Addr64  inLDSOmask4K = 1; /* mismatches on first check */

   if (gWordTy != hWordTy) {
      /* We don't currently support this case. */
      VG_(tool_panic)("host/guest word size mismatch");
   }

   if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
      VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
   }

   /* Set up BB */
   bbOut           = emptyIRSB();
   bbOut->tyenv    = deepCopyIRTypeEnv(bbIn->tyenv);
   bbOut->next     = deepCopyIRExpr(bbIn->next);
   bbOut->jumpkind = bbIn->jumpkind;

   // Copy verbatim any IR preamble preceding the first IMark
   i = 0;
   while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
      addStmtToIRSB( bbOut, bbIn->stmts[i] );
      i++;
   }

   // Get the first statement, and initial cia from it
   tl_assert(bbIn->stmts_used > 0);
   tl_assert(i < bbIn->stmts_used);
   st = bbIn->stmts[i];
   tl_assert(Ist_IMark == st->tag);
   cia = st->Ist.IMark.addr;
   st = NULL;

   for (/*use current i*/; i < bbIn->stmts_used; i++) {
      st = bbIn->stmts[i];
      tl_assert(st);
      tl_assert(isFlatIRStmt(st));
      switch (st->tag) {
         case Ist_NoOp:
         case Ist_AbiHint:
         case Ist_Put:
         case Ist_PutI:
         case Ist_Exit:
            /* None of these can contain any memory references. */
            break;

         case Ist_IMark:
            /* no mem refs, but note the insn address. */
            cia = st->Ist.IMark.addr;
            /* Don't instrument the dynamic linker.  It generates a
               lot of races which we just expensively suppress, so
               it's pointless.

               Avoid flooding is_in_dynamic_linker_shared_object with
               requests by only checking at transitions between 4K
               pages. */
            if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
               if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
               inLDSOmask4K = cia & ~(Addr64)0xFFF;
               inLDSO = is_in_dynamic_linker_shared_object(cia);
            } else {
               if (0) VG_(printf)("old %#lx\n", (Addr)cia);
            }
            break;
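            /* Worked example (illustrative values only): if cia is
               0x40001234 then cia & ~(Addr64)0xFFF is 0x40001000,
               the base of the enclosing 4K page.  The soname lookup
               above therefore re-runs only when execution crosses
               into a different 4K page, not on every instruction. */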

         case Ist_MBE:
            switch (st->Ist.MBE.event) {
               case Imbe_Fence:
                  break; /* not interesting */
               default:
                  goto unhandled;
            }
            break;

         case Ist_CAS: {
            /* Atomic read-modify-write cycle.  Just pretend it's a
               read. */
            IRCAS* cas    = st->Ist.CAS.details;
            Bool   isDCAS = cas->oldHi != IRTemp_INVALID;
            if (isDCAS) {
               tl_assert(cas->expdHi);
               tl_assert(cas->dataHi);
            } else {
               tl_assert(!cas->expdHi);
               tl_assert(!cas->dataHi);
            }
            /* Just be boring about it. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  cas->addr,
                  (isDCAS ? 2 : 1)
                     * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
                  False/*!isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;
         }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, viz, ignore
               them.  Load-linked operations, by contrast, are treated
               the same as normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC: ignore */
            }
            break;
         }

         case Ist_Store:
            /* A plain store.  (Store-conditionals appear as Ist_LLSC
               and are ignored above, not here.) */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* If the rhs is a load, instrument it.  (Load-linkeds
               appear as Ist_LLSC and are handled above, so only
               vanilla loads reach this point.) */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
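
         /* Client-side usage sketch (comment only): helgrind.h
            exposes this request via a macro, so client code can do

               #include "helgrind.h"
               VALGRIND_HG_CLEAN_MEMORY(buf, sizeof buf);

            to ask Helgrind to treat that range as virgin memory
            again.  Check the helgrind.h shipped with your
            installation for the exact macro name and arguments. */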

      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr  payload = 0;
         SizeT pszB    = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed
         to be valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;
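
      /* Usage sketch (comment only): these two requests implement
         user-defined happens-before edges, keyed by an arbitrary
         tag.  In helgrind.h they underlie annotation macros along
         the lines of

            ANNOTATE_HAPPENS_BEFORE(&obj);  // in the signalling thread
            ANNOTATE_HAPPENS_AFTER(&obj);   // in the receiving thread

         Check the helgrind.h shipped with your installation for the
         exact macro names; the mapping described here is an
         assumption about the client-side wrappers, not something
         this file defines. */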

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
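
/* For reference (sketch, comment only): client code reaches this
   handler via the request macros in valgrind.h/helgrind.h.  A
   hand-rolled request would look roughly like

      UWord res = 0;
      VALGRIND_DO_CLIENT_REQUEST(res, 0, VG_USERREQ__HG_CLEAN_MEMORY,
                                 start, len, 0, 0, 0);

   and is a no-op (res keeps its default, 0) when the program is not
   running under Valgrind.  The exact macro shape is owned by
   valgrind.h and may differ between versions; treat the above as an
   approximation, not a definitive interface. */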


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30-million limits, remember to also change
      them in the assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "--hg-sanity-flags=stuvwx": a string of six 0s and 1s, parsed
      into a six-bit mask, most significant flag first. */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
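
/* Example invocations (sketch): the options above are given on the
   Valgrind command line, e.g.

      valgrind --tool=helgrind --history-level=approx  ./a.out
      valgrind --tool=helgrind --conflict-cache-size=2000000  ./a.out
      valgrind --tool=helgrind --hg-sanity-flags=000010  ./a.out

   The last of these enables sanity checking at lock/unlock events
   only; hg_print_debug_usage below documents what each digit
   controls. */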

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "  at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_post_clo_init ( void )
{
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }

      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz             stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}
4811
sewardjf98e1c02008-10-25 16:22:41 +00004812/* FIXME: move these somewhere sane */
4813
4814static
4815void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4816{
4817 Thread* thr;
4818 ThreadId tid;
4819 UWord nActual;
4820 tl_assert(hbt);
4821 thr = libhb_get_Thr_opaque( hbt );
4822 tl_assert(thr);
4823 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4824 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4825 NULL, NULL, 0 );
4826 tl_assert(nActual <= nRequest);
4827 for (; nActual < nRequest; nActual++)
4828 frames[nActual] = 0;
4829}
4830
4831static
sewardj23f12002009-07-24 08:45:08 +00004832ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00004833{
4834 Thread* thr;
4835 ThreadId tid;
4836 ExeContext* ec;
4837 tl_assert(hbt);
4838 thr = libhb_get_Thr_opaque( hbt );
4839 tl_assert(thr);
4840 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00004841 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00004842 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00004843 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00004844}


static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;

   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem_stack );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

   // add a callback to clean up on (threaded) fork.
   VG_(atfork)(NULL/*pre*/, NULL/*parent*/, evh__atfork_child/*child*/);
}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/