
/*--------------------------------------------------------------------*/
/*--- Helgrind: a Valgrind tool for detecting errors               ---*/
/*--- in threaded programs.                              hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting errors
   in threaded programs.

   Copyright (C) 2007-2009 OpenWorks LLP
       info@open-works.co.uk

   Copyright (C) 2007-2009 Apple, Inc.

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.

   Neither the names of the U.S. Department of Energy nor the
   University of California nor the names of its contributors may be
   used to endorse or promote products derived from this software
   without prior written permission.
*/

#include "pub_tool_basics.h"
#include "pub_tool_libcassert.h"
#include "pub_tool_libcbase.h"
#include "pub_tool_libcprint.h"
#include "pub_tool_threadstate.h"
#include "pub_tool_tooliface.h"
#include "pub_tool_hashtable.h"
#include "pub_tool_replacemalloc.h"
#include "pub_tool_machine.h"
#include "pub_tool_options.h"
#include "pub_tool_xarray.h"
#include "pub_tool_stacktrace.h"
#include "pub_tool_wordfm.h"
#include "pub_tool_debuginfo.h"  // VG_(find_seginfo), VG_(seginfo_soname)
#include "pub_tool_redir.h"      // sonames for the dynamic linkers
#include "pub_tool_vki.h"        // VKI_PAGE_SIZE

#include "hg_basics.h"
#include "hg_wordset.h"
#include "hg_lock_n_thread.h"
#include "hg_errors.h"

#include "libhb.h"

#include "helgrind.h"


// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)

// FIXME: when client destroys a lock or a CV, remove these
// from our mappings, so that the associated SO can be freed up

/*----------------------------------------------------------------*/
/*---                                                          ---*/
/*----------------------------------------------------------------*/

/* Note this needs to be compiled with -fno-strict-aliasing, since it
   contains a whole bunch of calls to lookupFM etc which cast between
   Word and pointer types.  gcc rightly complains this breaks ANSI C
   strict aliasing rules, at -O2.  No complaints at -O, but -O2 gives
   worthwhile performance benefits over -O.
*/
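
/* Editor's sketch -- illustrative, not part of the original source.
   The casts referred to above look like the following (an identical
   call appears in map_locks_maybe_lookup below).  WordFM stores
   Word-sized keys and values, so pointers are punned to and from
   Word at every call site:

      Lock* lk = NULL;
      if (VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga )) {
         // found: 'lk' is now the Lock* bound to guest address 'ga'
      }

   It is the (Word*)&lk pun that violates the strict-aliasing rules,
   hence the -fno-strict-aliasing requirement. */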

// FIXME catch sync signals (SEGV, basically) and unlock BHL,
// if held.  Otherwise a LOCK-prefixed insn which segfaults
// gets Helgrind into a total muddle as the BHL will not be
// released after the insn.

// FIXME what is supposed to happen to locks in memory which
// is relocated as a result of client realloc?

// FIXME put referencing ThreadId into Thread and get
// rid of the slow reverse mapping function.

// FIXME accesses to NoAccess areas: change state to Excl?

// FIXME report errors for accesses of NoAccess memory?

// FIXME pth_cond_wait/timedwait wrappers.  Even if these fail,
// the thread still holds the lock.

/* ------------ Debug/trace options ------------ */

// this is:
// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
// happens_before_wrk: 1000
// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
#define SHOW_EXPENSIVE_STUFF 0

// 0 for silent, 1 for some stuff, 2 for lots of stuff
#define SHOW_EVENTS 0


static void all__sanity_check ( Char* who ); /* fwds */

#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */

// 0 for none, 1 for dump at end of run
#define SHOW_DATA_STRUCTURES 0


/* ------------ Misc comments ------------ */

// FIXME: don't hardwire initial entries for root thread.
// Instead, let the pre_thread_ll_create handler do this.


/*----------------------------------------------------------------*/
/*--- Primary data structures                                  ---*/
/*----------------------------------------------------------------*/

/* Admin linked list of Threads */
static Thread* admin_threads = NULL;

/* Admin linked list of Locks */
static Lock* admin_locks = NULL;

/* Mapping table for core ThreadIds to Thread* */
static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */

/* Mapping table for lock guest addresses to Lock* */
static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */

/* The word-set universes for thread sets and lock sets. */
static WordSetU* univ_tsets = NULL; /* sets of Thread* */
static WordSetU* univ_lsets = NULL; /* sets of Lock* */
static WordSetU* univ_laog  = NULL; /* sets of Lock*, for LAOG */

/* never changed; we only care about its address.  Is treated as if it
   was a standard userspace lock.  Also we have a Lock* describing it
   so it can participate in lock sets in the usual way. */
static Int   __bus_lock = 0;
static Lock* __bus_lock_Lock = NULL;


/*----------------------------------------------------------------*/
/*--- Simple helpers for the data structures                   ---*/
/*----------------------------------------------------------------*/

static UWord stats__lockN_acquires = 0;
static UWord stats__lockN_releases = 0;

static
ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/

/* --------- Constructors --------- */

static Thread* mk_Thread ( Thr* hbthr ) {
   static Int indx = 1;
   Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
   thread->locksetA = HG_(emptyWS)( univ_lsets );
   thread->locksetW = HG_(emptyWS)( univ_lsets );
   thread->magic = Thread_MAGIC;
   thread->hbthr = hbthr;
   thread->coretid = VG_INVALID_THREADID;
   thread->created_at = NULL;
   thread->announced = False;
   thread->errmsg_index = indx++;
   thread->admin = admin_threads;
   admin_threads = thread;
   return thread;
}

// Make a new lock which is unlocked (hence ownerless)
static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
   static ULong unique = 0;
   Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
   lock->admin = admin_locks;
   lock->unique = unique++;
   lock->magic = LockN_MAGIC;
   lock->appeared_at = NULL;
   lock->acquired_at = NULL;
   lock->hbso = libhb_so_alloc();
   lock->guestaddr = guestaddr;
   lock->kind = kind;
   lock->heldW = False;
   lock->heldBy = NULL;
   tl_assert(HG_(is_sane_LockN)(lock));
   admin_locks = lock;
   return lock;
}

/* Release storage for a Lock.  Also release storage in .heldBy, if
   any. */
static void del_LockN ( Lock* lk )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(lk->hbso);
   libhb_so_dealloc(lk->hbso);
   if (lk->heldBy)
      VG_(deleteBag)( lk->heldBy );
   VG_(memset)(lk, 0xAA, sizeof(*lk));
   HG_(free)(lk);
}

/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW  = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
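
/* Editor's summary -- derived from the switch above, for reference;
   these are the only write-acquisition transitions the routine
   permits:

      LK_nonRec:  unheld                 --> w-held once by 'thr'
      LK_rdwr:    unheld                 --> w-held once by 'thr'
      LK_mbRec:   unheld                 --> w-held once by 'thr'
                  w-held n times by thr  --> w-held n+1 times by thr

   Every other combination (w-locking an r-held rwlock, recursively
   w-locking an LK_nonRec lock, etc) fails a tl_assert, since this
   routine runs only after libpthread has already granted the lock. */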

static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}

/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed. */

static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}

static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}


/*----------------------------------------------------------------*/
/*--- Print out the primary data structures                    ---*/
/*----------------------------------------------------------------*/

//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */

#define PP_THREADS      (1<<1)
#define PP_LOCKS        (1<<2)
#define PP_ALL (PP_THREADS | PP_LOCKS)


static const Int sHOW_ADMIN = 0;

static void space ( Int n )
{
   Int  i;
   Char spaces[128+1];
   tl_assert(n >= 0 && n < 128);
   if (n == 0)
      return;
   for (i = 0; i < n; i++)
      spaces[i] = ' ';
   spaces[i] = 0;
   tl_assert(i < 128+1);
   VG_(printf)("%s", spaces);
}

static void pp_Thread ( Int d, Thread* t )
{
   space(d+0); VG_(printf)("Thread %p {\n", t);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   t->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
   }
   space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
   space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_threads ( Int d )
{
   Int     i, n;
   Thread* t;
   for (n = 0, t = admin_threads;  t;  n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads;  t;  i++, t = t->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_threads ( Int d )
{
   Int i, n = 0;
   space(d); VG_(printf)("map_threads ");
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] != NULL)
         n++;
   }
   VG_(printf)("(%d entries) {\n", n);
   for (i = 0; i < VG_N_THREADS; i++) {
      if (map_threads[i] == NULL)
         continue;
      space(d+3);
      VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
   }
   space(d); VG_(printf)("}\n");
}

static const HChar* show_LockKind ( LockKind lkk ) {
   switch (lkk) {
      case LK_mbRec:  return "mbRec";
      case LK_nonRec: return "nonRec";
      case LK_rdwr:   return "rdwr";
      default:        tl_assert(0);
   }
}

static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n",   lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind   %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW  %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word    count;
      VG_(printf)(" { ");
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}

static void pp_admin_locks ( Int d )
{
   Int   i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks;  lk;  n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin) {
      if (0) {
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}

static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}

static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}

#undef SHOW_ADMIN


/*----------------------------------------------------------------*/
/*--- Initialise the primary data structures                   ---*/
/*----------------------------------------------------------------*/

static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}


/*----------------------------------------------------------------*/
/*--- map_threads :: array[core-ThreadId] of Thread*           ---*/
/*----------------------------------------------------------------*/

/* Doesn't assert if the relevant map_threads entry is NULL. */
static Thread* map_threads_maybe_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   return thr;
}

/* Asserts if the relevant map_threads entry is NULL. */
static inline Thread* map_threads_lookup ( ThreadId coretid )
{
   Thread* thr;
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   return thr;
}

/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}

/* Do a reverse lookup.  Warning: POTENTIALLY SLOW.  Asserts if 'thr'
   is not found in map_threads. */
static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
   tl_assert(tid != VG_INVALID_THREADID);
   tl_assert(map_threads[tid]);
   tl_assert(map_threads[tid]->coretid == tid);
   return tid;
}

static void map_threads_delete ( ThreadId coretid )
{
   Thread* thr;
   tl_assert(coretid != 0);
   tl_assert( HG_(is_sane_ThreadId)(coretid) );
   thr = map_threads[coretid];
   tl_assert(thr);
   map_threads[coretid] = NULL;
}


/*----------------------------------------------------------------*/
/*--- map_locks :: WordFM guest-Addr-of-lock Lock*             ---*/
/*----------------------------------------------------------------*/

/* Make sure there is a lock table entry for the given (lock) guest
   address.  If not, create one of the stated 'kind' in unheld state.
   In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool  found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}

static Lock* map_locks_maybe_lookup ( Addr ga )
{
   Bool  found;
   Lock* lk = NULL;
   found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
   tl_assert(found  ?  lk != NULL  :  lk == NULL);
   return lk;
}

static void map_locks_delete ( Addr ga )
{
   Addr  ga2 = 0;
   Lock* lk  = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found.  So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
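
/* Editor's sketch -- illustrative, not from the original source:
   typical use of this small API, mirroring the evhH__ lock event
   handlers later in this file:

      // on acquisition: find the Lock, creating it on first sight
      lk = map_locks_lookup_or_create(
              lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );

      // on release: an unknown lock address is a client bug
      lock = map_locks_maybe_lookup( lock_ga );
      if (!lock)
         HG_(record_error_UnlockBogus)( thr, lock_ga );

   Note that map_locks_delete asserts the (ga, Lock*) pair exists, so
   callers must pass only addresses known to be in the map. */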



/*----------------------------------------------------------------*/
/*--- Sanity checking the data structures                      ---*/
/*----------------------------------------------------------------*/

static UWord stats__sanity_checks = 0;

static void laog__sanity_check ( Char* who ); /* fwds */

/* REQUIRED INVARIANTS:

   Thread vs Segment/Lock/SecMaps

      for each t in Threads {

         // Thread.lockset: each element is really a valid Lock

         // Thread.lockset: each Lock in set is actually held by that thread
         for lk in Thread.lockset
            lk == LockedBy(t)

         // Thread.csegid is a valid SegmentID
         // and the associated Segment has .thr == t

      }

      all thread Locksets are pairwise empty under intersection
      (that is, no lock is claimed to be held by more than one thread)
      -- this is guaranteed if all locks in locksets point back to their
      owner threads

   Lock vs Thread/Segment/SecMaps

      for each entry (gla, la) in map_locks
         gla == la->guest_addr

      for each lk in Locks {

         lk->tag is valid
         lk->guest_addr does not have shadow state NoAccess
         if lk == LockedBy(t), then t->lockset contains lk
         if lk == UnlockedBy(segid) then segid is valid SegmentID
            and can be mapped to a valid Segment(seg)
            and seg->thr->lockset does not contain lk
         if lk == UnlockedNew then (no lockset contains lk)

         secmaps for lk has .mbHasLocks == True

      }

   Segment vs Thread/Lock/SecMaps

      the Segment graph is a dag (no cycles)
      all of the Segment graph must be reachable from the segids
         mentioned in the Threads

      for seg in Segments {

         seg->thr is a sane Thread

      }

   SecMaps vs Segment/Thread/Lock

      for sm in SecMaps {

         sm properly aligned
         if any shadow word is ShR or ShM then .mbHasShared == True

         for each Excl(segid) state
            map_segments_lookup maps to a sane Segment(seg)
         for each ShM/ShR(tsetid,lsetid) state
            each lk in lset is a valid Lock
            each thr in tset is a valid thread, which is non-dead

      }
*/


/* Return True iff 'thr' holds 'lk' in some mode. */
static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
{
   if (lk->heldBy)
      return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
   else
      return False;
}

/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char*     how = "no error";
   Thread*   thr;
   WordSetID wsA, wsW;
   UWord*    ls_words;
   Word      ls_size, i;
   Lock*     lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


/* Sanity check Locks, as far as possible */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr  gla;
   Lock* lk;
   Int   i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks;  lk;  i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks;  lk;  lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}


static void all_except_Locks__sanity_check ( Char* who ) {
   stats__sanity_checks++;
   if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
   threads__sanity_check(who);
   laog__sanity_check(who);
}
static void all__sanity_check ( Char* who ) {
   all_except_Locks__sanity_check(who);
   locks__sanity_check(who);
}


/*----------------------------------------------------------------*/
/*--- the core memory state machine (msm__* functions)         ---*/
/*----------------------------------------------------------------*/

//static WordSetID add_BHL ( WordSetID lockset ) {
//   return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}
//static WordSetID del_BHL ( WordSetID lockset ) {
//   return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
//}


///* Last-lock-lossage records.  This mechanism exists to help explain
//   to programmers why we are complaining about a race.  The idea is to
//   monitor all lockset transitions.  When a previously nonempty
//   lockset becomes empty, the lock(s) that just disappeared (the
//   "lossage") are the locks that have consistently protected the
//   location (ga_of_access) in question for the longest time.  Most of
//   the time the lossage-set is a single lock.  Because the
//   lossage-lock is the one that has survived longest, there is a good
//   chance that it is indeed the lock that the programmer
//   intended to use to protect the location.
//
//   Note that we cannot in general just look at the lossage set when we
//   see a transition to ShM(...,empty-set), because a transition to an
//   empty lockset can happen arbitrarily far before the point where we
//   want to report an error.  This is in the case where there are many
//   transitions ShR -> ShR, all with an empty lockset, and only later
//   is there a transition to ShM.  So what we want to do is note the
//   lossage lock at the point where a ShR -> ShR transition empties out
//   the lockset, so we can present it later if there should be a
//   transition to ShM.
//
//   So this function finds such transitions.  For each, it associates
//   in ga_to_lastlock, the guest address and the lossage lock.  In fact
//   we do not record the Lock* directly as that may disappear later,
//   but instead the ExeContext inside the Lock which says where it was
//   initialised or first locked.  ExeContexts are permanent so keeping
//   them indefinitely is safe.
//
//   A boring detail: the hardware bus lock is not interesting in this
//   respect, so we first remove that from the pre/post locksets.
//*/
//
//static UWord stats__ga_LL_adds = 0;
//
//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
//
//static
//void record_last_lock_lossage ( Addr ga_of_access,
//                                WordSetID lset_old, WordSetID lset_new )
//{
//   Lock* lk;
//   Int   card_old, card_new;
//
//   tl_assert(lset_old != lset_new);
//
//   if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
//                      (Int)lset_old,
//                      HG_(cardinalityWS)(univ_lsets,lset_old),
//                      (Int)lset_new,
//                      HG_(cardinalityWS)(univ_lsets,lset_new),
//                      ga_of_access );
//
//   /* This is slow, but at least it's simple.  The bus hardware lock
//      just confuses the logic, so remove it from the locksets we're
//      considering before doing anything else. */
//   lset_new = del_BHL( lset_new );
//
//   if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
//      /* The post-transition lock set is not empty.  So we are not
//         interested.  We're only interested in spotting transitions
//         that make locksets become empty. */
//      return;
//   }
//
//   /* lset_new is now empty */
//   card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
//   tl_assert(card_new == 0);
//
//   lset_old = del_BHL( lset_old );
//   card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
//
//   if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
//                      (Int)lset_old, card_old, (Int)lset_new, card_new );
//
//   if (card_old == 0) {
//      /* The old lockset was also empty.  Not interesting. */
//      return;
//   }
//
//   tl_assert(card_old > 0);
//   tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
//
//   /* Now we know we've got a transition from a nonempty lockset to an
//      empty one.  So lset_old must be the set of locks lost.  Record
//      some details.  If there is more than one element in the lossage
//      set, just choose one arbitrarily -- not the best, but at least
//      it's simple. */
//
//   lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
//   if (0) VG_(printf)("lossage %ld %p\n",
//                      HG_(cardinalityWS)( univ_lsets, lset_old), lk );
//   if (lk->appeared_at) {
//      if (ga_to_lastlock == NULL)
//         ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
//      VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
//      stats__ga_LL_adds++;
//   }
//}
//
///* This queries the table (ga_to_lastlock) made by
//   record_last_lock_lossage, when constructing error messages.  It
//   attempts to find the ExeContext of the allocation or initialisation
//   point for the lossage lock associated with 'ga'. */
//
//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
//{
//   ExeContext* ec_hint = NULL;
//   if (ga_to_lastlock != NULL
//       && VG_(lookupFM)(ga_to_lastlock,
//                        NULL, (Word*)&ec_hint, ga)) {
//      tl_assert(ec_hint != NULL);
//      return ec_hint;
//   } else {
//      return NULL;
//   }
//}


/*----------------------------------------------------------------*/
/*--- Shadow value and address range handlers                  ---*/
/*----------------------------------------------------------------*/

static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
static inline Thread* get_current_Thread ( void ); /* fwds */
__attribute__((noinline))
static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */


/* Block-copy states (needed for implementing realloc()). */
/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
   Is that a problem? (hence 'scopy' rather than 'ccopy') */
static void shadow_mem_scopy_range ( Thread* thr,
                                     Addr src, Addr dst, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   libhb_copy_shadow_state( hbthr, src, dst, len );
}

static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
{
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CREAD_N(hbthr, a, len);
}

static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
   Thr* hbthr = thr->hbthr;
   tl_assert(hbthr);
   LIBHB_CWRITE_N(hbthr, a, len);
}

static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
{
   libhb_srange_new( thr->hbthr, a, len );
}

static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
   libhb_srange_noaccess( thr->hbthr, aIN, len );
}

static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
{
   if (0 && len > 500)
      VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
   libhb_srange_untrack( thr->hbthr, aIN, len );
}


/*----------------------------------------------------------------*/
/*--- Event handlers (evh__* functions)                        ---*/
/*--- plus helpers (evhH__* functions)                         ---*/
/*----------------------------------------------------------------*/

/*--------- Event handler helpers (evhH__* functions) ---------*/

/* Create a new segment for 'thr', making it depend (.prev) on its
   existing segment, bind together the SegmentID and Segment, and
   return both of them.  Also update 'thr' so it references the new
   Segment. */
//zz static
//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
//zz                                           /*OUT*/Segment** new_segP,
//zz                                           Thread* thr )
//zz {
//zz    Segment* cur_seg;
//zz    tl_assert(new_segP);
//zz    tl_assert(new_segidP);
//zz    tl_assert(HG_(is_sane_Thread)(thr));
//zz    cur_seg = map_segments_lookup( thr->csegid );
//zz    tl_assert(cur_seg);
//zz    tl_assert(cur_seg->thr == thr); /* all sane segs should point back
//zz                                       at their owner thread. */
//zz    *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
//zz    *new_segidP = alloc_SegmentID();
//zz    map_segments_add( *new_segidP, *new_segP );
//zz    thr->csegid = *new_segidP;
//zz }


/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}
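
/* Editor's note -- illustrative; the LockKind chosen for a recursive
   mutex is an assumption here, as the wrapper-to-handler plumbing
   lives in the evh__ functions and pthread intercepts outside this
   excerpt.  A client doing

      pthread_mutex_t mx;       // initialised PTHREAD_MUTEX_RECURSIVE
      pthread_mutex_lock(&mx);  // handler takes the 'lock isn't held' path
      pthread_mutex_lock(&mx);  // handler takes the recursive re-lock path

   would drive the routine above twice: first through the unheld case,
   then (assuming the mutex maps to LK_mbRec) through the recursive
   re-lock case; a recursive grant on any other kind is reported as a
   libpthread bug. */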


/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lk));
}


/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
1393 /* It's a rwlock. We no longer hold it but we used to;
1394 nevertheless it still appears to be held by someone else.
1395 The implication is that, prior to this release, it must
1396         have been shared by us and whoever else is holding it;
1397 which in turn implies it must be r-held, since a lock
1398 can't be w-held by more than one thread. */
1399 /* The lock is now R-held by somebody else: */
1400 tl_assert(lock->heldW == False);
1401 } else {
1402 /* Normal case. It's either not a rwlock, or it's a rwlock
1403 that we used to hold in w-mode (which is pretty much the
1404 same thing as a non-rwlock.) Since this transaction is
1405 atomic (V does not allow multiple threads to run
1406 simultaneously), it must mean the lock is now not held by
1407 anybody. Hence assert for it. */
1408 /* The lock is now not held by anybody: */
1409 tl_assert(!lock->heldBy);
1410 tl_assert(lock->heldW == False);
1411 }
sewardjf98e1c02008-10-25 16:22:41 +00001412 //if (lock->heldBy) {
1413 // tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
1414 //}
sewardjb4112022007-11-09 22:49:28 +00001415 /* update this thread's lockset accordingly. */
1416 thr->locksetA
1417 = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
1418 thr->locksetW
1419 = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
sewardjf98e1c02008-10-25 16:22:41 +00001420 /* push our VC into the lock */
1421 tl_assert(thr->hbthr);
1422 tl_assert(lock->hbso);
1423 /* If the lock was previously W-held, then we want to do a
1424 strong send, and if previously R-held, then a weak send. */
1425 libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
sewardjb4112022007-11-09 22:49:28 +00001426 }
1427 /* fall through */
1428
1429 error:
sewardjf98e1c02008-10-25 16:22:41 +00001430 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +00001431}
1432
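/* A minimal client-side sketch (hypothetical code, not from this
   file) of the rwlock case handled above: two threads r-hold the
   same lock, so when the first one unlocks, the lock stays r-held.

      pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;

      // Thread 1                      // Thread 2
      pthread_rwlock_rdlock(&rw);
                                       pthread_rwlock_rdlock(&rw);
      pthread_rwlock_unlock(&rw);

   At Thread 1's unlock, n falls to zero for Thread 1, but
   lock->heldBy is still nonempty (Thread 2), so only the weaker
   assertion heldW == False can be made; and since the lock was
   r-held, the release does only a weak send into the lock's SO. */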
1433
sewardj9f569b72008-11-13 13:33:09 +00001434/* ---------------------------------------------------------- */
1435/* -------- Event handlers proper (evh__* functions) -------- */
1436/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001437
1438/* What is the Thread* for the currently running thread? This is
1439 absolutely performance critical. We receive notifications from the
1440 core for client code starts/stops, and cache the looked-up result
1441 in 'current_Thread'. Hence, for the vast majority of requests,
1442 finding the current thread reduces to a read of a global variable,
1443 provided get_current_Thread_in_C_C is inlined.
1444
1445 Outside of client code, current_Thread is NULL, and presumably
1446 any uses of it will cause a segfault. Hence:
1447
1448 - for uses definitely within client code, use
1449 get_current_Thread_in_C_C.
1450
1451 - for all other uses, use get_current_Thread.
1452*/
1453
sewardj23f12002009-07-24 08:45:08 +00001454static Thread *current_Thread = NULL,
1455 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001456
1457static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1458 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1459 tl_assert(current_Thread == NULL);
1460 current_Thread = map_threads_lookup( tid );
1461 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001462 if (current_Thread != current_Thread_prev) {
1463 libhb_Thr_resumes( current_Thread->hbthr );
1464 current_Thread_prev = current_Thread;
1465 }
sewardjb4112022007-11-09 22:49:28 +00001466}
1467static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1468 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1469 tl_assert(current_Thread != NULL);
1470 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001471 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001472}
1473static inline Thread* get_current_Thread_in_C_C ( void ) {
1474 return current_Thread;
1475}
1476static inline Thread* get_current_Thread ( void ) {
1477 ThreadId coretid;
1478 Thread* thr;
1479 thr = get_current_Thread_in_C_C();
1480 if (LIKELY(thr))
1481 return thr;
1482 /* evidently not in client code. Do it the slow way. */
1483 coretid = VG_(get_running_tid)();
1484 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001485 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001486 of initial memory layout) and VG_(get_running_tid)() returns
1487 VG_INVALID_THREADID at that point. */
1488 if (coretid == VG_INVALID_THREADID)
1489 coretid = 1; /* KLUDGE */
1490 thr = map_threads_lookup( coretid );
1491 return thr;
1492}
1493
1494static
1495void evh__new_mem ( Addr a, SizeT len ) {
1496 if (SHOW_EVENTS >= 2)
1497 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1498 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001499 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001500 all__sanity_check("evh__new_mem-post");
1501}
1502
1503static
sewardj7cf4e6b2008-05-01 20:24:26 +00001504void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1505 if (SHOW_EVENTS >= 2)
1506 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1507 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001508 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001509 all__sanity_check("evh__new_mem_w_tid-post");
1510}
1511
1512static
sewardjb4112022007-11-09 22:49:28 +00001513void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001514 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001515 if (SHOW_EVENTS >= 1)
1516 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1517 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1518 if (rr || ww || xx)
1519 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001520 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001521 all__sanity_check("evh__new_mem_w_perms-post");
1522}
1523
1524static
1525void evh__set_perms ( Addr a, SizeT len,
1526 Bool rr, Bool ww, Bool xx ) {
1527 if (SHOW_EVENTS >= 1)
1528 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1529 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1530 /* Hmm. What should we do here, that actually makes any sense?
1531 Let's say: if neither readable nor writable, then declare it
1532 NoAccess, else leave it alone. */
1533 if (!(rr || ww))
1534 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001535 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001536 all__sanity_check("evh__set_perms-post");
1537}
1538
1539static
1540void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001541 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001542 if (SHOW_EVENTS >= 2)
1543 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1544 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001545 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001546 all__sanity_check("evh__die_mem-post");
1547}
1548
1549static
sewardj406bac82010-03-03 23:03:40 +00001550void evh__untrack_mem ( Addr a, SizeT len ) {
1551 // whereas it doesn't ignore this
1552 if (SHOW_EVENTS >= 2)
1553 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1554 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1555 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1556 all__sanity_check("evh__untrack_mem-post");
1557}
1558
1559static
sewardj23f12002009-07-24 08:45:08 +00001560void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1561 if (SHOW_EVENTS >= 2)
1562 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1563 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1564 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1565 all__sanity_check("evh__copy_mem-post");
1566}
1567
1568static
sewardjb4112022007-11-09 22:49:28 +00001569void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1570{
1571 if (SHOW_EVENTS >= 1)
1572 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1573 (Int)parent, (Int)child );
1574
1575 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001576 Thread* thr_p;
1577 Thread* thr_c;
1578 Thr* hbthr_p;
1579 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001580
sewardjf98e1c02008-10-25 16:22:41 +00001581 tl_assert(HG_(is_sane_ThreadId)(parent));
1582 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001583 tl_assert(parent != child);
1584
1585 thr_p = map_threads_maybe_lookup( parent );
1586 thr_c = map_threads_maybe_lookup( child );
1587
1588 tl_assert(thr_p != NULL);
1589 tl_assert(thr_c == NULL);
1590
sewardjf98e1c02008-10-25 16:22:41 +00001591 hbthr_p = thr_p->hbthr;
1592 tl_assert(hbthr_p != NULL);
1593 tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001594
sewardjf98e1c02008-10-25 16:22:41 +00001595 hbthr_c = libhb_create ( hbthr_p );
1596
1597 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001598 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001599 thr_c = mk_Thread( hbthr_c );
1600 tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
1601 libhb_set_Thr_opaque(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001602
1603 /* and bind it in the thread-map table */
1604 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001605 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1606 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001607
1608 /* Record where the parent is so we can later refer to this in
1609 error messages.
1610
1611 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1612 The stack snapshot is taken immediately after the parent has
1613 returned from its sys_clone call. Unfortunately there is no
1614 unwind info for the insn following "syscall" - reading the
1615 glibc sources confirms this. So we ask for a snapshot to be
1616 taken as if RIP was 3 bytes earlier, in a place where there
1617 is unwind info. Sigh.
1618 */
1619 { Word first_ip_delta = 0;
1620# if defined(VGP_amd64_linux)
1621 first_ip_delta = -3;
1622# endif
1623 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1624 }
sewardjb4112022007-11-09 22:49:28 +00001625 }
1626
sewardjf98e1c02008-10-25 16:22:41 +00001627 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001628 all__sanity_check("evh__pre_thread_create-post");
1629}
1630
1631static
1632void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1633{
1634 Int nHeld;
1635 Thread* thr_q;
1636 if (SHOW_EVENTS >= 1)
1637 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1638 (Int)quit_tid );
1639
1640 /* quit_tid has disappeared without joining to any other thread.
1641 Therefore there is no synchronisation event associated with its
1642 exit and so we have to pretty much treat it as if it was still
1643 alive but mysteriously making no progress. That is because, if
1644 we don't know when it really exited, then we can never say there
1645 is a point in time when we're sure the thread really has
1646 finished, and so we need to consider the possibility that it
1647 lingers indefinitely and continues to interact with other
1648 threads. */
1649 /* However, it might have rendezvous'd with a thread that called
1650 pthread_join with this one as arg, prior to this point (that's
1651 how NPTL works). In which case there has already been a prior
1652 sync event. So in any case, just let the thread exit. On NPTL,
1653 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001654 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001655 thr_q = map_threads_maybe_lookup( quit_tid );
1656 tl_assert(thr_q != NULL);
1657
1658 /* Complain if this thread holds any locks. */
1659 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1660 tl_assert(nHeld >= 0);
1661 if (nHeld > 0) {
1662 HChar buf[80];
1663 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1664 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001665 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001666 }
1667
sewardj23f12002009-07-24 08:45:08 +00001668 /* Not much to do here:
1669 - tell libhb the thread is gone
1670 - clear the map_threads entry, in order that the Valgrind core
1671 can re-use it. */
1672 tl_assert(thr_q->hbthr);
1673 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001674 tl_assert(thr_q->coretid == quit_tid);
1675 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001676 map_threads_delete( quit_tid );
1677
sewardjf98e1c02008-10-25 16:22:41 +00001678 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001679 all__sanity_check("evh__pre_thread_ll_exit-post");
1680}
1681
sewardjf98e1c02008-10-25 16:22:41 +00001682
sewardjb4112022007-11-09 22:49:28 +00001683static
1684void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1685{
sewardjb4112022007-11-09 22:49:28 +00001686 Thread* thr_s;
1687 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001688 Thr* hbthr_s;
1689 Thr* hbthr_q;
1690 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001691
1692 if (SHOW_EVENTS >= 1)
1693 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1694 (Int)stay_tid, quit_thr );
1695
sewardjf98e1c02008-10-25 16:22:41 +00001696 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001697
1698 thr_s = map_threads_maybe_lookup( stay_tid );
1699 thr_q = quit_thr;
1700 tl_assert(thr_s != NULL);
1701 tl_assert(thr_q != NULL);
1702 tl_assert(thr_s != thr_q);
1703
sewardjf98e1c02008-10-25 16:22:41 +00001704 hbthr_s = thr_s->hbthr;
1705 hbthr_q = thr_q->hbthr;
1706 tl_assert(hbthr_s != hbthr_q);
1707 tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
1708 tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001709
sewardjf98e1c02008-10-25 16:22:41 +00001710 /* Allocate a temporary synchronisation object and use it to send
1711 an imaginary message from the quitter to the stayer, the purpose
1712 being to generate a dependence from the quitter to the
1713 stayer. */
1714 so = libhb_so_alloc();
1715 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001716   /* Do a strong send and a strong recv, so that the stayer
1717      acquires a dependency on everything the quitter did before
1718      it exited. */
sewardjf98e1c02008-10-25 16:22:41 +00001719 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1720 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1721 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001722
sewardjf98e1c02008-10-25 16:22:41 +00001723 /* evh__pre_thread_ll_exit issues an error message if the exiting
1724 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001725
1726 /* This holds because, at least when using NPTL as the thread
1727 library, we should be notified the low level thread exit before
1728 we hear of any join event on it. The low level exit
1729 notification feeds through into evh__pre_thread_ll_exit,
1730 which should clear the map_threads entry for it. Hence we
1731 expect there to be no map_threads entry at this point. */
1732 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1733 == VG_INVALID_THREADID);
1734
sewardjf98e1c02008-10-25 16:22:41 +00001735 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001736 all__sanity_check("evh__post_thread_join-post");
1737}
1738
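/* A minimal sketch (hypothetical client code) of the edge created
   above: for

      pthread_t t;
      pthread_create(&t, NULL, worker, NULL);  // worker writes x
      pthread_join(t, NULL);
      // ... read x: no race is reported ...

   the quitter's strong send into the temporary SO, followed by the
   stayer's strong recv, is what makes everything the worker did
   happen-before the code following pthread_join. */
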
1739static
1740void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1741 Addr a, SizeT size) {
1742 if (SHOW_EVENTS >= 2
1743 || (SHOW_EVENTS >= 1 && size != 1))
1744 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1745 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001746 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001747 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001748 all__sanity_check("evh__pre_mem_read-post");
1749}
1750
1751static
1752void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1753 Char* s, Addr a ) {
1754 Int len;
1755 if (SHOW_EVENTS >= 1)
1756 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1757 (Int)tid, s, (void*)a );
1758 // FIXME: think of a less ugly hack
1759 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001760 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001761 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001762 all__sanity_check("evh__pre_mem_read_asciiz-post");
1763}
1764
1765static
1766void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1767 Addr a, SizeT size ) {
1768 if (SHOW_EVENTS >= 1)
1769 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1770 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001771 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001772 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001773 all__sanity_check("evh__pre_mem_write-post");
1774}
1775
1776static
1777void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1778 if (SHOW_EVENTS >= 1)
1779 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1780 (void*)a, len, (Int)is_inited );
1781   // FIXME: this is kinda stupid: is_inited is currently ignored,
1782   // so initialised and uninitialised heap memory are treated
1783   // identically.
1784   shadow_mem_make_New(get_current_Thread(), a, len);
sewardjf98e1c02008-10-25 16:22:41 +00001787 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001788      all__sanity_check("evh__new_mem_heap-post");
1789}
1790
1791static
1792void evh__die_mem_heap ( Addr a, SizeT len ) {
1793 if (SHOW_EVENTS >= 1)
1794 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1795 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001796 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001797      all__sanity_check("evh__die_mem_heap-post");
1798}
1799
sewardj23f12002009-07-24 08:45:08 +00001800/* --- Event handlers called from generated code --- */
1801
sewardjb4112022007-11-09 22:49:28 +00001802static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001803void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001804 Thread* thr = get_current_Thread_in_C_C();
1805 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001806 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001807}
sewardjf98e1c02008-10-25 16:22:41 +00001808
sewardjb4112022007-11-09 22:49:28 +00001809static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001810void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001811 Thread* thr = get_current_Thread_in_C_C();
1812 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001813 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001814}
sewardjf98e1c02008-10-25 16:22:41 +00001815
sewardjb4112022007-11-09 22:49:28 +00001816static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001817void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001818 Thread* thr = get_current_Thread_in_C_C();
1819 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001820 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001821}
sewardjf98e1c02008-10-25 16:22:41 +00001822
sewardjb4112022007-11-09 22:49:28 +00001823static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001824void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001825 Thread* thr = get_current_Thread_in_C_C();
1826 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001827 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001828}
sewardjf98e1c02008-10-25 16:22:41 +00001829
sewardjb4112022007-11-09 22:49:28 +00001830static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001831void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001832 Thread* thr = get_current_Thread_in_C_C();
1833 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001834 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001835}
1836
1837static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001838void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001839 Thread* thr = get_current_Thread_in_C_C();
1840 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001841 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001842}
sewardjf98e1c02008-10-25 16:22:41 +00001843
sewardjb4112022007-11-09 22:49:28 +00001844static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001845void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001846 Thread* thr = get_current_Thread_in_C_C();
1847 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001848 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001849}
sewardjf98e1c02008-10-25 16:22:41 +00001850
sewardjb4112022007-11-09 22:49:28 +00001851static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001852void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001853 Thread* thr = get_current_Thread_in_C_C();
1854 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001855 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001856}
sewardjf98e1c02008-10-25 16:22:41 +00001857
sewardjb4112022007-11-09 22:49:28 +00001858static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001859void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001860 Thread* thr = get_current_Thread_in_C_C();
1861 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001862 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001863}
sewardjf98e1c02008-10-25 16:22:41 +00001864
sewardjb4112022007-11-09 22:49:28 +00001865static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001866void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001867 Thread* thr = get_current_Thread_in_C_C();
1868 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001869 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001870}
1871
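/* A rough sketch of how the helpers above are used: for, say, a
   4-byte store in client code, the instrumenter emits in effect

      evh__mem_help_cwrite_4( addr );   // just before the store

   The VG_REGPARM(1) convention passes 'addr' in a register, which
   matters because these calls sit on the path of every client
   memory access. */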
sewardjb4112022007-11-09 22:49:28 +00001872
sewardj9f569b72008-11-13 13:33:09 +00001873/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001874/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001875/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001876
1877/* EXPOSITION only: by intercepting lock init events we can show the
1878 user where the lock was initialised, rather than only being able to
1879 show where it was first locked. Intercepting lock initialisations
1880 is not necessary for the basic operation of the race checker. */
1881static
1882void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1883 void* mutex, Word mbRec )
1884{
1885 if (SHOW_EVENTS >= 1)
1886 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1887 (Int)tid, mbRec, (void*)mutex );
1888 tl_assert(mbRec == 0 || mbRec == 1);
1889 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1890 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001891 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001892 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1893}
1894
1895static
1896void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1897{
1898 Thread* thr;
1899 Lock* lk;
1900 if (SHOW_EVENTS >= 1)
1901 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1902 (Int)tid, (void*)mutex );
1903
1904 thr = map_threads_maybe_lookup( tid );
1905 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001906 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001907
1908 lk = map_locks_maybe_lookup( (Addr)mutex );
1909
1910 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001911 HG_(record_error_Misc)(
1912 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001913 }
1914
1915 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001916 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001917 tl_assert( lk->guestaddr == (Addr)mutex );
1918 if (lk->heldBy) {
1919 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001920 HG_(record_error_Misc)(
1921 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001922 /* remove lock from locksets of all owning threads */
1923 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001924 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001925 lk->heldBy = NULL;
1926 lk->heldW = False;
1927 lk->acquired_at = NULL;
1928 }
1929 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001930 tl_assert( HG_(is_sane_LockN)(lk) );
1931
sewardj1cbc12f2008-11-10 16:16:46 +00001932 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001933 map_locks_delete( lk->guestaddr );
1934 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001935 }
1936
sewardjf98e1c02008-10-25 16:22:41 +00001937 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001938 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1939}
1940
1941static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1942 void* mutex, Word isTryLock )
1943{
1944 /* Just check the mutex is sane; nothing else to do. */
1945 // 'mutex' may be invalid - not checked by wrapper
1946 Thread* thr;
1947 Lock* lk;
1948 if (SHOW_EVENTS >= 1)
1949 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1950 (Int)tid, (void*)mutex );
1951
1952 tl_assert(isTryLock == 0 || isTryLock == 1);
1953 thr = map_threads_maybe_lookup( tid );
1954 tl_assert(thr); /* cannot fail - Thread* must already exist */
1955
1956 lk = map_locks_maybe_lookup( (Addr)mutex );
1957
1958 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001959 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1960 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001961 }
1962
1963 if ( lk
1964 && isTryLock == 0
1965 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1966 && lk->heldBy
1967 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001968 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001969 /* uh, it's a non-recursive lock and we already w-hold it, and
1970 this is a real lock operation (not a speculative "tryLock"
1971 kind of thing). Duh. Deadlock coming up; but at least
1972 produce an error message. */
sewardjf98e1c02008-10-25 16:22:41 +00001973 HG_(record_error_Misc)( thr, "Attempt to re-lock a "
1974 "non-recursive lock I already hold" );
sewardjb4112022007-11-09 22:49:28 +00001975 }
1976}
1977
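/* The client bug complained about above, in its simplest
   (hypothetical) form:

      pthread_mutex_t mx = PTHREAD_MUTEX_INITIALIZER;
      pthread_mutex_lock(&mx);
      pthread_mutex_lock(&mx);   // non-recursive: self-deadlock

   The error is recorded in this PRE handler, before the second
   call blocks for ever. */
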
1978static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1979{
1980 // only called if the real library call succeeded - so mutex is sane
1981 Thread* thr;
1982 if (SHOW_EVENTS >= 1)
1983 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1984 (Int)tid, (void*)mutex );
1985
1986 thr = map_threads_maybe_lookup( tid );
1987 tl_assert(thr); /* cannot fail - Thread* must already exist */
1988
1989 evhH__post_thread_w_acquires_lock(
1990 thr,
1991 LK_mbRec, /* if not known, create new lock with this LockKind */
1992 (Addr)mutex
1993 );
1994}
1995
1996static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1997{
1998 // 'mutex' may be invalid - not checked by wrapper
1999 Thread* thr;
2000 if (SHOW_EVENTS >= 1)
2001 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2002 (Int)tid, (void*)mutex );
2003
2004 thr = map_threads_maybe_lookup( tid );
2005 tl_assert(thr); /* cannot fail - Thread* must already exist */
2006
2007 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2008}
2009
2010static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2011{
2012 // only called if the real library call succeeded - so mutex is sane
2013 Thread* thr;
2014 if (SHOW_EVENTS >= 1)
2015 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2016 (Int)tid, (void*)mutex );
2017 thr = map_threads_maybe_lookup( tid );
2018 tl_assert(thr); /* cannot fail - Thread* must already exist */
2019
2020 // anything we should do here?
2021}
2022
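/* A minimal sketch of how the four handlers above pair up around a
   client mutex operation (the PRE hook runs before the real call,
   and the POST hooks run only if the real call succeeded):

      pthread_mutex_lock(&mx);
         // evh__HG_PTHREAD_MUTEX_LOCK_PRE    - sanity checks only
         // evh__HG_PTHREAD_MUTEX_LOCK_POST   - lockset etc updates
      pthread_mutex_unlock(&mx);
         // evh__HG_PTHREAD_MUTEX_UNLOCK_PRE  - checks and updates
         // evh__HG_PTHREAD_MUTEX_UNLOCK_POST - currently a no-op
*/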
2023
sewardj5a644da2009-08-11 10:35:58 +00002024/* ------------------------------------------------------- */
2025/* ------------- events to do with spinlocks ------------- */
2026/* ------------------------------------------------------- */
2027
2028/* All a bit of a kludge. Pretend we're really dealing with ordinary
2029 pthread_mutex_t's instead, for the most part. */
2030
2031static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2032 void* slock )
2033{
2034 Thread* thr;
2035 Lock* lk;
2036 /* In glibc's kludgey world, we're either initialising or unlocking
2037 it. Since this is the pre-routine, if it is locked, unlock it
2038 and take a dependence edge. Otherwise, do nothing. */
2039
2040 if (SHOW_EVENTS >= 1)
2041 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2042 "(ctid=%d, slock=%p)\n",
2043 (Int)tid, (void*)slock );
2044
2045 thr = map_threads_maybe_lookup( tid );
2046   /* cannot fail - Thread* must already exist */
2047 tl_assert( HG_(is_sane_Thread)(thr) );
2048
2049 lk = map_locks_maybe_lookup( (Addr)slock );
2050 if (lk && lk->heldBy) {
2051 /* it's held. So do the normal pre-unlock actions, as copied
2052 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2053 duplicates the map_locks_maybe_lookup. */
2054 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2055 False/*!isRDWR*/ );
2056 }
2057}
2058
2059static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2060 void* slock )
2061{
2062 Lock* lk;
2063 /* More kludgery. If the lock has never been seen before, do
2064 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2065 nothing. */
2066
2067 if (SHOW_EVENTS >= 1)
2068 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2069 "(ctid=%d, slock=%p)\n",
2070 (Int)tid, (void*)slock );
2071
2072 lk = map_locks_maybe_lookup( (Addr)slock );
2073 if (!lk) {
2074 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2075 }
2076}
2077
2078static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2079 void* slock, Word isTryLock )
2080{
2081 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2082}
2083
2084static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2085 void* slock )
2086{
2087 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2088}
2089
2090static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2091 void* slock )
2092{
2093 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2094}
2095
2096
sewardj9f569b72008-11-13 13:33:09 +00002097/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002098/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002099/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002100
sewardj02114542009-07-28 20:52:36 +00002101/* A mapping from CV to (the SO associated with it, plus some
2102 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002103 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2104 wait on it completes, we do a 'recv' from the SO. This is believed
2105 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002106 signallings/broadcasts.
2107*/
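
/* A minimal client-side sketch of the edge this gives us:

      // Thread A                     // Thread B
      x = 1;                          pthread_mutex_lock(&mx);
      pthread_cond_signal(&cv);       pthread_cond_wait(&cv, &mx);
         // 'send' on cv's SO            // 'recv' from cv's SO
                                      // ... read x: no race ...

   The send/recv pair makes A's write of x happen-before B's code
   after the wait, provided B really was woken by A's signal. */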
2108
sewardj02114542009-07-28 20:52:36 +00002109/* .so is the SO for this CV.
2110 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002111
sewardj02114542009-07-28 20:52:36 +00002112 POSIX says effectively that the first pthread_cond_{timed}wait call
2113 causes a dynamic binding between the CV and the mutex, and that
2114 lasts until such time as the waiter count falls to zero. Hence
2115 need to keep track of the number of waiters in order to do
2116 consistency tracking. */
2117typedef
2118 struct {
2119 SO* so; /* libhb-allocated SO */
2120 void* mx_ga; /* addr of associated mutex, if any */
2121 UWord nWaiters; /* # threads waiting on the CV */
2122 }
2123 CVInfo;
2124
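/* A sketch of the binding rule implemented below: the first
   pthread_cond_wait(&cv, &mx1) binds cv to mx1; a subsequent
   pthread_cond_wait(&cv, &mx2) while nWaiters > 0 draws a
   "different mutex" complaint; once nWaiters falls back to zero,
   cv may be re-bound to some other mutex. */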
2125
2126/* pthread_cond_t* -> CVInfo* */
2127static WordFM* map_cond_to_CVInfo = NULL;
2128
2129static void map_cond_to_CVInfo_INIT ( void ) {
2130 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2131 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2132 "hg.mctCI.1", HG_(free), NULL );
2133 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002134 }
2135}
2136
sewardj02114542009-07-28 20:52:36 +00002137static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002138 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002139 map_cond_to_CVInfo_INIT();
2140 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002141 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002142 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002143 } else {
sewardj02114542009-07-28 20:52:36 +00002144 SO* so = libhb_so_alloc();
2145 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2146 cvi->so = so;
2147 cvi->mx_ga = 0;
2148 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2149 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002150 }
2151}
2152
sewardj02114542009-07-28 20:52:36 +00002153static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002154 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002155 map_cond_to_CVInfo_INIT();
2156 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2157 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002158 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002159 tl_assert(cvi);
2160 tl_assert(cvi->so);
2161 libhb_so_dealloc(cvi->so);
2162 cvi->mx_ga = 0;
2163 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002164 }
2165}
2166
2167static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2168{
sewardjf98e1c02008-10-25 16:22:41 +00002169 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2170 cond to a SO if it is not already so bound, and 'send' on the
2171 SO. This is later used by other thread(s) which successfully
2172 exit from a pthread_cond_wait on the same cv; then they 'recv'
2173 from the SO, thereby acquiring a dependency on this signalling
2174 event. */
sewardjb4112022007-11-09 22:49:28 +00002175 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002176 CVInfo* cvi;
2177 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002178
2179 if (SHOW_EVENTS >= 1)
2180 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2181 (Int)tid, (void*)cond );
2182
sewardjb4112022007-11-09 22:49:28 +00002183 thr = map_threads_maybe_lookup( tid );
2184 tl_assert(thr); /* cannot fail - Thread* must already exist */
2185
sewardj02114542009-07-28 20:52:36 +00002186 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2187 tl_assert(cvi);
2188 tl_assert(cvi->so);
2189
sewardjb4112022007-11-09 22:49:28 +00002190 // error-if: mutex is bogus
2191 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002192 // Hmm. POSIX doesn't actually say that it's an error to call
2193 // pthread_cond_signal with the associated mutex being unlocked.
2194 // Although it does say that it should be "if consistent scheduling
2195 // is desired."
2196 //
2197 // For the moment, disable these checks.
2198 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2199 //if (lk == NULL || cvi->mx_ga == 0) {
2200 // HG_(record_error_Misc)( thr,
2201 // "pthread_cond_{signal,broadcast}: "
2202 // "no or invalid mutex associated with cond");
2203 //}
2204 ///* note: lk could be NULL. Be careful. */
2205 //if (lk) {
2206 // if (lk->kind == LK_rdwr) {
2207 // HG_(record_error_Misc)(thr,
2208 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2209 // }
2210 // if (lk->heldBy == NULL) {
2211 // HG_(record_error_Misc)(thr,
2212 // "pthread_cond_{signal,broadcast}: "
2213 // "associated lock is not held by any thread");
2214 // }
2215 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2216 // HG_(record_error_Misc)(thr,
2217 // "pthread_cond_{signal,broadcast}: "
2218 // "associated lock is not held by calling thread");
2219 // }
2220 //}
sewardjb4112022007-11-09 22:49:28 +00002221
sewardj02114542009-07-28 20:52:36 +00002222 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002223}
2224
2225/* returns True if it reckons 'mutex' is valid and held by this
2226 thread, else False */
2227static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2228 void* cond, void* mutex )
2229{
2230 Thread* thr;
2231 Lock* lk;
2232 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002233 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002234
2235 if (SHOW_EVENTS >= 1)
2236 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2237 "(ctid=%d, cond=%p, mutex=%p)\n",
2238 (Int)tid, (void*)cond, (void*)mutex );
2239
sewardjb4112022007-11-09 22:49:28 +00002240 thr = map_threads_maybe_lookup( tid );
2241 tl_assert(thr); /* cannot fail - Thread* must already exist */
2242
2243 lk = map_locks_maybe_lookup( (Addr)mutex );
2244
2245 /* Check for stupid mutex arguments. There are various ways to be
2246 a bozo. Only complain once, though, even if more than one thing
2247 is wrong. */
2248 if (lk == NULL) {
2249 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002250 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002251 thr,
2252 "pthread_cond_{timed}wait called with invalid mutex" );
2253 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002254 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002255 if (lk->kind == LK_rdwr) {
2256 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002257 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002258 thr, "pthread_cond_{timed}wait called with mutex "
2259 "of type pthread_rwlock_t*" );
2260 } else
2261 if (lk->heldBy == NULL) {
2262 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002263 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002264 thr, "pthread_cond_{timed}wait called with un-held mutex");
2265 } else
2266 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002267 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002268 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002269 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002270 thr, "pthread_cond_{timed}wait called with mutex "
2271 "held by a different thread" );
2272 }
2273 }
2274
2275 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002276 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2277 tl_assert(cvi);
2278 tl_assert(cvi->so);
2279 if (cvi->nWaiters == 0) {
2280 /* form initial (CV,MX) binding */
2281 cvi->mx_ga = mutex;
2282 }
2283 else /* check existing (CV,MX) binding */
2284 if (cvi->mx_ga != mutex) {
2285 HG_(record_error_Misc)(
2286 thr, "pthread_cond_{timed}wait: cond is associated "
2287 "with a different mutex");
2288 }
2289 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002290
2291 return lk_valid;
2292}
2293
2294static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2295 void* cond, void* mutex )
2296{
sewardjf98e1c02008-10-25 16:22:41 +00002297 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2298 the SO for this cond, and 'recv' from it so as to acquire a
2299 dependency edge back to the signaller/broadcaster. */
2300 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002301 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002302
2303 if (SHOW_EVENTS >= 1)
2304 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2305 "(ctid=%d, cond=%p, mutex=%p)\n",
2306 (Int)tid, (void*)cond, (void*)mutex );
2307
sewardjb4112022007-11-09 22:49:28 +00002308 thr = map_threads_maybe_lookup( tid );
2309 tl_assert(thr); /* cannot fail - Thread* must already exist */
2310
2311 // error-if: cond is also associated with a different mutex
2312
sewardj02114542009-07-28 20:52:36 +00002313 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2314 tl_assert(cvi);
2315 tl_assert(cvi->so);
2316 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002317
sewardj02114542009-07-28 20:52:36 +00002318 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002319 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2320 it? If this happened it would surely be a bug in the threads
2321 library. Or one of those fabled "spurious wakeups". */
2322 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2323 "succeeded on"
2324 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002325 }
sewardjf98e1c02008-10-25 16:22:41 +00002326
2327 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002328 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2329
2330 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002331}
2332
2333static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2334 void* cond )
2335{
2336 /* Deal with destroy events. The only purpose is to free storage
2337 associated with the CV, so as to avoid any possible resource
2338 leaks. */
2339 if (SHOW_EVENTS >= 1)
2340 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2341 "(ctid=%d, cond=%p)\n",
2342 (Int)tid, (void*)cond );
2343
sewardj02114542009-07-28 20:52:36 +00002344 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002345}
2346
2347
sewardj9f569b72008-11-13 13:33:09 +00002348/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002349/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002350/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002351
2352/* EXPOSITION only */
2353static
2354void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2355{
2356 if (SHOW_EVENTS >= 1)
2357 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2358 (Int)tid, (void*)rwl );
2359 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002360 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002361 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2362}
2363
2364static
2365void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
2366{
2367 Thread* thr;
2368 Lock* lk;
2369 if (SHOW_EVENTS >= 1)
2370 VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
2371 (Int)tid, (void*)rwl );
2372
2373 thr = map_threads_maybe_lookup( tid );
2374 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00002375 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00002376
2377 lk = map_locks_maybe_lookup( (Addr)rwl );
2378
2379 if (lk == NULL || lk->kind != LK_rdwr) {
sewardjf98e1c02008-10-25 16:22:41 +00002380 HG_(record_error_Misc)(
2381 thr, "pthread_rwlock_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00002382 }
2383
2384 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00002385 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002386 tl_assert( lk->guestaddr == (Addr)rwl );
2387 if (lk->heldBy) {
2388 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00002389 HG_(record_error_Misc)(
2390 thr, "pthread_rwlock_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00002391 /* remove lock from locksets of all owning threads */
2392 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00002393 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00002394 lk->heldBy = NULL;
2395 lk->heldW = False;
sewardj1c7e8332007-11-29 13:04:03 +00002396 lk->acquired_at = NULL;
sewardjb4112022007-11-09 22:49:28 +00002397 }
2398 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00002399 tl_assert( HG_(is_sane_LockN)(lk) );
2400
sewardj1cbc12f2008-11-10 16:16:46 +00002401 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00002402 map_locks_delete( lk->guestaddr );
2403 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00002404 }
2405
sewardjf98e1c02008-10-25 16:22:41 +00002406 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002407 all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
2408}
2409
2410static
sewardj789c3c52008-02-25 12:10:07 +00002411void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2412 void* rwl,
2413 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002414{
2415 /* Just check the rwl is sane; nothing else to do. */
2416 // 'rwl' may be invalid - not checked by wrapper
2417 Thread* thr;
2418 Lock* lk;
2419 if (SHOW_EVENTS >= 1)
2420 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2421 (Int)tid, (Int)isW, (void*)rwl );
2422
2423 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002424 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002425 thr = map_threads_maybe_lookup( tid );
2426 tl_assert(thr); /* cannot fail - Thread* must already exist */
2427
2428 lk = map_locks_maybe_lookup( (Addr)rwl );
2429 if ( lk
2430 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2431 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002432 HG_(record_error_Misc)(
2433 thr, "pthread_rwlock_{rd,rw}lock with a "
2434 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002435 }
2436}
2437
2438static
2439void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2440{
2441 // only called if the real library call succeeded - so mutex is sane
2442 Thread* thr;
2443 if (SHOW_EVENTS >= 1)
2444 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2445 (Int)tid, (Int)isW, (void*)rwl );
2446
2447 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2448 thr = map_threads_maybe_lookup( tid );
2449 tl_assert(thr); /* cannot fail - Thread* must already exist */
2450
2451 (isW ? evhH__post_thread_w_acquires_lock
2452 : evhH__post_thread_r_acquires_lock)(
2453 thr,
2454 LK_rdwr, /* if not known, create new lock with this LockKind */
2455 (Addr)rwl
2456 );
2457}
2458
2459static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2460{
2461 // 'rwl' may be invalid - not checked by wrapper
2462 Thread* thr;
2463 if (SHOW_EVENTS >= 1)
2464 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2465 (Int)tid, (void*)rwl );
2466
2467 thr = map_threads_maybe_lookup( tid );
2468 tl_assert(thr); /* cannot fail - Thread* must already exist */
2469
2470 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2471}
2472
2473static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2474{
2475 // only called if the real library call succeeded - so mutex is sane
2476 Thread* thr;
2477 if (SHOW_EVENTS >= 1)
2478 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2479 (Int)tid, (void*)rwl );
2480 thr = map_threads_maybe_lookup( tid );
2481 tl_assert(thr); /* cannot fail - Thread* must already exist */
2482
2483 // anything we should do here?
2484}
2485
2486
sewardj9f569b72008-11-13 13:33:09 +00002487/* ---------------------------------------------------------- */
2488/* -------------- events to do with semaphores -------------- */
2489/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002490
sewardj11e352f2007-11-30 11:11:02 +00002491/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002492 variables. */
2493
sewardjf98e1c02008-10-25 16:22:41 +00002494/* For each semaphore, we maintain a stack of SOs. When a 'post'
2495 operation is done on a semaphore (unlocking, essentially), a new SO
2496 is created for the posting thread, the posting thread does a strong
2497 send to it (which merely installs the posting thread's VC in the
2498 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002499
2500 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002501 semaphore, we pop a SO off the semaphore's stack (which should be
2502 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002503 dependencies between posters and waiters of the semaphore.
2504
sewardjf98e1c02008-10-25 16:22:41 +00002505 It may not be necessary to use a stack - perhaps a bag of SOs would
2506 do. But we do need to keep track of how many unused-up posts have
2507 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002508
sewardjf98e1c02008-10-25 16:22:41 +00002509 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002510 twice on S. T3 cannot complete its waits without both T1 and T2
2511 posting. The above mechanism will ensure that T3 acquires
2512 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002513
sewardjf98e1c02008-10-25 16:22:41 +00002514 When a semaphore is initialised with value N, we do as if we'd
2515 posted N times on the semaphore: basically create N SOs and do a
2516   strong send to all of them. This allows up to N waits on the
2517 semaphore to acquire a dependency on the initialisation point,
2518 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002519
2520 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2521 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002522*/
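
/* A minimal sketch of the T1/T2/T3 scenario described above:

      // T1               // T2               // T3
      x = 1;              y = 1;
      sem_post(&s);       sem_post(&s);       sem_wait(&s);
                                              sem_wait(&s);
                                              // use x and y: no race

   Each sem_post pushes a strongly-sent SO; each sem_wait pops one
   and does a strong recv. After both waits, T3 has acquired
   dependencies on both T1 and T2, whichever order the SOs were
   popped in. */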
2523
sewardjf98e1c02008-10-25 16:22:41 +00002524/* sem_t* -> XArray* SO* */
2525static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002526
sewardjf98e1c02008-10-25 16:22:41 +00002527static void map_sem_to_SO_stack_INIT ( void ) {
2528 if (map_sem_to_SO_stack == NULL) {
2529 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2530 HG_(free), NULL );
2531 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002532 }
2533}
2534
sewardjf98e1c02008-10-25 16:22:41 +00002535static void push_SO_for_sem ( void* sem, SO* so ) {
2536 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002537 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002538 tl_assert(so);
2539 map_sem_to_SO_stack_INIT();
2540 if (VG_(lookupFM)( map_sem_to_SO_stack,
2541 &keyW, (UWord*)&xa, (UWord)sem )) {
2542 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002543 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002544 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002545 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002546 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2547 VG_(addToXA)( xa, &so );
2548 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002549 }
2550}
2551
sewardjf98e1c02008-10-25 16:22:41 +00002552static SO* mb_pop_SO_for_sem ( void* sem ) {
2553 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002554 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002555 SO* so;
2556 map_sem_to_SO_stack_INIT();
2557 if (VG_(lookupFM)( map_sem_to_SO_stack,
2558 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002559 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002560 Word sz;
2561 tl_assert(keyW == (UWord)sem);
2562 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002563 tl_assert(sz >= 0);
2564 if (sz == 0)
2565 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002566 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2567 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002568 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002569 return so;
sewardjb4112022007-11-09 22:49:28 +00002570 } else {
2571 /* hmm, that's odd. No stack for this semaphore. */
2572 return NULL;
2573 }
2574}
2575
sewardj11e352f2007-11-30 11:11:02 +00002576static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002577{
sewardjf98e1c02008-10-25 16:22:41 +00002578 UWord keyW, valW;
2579 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002580
sewardjb4112022007-11-09 22:49:28 +00002581 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002582 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002583 (Int)tid, (void*)sem );
2584
sewardjf98e1c02008-10-25 16:22:41 +00002585 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002586
sewardjf98e1c02008-10-25 16:22:41 +00002587 /* Empty out the semaphore's SO stack. This way of doing it is
2588 stupid, but at least it's easy. */
2589 while (1) {
2590 so = mb_pop_SO_for_sem( sem );
2591 if (!so) break;
2592 libhb_so_dealloc(so);
2593 }
2594
2595 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2596 XArray* xa = (XArray*)valW;
2597 tl_assert(keyW == (UWord)sem);
2598 tl_assert(xa);
2599 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2600 VG_(deleteXA)(xa);
2601 }
sewardjb4112022007-11-09 22:49:28 +00002602}
2603
sewardj11e352f2007-11-30 11:11:02 +00002604static
2605void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2606{
sewardjf98e1c02008-10-25 16:22:41 +00002607 SO* so;
2608 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002609
2610 if (SHOW_EVENTS >= 1)
2611 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2612 (Int)tid, (void*)sem, value );
2613
sewardjf98e1c02008-10-25 16:22:41 +00002614 thr = map_threads_maybe_lookup( tid );
2615 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002616
sewardjf98e1c02008-10-25 16:22:41 +00002617 /* Empty out the semaphore's SO stack. This way of doing it is
2618 stupid, but at least it's easy. */
2619 while (1) {
2620 so = mb_pop_SO_for_sem( sem );
2621 if (!so) break;
2622 libhb_so_dealloc(so);
2623 }
sewardj11e352f2007-11-30 11:11:02 +00002624
sewardjf98e1c02008-10-25 16:22:41 +00002625 /* If we don't do this check, the following loop runs us out
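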
2626 of memory for stupid initial values of 'value'. */
2627 if (value > 10000) {
2628 HG_(record_error_Misc)(
2629 thr, "sem_init: initial value exceeds 10000; using 10000" );
2630 value = 10000;
2631 }
sewardj11e352f2007-11-30 11:11:02 +00002632
sewardjf98e1c02008-10-25 16:22:41 +00002633 /* Now create 'valid' new SOs for the thread, do a strong send to
2634 each of them, and push them all on the stack. */
2635 for (; value > 0; value--) {
2636 Thr* hbthr = thr->hbthr;
2637 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002638
sewardjf98e1c02008-10-25 16:22:41 +00002639 so = libhb_so_alloc();
2640 libhb_so_send( hbthr, so, True/*strong send*/ );
2641 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002642 }
2643}
2644
2645static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002646{
sewardjf98e1c02008-10-25 16:22:41 +00002647 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2648 it (iow, write our VC into it, then tick ours), and push the SO
2649 onto a stack of SOs associated with 'sem'. This is later used
2650 by other thread(s) which successfully exit from a sem_wait on
2651 the same sem; by doing a strong recv from SOs popped off the
2652 stack, they acquire dependencies on the posting thread
2653 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002654
sewardjf98e1c02008-10-25 16:22:41 +00002655 Thread* thr;
2656 SO* so;
2657 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002658
2659 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002660 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002661 (Int)tid, (void*)sem );
2662
2663 thr = map_threads_maybe_lookup( tid );
2664 tl_assert(thr); /* cannot fail - Thread* must already exist */
2665
2666 // error-if: sem is bogus
2667
sewardjf98e1c02008-10-25 16:22:41 +00002668 hbthr = thr->hbthr;
2669 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002670
sewardjf98e1c02008-10-25 16:22:41 +00002671 so = libhb_so_alloc();
2672 libhb_so_send( hbthr, so, True/*strong send*/ );
2673 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002674}
2675
sewardj11e352f2007-11-30 11:11:02 +00002676static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002677{
sewardjf98e1c02008-10-25 16:22:41 +00002678 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2679 the 'sem' from this semaphore's SO-stack, and do a strong recv
2680 from it. This creates a dependency back to one of the post-ers
2681 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002682
sewardjf98e1c02008-10-25 16:22:41 +00002683 Thread* thr;
2684 SO* so;
2685 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002686
2687 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002688 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002689 (Int)tid, (void*)sem );
2690
2691 thr = map_threads_maybe_lookup( tid );
2692 tl_assert(thr); /* cannot fail - Thread* must already exist */
2693
2694 // error-if: sem is bogus
2695
sewardjf98e1c02008-10-25 16:22:41 +00002696 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002697
sewardjf98e1c02008-10-25 16:22:41 +00002698 if (so) {
2699 hbthr = thr->hbthr;
2700 tl_assert(hbthr);
2701
2702 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2703 libhb_so_dealloc(so);
2704 } else {
2705 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2706 If this happened it would surely be a bug in the threads
2707 library. */
2708 HG_(record_error_Misc)(
2709 thr, "Bug in libpthread: sem_wait succeeded on"
2710 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002711 }
2712}
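/* A client-side sketch (illustrative only, not part of the tool) of
   the handoff modelled by the three handlers above.  The post's
   strong send and the wait's strong recv create a happens-before
   edge, so the read of 'shared' below is not reported as racing
   with the write:

      int   shared;                 // written by T1, read by T2
      sem_t sem;                    // sem_init(&sem, 0, 0)

      // T1:                        // T2:
      shared = 42;                  sem_wait(&sem);
      sem_post(&sem);               printf("%d\n", shared);
*/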
2713
2714
sewardj9f569b72008-11-13 13:33:09 +00002715/* -------------------------------------------------------- */
2716/* -------------- events to do with barriers -------------- */
2717/* -------------------------------------------------------- */
2718
2719typedef
2720 struct {
2721 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002722 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002723 UWord size; /* declared size */
2724 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2725 }
2726 Bar;
2727
2728static Bar* new_Bar ( void ) {
2729 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2730 tl_assert(bar);
2731 /* all fields are zero */
2732 tl_assert(bar->initted == False);
2733 return bar;
2734}
2735
2736static void delete_Bar ( Bar* bar ) {
2737 tl_assert(bar);
2738 if (bar->waiting)
2739 VG_(deleteXA)(bar->waiting);
2740 HG_(free)(bar);
2741}
2742
2743/* A mapping which stores auxiliary data for barriers. */
2744
2745/* pthread_barrier_t* -> Bar* */
2746static WordFM* map_barrier_to_Bar = NULL;
2747
2748static void map_barrier_to_Bar_INIT ( void ) {
2749 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2750 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2751 "hg.mbtBI.1", HG_(free), NULL );
2752 tl_assert(map_barrier_to_Bar != NULL);
2753 }
2754}
2755
2756static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2757 UWord key, val;
2758 map_barrier_to_Bar_INIT();
2759 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2760 tl_assert(key == (UWord)barrier);
2761 return (Bar*)val;
2762 } else {
2763 Bar* bar = new_Bar();
2764 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2765 return bar;
2766 }
2767}
2768
2769static void map_barrier_to_Bar_delete ( void* barrier ) {
2770 UWord keyW, valW;
2771 map_barrier_to_Bar_INIT();
2772 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2773 Bar* bar = (Bar*)valW;
2774 tl_assert(keyW == (UWord)barrier);
2775 delete_Bar(bar);
2776 }
2777}
2778
2779
2780static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2781 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002782 UWord count,
2783 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002784{
2785 Thread* thr;
2786 Bar* bar;
2787
2788 if (SHOW_EVENTS >= 1)
2789 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002790 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2791 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002792
2793 thr = map_threads_maybe_lookup( tid );
2794 tl_assert(thr); /* cannot fail - Thread* must already exist */
2795
2796 if (count == 0) {
2797 HG_(record_error_Misc)(
2798 thr, "pthread_barrier_init: 'count' argument is zero"
2799 );
2800 }
2801
sewardj406bac82010-03-03 23:03:40 +00002802 if (resizable != 0 && resizable != 1) {
2803 HG_(record_error_Misc)(
2804 thr, "pthread_barrier_init: invalid 'resizable' argument"
2805 );
2806 }
2807
sewardj9f569b72008-11-13 13:33:09 +00002808 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2809 tl_assert(bar);
2810
2811 if (bar->initted) {
2812 HG_(record_error_Misc)(
2813 thr, "pthread_barrier_init: barrier is already initialised"
2814 );
2815 }
2816
2817 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2818 tl_assert(bar->initted);
2819 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002820 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002821 );
2822 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2823 }
2824 if (!bar->waiting) {
2825 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2826 sizeof(Thread*) );
2827 }
2828
2829 tl_assert(bar->waiting);
2830 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002831 bar->initted = True;
2832 bar->resizable = resizable == 1 ? True : False;
2833 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002834}
2835
2836
2837static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2838 void* barrier )
2839{
sewardj553655c2008-11-14 19:41:19 +00002840 Thread* thr;
2841 Bar* bar;
2842
sewardj9f569b72008-11-13 13:33:09 +00002843 /* Deal with destroy events. The only purpose is to free storage
2844 associated with the barrier, so as to avoid any possible
2845 resource leaks. */
2846 if (SHOW_EVENTS >= 1)
2847 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2848 "(tid=%d, barrier=%p)\n",
2849 (Int)tid, (void*)barrier );
2850
sewardj553655c2008-11-14 19:41:19 +00002851 thr = map_threads_maybe_lookup( tid );
2852 tl_assert(thr); /* cannot fail - Thread* must already exist */
2853
2854 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2855 tl_assert(bar);
2856
2857 if (!bar->initted) {
2858 HG_(record_error_Misc)(
2859 thr, "pthread_barrier_destroy: barrier was never initialised"
2860 );
2861 }
2862
2863 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2864 HG_(record_error_Misc)(
2865 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2866 );
2867 }
2868
sewardj9f569b72008-11-13 13:33:09 +00002869 /* Maybe we shouldn't do this; just let it persist, so that when it
2870 is reinitialised we don't need to do any dynamic memory
2871 allocation? The downside is a potentially unlimited space leak,
2872 if the client creates (in turn) a large number of barriers all
2873 at different locations. Note that if we do later move to the
2874 don't-delete-it scheme, we need to mark the barrier as
2875 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002876 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002877 map_barrier_to_Bar_delete( barrier );
2878}
2879
2880
sewardj406bac82010-03-03 23:03:40 +00002881/* All the threads have arrived. Now do the Interesting Bit. Get a
2882 new synchronisation object and do a weak send to it from all the
2883 participating threads. This makes its vector clocks be the join of
2884 all the individual threads' vector clocks. Then do a strong
2885 receive from it back to all threads, so that their VCs are a copy
2886 of it (hence are all equal to the join of their original VCs.) */
2887static void do_barrier_cross_sync_and_empty ( Bar* bar )
2888{
2889 /* XXX check bar->waiting has no duplicates */
2890 UWord i;
2891 SO* so = libhb_so_alloc();
2892
2893 tl_assert(bar->waiting);
2894 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2895
2896 /* compute the join ... */
2897 for (i = 0; i < bar->size; i++) {
2898 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2899 Thr* hbthr = t->hbthr;
2900 libhb_so_send( hbthr, so, False/*weak send*/ );
2901 }
2902 /* ... and distribute to all threads */
2903 for (i = 0; i < bar->size; i++) {
2904 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2905 Thr* hbthr = t->hbthr;
2906 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2907 }
2908
2909 /* finally, we must empty out the waiting vector */
2910 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2911
2912 /* and we don't need this any more. Perhaps a stack-allocated
2913 SO would be better? */
2914 libhb_so_dealloc(so);
2915}
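/* A worked sketch of the above, using hypothetical two-entry vector
   clocks and ignoring any per-event ticks: if T1 arrives at the
   barrier with VC [5,0] and T2 with [0,7], the two weak sends leave
   the SO holding [5,7] (the pointwise maximum), and the strong
   receives then set both threads' VCs to [5,7].  Every access made
   before the barrier is thereby ordered before every access made
   after it, by either thread. */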
2916
2917
sewardj9f569b72008-11-13 13:33:09 +00002918static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2919 void* barrier )
2920{
sewardj1c466b72008-11-19 11:52:14 +00002921 /* This function gets called after a client thread calls
2922 pthread_barrier_wait but before it arrives at the real
2923 pthread_barrier_wait.
2924
2925 Why is the following correct? It's a bit subtle.
2926
2927 If this is not the last thread arriving at the barrier, we simply
2928 note its presence and return. Because valgrind (at least as of
2929 Nov 08) is single threaded, we are guaranteed safe from any race
2930 conditions when in this function -- no other client threads are
2931 running.
2932
2933 If this is the last thread, then we are again the only running
2934 thread. All the other threads will have either arrived at the
2935 real pthread_barrier_wait or are on their way to it, but in any
2936 case are guaranteed not to be able to move past it, because this
2937 thread is currently in this function and so has not yet arrived
2938 at the real pthread_barrier_wait. That means that:
2939
2940 1. While we are in this function, none of the other threads
2941 waiting at the barrier can move past it.
2942
2943 2. When this function returns (and simulated execution resumes),
2944 this thread and all other waiting threads will be able to move
2945 past the real barrier.
2946
2947 Because of this, it is now safe to update the vector clocks of
2948 all threads, to represent the fact that they all arrived at the
2949 barrier and have all moved on. There is no danger of any
2950 complications to do with some threads leaving the barrier and
2951 racing back round to the front, whilst others are still leaving
2952 (which is the primary source of complication in correct handling/
2953 implementation of barriers). That can't happen because we update
2954 here our data structures so as to indicate that the threads have
2955 passed the barrier, even though, as per (2) above, they are
2956 guaranteed not to pass the barrier until we return.
2957
2958 This relies crucially on Valgrind being single threaded. If that
2959 changes, this will need to be reconsidered.
2960 */
sewardj9f569b72008-11-13 13:33:09 +00002961 Thread* thr;
2962 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00002963 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00002964
2965 if (SHOW_EVENTS >= 1)
2966 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2967 "(tid=%d, barrier=%p)\n",
2968 (Int)tid, (void*)barrier );
2969
2970 thr = map_threads_maybe_lookup( tid );
2971 tl_assert(thr); /* cannot fail - Thread* must already exist */
2972
2973 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2974 tl_assert(bar);
2975
2976 if (!bar->initted) {
2977 HG_(record_error_Misc)(
2978 thr, "pthread_barrier_wait: barrier is uninitialised"
2979 );
2980 return; /* client is broken .. avoid assertions below */
2981 }
2982
2983 /* guaranteed by _INIT_PRE above */
2984 tl_assert(bar->size > 0);
2985 tl_assert(bar->waiting);
2986
2987 VG_(addToXA)( bar->waiting, &thr );
2988
2989 /* guaranteed by this function */
2990 present = VG_(sizeXA)(bar->waiting);
2991 tl_assert(present > 0 && present <= bar->size);
2992
2993 if (present < bar->size)
2994 return;
2995
sewardj406bac82010-03-03 23:03:40 +00002996 do_barrier_cross_sync_and_empty(bar);
2997}
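/* Client-side sketch (illustrative only): for a barrier initialised
   with count 3, the first two callers of pthread_barrier_wait are
   merely appended to bar->waiting by the handler above; the third
   caller's handler sees present == bar->size and performs the
   cross-sync, ordering all pre-wait accesses of all three threads
   before all of their post-wait accesses. */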
sewardj9f569b72008-11-13 13:33:09 +00002998
sewardj9f569b72008-11-13 13:33:09 +00002999
sewardj406bac82010-03-03 23:03:40 +00003000static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3001 void* barrier,
3002 UWord newcount )
3003{
3004 Thread* thr;
3005 Bar* bar;
3006 UWord present;
3007
3008 if (SHOW_EVENTS >= 1)
3009 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3010 "(tid=%d, barrier=%p, newcount=%lu)\n",
3011 (Int)tid, (void*)barrier, newcount );
3012
3013 thr = map_threads_maybe_lookup( tid );
3014 tl_assert(thr); /* cannot fail - Thread* must already exist */
3015
3016 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3017 tl_assert(bar);
3018
3019 if (!bar->initted) {
3020 HG_(record_error_Misc)(
3021 thr, "pthread_barrier_resize: barrier is uninitialised"
3022 );
3023 return; /* client is broken .. avoid assertions below */
3024 }
3025
3026 if (!bar->resizable) {
3027 HG_(record_error_Misc)(
3028 thr, "pthread_barrier_resize: barrier may not be resized"
3029 );
3030 return; /* client is broken .. avoid assertions below */
3031 }
3032
3033 if (newcount == 0) {
3034 HG_(record_error_Misc)(
3035 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3036 );
3037 return; /* client is broken .. avoid assertions below */
3038 }
3039
3040 /* guaranteed by _INIT_PRE above */
3041 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003042 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003043 /* Guaranteed by this fn */
3044 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003045
sewardj406bac82010-03-03 23:03:40 +00003046 if (newcount >= bar->size) {
3047 /* Increasing the capacity. There's no possibility of threads
3048 moving on from the barrier in this situation, so just note
3049 the fact and do nothing more. */
3050 bar->size = newcount;
3051 } else {
3052 /* Decreasing the capacity. If we decrease it to be equal or
3053 below the number of waiting threads, they will now move past
3054 the barrier, so need to mess with dep edges in the same way
3055 as if the barrier had filled up normally. */
3056 present = VG_(sizeXA)(bar->waiting);
3057 tl_assert(present >= 0 && present <= bar->size);
3058 if (newcount <= present) {
3059 bar->size = present; /* keep the cross_sync call happy */
3060 do_barrier_cross_sync_and_empty(bar);
3061 }
3062 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003063 }
sewardj9f569b72008-11-13 13:33:09 +00003064}
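/* E.g. (illustrative): a barrier of size 4 with 3 threads already in
   bar->waiting, resized to newcount = 3 (or fewer).  The real
   barrier will now release the waiters, so the handler above
   performs the same cross-sync as a normal fill-up before adopting
   the new size. */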
3065
3066
sewardjed2e72e2009-08-14 11:08:24 +00003067/* ----------------------------------------------------- */
3068/* ----- events to do with user-specified HB edges ----- */
3069/* ----------------------------------------------------- */
3070
3071/* A mapping from arbitrary UWord tag to the SO associated with it.
3072 The UWord tags are meaningless to us, interpreted only by the
3073 user. */
3074
3075
3076
3077/* UWord -> SO* */
3078static WordFM* map_usertag_to_SO = NULL;
3079
3080static void map_usertag_to_SO_INIT ( void ) {
3081 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3082 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3083 "hg.mutS.1", HG_(free), NULL );
3084 tl_assert(map_usertag_to_SO != NULL);
3085 }
3086}
3087
3088static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3089 UWord key, val;
3090 map_usertag_to_SO_INIT();
3091 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3092 tl_assert(key == (UWord)usertag);
3093 return (SO*)val;
3094 } else {
3095 SO* so = libhb_so_alloc();
3096 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3097 return so;
3098 }
3099}
3100
3101// If it's ever needed (XXX check before use)
3102//static void map_usertag_to_SO_delete ( UWord usertag ) {
3103// UWord keyW, valW;
3104// map_usertag_to_SO_INIT();
3105// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3106// SO* so = (SO*)valW;
3107// tl_assert(keyW == usertag);
3108// tl_assert(so);
3109// libhb_so_dealloc(so);
3110// }
3111//}
3112
3113
3114static
3115void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3116{
3117 /* TID is just about to notionally send a message on a notional
3118 abstract synchronisation object whose identity is given by
3119 USERTAG. Bind USERTAG to a real SO if it is not already so
3120 bound, and do a 'strong send' on the SO. This is later used by
3121 other thread(s) which successfully 'receive' from the SO,
3122 thereby acquiring a dependency on this signalling event. */
3123 Thread* thr;
3124 SO* so;
3125
3126 if (SHOW_EVENTS >= 1)
3127 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3128 (Int)tid, usertag );
3129
3130 thr = map_threads_maybe_lookup( tid );
3131 tl_assert(thr); /* cannot fail - Thread* must already exist */
3132
3133 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3134 tl_assert(so);
3135
3136 libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
3137}
3138
3139static
3140void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3141{
3142 /* TID has just notionally received a message from a notional
3143 abstract synchronisation object whose identity is given by
3144 USERTAG. Bind USERTAG to a real SO if it is not already so
3145 bound. If the SO has at some point in the past been 'sent' on,
3146 do a 'strong receive' on it, thereby acquiring a dependency on
3147 the sender. */
3148 Thread* thr;
3149 SO* so;
3150
3151 if (SHOW_EVENTS >= 1)
3152 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3153 (Int)tid, usertag );
3154
3155 thr = map_threads_maybe_lookup( tid );
3156 tl_assert(thr); /* cannot fail - Thread* must already exist */
3157
3158 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3159 tl_assert(so);
3160
3161 /* Acquire a dependency on it. If the SO has never so far been
3162 sent on, then libhb_so_recv will do nothing. So we're safe
3163 regardless of SO's history. */
3164 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3165}
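/* Client-side sketch (illustrative only): a hand-rolled flag handoff
   made visible to the tool via the two handlers above.  This assumes
   the client macros ANNOTATE_HAPPENS_BEFORE/ANNOTATE_HAPPENS_AFTER
   (or equivalent wrappers issuing the _HG_USERSO_SEND_PRE and
   _HG_USERSO_RECV_POST requests with the same tag) are available
   from helgrind.h in this tree; any UWord may serve as the tag:

      // producer:                      // consumer:
      payload = compute();              while (!flag) ;
      ANNOTATE_HAPPENS_BEFORE(&flag);   ANNOTATE_HAPPENS_AFTER(&flag);
      flag = 1;                         use(payload);
*/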
3166
3167
sewardjb4112022007-11-09 22:49:28 +00003168/*--------------------------------------------------------------*/
3169/*--- Lock acquisition order monitoring ---*/
3170/*--------------------------------------------------------------*/
3171
3172/* FIXME: here are some optimisations still to do in
3173 laog__pre_thread_acquires_lock.
3174
3175 The graph is structured so that if L1 --*--> L2 then L1 must be
3176 acquired before L2.
3177
3178 The common case is that some thread T holds (eg) L1 L2 and L3 and
3179 is repeatedly acquiring and releasing Ln, and there is no ordering
3180 error in what it is doing. Hence it repeatly:
3181
3182 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3183 produces the answer No (because there is no error).
3184
3185 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3186 (because they already got added the first time T acquired Ln).
3187
3188 Hence cache these two events:
3189
3190 (1) Cache result of the query from last time. Invalidate the cache
3191 any time any edges are added to or deleted from laog.
3192
3193 (2) Cache these add-edge requests and ignore them if said edges
3194 have already been added to laog. Invalidate the cache any time
3195 any edges are deleted from laog.
3196*/
3197
3198typedef
3199 struct {
3200 WordSetID inns; /* in univ_laog */
3201 WordSetID outs; /* in univ_laog */
3202 }
3203 LAOGLinks;
3204
3205/* lock order acquisition graph */
3206static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3207
3208/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3209 where that edge was created, so that we can show the user later if
3210 we need to. */
3211typedef
3212 struct {
3213 Addr src_ga; /* Lock guest addresses for */
3214 Addr dst_ga; /* src/dst of the edge */
3215 ExeContext* src_ec; /* And corresponding places where that */
3216 ExeContext* dst_ec; /* ordering was established */
3217 }
3218 LAOGLinkExposition;
3219
sewardj250ec2e2008-02-15 22:02:30 +00003220static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003221 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3222 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3223 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3224 if (llx1->src_ga < llx2->src_ga) return -1;
3225 if (llx1->src_ga > llx2->src_ga) return 1;
3226 if (llx1->dst_ga < llx2->dst_ga) return -1;
3227 if (llx1->dst_ga > llx2->dst_ga) return 1;
3228 return 0;
3229}
3230
3231static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3232/* end EXPOSITION ONLY */
3233
3234
sewardja65db102009-01-26 10:45:16 +00003235__attribute__((noinline))
3236static void laog__init ( void )
3237{
3238 tl_assert(!laog);
3239 tl_assert(!laog_exposition);
3240
3241 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3242 HG_(free), NULL/*unboxedcmp*/ );
3243
3244 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3245 cmp_LAOGLinkExposition );
3246 tl_assert(laog);
3247 tl_assert(laog_exposition);
3248}
3249
sewardjb4112022007-11-09 22:49:28 +00003250static void laog__show ( Char* who ) {
3251 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003252 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003253 Lock* me;
3254 LAOGLinks* links;
3255 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003256 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003257 me = NULL;
3258 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003259 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003260 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003261 tl_assert(me);
3262 tl_assert(links);
3263 VG_(printf)(" node %p:\n", me);
3264 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3265 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003266 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003267 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3268 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003269 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003270 me = NULL;
3271 links = NULL;
3272 }
sewardj896f6f92008-08-19 08:38:52 +00003273 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003274 VG_(printf)("}\n");
3275}
3276
3277__attribute__((noinline))
3278static void laog__add_edge ( Lock* src, Lock* dst ) {
3279 Word keyW;
3280 LAOGLinks* links;
3281 Bool presentF, presentR;
3282 if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);
3283
3284 /* Take the opportunity to sanity check the graph. Record in
3285 presentF if there is already a src->dst mapping in this node's
3286 forwards links, and presentR if there is already a src->dst
3287 mapping in this node's backwards links. They should agree!
3288 Also, we need to know whether the edge was already present so as
3289 to decide whether or not to update the link details mapping. We
3290 can compute presentF and presentR essentially for free, so may
3291 as well do this always. */
3292 presentF = presentR = False;
3293
3294 /* Update the out edges for src */
3295 keyW = 0;
3296 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003297 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003298 WordSetID outs_new;
3299 tl_assert(links);
3300 tl_assert(keyW == (Word)src);
3301 outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
3302 presentF = outs_new == links->outs;
3303 links->outs = outs_new;
3304 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003305 links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003306 links->inns = HG_(emptyWS)( univ_laog );
3307 links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
sewardj896f6f92008-08-19 08:38:52 +00003308 VG_(addToFM)( laog, (Word)src, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003309 }
3310 /* Update the in edges for dst */
3311 keyW = 0;
3312 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003313 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003314 WordSetID inns_new;
3315 tl_assert(links);
3316 tl_assert(keyW == (Word)dst);
3317 inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
3318 presentR = inns_new == links->inns;
3319 links->inns = inns_new;
3320 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003321 links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
sewardjb4112022007-11-09 22:49:28 +00003322 links->inns = HG_(singletonWS)( univ_laog, (Word)src );
3323 links->outs = HG_(emptyWS)( univ_laog );
sewardj896f6f92008-08-19 08:38:52 +00003324 VG_(addToFM)( laog, (Word)dst, (Word)links );
sewardjb4112022007-11-09 22:49:28 +00003325 }
3326
3327 tl_assert( (presentF && presentR) || (!presentF && !presentR) );
3328
3329 if (!presentF && src->acquired_at && dst->acquired_at) {
3330 LAOGLinkExposition expo;
3331 /* If this edge is entering the graph, and we have acquired_at
3332 information for both src and dst, record those acquisition
3333 points. Hence, if there is later a violation of this
3334 ordering, we can show the user the two places in which the
3335 required src-dst ordering was previously established. */
barta0b6b2c2008-07-07 06:49:24 +00003336 if (0) VG_(printf)("acquire edge %#lx %#lx\n",
sewardjb4112022007-11-09 22:49:28 +00003337 src->guestaddr, dst->guestaddr);
3338 expo.src_ga = src->guestaddr;
3339 expo.dst_ga = dst->guestaddr;
3340 expo.src_ec = NULL;
3341 expo.dst_ec = NULL;
3342 tl_assert(laog_exposition);
sewardj896f6f92008-08-19 08:38:52 +00003343 if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
sewardjb4112022007-11-09 22:49:28 +00003344 /* we already have it; do nothing */
3345 } else {
sewardjf98e1c02008-10-25 16:22:41 +00003346 LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
3347 sizeof(LAOGLinkExposition));
sewardjb4112022007-11-09 22:49:28 +00003348 expo2->src_ga = src->guestaddr;
3349 expo2->dst_ga = dst->guestaddr;
3350 expo2->src_ec = src->acquired_at;
3351 expo2->dst_ec = dst->acquired_at;
sewardj896f6f92008-08-19 08:38:52 +00003352 VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
sewardjb4112022007-11-09 22:49:28 +00003353 }
3354 }
3355}
3356
3357__attribute__((noinline))
3358static void laog__del_edge ( Lock* src, Lock* dst ) {
3359 Word keyW;
3360 LAOGLinks* links;
3361 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3362 /* Update the out edges for src */
3363 keyW = 0;
3364 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003365 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003366 tl_assert(links);
3367 tl_assert(keyW == (Word)src);
3368 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3369 }
3370 /* Update the in edges for dst */
3371 keyW = 0;
3372 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003373 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003374 tl_assert(links);
3375 tl_assert(keyW == (Word)dst);
3376 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3377 }
3378}
3379
3380__attribute__((noinline))
3381static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3382 Word keyW;
3383 LAOGLinks* links;
3384 keyW = 0;
3385 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003386 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003387 tl_assert(links);
3388 tl_assert(keyW == (Word)lk);
3389 return links->outs;
3390 } else {
3391 return HG_(emptyWS)( univ_laog );
3392 }
3393}
3394
3395__attribute__((noinline))
3396static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3397 Word keyW;
3398 LAOGLinks* links;
3399 keyW = 0;
3400 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003401 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003402 tl_assert(links);
3403 tl_assert(keyW == (Word)lk);
3404 return links->inns;
3405 } else {
3406 return HG_(emptyWS)( univ_laog );
3407 }
3408}
3409
3410__attribute__((noinline))
3411static void laog__sanity_check ( Char* who ) {
3412 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003413 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003414 Lock* me;
3415 LAOGLinks* links;
sewardja65db102009-01-26 10:45:16 +00003416 if (UNLIKELY(!laog || !laog_exposition))
3417 laog__init();
sewardj896f6f92008-08-19 08:38:52 +00003418 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003419 me = NULL;
3420 links = NULL;
3421 if (0) VG_(printf)("laog sanity check\n");
sewardj896f6f92008-08-19 08:38:52 +00003422 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003423 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003424 tl_assert(me);
3425 tl_assert(links);
3426 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3427 for (i = 0; i < ws_size; i++) {
3428 if ( ! HG_(elemWS)( univ_laog,
3429 laog__succs( (Lock*)ws_words[i] ),
3430 (Word)me ))
3431 goto bad;
3432 }
3433 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3434 for (i = 0; i < ws_size; i++) {
3435 if ( ! HG_(elemWS)( univ_laog,
3436 laog__preds( (Lock*)ws_words[i] ),
3437 (Word)me ))
3438 goto bad;
3439 }
3440 me = NULL;
3441 links = NULL;
3442 }
sewardj896f6f92008-08-19 08:38:52 +00003443 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003444 return;
3445
3446 bad:
3447 VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
3448 laog__show(who);
3449 tl_assert(0);
3450}
3451
3452/* If there is a path in laog from 'src' to any of the elements in
3453 'dst', return an arbitrarily chosen element of 'dst' reachable from
3454 'src'. If no path exists from 'src' to any element in 'dst', return
3455 NULL. */
3456__attribute__((noinline))
3457static
3458Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
3459{
3460 Lock* ret;
3461 Word i, ssz;
3462 XArray* stack; /* of Lock* */
3463 WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
3464 Lock* here;
3465 WordSetID succs;
3466 Word succs_size;
sewardj250ec2e2008-02-15 22:02:30 +00003467 UWord* succs_words;
sewardjb4112022007-11-09 22:49:28 +00003468 //laog__sanity_check();
3469
3470 /* If the destination set is empty, we can never get there from
3471 'src' :-), so don't bother to try */
3472 if (HG_(isEmptyWS)( univ_lsets, dsts ))
3473 return NULL;
3474
3475 ret = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00003476 stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
3477 visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );
sewardjb4112022007-11-09 22:49:28 +00003478
3479 (void) VG_(addToXA)( stack, &src );
3480
3481 while (True) {
3482
3483 ssz = VG_(sizeXA)( stack );
3484
3485 if (ssz == 0) { ret = NULL; break; }
3486
3487 here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
3488 VG_(dropTailXA)( stack, 1 );
3489
3490 if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }
3491
sewardj896f6f92008-08-19 08:38:52 +00003492 if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
sewardjb4112022007-11-09 22:49:28 +00003493 continue;
3494
sewardj896f6f92008-08-19 08:38:52 +00003495 VG_(addToFM)( visited, (Word)here, 0 );
sewardjb4112022007-11-09 22:49:28 +00003496
3497 succs = laog__succs( here );
3498 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3499 for (i = 0; i < succs_size; i++)
3500 (void) VG_(addToXA)( stack, &succs_words[i] );
3501 }
3502
sewardj896f6f92008-08-19 08:38:52 +00003503 VG_(deleteFM)( visited, NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003504 VG_(deleteXA)( stack );
3505 return ret;
3506}
3507
3508
3509/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3510 between 'lk' and the locks already held by 'thr' and issue a
3511 complaint if so. Also, update the ordering graph appropriately.
3512*/
3513__attribute__((noinline))
3514static void laog__pre_thread_acquires_lock (
3515 Thread* thr, /* NB: BEFORE lock is added */
3516 Lock* lk
3517 )
3518{
sewardj250ec2e2008-02-15 22:02:30 +00003519 UWord* ls_words;
sewardjb4112022007-11-09 22:49:28 +00003520 Word ls_size, i;
3521 Lock* other;
3522
3523 /* It may be that 'thr' already holds 'lk' and is recursively
3524 relocking it. In this case we just ignore the call. */
3525 /* NB: univ_lsets really is correct here */
3526 if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
3527 return;
3528
sewardja65db102009-01-26 10:45:16 +00003529 if (UNLIKELY(!laog || !laog_exposition))
3530 laog__init();
sewardjb4112022007-11-09 22:49:28 +00003531
3532 /* First, the check. Complain if there is any path in laog from lk
3533 to any of the locks already held by thr, since if any such path
3534 existed, it would mean that previously lk was acquired before
3535 (rather than after, as we are doing here) at least one of those
3536 locks.
3537 */
3538 other = laog__do_dfs_from_to(lk, thr->locksetA);
3539 if (other) {
3540 LAOGLinkExposition key, *found;
3541 /* So we managed to find a path lk --*--> other in the graph,
3542 which implies that 'lk' should have been acquired before
3543 'other' but is in fact being acquired afterwards. We present
3544 the lk/other arguments to record_error_LockOrder in the order
3545 in which they should have been acquired. */
3546 /* Go look in the laog_exposition mapping, to find the allocation
3547 points for this edge, so we can show the user. */
3548 key.src_ga = lk->guestaddr;
3549 key.dst_ga = other->guestaddr;
3550 key.src_ec = NULL;
3551 key.dst_ec = NULL;
3552 found = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003553 if (VG_(lookupFM)( laog_exposition,
sewardjb5f29642007-11-16 12:02:43 +00003554 (Word*)&found, NULL, (Word)&key )) {
sewardjb4112022007-11-09 22:49:28 +00003555 tl_assert(found != &key);
3556 tl_assert(found->src_ga == key.src_ga);
3557 tl_assert(found->dst_ga == key.dst_ga);
3558 tl_assert(found->src_ec);
3559 tl_assert(found->dst_ec);
sewardjf98e1c02008-10-25 16:22:41 +00003560 HG_(record_error_LockOrder)(
3561 thr, lk->guestaddr, other->guestaddr,
3562 found->src_ec, found->dst_ec );
sewardjb4112022007-11-09 22:49:28 +00003563 } else {
3564 /* Hmm. This can't happen (can it?) */
sewardjf98e1c02008-10-25 16:22:41 +00003565 HG_(record_error_LockOrder)(
3566 thr, lk->guestaddr, other->guestaddr,
3567 NULL, NULL );
sewardjb4112022007-11-09 22:49:28 +00003568 }
3569 }
3570
3571 /* Second, add to laog the pairs
3572 (old, lk) | old <- locks already held by thr
3573 Since both old and lk are currently held by thr, their acquired_at
3574 fields must be non-NULL.
3575 */
3576 tl_assert(lk->acquired_at);
3577 HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
3578 for (i = 0; i < ls_size; i++) {
3579 Lock* old = (Lock*)ls_words[i];
3580 tl_assert(old->acquired_at);
3581 laog__add_edge( old, lk );
3582 }
3583
3584 /* Why "except_Locks" ? We're here because a lock is being
3585 acquired by a thread, and we're in an inconsistent state here.
3586 See the call points in evhH__post_thread_{r,w}_acquires_lock.
3587 When called in this inconsistent state, locks__sanity_check duly
3588 barfs. */
sewardjf98e1c02008-10-25 16:22:41 +00003589 if (HG_(clo_sanity_flags) & SCE_LAOG)
sewardjb4112022007-11-09 22:49:28 +00003590 all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
3591}
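/* The classic case the check above catches (illustrative only):

      // T1:              // T2:
      lock(&L1);          lock(&L2);
      lock(&L2);          lock(&L1);   // reported here

   T1's second acquisition records the edge L1 --> L2 in laog.  When
   T2 later acquires L1 while holding L2, the DFS from L1 reaches L2,
   so HG_(record_error_LockOrder) fires, citing the two places where
   the L1-before-L2 ordering was previously established. */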
3592
3593
3594/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3595
3596__attribute__((noinline))
3597static void laog__handle_one_lock_deletion ( Lock* lk )
3598{
3599 WordSetID preds, succs;
3600 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003601 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003602
sewardja65db102009-01-26 10:45:16 +00003603 if (UNLIKELY(!laog || !laog_exposition))
3604 laog__init();
3605
sewardjb4112022007-11-09 22:49:28 +00003606 preds = laog__preds( lk );
3607 succs = laog__succs( lk );
3608
3609 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3610 for (i = 0; i < preds_size; i++)
3611 laog__del_edge( (Lock*)preds_words[i], lk );
3612
3613 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3614 for (j = 0; j < succs_size; j++)
3615 laog__del_edge( lk, (Lock*)succs_words[j] );
3616
3617 for (i = 0; i < preds_size; i++) {
3618 for (j = 0; j < succs_size; j++) {
3619 if (preds_words[i] != succs_words[j]) {
3620 /* This can pass unlocked locks to laog__add_edge, since
3621 we're deleting stuff. So their acquired_at fields may
3622 be NULL. */
3623 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3624 }
3625 }
3626 }
3627}
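/* E.g. if laog holds A --> lk --> C and lk is destroyed, the nested
   loops above reinstate the transitive edge A --> C, so the ordering
   constraint between the surviving locks is not lost along with
   lk. */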
3628
sewardj1cbc12f2008-11-10 16:16:46 +00003629//__attribute__((noinline))
3630//static void laog__handle_lock_deletions (
3631// WordSetID /* in univ_laog */ locksToDelete
3632// )
3633//{
3634// Word i, ws_size;
3635// UWord* ws_words;
3636//
sewardja65db102009-01-26 10:45:16 +00003637// if (UNLIKELY(!laog || !laog_exposition))
3638// laog__init();
sewardj1cbc12f2008-11-10 16:16:46 +00003639//
3640// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3641// for (i = 0; i < ws_size; i++)
3642// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3643//
3644// if (HG_(clo_sanity_flags) & SCE_LAOG)
3645// all__sanity_check("laog__handle_lock_deletions-post");
3646//}
sewardjb4112022007-11-09 22:49:28 +00003647
3648
3649/*--------------------------------------------------------------*/
3650/*--- Malloc/free replacements ---*/
3651/*--------------------------------------------------------------*/
3652
3653typedef
3654 struct {
3655 void* next; /* required by m_hashtable */
3656 Addr payload; /* ptr to actual block */
3657 SizeT szB; /* size requested */
3658 ExeContext* where; /* where it was allocated */
3659 Thread* thr; /* allocating thread */
3660 }
3661 MallocMeta;
3662
3663/* A hash table of MallocMetas, used to track malloc'd blocks
3664 (obviously). */
3665static VgHashTable hg_mallocmeta_table = NULL;
3666
3667
3668static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003669 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003670 tl_assert(md);
3671 return md;
3672}
3673static void delete_MallocMeta ( MallocMeta* md ) {
sewardjf98e1c02008-10-25 16:22:41 +00003674 HG_(free)(md);
sewardjb4112022007-11-09 22:49:28 +00003675}
3676
3677
3678/* Allocate a client block and set up the metadata for it. */
3679
3680static
3681void* handle_alloc ( ThreadId tid,
3682 SizeT szB, SizeT alignB, Bool is_zeroed )
3683{
3684 Addr p;
3685 MallocMeta* md;
3686
3687 tl_assert( ((SSizeT)szB) >= 0 );
3688 p = (Addr)VG_(cli_malloc)(alignB, szB);
3689 if (!p) {
3690 return NULL;
3691 }
3692 if (is_zeroed)
3693 VG_(memset)((void*)p, 0, szB);
3694
3695 /* Note that map_threads_lookup must succeed (it cannot fail), since
3696 memory can only be allocated by currently alive threads, hence
3697 they must have an entry in map_threads. */
3698 md = new_MallocMeta();
3699 md->payload = p;
3700 md->szB = szB;
3701 md->where = VG_(record_ExeContext)( tid, 0 );
3702 md->thr = map_threads_lookup( tid );
3703
3704 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3705
3706 /* Tell the lower level memory wranglers. */
3707 evh__new_mem_heap( p, szB, is_zeroed );
3708
3709 return (void*)p;
3710}
3711
3712/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3713 Cast to a signed type to catch any unexpectedly negative args.
3714 We're assuming here that the size asked for is not greater than
3715 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3716 platforms). */
3717static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3718 if (((SSizeT)n) < 0) return NULL;
3719 return handle_alloc ( tid, n, VG_(clo_alignment),
3720 /*is_zeroed*/False );
3721}
3722static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3723 if (((SSizeT)n) < 0) return NULL;
3724 return handle_alloc ( tid, n, VG_(clo_alignment),
3725 /*is_zeroed*/False );
3726}
3727static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3728 if (((SSizeT)n) < 0) return NULL;
3729 return handle_alloc ( tid, n, VG_(clo_alignment),
3730 /*is_zeroed*/False );
3731}
3732static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3733 if (((SSizeT)n) < 0) return NULL;
3734 return handle_alloc ( tid, n, align,
3735 /*is_zeroed*/False );
3736}
3737static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3738 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3739 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3740 /*is_zeroed*/True );
3741}
3742
3743
3744/* Free a client block, including getting rid of the relevant
3745 metadata. */
3746
3747static void handle_free ( ThreadId tid, void* p )
3748{
3749 MallocMeta *md, *old_md;
3750 SizeT szB;
3751
3752 /* First see if we can find the metadata for 'p'. */
3753 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3754 if (!md)
3755 return; /* apparently freeing a bogus address. Oh well. */
3756
3757 tl_assert(md->payload == (Addr)p);
3758 szB = md->szB;
3759
3760 /* Nuke the metadata block */
3761 old_md = (MallocMeta*)
3762 VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
3763 tl_assert(old_md); /* it must be present - we just found it */
3764 tl_assert(old_md == md);
3765 tl_assert(old_md->payload == (Addr)p);
3766
3767 VG_(cli_free)((void*)old_md->payload);
3768 delete_MallocMeta(old_md);
3769
3770 /* Tell the lower level memory wranglers. */
3771 evh__die_mem_heap( (Addr)p, szB );
3772}
3773
3774static void hg_cli__free ( ThreadId tid, void* p ) {
3775 handle_free(tid, p);
3776}
3777static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
3778 handle_free(tid, p);
3779}
3780static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
3781 handle_free(tid, p);
3782}
3783
3784
3785static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3786{
3787 MallocMeta *md, *md_new, *md_tmp;
3788 SizeT i;
3789
3790 Addr payload = (Addr)payloadV;
3791
3792 if (((SSizeT)new_size) < 0) return NULL;
3793
3794 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3795 if (!md)
3796 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3797
3798 tl_assert(md->payload == payload);
3799
3800 if (md->szB == new_size) {
3801 /* size unchanged */
3802 md->where = VG_(record_ExeContext)(tid, 0);
3803 return payloadV;
3804 }
3805
3806 if (md->szB > new_size) {
3807 /* new size is smaller */
3808 md->szB = new_size;
3809 md->where = VG_(record_ExeContext)(tid, 0);
3810 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3811 return payloadV;
3812 }
3813
3814 /* else */ {
3815 /* new size is bigger */
3816 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3817
3818 /* First half kept and copied, second half new */
3819 // FIXME: shouldn't we use a copier which implements the
3820 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003821 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003822 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003823 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003824 /* FIXME: can anything funny happen here? specifically, if the
3825 old range contained a lock, then die_mem_heap will complain.
3826 Is that the correct behaviour? Not sure. */
3827 evh__die_mem_heap( payload, md->szB );
3828
3829 /* Copy from old to new */
3830 for (i = 0; i < md->szB; i++)
3831 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3832
3833 /* Because the metadata hash table is indexed by payload address,
3834 we have to get rid of the old hash table entry and make a new
3835 one. We can't just modify the existing metadata in place,
3836 because then it would (almost certainly) be in the wrong hash
3837 chain. */
3838 md_new = new_MallocMeta();
3839 *md_new = *md;
3840
3841 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3842 tl_assert(md_tmp);
3843 tl_assert(md_tmp == md);
3844
3845 VG_(cli_free)((void*)md->payload);
3846 delete_MallocMeta(md);
3847
3848 /* Update fields */
3849 md_new->where = VG_(record_ExeContext)( tid, 0 );
3850 md_new->szB = new_size;
3851 md_new->payload = p_new;
3852 md_new->thr = map_threads_lookup( tid );
3853
3854 /* and add */
3855 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3856
3857 return (void*)p_new;
3858 }
3859}
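/* Why the remove-and-re-add dance above (illustrative only): with a
   hypothetical table of 4 chains keyed on payload address, a block
   at 0x1000 hashes to one chain; after realloc moves it to, say,
   0x2030 it belongs in a different chain, so updating md->payload in
   place would leave the node unfindable by future lookups. */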
3860
njn8b140de2009-02-17 04:31:18 +00003861static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3862{
3863 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3864
3865 // There may be slop, but pretend there isn't because only the asked-for
3866 // area will have been shadowed properly.
3867 return ( md ? md->szB : 0 );
3868}
3869
sewardjb4112022007-11-09 22:49:28 +00003870
3871/*--------------------------------------------------------------*/
3872/*--- Instrumentation ---*/
3873/*--------------------------------------------------------------*/
3874
3875static void instrument_mem_access ( IRSB* bbOut,
3876 IRExpr* addr,
3877 Int szB,
3878 Bool isStore,
3879 Int hWordTy_szB )
3880{
3881 IRType tyAddr = Ity_INVALID;
3882 HChar* hName = NULL;
3883 void* hAddr = NULL;
3884 Int regparms = 0;
3885 IRExpr** argv = NULL;
3886 IRDirty* di = NULL;
3887
3888 tl_assert(isIRAtom(addr));
3889 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3890
3891 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3892 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3893
3894 /* So the effective address is in 'addr' now. */
3895 regparms = 1; // unless stated otherwise
3896 if (isStore) {
3897 switch (szB) {
3898 case 1:
sewardj23f12002009-07-24 08:45:08 +00003899 hName = "evh__mem_help_cwrite_1";
3900 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00003901 argv = mkIRExprVec_1( addr );
3902 break;
3903 case 2:
sewardj23f12002009-07-24 08:45:08 +00003904 hName = "evh__mem_help_cwrite_2";
3905 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00003906 argv = mkIRExprVec_1( addr );
3907 break;
3908 case 4:
sewardj23f12002009-07-24 08:45:08 +00003909 hName = "evh__mem_help_cwrite_4";
3910 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00003911 argv = mkIRExprVec_1( addr );
3912 break;
3913 case 8:
sewardj23f12002009-07-24 08:45:08 +00003914 hName = "evh__mem_help_cwrite_8";
3915 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00003916 argv = mkIRExprVec_1( addr );
3917 break;
3918 default:
3919 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3920 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003921 hName = "evh__mem_help_cwrite_N";
3922 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00003923 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3924 break;
3925 }
3926 } else {
3927 switch (szB) {
3928 case 1:
sewardj23f12002009-07-24 08:45:08 +00003929 hName = "evh__mem_help_cread_1";
3930 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00003931 argv = mkIRExprVec_1( addr );
3932 break;
3933 case 2:
sewardj23f12002009-07-24 08:45:08 +00003934 hName = "evh__mem_help_cread_2";
3935 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00003936 argv = mkIRExprVec_1( addr );
3937 break;
3938 case 4:
sewardj23f12002009-07-24 08:45:08 +00003939 hName = "evh__mem_help_cread_4";
3940 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00003941 argv = mkIRExprVec_1( addr );
3942 break;
3943 case 8:
sewardj23f12002009-07-24 08:45:08 +00003944 hName = "evh__mem_help_cread_8";
3945 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00003946 argv = mkIRExprVec_1( addr );
3947 break;
3948 default:
3949 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3950 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003951 hName = "evh__mem_help_cread_N";
3952 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00003953 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
3954 break;
3955 }
3956 }
3957
3958 /* Add the helper. */
3959 tl_assert(hName);
3960 tl_assert(hAddr);
3961 tl_assert(argv);
3962 di = unsafeIRDirty_0_N( regparms,
3963 hName, VG_(fnptr_to_fnentry)( hAddr ),
3964 argv );
3965 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
3966}
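/* Illustrative sketch of the effect: for a 4-byte load whose
   effective address is in IR temp t0, the code above appends a dirty
   helper call equivalent to

      evh__mem_help_cread_4(t0);

   to bbOut, just before the load itself is copied across by the
   caller.  Only the >8-byte cases pass a second, size, argument. */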
3967
3968
sewardja0eee322009-07-31 08:46:35 +00003969/* Figure out if GA is a guest code address in the dynamic linker, and
3970 if so return True. Otherwise (and in case of any doubt) return
3971 False. (Fail-safe: in case of doubt, False is the safe answer.) */
3972static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
3973{
3974 DebugInfo* dinfo;
3975 const UChar* soname;
3976 if (0) return False;
3977
sewardje3f1e592009-07-31 09:41:29 +00003978 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00003979 if (!dinfo) return False;
3980
sewardje3f1e592009-07-31 09:41:29 +00003981 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00003982 tl_assert(soname);
3983 if (0) VG_(printf)("%s\n", soname);
3984
3985# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00003986 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00003987 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
3988 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
3989 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
3990 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
3991# elif defined(VGO_darwin)
3992 if (VG_STREQ(soname, VG_U_DYLD)) return True;
3993# else
3994# error "Unsupported OS"
3995# endif
3996 return False;
3997}
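/* The caller below caches the result per 4K page: e.g. for
   cia = 0x4001234, cia & ~(Addr64)0xFFF == 0x4001000, so only the
   first instruction encountered in each page costs a DebugInfo
   lookup; the rest reuse inLDSO. */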
3998
sewardjb4112022007-11-09 22:49:28 +00003999static
4000IRSB* hg_instrument ( VgCallbackClosure* closure,
4001 IRSB* bbIn,
4002 VexGuestLayout* layout,
4003 VexGuestExtents* vge,
4004 IRType gWordTy, IRType hWordTy )
4005{
sewardj1c0ce7a2009-07-01 08:10:49 +00004006 Int i;
4007 IRSB* bbOut;
4008 Addr64 cia; /* address of current insn */
4009 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004010 Bool inLDSO = False;
4011 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004012
4013 if (gWordTy != hWordTy) {
4014 /* We don't currently support this case. */
4015 VG_(tool_panic)("host/guest word size mismatch");
4016 }
4017
sewardja0eee322009-07-31 08:46:35 +00004018 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4019 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4020 }
4021
sewardjb4112022007-11-09 22:49:28 +00004022 /* Set up BB */
4023 bbOut = emptyIRSB();
4024 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4025 bbOut->next = deepCopyIRExpr(bbIn->next);
4026 bbOut->jumpkind = bbIn->jumpkind;
4027
4028 // Copy verbatim any IR preamble preceding the first IMark
4029 i = 0;
4030 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4031 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4032 i++;
4033 }
4034
sewardj1c0ce7a2009-07-01 08:10:49 +00004035 // Get the first statement, and initial cia from it
4036 tl_assert(bbIn->stmts_used > 0);
4037 tl_assert(i < bbIn->stmts_used);
4038 st = bbIn->stmts[i];
4039 tl_assert(Ist_IMark == st->tag);
4040 cia = st->Ist.IMark.addr;
4041 st = NULL;
4042
sewardjb4112022007-11-09 22:49:28 +00004043 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004044 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004045 tl_assert(st);
4046 tl_assert(isFlatIRStmt(st));
4047 switch (st->tag) {
4048 case Ist_NoOp:
4049 case Ist_AbiHint:
4050 case Ist_Put:
4051 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004052 case Ist_Exit:
4053 /* None of these can contain any memory references. */
4054 break;
4055
sewardj1c0ce7a2009-07-01 08:10:49 +00004056 case Ist_IMark:
4057 /* no mem refs, but note the insn address. */
4058 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004059 /* Don't instrument the dynamic linker. It generates a
4060 lot of races which we just expensively suppress, so
4061 it's pointless.
4062
4063 Avoid flooding is_in_dynamic_linker_shared_object with
4064 requests by only checking at transitions between 4K
4065 pages. */
4066 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4067 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4068 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4069 inLDSO = is_in_dynamic_linker_shared_object(cia);
4070 } else {
4071 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4072 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004073 break;
4074
sewardjb4112022007-11-09 22:49:28 +00004075 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004076 switch (st->Ist.MBE.event) {
4077 case Imbe_Fence:
4078 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004079 default:
4080 goto unhandled;
4081 }
sewardjb4112022007-11-09 22:49:28 +00004082 break;
4083
sewardj1c0ce7a2009-07-01 08:10:49 +00004084 case Ist_CAS: {
4085 /* Atomic read-modify-write cycle. Just pretend it's a
4086 read. */
4087 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004088 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4089 if (isDCAS) {
4090 tl_assert(cas->expdHi);
4091 tl_assert(cas->dataHi);
4092 } else {
4093 tl_assert(!cas->expdHi);
4094 tl_assert(!cas->dataHi);
4095 }
4096 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004097 if (!inLDSO) {
4098 instrument_mem_access(
4099 bbOut,
4100 cas->addr,
4101 (isDCAS ? 2 : 1)
4102 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4103 False/*!isStore*/,
4104 sizeofIRType(hWordTy)
4105 );
4106 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004107 break;
4108 }

         case Ist_LLSC: {
            /* We pretend store-conditionals don't exist, i.e. we
               ignore them, whereas load-linkeds are treated the same
               as normal loads. */
            IRType dataTy;
            if (st->Ist.LLSC.storedata == NULL) {
               /* LL */
               dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     st->Ist.LLSC.addr,
                     sizeofIRType(dataTy),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            } else {
               /* SC */
               /*ignore */
            }
            break;
         }
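
         /* For orientation: on PowerPC an atomic increment is an
            lwarx/stwcx. pair, which arrives here as two Ist_LLSC
            statements, roughly (pseudo-IR sketch, not exact ppIRStmt
            output; temp numbering invented):

               t1 = LL:I32(t0)       // lwarx:  instrumented above as
                                     //         a 4-byte read
               ...
               SC:I32(t0) = t3       // stwcx.: deliberately ignored

            So the LL half carries the race-detection work for the
            whole atomic sequence. */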

         case Ist_Store:
            /* Plain stores.  Store-conditionals never appear here;
               they are handled in the Ist_LLSC case above. */
            if (!inLDSO) {
               instrument_mem_access(
                  bbOut,
                  st->Ist.Store.addr,
                  sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
                  True/*isStore*/,
                  sizeofIRType(hWordTy)
               );
            }
            break;

         case Ist_WrTmp: {
            /* Vanilla loads.  Load-linkeds likewise arrive as
               Ist_LLSC rather than here, although both kinds are
               instrumented identically, as reads. */
            IRExpr* data = st->Ist.WrTmp.data;
            if (data->tag == Iex_Load) {
               if (!inLDSO) {
                  instrument_mem_access(
                     bbOut,
                     data->Iex.Load.addr,
                     sizeofIRType(data->Iex.Load.ty),
                     False/*!isStore*/,
                     sizeofIRType(hWordTy)
                  );
               }
            }
            break;
         }

         case Ist_Dirty: {
            Int      dataSize;
            IRDirty* d = st->Ist.Dirty.details;
            if (d->mFx != Ifx_None) {
               /* This dirty helper accesses memory.  Collect the
                  details. */
               tl_assert(d->mAddr != NULL);
               tl_assert(d->mSize != 0);
               dataSize = d->mSize;
               if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, False/*!isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
               if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
                  if (!inLDSO) {
                     instrument_mem_access(
                        bbOut, d->mAddr, dataSize, True/*isStore*/,
                        sizeofIRType(hWordTy)
                     );
                  }
               }
            } else {
               tl_assert(d->mAddr == NULL);
               tl_assert(d->mSize == 0);
            }
            break;
         }

         default:
         unhandled:
            ppIRStmt(st);
            tl_assert(0);

      } /* switch (st->tag) */

      addStmtToIRSB( bbOut, st );
   } /* iterate over bbIn->stmts */

   return bbOut;
}
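
/* For orientation: after this pass, a 4-byte client load such as
      t1 = LDle:I32(t0)
   in the output superblock is preceded by a dirty helper call that
   reports the access to the race detector, along the lines of
      DIRTY 1:I1 ::: evh__mem_help_read_4(t0)
   (sketch only; the exact helper is selected by instrument_mem_access,
   defined earlier in this file, according to access size and kind). */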


/*----------------------------------------------------------------*/
/*--- Client requests                                          ---*/
/*----------------------------------------------------------------*/

/* Sheesh.  Yet another goddam finite map. */
static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */

static void map_pthread_t_to_Thread_INIT ( void ) {
   if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
      map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
                                            HG_(free), NULL );
      tl_assert(map_pthread_t_to_Thread != NULL);
   }
}


static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;
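
      /* Client-side view, for orientation: this request is normally
         issued via the VALGRIND_HG_CLEAN_MEMORY macro from helgrind.h,
         e.g. by a custom allocator recycling a block (illustrative
         client code; Pool and push_on_freelist are hypothetical):

            #include "helgrind.h"

            void pool_free ( Pool* p, void* block, size_t n ) {
               // forget Helgrind's history for this range before
               // it is handed out again
               VALGRIND_HG_CLEAN_MEMORY(block, n);
               push_on_freelist(p, block);
            }
      */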

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;
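
      /* These two requests are the tool-side handlers for the
         VALGRIND_HG_DISABLE_CHECKING / VALGRIND_HG_ENABLE_CHECKING
         client macros (assuming the helgrind.h names), used to
         silence reports on ranges that are racy by design, e.g.
         (illustrative client code):

            // approximate statistics counter; races are acceptable
            VALGRIND_HG_DISABLE_CHECKING(&stats_hits, sizeof stats_hits);
            ...racy-but-intended updates...
            VALGRIND_HG_ENABLE_CHECKING(&stats_hits, sizeof stats_hits);
      */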

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
            VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
            VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                        (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
            VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                        (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
               VG_(printf)(".................... quitter Thread* = %p\n",
                           thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                             (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                          args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                           args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar*  who = (HChar*)args[1];
         HChar   buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;
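
      /* The SEND_PRE/RECV_POST pair above backs the user-level
         happens-before annotations (ANNOTATE_HAPPENS_BEFORE /
         ANNOTATE_HAPPENS_AFTER in helgrind.h, assuming those names).
         Illustrative client use, for a hand-rolled message queue
         where fill_in/enqueue/dequeue are the client's own code:

            // producer                      // consumer
            fill_in(msg);                    msg = dequeue(q);
            ANNOTATE_HAPPENS_BEFORE(msg);    ANNOTATE_HAPPENS_AFTER(msg);
            enqueue(q, msg);                 use(msg);

         This tells Helgrind that everything the producer did before
         the BEFORE annotation happens-before whatever the consumer
         does after the matching AFTER on the same tag. */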

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}


/*----------------------------------------------------------------*/
/*--- Setup                                                    ---*/
/*----------------------------------------------------------------*/

static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if      VG_BOOL_CLO(arg, "--track-lockorders",
                            HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* Parse a sanity-flag string such as "010000": six '0'/'1'
      characters, mapped to a 6-bit mask with the leftmost character
      as the most significant bit. */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}

static void hg_print_usage ( void )
{
   VG_(printf)(
"    --track-lockorders=no|yes  show lock ordering errors? [yes]\n"
"    --history-level=none|approx|full [full]\n"
"       full:   show both stack traces for a data race (can be very slow)\n"
"       approx: full trace for one thread, approx for the other (faster)\n"
"       none:   only show trace for one thread in a race (fastest)\n"
"    --conflict-cache-size=N   size of 'full' history cache [1000000]\n"
   );
}
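
/* Typical invocations exercising these options (illustrative):

      valgrind --tool=helgrind --history-level=approx ./a.out
      valgrind --tool=helgrind --conflict-cache-size=5000000 ./a.out
*/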

static void hg_print_debug_usage ( void )
{
   VG_(printf)("    --cmp-race-err-addrs=no|yes  are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)("    --hg-sanity-flags=<XXXXXX>   sanity check "
               "at events (X = 0|1) [000000]\n");
   VG_(printf)("    --hg-sanity-flags values:\n");
   VG_(printf)("       010000   after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)("       001000   at memory accesses (NB: not currently used)\n");
   VG_(printf)("       000100   at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)("       000010   at lock/unlock events\n");
   VG_(printf)("       000001   at thread create/join events\n");
}

static void hg_post_clo_init ( void )
{
}

static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }

      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz             stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz             stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("        locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)("      threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("       univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );

      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      libhb_shutdown(True);
   }
}

/* FIXME: move these somewhere sane */

static
void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
{
   Thread*  thr;
   ThreadId tid;
   UWord    nActual;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
                                         NULL, NULL, 0 );
   tl_assert(nActual <= nRequest);
   for (; nActual < nRequest; nActual++)
      frames[nActual] = 0;
}
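
/* E.g. with nRequest == 8 and only 3 frames on the stack, frames[0..2]
   receive the captured program counters and frames[3..7] are zeroed,
   so libhb always gets a fully-initialised, fixed-size buffer. */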

static
ExeContext* for_libhb__get_EC ( Thr* hbt )
{
   Thread*     thr;
   ThreadId    tid;
   ExeContext* ec;
   tl_assert(hbt);
   thr = libhb_get_Thr_opaque( hbt );
   tl_assert(thr);
   tid = map_threads_maybe_reverse_lookup_SLOW(thr);
   /* this will assert if tid is invalid */
   ec = VG_(record_ExeContext)( tid, 0 );
   return ec;
}


static void hg_pre_clo_init ( void )
{
   Thr* hbthr_root;

   VG_(details_name)            ("Helgrind");
   VG_(details_version)         (NULL);
   VG_(details_description)     ("a thread error detector");
   VG_(details_copyright_author)(
      "Copyright (C) 2007-2009, and GNU GPL'd, by OpenWorks LLP et al.");
   VG_(details_bug_reports_to)  (VG_BUGS_TO);
   VG_(details_avg_translation_sizeB) ( 200 );

   VG_(basic_tool_funcs)          (hg_post_clo_init,
                                   hg_instrument,
                                   hg_fini);

   VG_(needs_core_errors)         ();
   VG_(needs_tool_errors)         (HG_(eq_Error),
                                   HG_(before_pp_Error),
                                   HG_(pp_Error),
                                   False,/*show TIDs for errors*/
                                   HG_(update_extra),
                                   HG_(recognised_suppression),
                                   HG_(read_extra_suppression_info),
                                   HG_(error_matches_suppression),
                                   HG_(get_error_name),
                                   HG_(get_extra_suppression_info));

   VG_(needs_xml_output)          ();

   VG_(needs_command_line_options)(hg_process_cmd_line_option,
                                   hg_print_usage,
                                   hg_print_debug_usage);
   VG_(needs_client_requests)     (hg_handle_client_request);

   // FIXME?
   //VG_(needs_sanity_checks)     (hg_cheap_sanity_check,
   //                              hg_expensive_sanity_check);

   VG_(needs_malloc_replacement)  (hg_cli__malloc,
                                   hg_cli____builtin_new,
                                   hg_cli____builtin_vec_new,
                                   hg_cli__memalign,
                                   hg_cli__calloc,
                                   hg_cli__free,
                                   hg_cli____builtin_delete,
                                   hg_cli____builtin_vec_delete,
                                   hg_cli__realloc,
                                   hg_cli_malloc_usable_size,
                                   HG_CLI__MALLOC_REDZONE_SZB );

   /* 21 Dec 08: disabled this; it mostly causes H to start more
      slowly and use significantly more memory, without very often
      providing useful results.  The user can request to load this
      information manually with --read-var-info=yes. */
   if (0) VG_(needs_var_info)(); /* optional */

   VG_(track_new_mem_startup)     ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
   VG_(track_new_mem_brk)         ( evh__new_mem_w_tid );
   VG_(track_new_mem_mmap)        ( evh__new_mem_w_perms );
   VG_(track_new_mem_stack)       ( evh__new_mem );

   // FIXME: surely this isn't thread-aware
   VG_(track_copy_mem_remap)      ( evh__copy_mem );

   VG_(track_change_mem_mprotect) ( evh__set_perms );

   VG_(track_die_mem_stack_signal)( evh__die_mem );
   VG_(track_die_mem_brk)         ( evh__die_mem );
   VG_(track_die_mem_munmap)      ( evh__die_mem );
   VG_(track_die_mem_stack)       ( evh__die_mem );

   // FIXME: what is this for?
   VG_(track_ban_mem_stack)       (NULL);

   VG_(track_pre_mem_read)        ( evh__pre_mem_read );
   VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
   VG_(track_pre_mem_write)       ( evh__pre_mem_write );
   VG_(track_post_mem_write)      (NULL);

   /////////////////

   VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
   VG_(track_pre_thread_ll_exit)  ( evh__pre_thread_ll_exit );

   VG_(track_start_client_code)( evh__start_client_code );
   VG_(track_stop_client_code)( evh__stop_client_code );

   /////////////////////////////////////////////
   hbthr_root = libhb_init( for_libhb__get_stacktrace,
                            for_libhb__get_EC );
   /////////////////////////////////////////////

   initialise_data_structures(hbthr_root);

   /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
      as described in comments at the top of pub_tool_hashtable.h, are
      met.  Blargh. */
   tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
   tl_assert( sizeof(UWord) == sizeof(Addr) );
   hg_mallocmeta_table
      = VG_(HT_construct)( "hg_malloc_metadata_table" );

}

VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/