blob: d2f34b6895617201be84a209b4e468aad2c72f38 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
njn9f207462009-03-10 22:02:09 +000011 Copyright (C) 2007-2009 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
njnf76d27a2009-05-28 01:53:07 +000014 Copyright (C) 2007-2009 Apple, Inc.
15
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardjb4112022007-11-09 22:49:28 +000055
sewardjf98e1c02008-10-25 16:22:41 +000056#include "hg_basics.h"
57#include "hg_wordset.h"
58#include "hg_lock_n_thread.h"
59#include "hg_errors.h"
60
61#include "libhb.h"
62
sewardjb4112022007-11-09 22:49:28 +000063#include "helgrind.h"
64
sewardjf98e1c02008-10-25 16:22:41 +000065
66// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
67
68// FIXME: when client destroys a lock or a CV, remove these
69// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000070
71/*----------------------------------------------------------------*/
72/*--- ---*/
73/*----------------------------------------------------------------*/
74
sewardj11e352f2007-11-30 11:11:02 +000075/* Note this needs to be compiled with -fno-strict-aliasing, since it
76 contains a whole bunch of calls to lookupFM etc which cast between
77 Word and pointer types. gcc rightly complains this breaks ANSI C
78 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
79 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000080*/
sewardjb4112022007-11-09 22:49:28 +000081
sewardjefd3b4d2007-12-02 02:05:23 +000082// FIXME catch sync signals (SEGV, basically) and unlock BHL,
83// if held. Otherwise a LOCK-prefixed insn which segfaults
84// gets Helgrind into a total muddle as the BHL will not be
85// released after the insn.
86
sewardjb4112022007-11-09 22:49:28 +000087// FIXME what is supposed to happen to locks in memory which
88// is relocated as a result of client realloc?
89
sewardjb4112022007-11-09 22:49:28 +000090// FIXME put referencing ThreadId into Thread and get
91// rid of the slow reverse mapping function.
92
93// FIXME accesses to NoAccess areas: change state to Excl?
94
95// FIXME report errors for accesses of NoAccess memory?
96
97// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
98// the thread still holds the lock.
99
100/* ------------ Debug/trace options ------------ */
101
102// this is:
103// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
104// happens_before_wrk: 1000
105// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
106#define SHOW_EXPENSIVE_STUFF 0
107
108// 0 for silent, 1 for some stuff, 2 for lots of stuff
109#define SHOW_EVENTS 0
110
sewardjb4112022007-11-09 22:49:28 +0000111
112static void all__sanity_check ( Char* who ); /* fwds */
113
114#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
115
116// 0 for none, 1 for dump at end of run
117#define SHOW_DATA_STRUCTURES 0
118
119
sewardjb4112022007-11-09 22:49:28 +0000120/* ------------ Misc comments ------------ */
121
122// FIXME: don't hardwire initial entries for root thread.
123// Instead, let the pre_thread_ll_create handler do this.
124
sewardjb4112022007-11-09 22:49:28 +0000125
126/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000127/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000128/*----------------------------------------------------------------*/
129
sewardjb4112022007-11-09 22:49:28 +0000130/* Admin linked list of Threads */
131static Thread* admin_threads = NULL;
132
133/* Admin linked list of Locks */
134static Lock* admin_locks = NULL;
135
sewardjb4112022007-11-09 22:49:28 +0000136/* Mapping table for core ThreadIds to Thread* */
137static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
138
sewardjb4112022007-11-09 22:49:28 +0000139/* Mapping table for lock guest addresses to Lock* */
140static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
141
142/* The word-set universes for thread sets and lock sets. */
143static WordSetU* univ_tsets = NULL; /* sets of Thread* */
144static WordSetU* univ_lsets = NULL; /* sets of Lock* */
145static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
146
147/* never changed; we only care about its address. Is treated as if it
148 was a standard userspace lock. Also we have a Lock* describing it
149 so it can participate in lock sets in the usual way. */
150static Int __bus_lock = 0;
151static Lock* __bus_lock_Lock = NULL;
152
153
154/*----------------------------------------------------------------*/
155/*--- Simple helpers for the data structures ---*/
156/*----------------------------------------------------------------*/
157
158static UWord stats__lockN_acquires = 0;
159static UWord stats__lockN_releases = 0;
160
sewardjf98e1c02008-10-25 16:22:41 +0000161static
162ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000163
164/* --------- Constructors --------- */
165
sewardjf98e1c02008-10-25 16:22:41 +0000166static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000167 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000168 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000169 thread->locksetA = HG_(emptyWS)( univ_lsets );
170 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000171 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000172 thread->hbthr = hbthr;
173 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000174 thread->created_at = NULL;
175 thread->announced = False;
176 thread->errmsg_index = indx++;
177 thread->admin = admin_threads;
178 admin_threads = thread;
179 return thread;
180}
sewardjf98e1c02008-10-25 16:22:41 +0000181
sewardjb4112022007-11-09 22:49:28 +0000182// Make a new lock which is unlocked (hence ownerless)
183static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
184 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000185 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardjb4112022007-11-09 22:49:28 +0000186 lock->admin = admin_locks;
187 lock->unique = unique++;
188 lock->magic = LockN_MAGIC;
189 lock->appeared_at = NULL;
190 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000191 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000192 lock->guestaddr = guestaddr;
193 lock->kind = kind;
194 lock->heldW = False;
195 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000196 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000197 admin_locks = lock;
198 return lock;
199}
sewardjb4112022007-11-09 22:49:28 +0000200
201/* Release storage for a Lock. Also release storage in .heldBy, if
202 any. */
203static void del_LockN ( Lock* lk )
204{
sewardjf98e1c02008-10-25 16:22:41 +0000205 tl_assert(HG_(is_sane_LockN)(lk));
206 tl_assert(lk->hbso);
207 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000208 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000209 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000210 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000211 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000212}
213
214/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
215 it. This is done strictly: only combinations resulting from
216 correct program and libpthread behaviour are allowed. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   /* Update the holder bag.  Note the goto-label case_LK_nonRec:
      both LK_mbRec (first acquisition) and LK_rdwr (always unheld at
      this point) funnel into the plain non-recursive case. */
   switch (lk->kind) {
      case LK_nonRec:
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         /* recursion depth is modelled by the bag's multiplicity */
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
267
/* Update 'lk' to reflect a read-acquisition of it by 'thr'.  Only
   rdwr locks can be r-held; the lock must currently be unheld or
   already r-held (never w-held). */
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   /* Add 'thr' to the holder bag, creating the bag (and marking the
      lock r-held) on the first acquisition. */
   if (lk->heldBy) {
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      lk->heldW  = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
304
305/* Update 'lk' to reflect a release of it by 'thr'. This is done
306 strictly: only combinations resulting from correct program and
307 libpthread behaviour are allowed. */
308
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set.  For recursive locks this drops
      one level of the bag's multiplicity. */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise: an entirely-released lock reverts to the canonical
      unheld state (heldBy == NULL, not w-held, no acquisition site). */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy      = NULL;
      lk->heldW       = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
331
/* Remove 'lk' from the locksetA (and, if w-held, locksetW) of every
   thread currently holding it.  No-op if the lock is unheld. */
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      /* if w-held, it must also appear in the holder's w-lockset */
      if (lk->heldW) {
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
357
sewardjb4112022007-11-09 22:49:28 +0000358
359/*----------------------------------------------------------------*/
360/*--- Print out the primary data structures ---*/
361/*----------------------------------------------------------------*/
362
sewardjd52392d2008-11-08 20:36:26 +0000363//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000364
365#define PP_THREADS (1<<1)
366#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000367#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000368
369
370static const Int sHOW_ADMIN = 0;
371
372static void space ( Int n )
373{
374 Int i;
375 Char spaces[128+1];
376 tl_assert(n >= 0 && n < 128);
377 if (n == 0)
378 return;
379 for (i = 0; i < n; i++)
380 spaces[i] = ' ';
381 spaces[i] = 0;
382 tl_assert(i < 128+1);
383 VG_(printf)("%s", spaces);
384}
385
386static void pp_Thread ( Int d, Thread* t )
387{
388 space(d+0); VG_(printf)("Thread %p {\n", t);
389 if (sHOW_ADMIN) {
390 space(d+3); VG_(printf)("admin %p\n", t->admin);
391 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
392 }
393 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
394 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
sewardjb4112022007-11-09 22:49:28 +0000395 space(d+0); VG_(printf)("}\n");
396}
397
/* Pretty-print the whole admin_threads list at indentation depth 'd'.
   First pass counts the entries, second pass prints them. */
static void pp_admin_threads ( Int d )
{
   Int i, n;
   Thread* t;
   for (n = 0, t = admin_threads; t; n++, t = t->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_threads (%d records) {\n", n);
   for (i = 0, t = admin_threads; t; i++, t = t->admin) {
      if (0) {
         /* NOTE(review): dead debug code; 'space(n)' here looks like it
            was meant to be 'space(d)' — confirm before ever enabling. */
         space(n);
         VG_(printf)("admin_threads record %d of %d:\n", i, n);
      }
      pp_Thread(d+3, t);
   }
   space(d); VG_(printf)("}\n");
}
415
416static void pp_map_threads ( Int d )
417{
njn4c245e52009-03-15 23:25:38 +0000418 Int i, n = 0;
sewardjb4112022007-11-09 22:49:28 +0000419 space(d); VG_(printf)("map_threads ");
sewardjb4112022007-11-09 22:49:28 +0000420 for (i = 0; i < VG_N_THREADS; i++) {
421 if (map_threads[i] != NULL)
422 n++;
423 }
424 VG_(printf)("(%d entries) {\n", n);
425 for (i = 0; i < VG_N_THREADS; i++) {
426 if (map_threads[i] == NULL)
427 continue;
428 space(d+3);
429 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
430 }
431 space(d); VG_(printf)("}\n");
432}
433
434static const HChar* show_LockKind ( LockKind lkk ) {
435 switch (lkk) {
436 case LK_mbRec: return "mbRec";
437 case LK_nonRec: return "nonRec";
438 case LK_rdwr: return "rdwr";
439 default: tl_assert(0);
440 }
441}
442
/* Pretty-print one Lock at indentation depth 'd', including its
   holder bag (thread -> multiplicity) when held. */
static void pp_Lock ( Int d, Lock* lk )
{
   space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
   if (sHOW_ADMIN) {
      space(d+3); VG_(printf)("admin %p\n", lk->admin);
      space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
   }
   space(d+3); VG_(printf)("unique %llu\n", lk->unique);
   space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
   space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
   space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
   if (lk->heldBy) {
      Thread* thr;
      Word count;
      VG_(printf)(" { ");
      /* each bag entry is (holder thread, recursion count) */
      VG_(initIterBag)( lk->heldBy );
      while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
         VG_(printf)("%lu:%p ", count, thr);
      VG_(doneIterBag)( lk->heldBy );
      VG_(printf)("}");
   }
   VG_(printf)("\n");
   space(d+0); VG_(printf)("}\n");
}
467
/* Pretty-print the whole admin_locks list at indentation depth 'd'.
   First pass counts the entries, second pass prints them. */
static void pp_admin_locks ( Int d )
{
   Int i, n;
   Lock* lk;
   for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin) {
      /* nothing */
   }
   space(d); VG_(printf)("admin_locks (%d records) {\n", n);
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin) {
      if (0) {
         /* NOTE(review): dead debug code; 'space(n)' here looks like it
            was meant to be 'space(d)' — confirm before ever enabling. */
         space(n);
         VG_(printf)("admin_locks record %d of %d:\n", i, n);
      }
      pp_Lock(d+3, lk);
   }
   space(d); VG_(printf)("}\n");
}
485
/* Pretty-print map_locks (guest lock address -> Lock*) at
   indentation depth 'd'. */
static void pp_map_locks ( Int d )
{
   void* gla;
   Lock* lk;
   space(d); VG_(printf)("map_locks (%d entries) {\n",
                         (Int)VG_(sizeFM)( map_locks ));
   /* WordFM iteration protocol: init, next until False, done */
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks, (Word*)&gla,
                           (Word*)&lk )) {
      space(d+3);
      VG_(printf)("guest %p -> Lock %p\n", gla, lk);
   }
   VG_(doneIterFM)( map_locks );
   space(d); VG_(printf)("}\n");
}
501
/* Dump the primary data structures selected by 'flags' (a bitmask of
   PP_THREADS / PP_LOCKS; PP_ALL selects everything).  'caller' is an
   identifying string included in the output header. */
static void pp_everything ( Int flags, Char* caller )
{
   Int d = 0;
   VG_(printf)("\n");
   VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
   if (flags & PP_THREADS) {
      VG_(printf)("\n");
      pp_admin_threads(d+3);
      VG_(printf)("\n");
      pp_map_threads(d+3);
   }
   if (flags & PP_LOCKS) {
      VG_(printf)("\n");
      pp_admin_locks(d+3);
      VG_(printf)("\n");
      pp_map_locks(d+3);
   }

   VG_(printf)("\n");
   VG_(printf)("}\n");
   VG_(printf)("\n");
}
524
525#undef SHOW_ADMIN
526
527
528/*----------------------------------------------------------------*/
529/*--- Initialise the primary data structures ---*/
530/*----------------------------------------------------------------*/
531
/* One-time setup of all primary data structures: the thread map, the
   lock map, the three word-set universes, the synthetic bus lock, and
   the Thread record for the root thread (bound to libhb's root Thr).
   Asserts that everything is still in its zeroed/NULL initial state,
   so this must run exactly once, before any events are processed. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   /* Addr and Word must be interchangeable for the FM/WS casts below. */
   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   /* The synthetic bus-hardware lock, modelled as an ordinary
      non-recursive lock and registered in map_locks like any other. */
   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}
595
596
597/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000598/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000599/*----------------------------------------------------------------*/
600
601/* Doesn't assert if the relevant map_threads entry is NULL. */
602static Thread* map_threads_maybe_lookup ( ThreadId coretid )
603{
604 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000605 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000606 thr = map_threads[coretid];
607 return thr;
608}
609
610/* Asserts if the relevant map_threads entry is NULL. */
611static inline Thread* map_threads_lookup ( ThreadId coretid )
612{
613 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000614 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000615 thr = map_threads[coretid];
616 tl_assert(thr);
617 return thr;
618}
619
sewardjf98e1c02008-10-25 16:22:41 +0000620/* Do a reverse lookup. Does not assert if 'thr' is not found in
621 map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   /* The reverse mapping is now cached in thr->coretid, so despite the
      name this is no longer a slow table scan. */
   tid = thr->coretid;
   /* NOTE(review): the header comment (above this function) says this
      does not assert when 'thr' is unmapped, yet a thr with coretid ==
      VG_INVALID_THREADID would appear to fail this sanity assert —
      confirm HG_(is_sane_ThreadId)'s treatment of that value. */
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}
633
634/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
635 is not found in map_threads. */
636static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
637{
638 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
639 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000640 tl_assert(map_threads[tid]);
641 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000642 return tid;
643}
644
645static void map_threads_delete ( ThreadId coretid )
646{
647 Thread* thr;
648 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000649 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000650 thr = map_threads[coretid];
651 tl_assert(thr);
652 map_threads[coretid] = NULL;
653}
654
655
656/*----------------------------------------------------------------*/
657/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
658/*----------------------------------------------------------------*/
659
660/* Make sure there is a lock table entry for the given (lock) guest
661 address. If not, create one of the stated 'kind' in unheld state.
662 In any case, return the address of the existing or new Lock. */
static
Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
{
   Bool found;
   Lock* oldlock = NULL;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   found = VG_(lookupFM)( map_locks,
                          NULL, (Word*)&oldlock, (Word)ga );
   if (!found) {
      /* No Lock shadows 'ga' yet: make a fresh unheld one of kind
         'lkk', record where it first appeared, and register it. */
      Lock* lock = mk_LockN(lkk, ga);
      lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
      tl_assert(HG_(is_sane_LockN)(lock));
      VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
      tl_assert(oldlock == NULL);
      return lock;
   } else {
      /* Already known: sanity-check and return the existing Lock.
         Note 'lkk' is ignored in this case. */
      tl_assert(oldlock != NULL);
      tl_assert(HG_(is_sane_LockN)(oldlock));
      tl_assert(oldlock->guestaddr == ga);
      return oldlock;
   }
}
685
686static Lock* map_locks_maybe_lookup ( Addr ga )
687{
688 Bool found;
689 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000690 found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000691 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000692 return lk;
693}
694
/* Remove the (ga, Lock*) binding for guest address 'ga' from
   map_locks.  Asserts that the binding existed.  Note: this only
   unbinds; the Lock itself is not freed here. */
static void map_locks_delete ( Addr ga )
{
   Addr ga2 = 0;
   Lock* lk = NULL;
   VG_(delFromFM)( map_locks,
                   (Word*)&ga2, (Word*)&lk, (Word)ga );
   /* delFromFM produces the val which is being deleted, if it is
      found. So assert it is non-null; that in effect asserts that we
      are deleting a (ga, Lock) pair which actually exists. */
   tl_assert(lk != NULL);
   tl_assert(ga2 == ga);
}
707
708
sewardjb4112022007-11-09 22:49:28 +0000709
710/*----------------------------------------------------------------*/
711/*--- Sanity checking the data structures ---*/
712/*----------------------------------------------------------------*/
713
714static UWord stats__sanity_checks = 0;
715
sewardjb4112022007-11-09 22:49:28 +0000716static void laog__sanity_check ( Char* who ); /* fwds */
717
718/* REQUIRED INVARIANTS:
719
720 Thread vs Segment/Lock/SecMaps
721
722 for each t in Threads {
723
724 // Thread.lockset: each element is really a valid Lock
725
726 // Thread.lockset: each Lock in set is actually held by that thread
727 for lk in Thread.lockset
728 lk == LockedBy(t)
729
730 // Thread.csegid is a valid SegmentID
731 // and the associated Segment has .thr == t
732
733 }
734
735 all thread Locksets are pairwise empty under intersection
736 (that is, no lock is claimed to be held by more than one thread)
737 -- this is guaranteed if all locks in locksets point back to their
738 owner threads
739
740 Lock vs Thread/Segment/SecMaps
741
742 for each entry (gla, la) in map_locks
743 gla == la->guest_addr
744
745 for each lk in Locks {
746
747 lk->tag is valid
748 lk->guest_addr does not have shadow state NoAccess
749 if lk == LockedBy(t), then t->lockset contains lk
750 if lk == UnlockedBy(segid) then segid is valid SegmentID
751 and can be mapped to a valid Segment(seg)
752 and seg->thr->lockset does not contain lk
753 if lk == UnlockedNew then (no lockset contains lk)
754
755 secmaps for lk has .mbHasLocks == True
756
757 }
758
759 Segment vs Thread/Lock/SecMaps
760
761 the Segment graph is a dag (no cycles)
762 all of the Segment graph must be reachable from the segids
763 mentioned in the Threads
764
765 for seg in Segments {
766
767 seg->thr is a sane Thread
768
769 }
770
771 SecMaps vs Segment/Thread/Lock
772
773 for sm in SecMaps {
774
775 sm properly aligned
776 if any shadow word is ShR or ShM then .mbHasShared == True
777
778 for each Excl(segid) state
779 map_segments_lookup maps to a sane Segment(seg)
780 for each ShM/ShR(tsetid,lsetid) state
781 each lk in lset is a valid Lock
782 each thr in tset is a valid thread, which is non-dead
783
784 }
785*/
786
787
788/* Return True iff 'thr' holds 'lk' in some mode. */
789static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
790{
791 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000792 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000793 else
794 return False;
795}
796
797/* Sanity check Threads, as far as possible */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
/* On any violation, record a short code in 'how' and jump to the
   failure report; 'who' identifies the caller in that report. */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   Word ls_size, i;
   Lock* lk;
   /* walk every Thread ever created, via the admin list */
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
830
831
/* Sanity check Locks, as far as possible.  Verifies that admin_locks
   and map_locks agree, that every Lock is internally sane, and that
   lock holder-bags and per-thread locksets are mutually consistent.
   Any failure prints the caller tag 'who' plus a short invariant code
   and asserts. */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word count;
         // iterate over the (Thread*, hold-count) pairs in the bag
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            // every holder must list this lock in its all-modes lockset
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset: lk is in it iff the lock
            // is currently write-held
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
 bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
896
897
sewardjb4112022007-11-09 22:49:28 +0000898static void all_except_Locks__sanity_check ( Char* who ) {
899 stats__sanity_checks++;
900 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
901 threads__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000902 laog__sanity_check(who);
903}
904static void all__sanity_check ( Char* who ) {
905 all_except_Locks__sanity_check(who);
906 locks__sanity_check(who);
907}
908
909
910/*----------------------------------------------------------------*/
911/*--- the core memory state machine (msm__* functions) ---*/
912/*----------------------------------------------------------------*/
913
sewardjd52392d2008-11-08 20:36:26 +0000914//static WordSetID add_BHL ( WordSetID lockset ) {
915// return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
916//}
917//static WordSetID del_BHL ( WordSetID lockset ) {
918// return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
919//}
sewardjb4112022007-11-09 22:49:28 +0000920
921
sewardjd52392d2008-11-08 20:36:26 +0000922///* Last-lock-lossage records. This mechanism exists to help explain
923// to programmers why we are complaining about a race. The idea is to
924// monitor all lockset transitions. When a previously nonempty
925// lockset becomes empty, the lock(s) that just disappeared (the
926// "lossage") are the locks that have consistently protected the
927// location (ga_of_access) in question for the longest time. Most of
928// the time the lossage-set is a single lock. Because the
// lossage-lock is the one that has survived longest, there is
// a good chance that it is indeed the lock that the programmer
931// intended to use to protect the location.
932//
933// Note that we cannot in general just look at the lossage set when we
934// see a transition to ShM(...,empty-set), because a transition to an
935// empty lockset can happen arbitrarily far before the point where we
936// want to report an error. This is in the case where there are many
937// transitions ShR -> ShR, all with an empty lockset, and only later
938// is there a transition to ShM. So what we want to do is note the
939// lossage lock at the point where a ShR -> ShR transition empties out
940// the lockset, so we can present it later if there should be a
941// transition to ShM.
942//
943// So this function finds such transitions. For each, it associates
944// in ga_to_lastlock, the guest address and the lossage lock. In fact
945// we do not record the Lock* directly as that may disappear later,
946// but instead the ExeContext inside the Lock which says where it was
947// initialised or first locked. ExeContexts are permanent so keeping
948// them indefinitely is safe.
949//
950// A boring detail: the hardware bus lock is not interesting in this
951// respect, so we first remove that from the pre/post locksets.
952//*/
953//
954//static UWord stats__ga_LL_adds = 0;
955//
956//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
957//
958//static
959//void record_last_lock_lossage ( Addr ga_of_access,
960// WordSetID lset_old, WordSetID lset_new )
961//{
962// Lock* lk;
963// Int card_old, card_new;
964//
965// tl_assert(lset_old != lset_new);
966//
967// if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
968// (Int)lset_old,
969// HG_(cardinalityWS)(univ_lsets,lset_old),
970// (Int)lset_new,
971// HG_(cardinalityWS)(univ_lsets,lset_new),
972// ga_of_access );
973//
974// /* This is slow, but at least it's simple. The bus hardware lock
975// just confuses the logic, so remove it from the locksets we're
976// considering before doing anything else. */
977// lset_new = del_BHL( lset_new );
978//
979// if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
980// /* The post-transition lock set is not empty. So we are not
981// interested. We're only interested in spotting transitions
982// that make locksets become empty. */
983// return;
984// }
985//
986// /* lset_new is now empty */
987// card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
988// tl_assert(card_new == 0);
989//
990// lset_old = del_BHL( lset_old );
991// card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
992//
993// if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
994// (Int)lset_old, card_old, (Int)lset_new, card_new );
995//
996// if (card_old == 0) {
997// /* The old lockset was also empty. Not interesting. */
998// return;
999// }
1000//
1001// tl_assert(card_old > 0);
1002// tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
1003//
1004// /* Now we know we've got a transition from a nonempty lockset to an
1005// empty one. So lset_old must be the set of locks lost. Record
1006// some details. If there is more than one element in the lossage
1007// set, just choose one arbitrarily -- not the best, but at least
1008// it's simple. */
1009//
1010// lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
1011// if (0) VG_(printf)("lossage %ld %p\n",
1012// HG_(cardinalityWS)( univ_lsets, lset_old), lk );
1013// if (lk->appeared_at) {
1014// if (ga_to_lastlock == NULL)
1015// ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
1016// VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
1017// stats__ga_LL_adds++;
1018// }
1019//}
1020//
1021///* This queries the table (ga_to_lastlock) made by
1022// record_last_lock_lossage, when constructing error messages. It
1023// attempts to find the ExeContext of the allocation or initialisation
1024// point for the lossage lock associated with 'ga'. */
1025//
1026//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
1027//{
1028// ExeContext* ec_hint = NULL;
1029// if (ga_to_lastlock != NULL
1030// && VG_(lookupFM)(ga_to_lastlock,
1031// NULL, (Word*)&ec_hint, ga)) {
1032// tl_assert(ec_hint != NULL);
1033// return ec_hint;
1034// } else {
1035// return NULL;
1036// }
1037//}
sewardjb4112022007-11-09 22:49:28 +00001038
1039
sewardjb4112022007-11-09 22:49:28 +00001040/*----------------------------------------------------------------*/
1041/*--- Shadow value and address range handlers ---*/
1042/*----------------------------------------------------------------*/
1043
1044static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001045//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001046static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001047__attribute__((noinline))
1048static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001049
sewardjb4112022007-11-09 22:49:28 +00001050
1051/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +00001052/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1053 Is that a problem? (hence 'scopy' rather than 'ccopy') */
1054static void shadow_mem_scopy_range ( Thread* thr,
1055 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +00001056{
1057 Thr* hbthr = thr->hbthr;
1058 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001059 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +00001060}
1061
sewardj23f12002009-07-24 08:45:08 +00001062static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1063{
sewardjf98e1c02008-10-25 16:22:41 +00001064 Thr* hbthr = thr->hbthr;
1065 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001066 LIBHB_CREAD_N(hbthr, a, len);
1067}
1068
1069static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1070 Thr* hbthr = thr->hbthr;
1071 tl_assert(hbthr);
1072 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001073}
1074
1075static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1076{
sewardj23f12002009-07-24 08:45:08 +00001077 libhb_srange_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001078}
1079
sewardjb4112022007-11-09 22:49:28 +00001080static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
1081{
sewardjb4112022007-11-09 22:49:28 +00001082 if (0 && len > 500)
barta0b6b2c2008-07-07 06:49:24 +00001083 VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
sewardj23f12002009-07-24 08:45:08 +00001084 libhb_srange_noaccess( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001085}
1086
1087
1088/*----------------------------------------------------------------*/
1089/*--- Event handlers (evh__* functions) ---*/
1090/*--- plus helpers (evhH__* functions) ---*/
1091/*----------------------------------------------------------------*/
1092
1093/*--------- Event handler helpers (evhH__* functions) ---------*/
1094
1095/* Create a new segment for 'thr', making it depend (.prev) on its
1096 existing segment, bind together the SegmentID and Segment, and
1097 return both of them. Also update 'thr' so it references the new
1098 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001099//zz static
1100//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1101//zz /*OUT*/Segment** new_segP,
1102//zz Thread* thr )
1103//zz {
1104//zz Segment* cur_seg;
1105//zz tl_assert(new_segP);
1106//zz tl_assert(new_segidP);
1107//zz tl_assert(HG_(is_sane_Thread)(thr));
1108//zz cur_seg = map_segments_lookup( thr->csegid );
1109//zz tl_assert(cur_seg);
1110//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1111//zz at their owner thread. */
1112//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1113//zz *new_segidP = alloc_SegmentID();
1114//zz map_segments_add( *new_segidP, *new_segP );
1115//zz thr->csegid = *new_segidP;
1116//zz }
sewardjb4112022007-11-09 22:49:28 +00001117
1118
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks.  Called only AFTER
   the client's lock operation has succeeded, so any invalid state
   transition requested here indicates a bug in the client's
   libpthread, which is reported via HG_(record_error_Misc) rather
   than acted upon. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy.  (A w-held lock has
      exactly one holder, hence the unique-bag size of 1.) */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   /* post-condition: the lock is still sane even on the error paths */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1212
1213
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks.  Mirrors
   evhH__post_thread_w_acquires_lock, but only rwlocks can be
   rd-locked, and the thread's write-lockset is left untouched. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create(
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC; weak, since this is
         only a read acquisition */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   /* post-condition: the lock is still sane even on the error paths */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1285
1286
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks.  Unlike
   the two acquire handlers above, this runs BEFORE libpthread has
   validated the unlock, so invalid requests here are client bugs
   (reported) rather than libpthread bugs. */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word n;
   Bool was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* Complain (but continue) if the lock's actual kind disagrees with
      what the wrapper context claims it should be. */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   /* remember the held-mode now; lockN_release below may change it */
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr ));
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   /* post-condition: the lock is still sane even on the error paths */
   tl_assert(HG_(is_sane_LockN)(lock));
}
1425
1426
sewardj9f569b72008-11-13 13:33:09 +00001427/* ---------------------------------------------------------- */
1428/* -------- Event handlers proper (evh__* functions) -------- */
1429/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001430
1431/* What is the Thread* for the currently running thread? This is
1432 absolutely performance critical. We receive notifications from the
1433 core for client code starts/stops, and cache the looked-up result
1434 in 'current_Thread'. Hence, for the vast majority of requests,
1435 finding the current thread reduces to a read of a global variable,
1436 provided get_current_Thread_in_C_C is inlined.
1437
1438 Outside of client code, current_Thread is NULL, and presumably
1439 any uses of it will cause a segfault. Hence:
1440
1441 - for uses definitely within client code, use
1442 get_current_Thread_in_C_C.
1443
1444 - for all other uses, use get_current_Thread.
1445*/
1446
sewardj23f12002009-07-24 08:45:08 +00001447static Thread *current_Thread = NULL,
1448 *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001449
1450static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1451 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1452 tl_assert(current_Thread == NULL);
1453 current_Thread = map_threads_lookup( tid );
1454 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001455 if (current_Thread != current_Thread_prev) {
1456 libhb_Thr_resumes( current_Thread->hbthr );
1457 current_Thread_prev = current_Thread;
1458 }
sewardjb4112022007-11-09 22:49:28 +00001459}
1460static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1461 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1462 tl_assert(current_Thread != NULL);
1463 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001464 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001465}
1466static inline Thread* get_current_Thread_in_C_C ( void ) {
1467 return current_Thread;
1468}
1469static inline Thread* get_current_Thread ( void ) {
1470 ThreadId coretid;
1471 Thread* thr;
1472 thr = get_current_Thread_in_C_C();
1473 if (LIKELY(thr))
1474 return thr;
1475 /* evidently not in client code. Do it the slow way. */
1476 coretid = VG_(get_running_tid)();
1477 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001478 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001479 of initial memory layout) and VG_(get_running_tid)() returns
1480 VG_INVALID_THREADID at that point. */
1481 if (coretid == VG_INVALID_THREADID)
1482 coretid = 1; /* KLUDGE */
1483 thr = map_threads_lookup( coretid );
1484 return thr;
1485}
1486
1487static
1488void evh__new_mem ( Addr a, SizeT len ) {
1489 if (SHOW_EVENTS >= 2)
1490 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1491 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001492 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001493 all__sanity_check("evh__new_mem-post");
1494}
1495
1496static
sewardj7cf4e6b2008-05-01 20:24:26 +00001497void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1498 if (SHOW_EVENTS >= 2)
1499 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1500 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001501 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001502 all__sanity_check("evh__new_mem_w_tid-post");
1503}
1504
1505static
sewardjb4112022007-11-09 22:49:28 +00001506void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001507 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001508 if (SHOW_EVENTS >= 1)
1509 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1510 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1511 if (rr || ww || xx)
1512 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001513 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001514 all__sanity_check("evh__new_mem_w_perms-post");
1515}
1516
1517static
1518void evh__set_perms ( Addr a, SizeT len,
1519 Bool rr, Bool ww, Bool xx ) {
1520 if (SHOW_EVENTS >= 1)
1521 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1522 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1523 /* Hmm. What should we do here, that actually makes any sense?
1524 Let's say: if neither readable nor writable, then declare it
1525 NoAccess, else leave it alone. */
1526 if (!(rr || ww))
1527 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001528 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001529 all__sanity_check("evh__set_perms-post");
1530}
1531
1532static
1533void evh__die_mem ( Addr a, SizeT len ) {
1534 if (SHOW_EVENTS >= 2)
1535 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1536 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001537 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001538 all__sanity_check("evh__die_mem-post");
1539}
1540
1541static
sewardj23f12002009-07-24 08:45:08 +00001542void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1543 if (SHOW_EVENTS >= 2)
1544 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1545 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1546 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1547 all__sanity_check("evh__copy_mem-post");
1548}
1549
1550static
sewardjb4112022007-11-09 22:49:28 +00001551void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1552{
1553 if (SHOW_EVENTS >= 1)
1554 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1555 (Int)parent, (Int)child );
1556
1557 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001558 Thread* thr_p;
1559 Thread* thr_c;
1560 Thr* hbthr_p;
1561 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001562
sewardjf98e1c02008-10-25 16:22:41 +00001563 tl_assert(HG_(is_sane_ThreadId)(parent));
1564 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001565 tl_assert(parent != child);
1566
1567 thr_p = map_threads_maybe_lookup( parent );
1568 thr_c = map_threads_maybe_lookup( child );
1569
1570 tl_assert(thr_p != NULL);
1571 tl_assert(thr_c == NULL);
1572
sewardjf98e1c02008-10-25 16:22:41 +00001573 hbthr_p = thr_p->hbthr;
1574 tl_assert(hbthr_p != NULL);
1575 tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001576
sewardjf98e1c02008-10-25 16:22:41 +00001577 hbthr_c = libhb_create ( hbthr_p );
1578
1579 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001580 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001581 thr_c = mk_Thread( hbthr_c );
1582 tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
1583 libhb_set_Thr_opaque(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001584
1585 /* and bind it in the thread-map table */
1586 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001587 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1588 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001589
1590 /* Record where the parent is so we can later refer to this in
1591 error messages.
1592
1593 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1594 The stack snapshot is taken immediately after the parent has
1595 returned from its sys_clone call. Unfortunately there is no
1596 unwind info for the insn following "syscall" - reading the
1597 glibc sources confirms this. So we ask for a snapshot to be
1598 taken as if RIP was 3 bytes earlier, in a place where there
1599 is unwind info. Sigh.
1600 */
1601 { Word first_ip_delta = 0;
1602# if defined(VGP_amd64_linux)
1603 first_ip_delta = -3;
1604# endif
1605 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1606 }
sewardjb4112022007-11-09 22:49:28 +00001607 }
1608
sewardjf98e1c02008-10-25 16:22:41 +00001609 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001610 all__sanity_check("evh__pre_thread_create-post");
1611}
1612
static
void evh__pre_thread_ll_exit ( ThreadId quit_tid )
{
   /* Low-level thread exit notification for 'quit_tid'.  Reports any
      locks still held, tells libhb the thread is gone, and frees the
      map_threads slot so the core can reuse the ThreadId. */
   Int     nHeld;
   Thread* thr_q;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
                  (Int)quit_tid );

   /* quit_tid has disappeared without joining to any other thread.
      Therefore there is no synchronisation event associated with its
      exit and so we have to pretty much treat it as if it was still
      alive but mysteriously making no progress.  That is because, if
      we don't know when it really exited, then we can never say there
      is a point in time when we're sure the thread really has
      finished, and so we need to consider the possibility that it
      lingers indefinitely and continues to interact with other
      threads. */
   /* However, it might have rendezvous'd with a thread that called
      pthread_join with this one as arg, prior to this point (that's
      how NPTL works).  In which case there has already been a prior
      sync event.  So in any case, just let the thread exit.  On NPTL,
      all thread exits go through here. */
   tl_assert(HG_(is_sane_ThreadId)(quit_tid));
   thr_q = map_threads_maybe_lookup( quit_tid );
   tl_assert(thr_q != NULL);

   /* Complain if this thread holds any locks. */
   nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
   tl_assert(nHeld >= 0);
   if (nHeld > 0) {
      HChar buf[80];
      /* Message is bounded: "%d" plus fixed text fits well within 80. */
      VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
                   nHeld, nHeld > 1 ? "s" : "");
      HG_(record_error_Misc)( thr_q, buf );
   }

   /* Not much to do here:
      - tell libhb the thread is gone
      - clear the map_threads entry, in order that the Valgrind core
        can re-use it. */
   tl_assert(thr_q->hbthr);
   libhb_async_exit(thr_q->hbthr);
   tl_assert(thr_q->coretid == quit_tid);
   /* Sever the Thread <-> ThreadId binding; the Thread record itself
      is kept (see the lingering-thread comment above). */
   thr_q->coretid = VG_INVALID_THREADID;
   map_threads_delete( quit_tid );

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__pre_thread_ll_exit-post");
}
1663
sewardjf98e1c02008-10-25 16:22:41 +00001664
static
void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
{
   /* 'stay_tid' has successfully joined with 'quit_thr'.  Create a
      happens-before edge from the quitter's final state to the
      stayer, via a temporary synchronisation object. */
   Thread*  thr_s;
   Thread*  thr_q;
   Thr*     hbthr_s;
   Thr*     hbthr_q;
   SO*      so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
                  (Int)stay_tid, quit_thr );

   tl_assert(HG_(is_sane_ThreadId)(stay_tid));

   thr_s = map_threads_maybe_lookup( stay_tid );
   thr_q = quit_thr;
   tl_assert(thr_s != NULL);
   tl_assert(thr_q != NULL);
   tl_assert(thr_s != thr_q);

   hbthr_s = thr_s->hbthr;
   hbthr_q = thr_q->hbthr;
   tl_assert(hbthr_s != hbthr_q);
   tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
   tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );

   /* Allocate a temporary synchronisation object and use it to send
      an imaginary message from the quitter to the stayer, the purpose
      being to generate a dependence from the quitter to the
      stayer. */
   so = libhb_so_alloc();
   tl_assert(so);
   /* NOTE(review): an earlier comment here said the last arg of
      _so_send should be False (so _so_send doesn't take stack
      snapshots of the now-dead sending thread), but the code passes
      True/*strong_send*/ -- confirm which is intended. */
   libhb_so_send(hbthr_q, so, True/*strong_send*/);
   libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
   libhb_so_dealloc(so);

   /* evh__pre_thread_ll_exit issues an error message if the exiting
      thread holds any locks.  No need to check here. */

   /* This holds because, at least when using NPTL as the thread
      library, we should be notified the low level thread exit before
      we hear of any join event on it.  The low level exit
      notification feeds through into evh__pre_thread_ll_exit,
      which should clear the map_threads entry for it.  Hence we
      expect there to be no map_threads entry at this point. */
   tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
              == VG_INVALID_THREADID);

   if (HG_(clo_sanity_flags) & SCE_THREADS)
      all__sanity_check("evh__post_thread_join-post");
}
1720
1721static
1722void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1723 Addr a, SizeT size) {
1724 if (SHOW_EVENTS >= 2
1725 || (SHOW_EVENTS >= 1 && size != 1))
1726 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1727 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001728 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001729 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001730 all__sanity_check("evh__pre_mem_read-post");
1731}
1732
1733static
1734void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1735 Char* s, Addr a ) {
1736 Int len;
1737 if (SHOW_EVENTS >= 1)
1738 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1739 (Int)tid, s, (void*)a );
1740 // FIXME: think of a less ugly hack
1741 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001742 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001743 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001744 all__sanity_check("evh__pre_mem_read_asciiz-post");
1745}
1746
1747static
1748void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1749 Addr a, SizeT size ) {
1750 if (SHOW_EVENTS >= 1)
1751 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1752 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001753 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001754 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001755 all__sanity_check("evh__pre_mem_write-post");
1756}
1757
1758static
1759void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1760 if (SHOW_EVENTS >= 1)
1761 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1762 (void*)a, len, (Int)is_inited );
1763 // FIXME: this is kinda stupid
1764 if (is_inited) {
1765 shadow_mem_make_New(get_current_Thread(), a, len);
1766 } else {
1767 shadow_mem_make_New(get_current_Thread(), a, len);
1768 }
sewardjf98e1c02008-10-25 16:22:41 +00001769 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001770 all__sanity_check("evh__pre_mem_read-post");
1771}
1772
1773static
1774void evh__die_mem_heap ( Addr a, SizeT len ) {
1775 if (SHOW_EVENTS >= 1)
1776 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1777 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001778 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001779 all__sanity_check("evh__pre_mem_read-post");
1780}
1781
sewardj23f12002009-07-24 08:45:08 +00001782/* --- Event handlers called from generated code --- */
1783
sewardjb4112022007-11-09 22:49:28 +00001784static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001785void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001786 Thread* thr = get_current_Thread_in_C_C();
1787 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001788 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001789}
sewardjf98e1c02008-10-25 16:22:41 +00001790
sewardjb4112022007-11-09 22:49:28 +00001791static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001792void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001793 Thread* thr = get_current_Thread_in_C_C();
1794 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001795 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001796}
sewardjf98e1c02008-10-25 16:22:41 +00001797
sewardjb4112022007-11-09 22:49:28 +00001798static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001799void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001800 Thread* thr = get_current_Thread_in_C_C();
1801 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001802 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001803}
sewardjf98e1c02008-10-25 16:22:41 +00001804
sewardjb4112022007-11-09 22:49:28 +00001805static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001806void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001807 Thread* thr = get_current_Thread_in_C_C();
1808 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001809 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001810}
sewardjf98e1c02008-10-25 16:22:41 +00001811
sewardjb4112022007-11-09 22:49:28 +00001812static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001813void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001814 Thread* thr = get_current_Thread_in_C_C();
1815 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001816 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001817}
1818
1819static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001820void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001821 Thread* thr = get_current_Thread_in_C_C();
1822 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001823 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001824}
sewardjf98e1c02008-10-25 16:22:41 +00001825
sewardjb4112022007-11-09 22:49:28 +00001826static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001827void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001828 Thread* thr = get_current_Thread_in_C_C();
1829 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001830 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001831}
sewardjf98e1c02008-10-25 16:22:41 +00001832
sewardjb4112022007-11-09 22:49:28 +00001833static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001834void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001835 Thread* thr = get_current_Thread_in_C_C();
1836 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001837 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001838}
sewardjf98e1c02008-10-25 16:22:41 +00001839
sewardjb4112022007-11-09 22:49:28 +00001840static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001841void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001842 Thread* thr = get_current_Thread_in_C_C();
1843 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001844 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001845}
sewardjf98e1c02008-10-25 16:22:41 +00001846
sewardjb4112022007-11-09 22:49:28 +00001847static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001848void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001849 Thread* thr = get_current_Thread_in_C_C();
1850 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001851 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001852}
1853
sewardjb4112022007-11-09 22:49:28 +00001854
sewardj9f569b72008-11-13 13:33:09 +00001855/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001856/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001857/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001858
1859/* EXPOSITION only: by intercepting lock init events we can show the
1860 user where the lock was initialised, rather than only being able to
1861 show where it was first locked. Intercepting lock initialisations
1862 is not necessary for the basic operation of the race checker. */
1863static
1864void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1865 void* mutex, Word mbRec )
1866{
1867 if (SHOW_EVENTS >= 1)
1868 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1869 (Int)tid, mbRec, (void*)mutex );
1870 tl_assert(mbRec == 0 || mbRec == 1);
1871 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1872 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001873 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001874 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1875}
1876
static
void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
{
   /* 'tid' is about to destroy 'mutex'.  Complain about destroying
      an unknown/wrong-kind lock or a still-locked one, then forcibly
      release it and remove all record of it. */
   Thread* thr;
   Lock* lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Unknown address, or known but not a mutex kind: bogus arg. */
   if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
      HG_(record_error_Misc)(
         thr, "pthread_mutex_destroy with invalid argument" );
   }

   /* Even if the kind was wrong, clean up whatever we do know. */
   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)mutex );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_mutex_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* Drop it from the lock-order graph before deleting, so laog
         doesn't retain a dangling reference. */
      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
}
1922
static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
                                             void* mutex, Word isTryLock )
{
   /* About to lock 'mutex' (isTryLock == 1 for trylock variants).
      Only sanity/error reporting happens here; lockset updates are
      done in the _POST handler once the lock really succeeds. */
   /* Just check the mutex is sane; nothing else to do. */
   // 'mutex' may be invalid - not checked by wrapper
   Thread* thr;
   Lock* lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
                  (Int)tid, (void*)mutex );

   tl_assert(isTryLock == 0 || isTryLock == 1);
   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Locking an rwlock through the mutex API is a type confusion. */
   if (lk && (lk->kind == LK_rdwr)) {
      HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if ( lk
        && isTryLock == 0
        && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
        && lk->heldBy
        && lk->heldW
        && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
      /* uh, it's a non-recursive lock and we already w-hold it, and
         this is a real lock operation (not a speculative "tryLock"
         kind of thing).  Duh.  Deadlock coming up; but at least
         produce an error message. */
      HG_(record_error_Misc)( thr, "Attempt to re-lock a "
                                   "non-recursive lock I already hold" );
   }
}
1959
1960static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1961{
1962 // only called if the real library call succeeded - so mutex is sane
1963 Thread* thr;
1964 if (SHOW_EVENTS >= 1)
1965 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
1966 (Int)tid, (void*)mutex );
1967
1968 thr = map_threads_maybe_lookup( tid );
1969 tl_assert(thr); /* cannot fail - Thread* must already exist */
1970
1971 evhH__post_thread_w_acquires_lock(
1972 thr,
1973 LK_mbRec, /* if not known, create new lock with this LockKind */
1974 (Addr)mutex
1975 );
1976}
1977
1978static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
1979{
1980 // 'mutex' may be invalid - not checked by wrapper
1981 Thread* thr;
1982 if (SHOW_EVENTS >= 1)
1983 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
1984 (Int)tid, (void*)mutex );
1985
1986 thr = map_threads_maybe_lookup( tid );
1987 tl_assert(thr); /* cannot fail - Thread* must already exist */
1988
1989 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
1990}
1991
1992static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
1993{
1994 // only called if the real library call succeeded - so mutex is sane
1995 Thread* thr;
1996 if (SHOW_EVENTS >= 1)
1997 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
1998 (Int)tid, (void*)mutex );
1999 thr = map_threads_maybe_lookup( tid );
2000 tl_assert(thr); /* cannot fail - Thread* must already exist */
2001
2002 // anything we should do here?
2003}
2004
2005
sewardj5a644da2009-08-11 10:35:58 +00002006/* ------------------------------------------------------- */
/* ------------ events to do with spin locks ------------- */
2008/* ------------------------------------------------------- */
2009
2010/* All a bit of a kludge. Pretend we're really dealing with ordinary
2011 pthread_mutex_t's instead, for the most part. */
2012
static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
                                                     void* slock )
{
   /* glibc routes pthread_spin_init and pthread_spin_unlock through
      the same entry point, so this handler covers both. */
   Thread* thr;
   Lock* lk;
   /* In glibc's kludgey world, we're either initialising or unlocking
      it.  Since this is the pre-routine, if it is locked, unlock it
      and take a dependence edge.  Otherwise, do nothing. */

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
                  "(ctid=%d, slock=%p)\n",
                  (Int)tid, (void*)slock );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */;
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)slock );
   if (lk && lk->heldBy) {
      /* it's held.  So do the normal pre-unlock actions, as copied
         from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE.  This stupidly
         duplicates the map_locks_maybe_lookup. */
      evhH__pre_thread_releases_lock( thr, (Addr)slock,
                                           False/*!isRDWR*/ );
   }
}
2040
2041static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2042 void* slock )
2043{
2044 Lock* lk;
2045 /* More kludgery. If the lock has never been seen before, do
2046 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2047 nothing. */
2048
2049 if (SHOW_EVENTS >= 1)
2050 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2051 "(ctid=%d, slock=%p)\n",
2052 (Int)tid, (void*)slock );
2053
2054 lk = map_locks_maybe_lookup( (Addr)slock );
2055 if (!lk) {
2056 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2057 }
2058}
2059
/* Spin-lock lock is modelled exactly like a mutex lock. */
static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
                                           void* slock, Word isTryLock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
}
2065
/* Spin-lock lock-success is modelled exactly like a mutex one. */
static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
                                            void* slock )
{
   evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
}
2071
/* Spin-lock destruction is modelled exactly like mutex destruction. */
static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
                                              void* slock )
{
   evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
}
2077
2078
sewardj9f569b72008-11-13 13:33:09 +00002079/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002080/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002081/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002082
/* A mapping from CV to (the SO associated with it, plus some
   auxiliary data for error checking).  When the CV is
   signalled/broadcasted upon, we do a 'send' into the SO, and when a
   wait on it completes, we do a 'recv' from the SO.  This is believed
   to give the correct happens-before events arising from CV
   signallings/broadcasts.
*/

/* .so is the SO for this CV.
   .mx_ga is the associated mutex, when .nWaiters > 0

   POSIX says effectively that the first pthread_cond_{timed}wait call
   causes a dynamic binding between the CV and the mutex, and that
   lasts until such time as the waiter count falls to zero.  Hence
   need to keep track of the number of waiters in order to do
   consistency tracking. */
typedef
   struct {
      SO*   so;       /* libhb-allocated SO */
      void* mx_ga;    /* addr of associated mutex, if any */
      UWord nWaiters; /* # threads waiting on the CV */
   }
   CVInfo;
2106
2107
2108/* pthread_cond_t* -> CVInfo* */
2109static WordFM* map_cond_to_CVInfo = NULL;
2110
2111static void map_cond_to_CVInfo_INIT ( void ) {
2112 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2113 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2114 "hg.mctCI.1", HG_(free), NULL );
2115 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002116 }
2117}
2118
sewardj02114542009-07-28 20:52:36 +00002119static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002120 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002121 map_cond_to_CVInfo_INIT();
2122 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002123 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002124 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002125 } else {
sewardj02114542009-07-28 20:52:36 +00002126 SO* so = libhb_so_alloc();
2127 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2128 cvi->so = so;
2129 cvi->mx_ga = 0;
2130 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2131 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002132 }
2133}
2134
sewardj02114542009-07-28 20:52:36 +00002135static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002136 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002137 map_cond_to_CVInfo_INIT();
2138 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2139 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002140 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002141 tl_assert(cvi);
2142 tl_assert(cvi->so);
2143 libhb_so_dealloc(cvi->so);
2144 cvi->mx_ga = 0;
2145 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002146 }
2147}
2148
static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
{
   /* 'tid' has signalled on 'cond'.  As per the comment above, bind
      cond to a SO if it is not already so bound, and 'send' on the
      SO.  This is later used by other thread(s) which successfully
      exit from a pthread_cond_wait on the same cv; then they 'recv'
      from the SO, thereby acquiring a dependency on this signalling
      event. */
   Thread* thr;
   CVInfo* cvi;
   //Lock* lk;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
                  (Int)tid, (void*)cond );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
   tl_assert(cvi);
   tl_assert(cvi->so);

   // error-if: mutex is bogus
   // error-if: mutex is not locked
   // Hmm.  POSIX doesn't actually say that it's an error to call
   // pthread_cond_signal with the associated mutex being unlocked.
   // Although it does say that it should be "if consistent scheduling
   // is desired."
   //
   // For the moment, disable these checks.
   //lk = map_locks_maybe_lookup(cvi->mx_ga);
   //if (lk == NULL || cvi->mx_ga == 0) {
   //   HG_(record_error_Misc)( thr,
   //      "pthread_cond_{signal,broadcast}: "
   //      "no or invalid mutex associated with cond");
   //}
   ///* note: lk could be NULL.  Be careful. */
   //if (lk) {
   //   if (lk->kind == LK_rdwr) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
   //   }
   //   if (lk->heldBy == NULL) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by any thread");
   //   }
   //   if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
   //      HG_(record_error_Misc)(thr,
   //         "pthread_cond_{signal,broadcast}: "
   //         "associated lock is not held by calling thread");
   //   }
   //}

   /* Strong send: future waiters acquire a dependency on everything
      this thread has done up to the signal. */
   libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
}
2206
2207/* returns True if it reckons 'mutex' is valid and held by this
2208 thread, else False */
static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
                                            void* cond, void* mutex )
{
   /* pthread_cond_{timed}wait(cond, mutex) is about to be called.
      Sanity-check 'mutex' (it should be a known, non-rwlock lock held
      by this thread), then establish or verify the (CV,MX) binding
      for 'cond' and count this thread as a waiter. */
   Thread* thr;
   Lock*   lk;
   Bool    lk_valid = True;
   CVInfo* cvi;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
                  "(ctid=%d, cond=%p, mutex=%p)\n", 
                  (Int)tid, (void*)cond, (void*)mutex );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   lk = map_locks_maybe_lookup( (Addr)mutex );

   /* Check for stupid mutex arguments.  There are various ways to be
      a bozo.  Only complain once, though, even if more than one thing
      is wrong. */
   if (lk == NULL) {
      /* mutex address is completely unknown to us */
      lk_valid = False;
      HG_(record_error_Misc)( 
         thr, 
         "pthread_cond_{timed}wait called with invalid mutex" );
   } else {
      tl_assert( HG_(is_sane_LockN)(lk) );
      if (lk->kind == LK_rdwr) {
         /* condvars cannot legitimately be used with rwlocks */
         lk_valid = False;
         HG_(record_error_Misc)( 
            thr, "pthread_cond_{timed}wait called with mutex "
                 "of type pthread_rwlock_t*" );
      } else
      if (lk->heldBy == NULL) {
         /* mutex is known but currently held by nobody */
         lk_valid = False;
         HG_(record_error_Misc)( 
            thr, "pthread_cond_{timed}wait called with un-held mutex");
      } else
      if (lk->heldBy != NULL
          && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
         /* mutex is held, but not by the waiting thread */
         lk_valid = False;
         HG_(record_error_Misc)( 
            thr, "pthread_cond_{timed}wait called with mutex "
                 "held by a different thread" );
      }
   }

   // error-if: cond is also associated with a different mutex
   /* nWaiters == 0 means nobody is currently waiting on this CV, so
      we are free to (re)bind it to 'mutex'; otherwise the existing
      binding must match. */
   cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
   tl_assert(cvi);
   tl_assert(cvi->so);
   if (cvi->nWaiters == 0) {
      /* form initial (CV,MX) binding */
      cvi->mx_ga = mutex;
   }
   else /* check existing (CV,MX) binding */
   if (cvi->mx_ga != mutex) {
      HG_(record_error_Misc)(
         thr, "pthread_cond_{timed}wait: cond is associated "
              "with a different mutex");
   }
   cvi->nWaiters++;

   return lk_valid;
}
2275
2276static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2277 void* cond, void* mutex )
2278{
sewardjf98e1c02008-10-25 16:22:41 +00002279 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2280 the SO for this cond, and 'recv' from it so as to acquire a
2281 dependency edge back to the signaller/broadcaster. */
2282 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002283 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002284
2285 if (SHOW_EVENTS >= 1)
2286 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2287 "(ctid=%d, cond=%p, mutex=%p)\n",
2288 (Int)tid, (void*)cond, (void*)mutex );
2289
sewardjb4112022007-11-09 22:49:28 +00002290 thr = map_threads_maybe_lookup( tid );
2291 tl_assert(thr); /* cannot fail - Thread* must already exist */
2292
2293 // error-if: cond is also associated with a different mutex
2294
sewardj02114542009-07-28 20:52:36 +00002295 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2296 tl_assert(cvi);
2297 tl_assert(cvi->so);
2298 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002299
sewardj02114542009-07-28 20:52:36 +00002300 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002301 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2302 it? If this happened it would surely be a bug in the threads
2303 library. Or one of those fabled "spurious wakeups". */
2304 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2305 "succeeded on"
2306 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002307 }
sewardjf98e1c02008-10-25 16:22:41 +00002308
2309 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002310 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2311
2312 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002313}
2314
2315static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2316 void* cond )
2317{
2318 /* Deal with destroy events. The only purpose is to free storage
2319 associated with the CV, so as to avoid any possible resource
2320 leaks. */
2321 if (SHOW_EVENTS >= 1)
2322 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2323 "(ctid=%d, cond=%p)\n",
2324 (Int)tid, (void*)cond );
2325
sewardj02114542009-07-28 20:52:36 +00002326 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002327}
2328
2329
sewardj9f569b72008-11-13 13:33:09 +00002330/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002331/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002332/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002333
2334/* EXPOSITION only */
2335static
2336void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2337{
2338 if (SHOW_EVENTS >= 1)
2339 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2340 (Int)tid, (void*)rwl );
2341 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002342 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002343 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2344}
2345
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   /* A rwlock is about to be destroyed.  Complain if the address is
      unknown or not actually a rwlock, and if it is still held; then
      tear down all shadow state for it (held-by bag, lock-order
      graph entry, address mapping, and the Lock itself). */
   Thread* thr;
   Lock*   lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n", 
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   if (lk == NULL || lk->kind != LK_rdwr) {
      /* unknown address, or the shadow lock is of mutex kind */
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* drop from lock-order graph first, then from the address map,
         and finally free the Lock itself */
      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2391
2392static
sewardj789c3c52008-02-25 12:10:07 +00002393void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2394 void* rwl,
2395 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002396{
2397 /* Just check the rwl is sane; nothing else to do. */
2398 // 'rwl' may be invalid - not checked by wrapper
2399 Thread* thr;
2400 Lock* lk;
2401 if (SHOW_EVENTS >= 1)
2402 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2403 (Int)tid, (Int)isW, (void*)rwl );
2404
2405 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002406 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002407 thr = map_threads_maybe_lookup( tid );
2408 tl_assert(thr); /* cannot fail - Thread* must already exist */
2409
2410 lk = map_locks_maybe_lookup( (Addr)rwl );
2411 if ( lk
2412 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2413 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002414 HG_(record_error_Misc)(
2415 thr, "pthread_rwlock_{rd,rw}lock with a "
2416 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002417 }
2418}
2419
2420static
2421void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2422{
2423 // only called if the real library call succeeded - so mutex is sane
2424 Thread* thr;
2425 if (SHOW_EVENTS >= 1)
2426 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2427 (Int)tid, (Int)isW, (void*)rwl );
2428
2429 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2430 thr = map_threads_maybe_lookup( tid );
2431 tl_assert(thr); /* cannot fail - Thread* must already exist */
2432
2433 (isW ? evhH__post_thread_w_acquires_lock
2434 : evhH__post_thread_r_acquires_lock)(
2435 thr,
2436 LK_rdwr, /* if not known, create new lock with this LockKind */
2437 (Addr)rwl
2438 );
2439}
2440
2441static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2442{
2443 // 'rwl' may be invalid - not checked by wrapper
2444 Thread* thr;
2445 if (SHOW_EVENTS >= 1)
2446 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2447 (Int)tid, (void*)rwl );
2448
2449 thr = map_threads_maybe_lookup( tid );
2450 tl_assert(thr); /* cannot fail - Thread* must already exist */
2451
2452 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2453}
2454
2455static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2456{
2457 // only called if the real library call succeeded - so mutex is sane
2458 Thread* thr;
2459 if (SHOW_EVENTS >= 1)
2460 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2461 (Int)tid, (void*)rwl );
2462 thr = map_threads_maybe_lookup( tid );
2463 tl_assert(thr); /* cannot fail - Thread* must already exist */
2464
2465 // anything we should do here?
2466}
2467
2468
sewardj9f569b72008-11-13 13:33:09 +00002469/* ---------------------------------------------------------- */
2470/* -------------- events to do with semaphores -------------- */
2471/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002472
sewardj11e352f2007-11-30 11:11:02 +00002473/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002474 variables. */
2475
sewardjf98e1c02008-10-25 16:22:41 +00002476/* For each semaphore, we maintain a stack of SOs. When a 'post'
2477 operation is done on a semaphore (unlocking, essentially), a new SO
2478 is created for the posting thread, the posting thread does a strong
2479 send to it (which merely installs the posting thread's VC in the
2480 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002481
2482 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002483 semaphore, we pop a SO off the semaphore's stack (which should be
2484 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002485 dependencies between posters and waiters of the semaphore.
2486
sewardjf98e1c02008-10-25 16:22:41 +00002487 It may not be necessary to use a stack - perhaps a bag of SOs would
2488 do. But we do need to keep track of how many unused-up posts have
2489 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002490
sewardjf98e1c02008-10-25 16:22:41 +00002491 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002492 twice on S. T3 cannot complete its waits without both T1 and T2
2493 posting. The above mechanism will ensure that T3 acquires
2494 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002495
sewardjf98e1c02008-10-25 16:22:41 +00002496 When a semaphore is initialised with value N, we do as if we'd
2497 posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
2499 semaphore to acquire a dependency on the initialisation point,
2500 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002501
2502 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2503 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002504*/
2505
sewardjf98e1c02008-10-25 16:22:41 +00002506/* sem_t* -> XArray* SO* */
2507static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002508
sewardjf98e1c02008-10-25 16:22:41 +00002509static void map_sem_to_SO_stack_INIT ( void ) {
2510 if (map_sem_to_SO_stack == NULL) {
2511 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2512 HG_(free), NULL );
2513 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002514 }
2515}
2516
sewardjf98e1c02008-10-25 16:22:41 +00002517static void push_SO_for_sem ( void* sem, SO* so ) {
2518 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002519 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002520 tl_assert(so);
2521 map_sem_to_SO_stack_INIT();
2522 if (VG_(lookupFM)( map_sem_to_SO_stack,
2523 &keyW, (UWord*)&xa, (UWord)sem )) {
2524 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002525 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002526 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002527 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002528 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2529 VG_(addToXA)( xa, &so );
2530 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002531 }
2532}
2533
sewardjf98e1c02008-10-25 16:22:41 +00002534static SO* mb_pop_SO_for_sem ( void* sem ) {
2535 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002536 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002537 SO* so;
2538 map_sem_to_SO_stack_INIT();
2539 if (VG_(lookupFM)( map_sem_to_SO_stack,
2540 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002541 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002542 Word sz;
2543 tl_assert(keyW == (UWord)sem);
2544 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002545 tl_assert(sz >= 0);
2546 if (sz == 0)
2547 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002548 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2549 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002550 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002551 return so;
sewardjb4112022007-11-09 22:49:28 +00002552 } else {
2553 /* hmm, that's odd. No stack for this semaphore. */
2554 return NULL;
2555 }
2556}
2557
sewardj11e352f2007-11-30 11:11:02 +00002558static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002559{
sewardjf98e1c02008-10-25 16:22:41 +00002560 UWord keyW, valW;
2561 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002562
sewardjb4112022007-11-09 22:49:28 +00002563 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002564 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002565 (Int)tid, (void*)sem );
2566
sewardjf98e1c02008-10-25 16:22:41 +00002567 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002568
sewardjf98e1c02008-10-25 16:22:41 +00002569 /* Empty out the semaphore's SO stack. This way of doing it is
2570 stupid, but at least it's easy. */
2571 while (1) {
2572 so = mb_pop_SO_for_sem( sem );
2573 if (!so) break;
2574 libhb_so_dealloc(so);
2575 }
2576
2577 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2578 XArray* xa = (XArray*)valW;
2579 tl_assert(keyW == (UWord)sem);
2580 tl_assert(xa);
2581 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2582 VG_(deleteXA)(xa);
2583 }
sewardjb4112022007-11-09 22:49:28 +00002584}
2585
sewardj11e352f2007-11-30 11:11:02 +00002586static
2587void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2588{
sewardjf98e1c02008-10-25 16:22:41 +00002589 SO* so;
2590 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002591
2592 if (SHOW_EVENTS >= 1)
2593 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2594 (Int)tid, (void*)sem, value );
2595
sewardjf98e1c02008-10-25 16:22:41 +00002596 thr = map_threads_maybe_lookup( tid );
2597 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002598
sewardjf98e1c02008-10-25 16:22:41 +00002599 /* Empty out the semaphore's SO stack. This way of doing it is
2600 stupid, but at least it's easy. */
2601 while (1) {
2602 so = mb_pop_SO_for_sem( sem );
2603 if (!so) break;
2604 libhb_so_dealloc(so);
2605 }
sewardj11e352f2007-11-30 11:11:02 +00002606
sewardjf98e1c02008-10-25 16:22:41 +00002607 /* If we don't do this check, the following while loop runs us out
2608 of memory for stupid initial values of 'value'. */
2609 if (value > 10000) {
2610 HG_(record_error_Misc)(
2611 thr, "sem_init: initial value exceeds 10000; using 10000" );
2612 value = 10000;
2613 }
sewardj11e352f2007-11-30 11:11:02 +00002614
sewardjf98e1c02008-10-25 16:22:41 +00002615 /* Now create 'valid' new SOs for the thread, do a strong send to
2616 each of them, and push them all on the stack. */
2617 for (; value > 0; value--) {
2618 Thr* hbthr = thr->hbthr;
2619 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002620
sewardjf98e1c02008-10-25 16:22:41 +00002621 so = libhb_so_alloc();
2622 libhb_so_send( hbthr, so, True/*strong send*/ );
2623 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002624 }
2625}
2626
2627static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002628{
sewardjf98e1c02008-10-25 16:22:41 +00002629 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2630 it (iow, write our VC into it, then tick ours), and push the SO
2631 on on a stack of SOs associated with 'sem'. This is later used
2632 by other thread(s) which successfully exit from a sem_wait on
2633 the same sem; by doing a strong recv from SOs popped of the
2634 stack, they acquire dependencies on the posting thread
2635 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002636
sewardjf98e1c02008-10-25 16:22:41 +00002637 Thread* thr;
2638 SO* so;
2639 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002640
2641 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002642 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002643 (Int)tid, (void*)sem );
2644
2645 thr = map_threads_maybe_lookup( tid );
2646 tl_assert(thr); /* cannot fail - Thread* must already exist */
2647
2648 // error-if: sem is bogus
2649
sewardjf98e1c02008-10-25 16:22:41 +00002650 hbthr = thr->hbthr;
2651 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002652
sewardjf98e1c02008-10-25 16:22:41 +00002653 so = libhb_so_alloc();
2654 libhb_so_send( hbthr, so, True/*strong send*/ );
2655 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002656}
2657
sewardj11e352f2007-11-30 11:11:02 +00002658static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002659{
sewardjf98e1c02008-10-25 16:22:41 +00002660 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2661 the 'sem' from this semaphore's SO-stack, and do a strong recv
2662 from it. This creates a dependency back to one of the post-ers
2663 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002664
sewardjf98e1c02008-10-25 16:22:41 +00002665 Thread* thr;
2666 SO* so;
2667 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002668
2669 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002670 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002671 (Int)tid, (void*)sem );
2672
2673 thr = map_threads_maybe_lookup( tid );
2674 tl_assert(thr); /* cannot fail - Thread* must already exist */
2675
2676 // error-if: sem is bogus
2677
sewardjf98e1c02008-10-25 16:22:41 +00002678 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002679
sewardjf98e1c02008-10-25 16:22:41 +00002680 if (so) {
2681 hbthr = thr->hbthr;
2682 tl_assert(hbthr);
2683
2684 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2685 libhb_so_dealloc(so);
2686 } else {
2687 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2688 If this happened it would surely be a bug in the threads
2689 library. */
2690 HG_(record_error_Misc)(
2691 thr, "Bug in libpthread: sem_wait succeeded on"
2692 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002693 }
2694}
2695
2696
sewardj9f569b72008-11-13 13:33:09 +00002697/* -------------------------------------------------------- */
2698/* -------------- events to do with barriers -------------- */
2699/* -------------------------------------------------------- */
2700
/* Shadow state for one guest pthread_barrier_t. */
typedef
   struct {
      Bool initted; /* has it yet been initted by guest? */
      UWord size; /* declared size */
      XArray* waiting; /* XA of Thread*.  # present is 0 .. .size */
   }
   Bar;
2708
2709static Bar* new_Bar ( void ) {
2710 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2711 tl_assert(bar);
2712 /* all fields are zero */
2713 tl_assert(bar->initted == False);
2714 return bar;
2715}
2716
2717static void delete_Bar ( Bar* bar ) {
2718 tl_assert(bar);
2719 if (bar->waiting)
2720 VG_(deleteXA)(bar->waiting);
2721 HG_(free)(bar);
2722}
2723
2724/* A mapping which stores auxiliary data for barriers. */
2725
2726/* pthread_barrier_t* -> Bar* */
2727static WordFM* map_barrier_to_Bar = NULL;
2728
2729static void map_barrier_to_Bar_INIT ( void ) {
2730 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2731 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2732 "hg.mbtBI.1", HG_(free), NULL );
2733 tl_assert(map_barrier_to_Bar != NULL);
2734 }
2735}
2736
2737static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2738 UWord key, val;
2739 map_barrier_to_Bar_INIT();
2740 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2741 tl_assert(key == (UWord)barrier);
2742 return (Bar*)val;
2743 } else {
2744 Bar* bar = new_Bar();
2745 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2746 return bar;
2747 }
2748}
2749
2750static void map_barrier_to_Bar_delete ( void* barrier ) {
2751 UWord keyW, valW;
2752 map_barrier_to_Bar_INIT();
2753 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2754 Bar* bar = (Bar*)valW;
2755 tl_assert(keyW == (UWord)barrier);
2756 delete_Bar(bar);
2757 }
2758}
2759
2760
static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
                                               void* barrier,
                                               UWord count )
{
   /* pthread_barrier_init(barrier, _, count) is about to be called.
      Complain about a zero count or re-initialisation of a live
      barrier, then (re)set the shadow Bar: empty waiters list, size
      'count', marked as initialised. */
   Thread* thr;
   Bar*    bar;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
                  "(tid=%d, barrier=%p, count=%lu)\n", 
                  (Int)tid, (void*)barrier, count );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   if (count == 0) {
      /* POSIX requires count >= 1 */
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: 'count' argument is zero"
      );
   }

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: barrier is already initialised"
      );
   }

   if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      /* waiters can only have accumulated on an initialised barrier */
      tl_assert(bar->initted);
      HG_(record_error_Misc)(
         thr, "pthread_barrier_init: threads are waiting at barrier"
      );
      /* discard the stale waiters */
      VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
   }
   if (!bar->waiting) {
      bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
                                 sizeof(Thread*) );
   }

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == 0);
   bar->initted = True;
   bar->size    = count;
}
2808
2809
static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
                                                  void* barrier )
{
   Thread* thr;
   Bar*    bar;

   /* Deal with destroy events.  The only purpose is to free storage
      associated with the barrier, so as to avoid any possible
      resource leaks. */
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
                  "(tid=%d, barrier=%p)\n", 
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      /* destroying a barrier that was never (or no longer) initted */
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: barrier was never initialised"
      );
   }

   if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
      /* destroying while threads are still parked at the barrier */
      HG_(record_error_Misc)(
         thr, "pthread_barrier_destroy: threads are waiting at barrier"
      );
   }

   /* Maybe we shouldn't do this; just let it persist, so that when it
      is reinitialised we don't need to do any dynamic memory
      allocation?  The downside is a potentially unlimited space leak,
      if the client creates (in turn) a large number of barriers all
      at different locations.  Note that if we do later move to the
      don't-delete-it scheme, we need to mark the barrier as
      uninitialised again since otherwise a later _init call will
      elicit a duplicate-init error. */
   map_barrier_to_Bar_delete( barrier );
}
2852
2853
static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
                                               void* barrier )
{
  /* This function gets called after a client thread calls
     pthread_barrier_wait but before it arrives at the real
     pthread_barrier_wait.

     Why is the following correct?  It's a bit subtle.

     If this is not the last thread arriving at the barrier, we simply
     note its presence and return.  Because valgrind (at least as of
     Nov 08) is single threaded, we are guaranteed safe from any race
     conditions when in this function -- no other client threads are
     running.

     If this is the last thread, then we are again the only running
     thread.  All the other threads will have either arrived at the
     real pthread_barrier_wait or are on their way to it, but in any
     case are guaranteed not to be able to move past it, because this
     thread is currently in this function and so has not yet arrived
     at the real pthread_barrier_wait.  That means that:

     1. While we are in this function, none of the other threads
        waiting at the barrier can move past it.

     2. When this function returns (and simulated execution resumes),
        this thread and all other waiting threads will be able to move
        past the real barrier.

     Because of this, it is now safe to update the vector clocks of
     all threads, to represent the fact that they all arrived at the
     barrier and have all moved on.  There is no danger of any
     complications to do with some threads leaving the barrier and
     racing back round to the front, whilst others are still leaving
     (which is the primary source of complication in correct handling/
     implementation of barriers).  That can't happen because we update
     here our data structures so as to indicate that the threads have
     passed the barrier, even though, as per (2) above, they are
     guaranteed not to pass the barrier until we return.

     This relies crucially on Valgrind being single threaded.  If that
     changes, this will need to be reconsidered.
   */
   Thread* thr;
   Bar*    bar;
   SO*     so;
   UWord   present, i;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
                  "(tid=%d, barrier=%p)\n", 
                  (Int)tid, (void*)barrier );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
   tl_assert(bar);

   if (!bar->initted) {
      HG_(record_error_Misc)(
         thr, "pthread_barrier_wait: barrier is uninitialised"
      );
      return; /* client is broken .. avoid assertions below */
   }

   /* guaranteed by _INIT_PRE above */
   tl_assert(bar->size > 0);
   tl_assert(bar->waiting);

   /* record this thread's arrival */
   VG_(addToXA)( bar->waiting, &thr );

   /* guaranteed by this function */
   present = VG_(sizeXA)(bar->waiting);
   tl_assert(present > 0 && present <= bar->size);

   /* not the last arrival: nothing more to do until the barrier
      fills up */
   if (present < bar->size)
      return;

   /* All the threads have arrived.  Now do the Interesting Bit.  Get
      a new synchronisation object and do a weak send to it from all
      the participating threads.  This makes its vector clocks be the
      join of all the individual threads' vector clocks.  Then do a
      strong receive from it back to all threads, so that their VCs
      are a copy of it (hence are all equal to the join of their
      original VCs.) */
   so = libhb_so_alloc();

   /* XXX check ->waiting has no duplicates */

   tl_assert(bar->waiting);
   tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);

   /* compute the join ... */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_send( hbthr, so, False/*weak send*/ );
   }
   /* ... and distribute to all threads */
   for (i = 0; i < bar->size; i++) {
      Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
      Thr* hbthr = t->hbthr;
      libhb_so_recv( hbthr, so, True/*strong recv*/ );
   }

   /* finally, we must empty out the waiting vector */
   VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));

   /* and we don't need this any more.  Perhaps a stack-allocated
      SO would be better? */
   libhb_so_dealloc(so);
}
2967
2968
sewardjed2e72e2009-08-14 11:08:24 +00002969/* ----------------------------------------------------- */
2970/* ----- events to do with user-specified HB edges ----- */
2971/* ----------------------------------------------------- */
2972
2973/* A mapping from arbitrary UWord tag to the SO associated with it.
2974 The UWord tags are meaningless to us, interpreted only by the
2975 user. */
2976
2977
2978
2979/* UWord -> SO* */
2980static WordFM* map_usertag_to_SO = NULL;
2981
2982static void map_usertag_to_SO_INIT ( void ) {
2983 if (UNLIKELY(map_usertag_to_SO == NULL)) {
2984 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
2985 "hg.mutS.1", HG_(free), NULL );
2986 tl_assert(map_usertag_to_SO != NULL);
2987 }
2988}
2989
2990static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
2991 UWord key, val;
2992 map_usertag_to_SO_INIT();
2993 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
2994 tl_assert(key == (UWord)usertag);
2995 return (SO*)val;
2996 } else {
2997 SO* so = libhb_so_alloc();
2998 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
2999 return so;
3000 }
3001}
3002
3003// If it's ever needed (XXX check before use)
3004//static void map_usertag_to_SO_delete ( UWord usertag ) {
3005// UWord keyW, valW;
3006// map_usertag_to_SO_INIT();
3007// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3008// SO* so = (SO*)valW;
3009// tl_assert(keyW == usertag);
3010// tl_assert(so);
3011// libhb_so_dealloc(so);
3012// }
3013//}
3014
3015
static
void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
{
   /* TID is just about to notionally send a message on a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound, and do a 'strong send' on the SO.  This is later used by
      other thread(s) which successfully 'receive' from the SO,
      thereby acquiring a dependency on this signalling event. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
}
3040
static
void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
{
   /* TID has just notionally received a message from a notional
      abstract synchronisation object whose identity is given by
      USERTAG.  Bind USERTAG to a real SO if it is not already so
      bound.  If the SO has at some point in the past been 'sent' on,
      do a 'strong receive' on it, thereby acquiring a dependency on
      the sender. */
   Thread* thr;
   SO*     so;

   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
                  (Int)tid, usertag );

   thr = map_threads_maybe_lookup( tid );
   tl_assert(thr); /* cannot fail - Thread* must already exist */

   so = map_usertag_to_SO_lookup_or_alloc( usertag );
   tl_assert(so);

   /* Acquire a dependency on it.  If the SO has never so far been
      sent on, then libhb_so_recv will do nothing.  So we're safe
      regardless of SO's history. */
   libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
}
3068
3069
sewardjb4112022007-11-09 22:49:28 +00003070/*--------------------------------------------------------------*/
3071/*--- Lock acquisition order monitoring ---*/
3072/*--------------------------------------------------------------*/
3073
3074/* FIXME: here are some optimisations still to do in
3075 laog__pre_thread_acquires_lock.
3076
3077 The graph is structured so that if L1 --*--> L2 then L1 must be
3078 acquired before L2.
3079
3080 The common case is that some thread T holds (eg) L1 L2 and L3 and
3081 is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing. Hence it repeatedly:
3083
3084 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3085 produces the answer No (because there is no error).
3086
3087 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3088 (because they already got added the first time T acquired Ln).
3089
3090 Hence cache these two events:
3091
3092 (1) Cache result of the query from last time. Invalidate the cache
3093 any time any edges are added to or deleted from laog.
3094
3095 (2) Cache these add-edge requests and ignore them if said edges
3096 have already been added to laog. Invalidate the cache any time
3097 any edges are deleted from laog.
3098*/
3099
/* Adjacency record for one node (lock) of the lock acquisition order
   graph: the sets of immediate predecessor and successor locks, both
   drawn from univ_laog. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;
3106
3107/* lock order acquisition graph */
3108static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3109
/* EXPOSITION ONLY: for each edge in 'laog', record the two places
   where that edge was created, so that we can show the user later if
   we need to. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
3121
sewardj250ec2e2008-02-15 22:02:30 +00003122static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003123 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3124 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3125 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3126 if (llx1->src_ga < llx2->src_ga) return -1;
3127 if (llx1->src_ga > llx2->src_ga) return 1;
3128 if (llx1->dst_ga < llx2->dst_ga) return -1;
3129 if (llx1->dst_ga > llx2->dst_ga) return 1;
3130 return 0;
3131}
3132
3133static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3134/* end EXPOSITION ONLY */
3135
3136
sewardja65db102009-01-26 10:45:16 +00003137__attribute__((noinline))
3138static void laog__init ( void )
3139{
3140 tl_assert(!laog);
3141 tl_assert(!laog_exposition);
3142
3143 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3144 HG_(free), NULL/*unboxedcmp*/ );
3145
3146 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3147 cmp_LAOGLinkExposition );
3148 tl_assert(laog);
3149 tl_assert(laog_exposition);
3150}
3151
/* Debug only: print the whole lock order graph, one node per line
   together with its in-edge and out-edge sets. */
static void laog__show ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   VG_(printf)("laog (requested by %s) {\n", who);
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      VG_(printf)("   node %p:\n", me);
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      inn %#lx\n", ws_words[i] );
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++)
         VG_(printf)("      out %#lx\n", ws_words[i] );
      /* reset the iterands for the next iteration, as elsewhere in
         this file's VG_(nextIterFM) loops */
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   VG_(printf)("}\n");
}
3178
/* Add the directed edge src --> dst to the lock order graph, creating
   nodes on demand, and keep the forwards (outs) and backwards (inns)
   views consistent.  For a genuinely new edge where both locks are
   currently held, also record in laog_exposition where each lock was
   acquired, so a later ordering violation can cite those places. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      /* addToWS returns the same set id iff dst was already a member */
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src has no node yet; make one with dst as sole successor */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* dst has no node yet; make one with src as sole predecessor */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   /* the two views of the graph must agree on edge presence */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}
3258
3259__attribute__((noinline))
3260static void laog__del_edge ( Lock* src, Lock* dst ) {
3261 Word keyW;
3262 LAOGLinks* links;
3263 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3264 /* Update the out edges for src */
3265 keyW = 0;
3266 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003267 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003268 tl_assert(links);
3269 tl_assert(keyW == (Word)src);
3270 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3271 }
3272 /* Update the in edges for dst */
3273 keyW = 0;
3274 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003275 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003276 tl_assert(links);
3277 tl_assert(keyW == (Word)dst);
3278 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3279 }
3280}
3281
3282__attribute__((noinline))
3283static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3284 Word keyW;
3285 LAOGLinks* links;
3286 keyW = 0;
3287 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003288 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003289 tl_assert(links);
3290 tl_assert(keyW == (Word)lk);
3291 return links->outs;
3292 } else {
3293 return HG_(emptyWS)( univ_laog );
3294 }
3295}
3296
3297__attribute__((noinline))
3298static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3299 Word keyW;
3300 LAOGLinks* links;
3301 keyW = 0;
3302 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003303 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003304 tl_assert(links);
3305 tl_assert(keyW == (Word)lk);
3306 return links->inns;
3307 } else {
3308 return HG_(emptyWS)( univ_laog );
3309 }
3310}
3311
/* Internal consistency check on the lock order graph: for every node,
   each backwards (inns) edge must have a matching forwards (outs)
   edge at the other end, and vice versa.  Aborts via tl_assert on
   failure, after dumping the graph. */
__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* every predecessor must list 'me' among its successors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      /* every successor must list 'me' among its predecessors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3353
/* If there is a path in laog from 'src' to any of the elements in
   'dst', return an arbitrarily chosen element of 'dst' reachable from
   'src'.  If no path exist from 'src' to any element in 'dst', return
   NULL.
   Implemented as an iterative depth-first search with an explicit
   stack and a 'visited' set, so deep graphs cannot overflow the host
   stack. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock* ret;
   Word i, ssz;
   XArray* stack; /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock* here;
   WordSetID succs;
   Word succs_size;
   UWord* succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret = NULL;
   stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* stack exhausted: no element of dsts is reachable */
      if (ssz == 0) { ret = NULL; break; }

      /* pop the topmost lock */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* reached a member of dsts: success */
      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      /* skip nodes we have already expanded */
      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      /* push all of here's successors for later expansion */
      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3409
3410
/* Thread 'thr' is acquiring 'lk'.  Check for inconsistent ordering
   between 'lk' and the locks already held by 'thr' and issue a
   complaint if so.  Also, update the ordering graph appropriately.
*/
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word ls_size, i;
   Lock* other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* lazily create the graph on first use */
   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         /* the edge's provenance is known; report it to the user */
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk) | old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3494
3495
3496/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3497
3498__attribute__((noinline))
3499static void laog__handle_one_lock_deletion ( Lock* lk )
3500{
3501 WordSetID preds, succs;
3502 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003503 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003504
sewardja65db102009-01-26 10:45:16 +00003505 if (UNLIKELY(!laog || !laog_exposition))
3506 laog__init();
3507
sewardjb4112022007-11-09 22:49:28 +00003508 preds = laog__preds( lk );
3509 succs = laog__succs( lk );
3510
3511 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3512 for (i = 0; i < preds_size; i++)
3513 laog__del_edge( (Lock*)preds_words[i], lk );
3514
3515 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3516 for (j = 0; j < succs_size; j++)
3517 laog__del_edge( lk, (Lock*)succs_words[j] );
3518
3519 for (i = 0; i < preds_size; i++) {
3520 for (j = 0; j < succs_size; j++) {
3521 if (preds_words[i] != succs_words[j]) {
3522 /* This can pass unlocked locks to laog__add_edge, since
3523 we're deleting stuff. So their acquired_at fields may
3524 be NULL. */
3525 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3526 }
3527 }
3528 }
3529}
3530
sewardj1cbc12f2008-11-10 16:16:46 +00003531//__attribute__((noinline))
3532//static void laog__handle_lock_deletions (
3533// WordSetID /* in univ_laog */ locksToDelete
3534// )
3535//{
3536// Word i, ws_size;
3537// UWord* ws_words;
3538//
sewardja65db102009-01-26 10:45:16 +00003539// if (UNLIKELY(!laog || !laog_exposition))
3540// laog__init();
sewardj1cbc12f2008-11-10 16:16:46 +00003541//
3542// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3543// for (i = 0; i < ws_size; i++)
3544// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3545//
3546// if (HG_(clo_sanity_flags) & SCE_LAOG)
3547// all__sanity_check("laog__handle_lock_deletions-post");
3548//}
sewardjb4112022007-11-09 22:49:28 +00003549
3550
3551/*--------------------------------------------------------------*/
3552/*--- Malloc/free replacements ---*/
3553/*--------------------------------------------------------------*/
3554
/* Metadata tracked for each live client heap block.
   NOTE(review): instances are passed to VG_(HT_add_node) cast to
   VgHashNode*, so 'next' presumably must remain the first field and
   'payload' the key field - confirm against m_hashtable's layout. */
typedef
   struct {
      void* next; /* required by m_hashtable */
      Addr payload; /* ptr to actual block */
      SizeT szB; /* size requested */
      ExeContext* where; /* where it was allocated */
      Thread* thr; /* allocating thread */
   }
   MallocMeta;
3564
3565/* A hash table of MallocMetas, used to track malloc'd blocks
3566 (obviously). */
3567static VgHashTable hg_mallocmeta_table = NULL;
3568
3569
3570static MallocMeta* new_MallocMeta ( void ) {
sewardjf98e1c02008-10-25 16:22:41 +00003571 MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
sewardjb4112022007-11-09 22:49:28 +00003572 tl_assert(md);
3573 return md;
3574}
/* Release a MallocMeta record previously made by new_MallocMeta. */
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}
3578
3579
3580/* Allocate a client block and set up the metadata for it. */
3581
3582static
3583void* handle_alloc ( ThreadId tid,
3584 SizeT szB, SizeT alignB, Bool is_zeroed )
3585{
3586 Addr p;
3587 MallocMeta* md;
3588
3589 tl_assert( ((SSizeT)szB) >= 0 );
3590 p = (Addr)VG_(cli_malloc)(alignB, szB);
3591 if (!p) {
3592 return NULL;
3593 }
3594 if (is_zeroed)
3595 VG_(memset)((void*)p, 0, szB);
3596
3597 /* Note that map_threads_lookup must succeed (cannot assert), since
3598 memory can only be allocated by currently alive threads, hence
3599 they must have an entry in map_threads. */
3600 md = new_MallocMeta();
3601 md->payload = p;
3602 md->szB = szB;
3603 md->where = VG_(record_ExeContext)( tid, 0 );
3604 md->thr = map_threads_lookup( tid );
3605
3606 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );
3607
3608 /* Tell the lower level memory wranglers. */
3609 evh__new_mem_heap( p, szB, is_zeroed );
3610
3611 return (void*)p;
3612}
3613
3614/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3615 Cast to a signed type to catch any unexpectedly negative args.
3616 We're assuming here that the size asked for is not greater than
3617 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3618 platforms). */
3619static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
3620 if (((SSizeT)n) < 0) return NULL;
3621 return handle_alloc ( tid, n, VG_(clo_alignment),
3622 /*is_zeroed*/False );
3623}
3624static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
3625 if (((SSizeT)n) < 0) return NULL;
3626 return handle_alloc ( tid, n, VG_(clo_alignment),
3627 /*is_zeroed*/False );
3628}
3629static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
3630 if (((SSizeT)n) < 0) return NULL;
3631 return handle_alloc ( tid, n, VG_(clo_alignment),
3632 /*is_zeroed*/False );
3633}
3634static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
3635 if (((SSizeT)n) < 0) return NULL;
3636 return handle_alloc ( tid, n, align,
3637 /*is_zeroed*/False );
3638}
3639static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3640 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3641 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3642 /*is_zeroed*/True );
3643}
3644
3645
3646/* Free a client block, including getting rid of the relevant
3647 metadata. */
3648
/* Common free path: look up the block's metadata, remove it from the
   metadata table, return the memory to the client arena, and tell the
   lower layers the range has died.  Frees of unknown addresses are
   silently ignored. */
static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address. Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}
3675
/* Client free(). */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete[]. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
3685
3686
3687static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3688{
3689 MallocMeta *md, *md_new, *md_tmp;
3690 SizeT i;
3691
3692 Addr payload = (Addr)payloadV;
3693
3694 if (((SSizeT)new_size) < 0) return NULL;
3695
3696 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3697 if (!md)
3698 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3699
3700 tl_assert(md->payload == payload);
3701
3702 if (md->szB == new_size) {
3703 /* size unchanged */
3704 md->where = VG_(record_ExeContext)(tid, 0);
3705 return payloadV;
3706 }
3707
3708 if (md->szB > new_size) {
3709 /* new size is smaller */
3710 md->szB = new_size;
3711 md->where = VG_(record_ExeContext)(tid, 0);
3712 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3713 return payloadV;
3714 }
3715
3716 /* else */ {
3717 /* new size is bigger */
3718 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3719
3720 /* First half kept and copied, second half new */
3721 // FIXME: shouldn't we use a copier which implements the
3722 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003723 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003724 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003725 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003726 /* FIXME: can anything funny happen here? specifically, if the
3727 old range contained a lock, then die_mem_heap will complain.
3728 Is that the correct behaviour? Not sure. */
3729 evh__die_mem_heap( payload, md->szB );
3730
3731 /* Copy from old to new */
3732 for (i = 0; i < md->szB; i++)
3733 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3734
3735 /* Because the metadata hash table is index by payload address,
3736 we have to get rid of the old hash table entry and make a new
3737 one. We can't just modify the existing metadata in place,
3738 because then it would (almost certainly) be in the wrong hash
3739 chain. */
3740 md_new = new_MallocMeta();
3741 *md_new = *md;
3742
3743 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3744 tl_assert(md_tmp);
3745 tl_assert(md_tmp == md);
3746
3747 VG_(cli_free)((void*)md->payload);
3748 delete_MallocMeta(md);
3749
3750 /* Update fields */
3751 md_new->where = VG_(record_ExeContext)( tid, 0 );
3752 md_new->szB = new_size;
3753 md_new->payload = p_new;
3754 md_new->thr = map_threads_lookup( tid );
3755
3756 /* and add */
3757 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3758
3759 return (void*)p_new;
3760 }
3761}
3762
njn8b140de2009-02-17 04:31:18 +00003763static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3764{
3765 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3766
3767 // There may be slop, but pretend there isn't because only the asked-for
3768 // area will have been shadowed properly.
3769 return ( md ? md->szB : 0 );
3770}
3771
sewardjb4112022007-11-09 22:49:28 +00003772
3773/*--------------------------------------------------------------*/
3774/*--- Instrumentation ---*/
3775/*--------------------------------------------------------------*/
3776
/* Append to 'bbOut' a dirty helper call reporting a guest memory
   access: effective address 'addr' (an IR atom), 'szB' bytes, store
   if 'isStore' else load.  'hWordTy_szB' is the host word size and is
   used only for sanity checking.  Sizes 1/2/4/8 get dedicated
   one-argument helpers; other sizes use the generic _N helper with
   the size as a second argument. */
static void instrument_mem_access ( IRSB* bbOut,
                                    IRExpr* addr,
                                    Int szB,
                                    Bool isStore,
                                    Int hWordTy_szB )
{
   IRType tyAddr = Ity_INVALID;
   HChar* hName = NULL;
   void* hAddr = NULL;
   Int regparms = 0;
   IRExpr** argv = NULL;
   IRDirty* di = NULL;

   tl_assert(isIRAtom(addr));
   tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);

   tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
   tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);

   /* So the effective address is in 'addr' now. */
   regparms = 1; // unless stated otherwise
   if (isStore) {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cwrite_1";
            hAddr = &evh__mem_help_cwrite_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cwrite_2";
            hAddr = &evh__mem_help_cwrite_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cwrite_4";
            hAddr = &evh__mem_help_cwrite_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cwrite_8";
            hAddr = &evh__mem_help_cwrite_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            /* generic case: size passed as an extra argument */
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cwrite_N";
            hAddr = &evh__mem_help_cwrite_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   } else {
      switch (szB) {
         case 1:
            hName = "evh__mem_help_cread_1";
            hAddr = &evh__mem_help_cread_1;
            argv = mkIRExprVec_1( addr );
            break;
         case 2:
            hName = "evh__mem_help_cread_2";
            hAddr = &evh__mem_help_cread_2;
            argv = mkIRExprVec_1( addr );
            break;
         case 4:
            hName = "evh__mem_help_cread_4";
            hAddr = &evh__mem_help_cread_4;
            argv = mkIRExprVec_1( addr );
            break;
         case 8:
            hName = "evh__mem_help_cread_8";
            hAddr = &evh__mem_help_cread_8;
            argv = mkIRExprVec_1( addr );
            break;
         default:
            /* generic case: size passed as an extra argument */
            tl_assert(szB > 8 && szB <= 512); /* stay sane */
            regparms = 2;
            hName = "evh__mem_help_cread_N";
            hAddr = &evh__mem_help_cread_N;
            argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
            break;
      }
   }

   /* Add the helper. */
   tl_assert(hName);
   tl_assert(hAddr);
   tl_assert(argv);
   di = unsafeIRDirty_0_N( regparms,
                           hName, VG_(fnptr_to_fnentry)( hAddr ),
                           argv );
   addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
}
3869
3870
sewardja0eee322009-07-31 08:46:35 +00003871/* Figure out if GA is a guest code address in the dynamic linker, and
3872 if so return True. Otherwise (and in case of any doubt) return
3873 False. (sidedly safe w/ False as the safe value) */
3874static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
3875{
3876 DebugInfo* dinfo;
3877 const UChar* soname;
3878 if (0) return False;
3879
sewardje3f1e592009-07-31 09:41:29 +00003880 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00003881 if (!dinfo) return False;
3882
sewardje3f1e592009-07-31 09:41:29 +00003883 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00003884 tl_assert(soname);
3885 if (0) VG_(printf)("%s\n", soname);
3886
3887# if defined(VGO_linux)
3888 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
3889 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
3890 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
3891 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
3892# elif defined(VGO_darwin)
3893 if (VG_STREQ(soname, VG_U_DYLD)) return True;
3894# else
3895# error "Unsupported OS"
3896# endif
3897 return False;
3898}
3899
sewardjb4112022007-11-09 22:49:28 +00003900static
3901IRSB* hg_instrument ( VgCallbackClosure* closure,
3902 IRSB* bbIn,
3903 VexGuestLayout* layout,
3904 VexGuestExtents* vge,
3905 IRType gWordTy, IRType hWordTy )
3906{
sewardj1c0ce7a2009-07-01 08:10:49 +00003907 Int i;
3908 IRSB* bbOut;
3909 Addr64 cia; /* address of current insn */
3910 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00003911 Bool inLDSO = False;
3912 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00003913
3914 if (gWordTy != hWordTy) {
3915 /* We don't currently support this case. */
3916 VG_(tool_panic)("host/guest word size mismatch");
3917 }
3918
sewardja0eee322009-07-31 08:46:35 +00003919 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
3920 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
3921 }
3922
sewardjb4112022007-11-09 22:49:28 +00003923 /* Set up BB */
3924 bbOut = emptyIRSB();
3925 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
3926 bbOut->next = deepCopyIRExpr(bbIn->next);
3927 bbOut->jumpkind = bbIn->jumpkind;
3928
3929 // Copy verbatim any IR preamble preceding the first IMark
3930 i = 0;
3931 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
3932 addStmtToIRSB( bbOut, bbIn->stmts[i] );
3933 i++;
3934 }
3935
sewardj1c0ce7a2009-07-01 08:10:49 +00003936 // Get the first statement, and initial cia from it
3937 tl_assert(bbIn->stmts_used > 0);
3938 tl_assert(i < bbIn->stmts_used);
3939 st = bbIn->stmts[i];
3940 tl_assert(Ist_IMark == st->tag);
3941 cia = st->Ist.IMark.addr;
3942 st = NULL;
3943
sewardjb4112022007-11-09 22:49:28 +00003944 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00003945 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00003946 tl_assert(st);
3947 tl_assert(isFlatIRStmt(st));
3948 switch (st->tag) {
3949 case Ist_NoOp:
3950 case Ist_AbiHint:
3951 case Ist_Put:
3952 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00003953 case Ist_Exit:
3954 /* None of these can contain any memory references. */
3955 break;
3956
sewardj1c0ce7a2009-07-01 08:10:49 +00003957 case Ist_IMark:
3958 /* no mem refs, but note the insn address. */
3959 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00003960 /* Don't instrument the dynamic linker. It generates a
3961 lot of races which we just expensively suppress, so
3962 it's pointless.
3963
3964 Avoid flooding is_in_dynamic_linker_shared_object with
3965 requests by only checking at transitions between 4K
3966 pages. */
3967 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
3968 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
3969 inLDSOmask4K = cia & ~(Addr64)0xFFF;
3970 inLDSO = is_in_dynamic_linker_shared_object(cia);
3971 } else {
3972 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
3973 }
sewardj1c0ce7a2009-07-01 08:10:49 +00003974 break;
3975
sewardjb4112022007-11-09 22:49:28 +00003976 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00003977 switch (st->Ist.MBE.event) {
3978 case Imbe_Fence:
3979 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00003980 default:
3981 goto unhandled;
3982 }
sewardjb4112022007-11-09 22:49:28 +00003983 break;
3984
sewardj1c0ce7a2009-07-01 08:10:49 +00003985 case Ist_CAS: {
3986 /* Atomic read-modify-write cycle. Just pretend it's a
3987 read. */
3988 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00003989 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
3990 if (isDCAS) {
3991 tl_assert(cas->expdHi);
3992 tl_assert(cas->dataHi);
3993 } else {
3994 tl_assert(!cas->expdHi);
3995 tl_assert(!cas->dataHi);
3996 }
3997 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00003998 if (!inLDSO) {
3999 instrument_mem_access(
4000 bbOut,
4001 cas->addr,
4002 (isDCAS ? 2 : 1)
4003 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4004 False/*!isStore*/,
4005 sizeofIRType(hWordTy)
4006 );
4007 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004008 break;
4009 }
4010
sewardjdb5907d2009-11-26 17:20:21 +00004011 case Ist_LLSC: {
4012 /* We pretend store-conditionals don't exist, viz, ignore
4013 them. Whereas load-linked's are treated the same as
4014 normal loads. */
4015 IRType dataTy;
4016 if (st->Ist.LLSC.storedata == NULL) {
4017 /* LL */
4018 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004019 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004020 instrument_mem_access(
4021 bbOut,
4022 st->Ist.LLSC.addr,
4023 sizeofIRType(dataTy),
4024 False/*!isStore*/,
sewardja0eee322009-07-31 08:46:35 +00004025 sizeofIRType(hWordTy)
4026 );
4027 }
sewardjdb5907d2009-11-26 17:20:21 +00004028 } else {
4029 /* SC */
4030 /*ignore */
4031 }
4032 break;
4033 }
4034
4035 case Ist_Store:
4036 /* It seems we pretend that store-conditionals don't
4037 exist, viz, just ignore them ... */
4038 if (!inLDSO) {
4039 instrument_mem_access(
4040 bbOut,
4041 st->Ist.Store.addr,
4042 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4043 True/*isStore*/,
4044 sizeofIRType(hWordTy)
4045 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004046 }
njnb83caf22009-05-25 01:47:56 +00004047 break;
sewardjb4112022007-11-09 22:49:28 +00004048
4049 case Ist_WrTmp: {
sewardj1c0ce7a2009-07-01 08:10:49 +00004050 /* ... whereas here we don't care whether a load is a
4051 vanilla one or a load-linked. */
sewardjb4112022007-11-09 22:49:28 +00004052 IRExpr* data = st->Ist.WrTmp.data;
4053 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004054 if (!inLDSO) {
4055 instrument_mem_access(
4056 bbOut,
4057 data->Iex.Load.addr,
4058 sizeofIRType(data->Iex.Load.ty),
4059 False/*!isStore*/,
4060 sizeofIRType(hWordTy)
4061 );
4062 }
sewardjb4112022007-11-09 22:49:28 +00004063 }
4064 break;
4065 }
4066
4067 case Ist_Dirty: {
4068 Int dataSize;
4069 IRDirty* d = st->Ist.Dirty.details;
4070 if (d->mFx != Ifx_None) {
4071 /* This dirty helper accesses memory. Collect the
4072 details. */
4073 tl_assert(d->mAddr != NULL);
4074 tl_assert(d->mSize != 0);
4075 dataSize = d->mSize;
4076 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004077 if (!inLDSO) {
4078 instrument_mem_access(
4079 bbOut, d->mAddr, dataSize, False/*!isStore*/,
4080 sizeofIRType(hWordTy)
4081 );
4082 }
sewardjb4112022007-11-09 22:49:28 +00004083 }
4084 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004085 if (!inLDSO) {
4086 instrument_mem_access(
4087 bbOut, d->mAddr, dataSize, True/*isStore*/,
4088 sizeofIRType(hWordTy)
4089 );
4090 }
sewardjb4112022007-11-09 22:49:28 +00004091 }
4092 } else {
4093 tl_assert(d->mAddr == NULL);
4094 tl_assert(d->mSize == 0);
4095 }
4096 break;
4097 }
4098
4099 default:
sewardjf98e1c02008-10-25 16:22:41 +00004100 unhandled:
4101 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004102 tl_assert(0);
4103
4104 } /* switch (st->tag) */
4105
4106 addStmtToIRSB( bbOut, st );
4107 } /* iterate over bbIn->stmts */
4108
4109 return bbOut;
4110}
4111
4112
4113/*----------------------------------------------------------------*/
4114/*--- Client requests ---*/
4115/*----------------------------------------------------------------*/
4116
4117/* Sheesh. Yet another goddam finite map. */
4118static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4119
4120static void map_pthread_t_to_Thread_INIT ( void ) {
4121 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004122 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4123 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004124 tl_assert(map_pthread_t_to_Thread != NULL);
4125 }
4126}
4127
4128
4129static
4130Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
4131{
4132 if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
4133 return False;
4134
4135 /* Anything that gets past the above check is one of ours, so we
4136 should be able to handle it. */
4137
4138 /* default, meaningless return value, unless otherwise set */
4139 *ret = 0;
4140
4141 switch (args[0]) {
4142
4143 /* --- --- User-visible client requests --- --- */
4144
4145 case VG_USERREQ__HG_CLEAN_MEMORY:
barta0b6b2c2008-07-07 06:49:24 +00004146 if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
sewardjb4112022007-11-09 22:49:28 +00004147 args[1], args[2]);
4148 /* Call die_mem to (expensively) tidy up properly, if there
sewardjf98e1c02008-10-25 16:22:41 +00004149 are any held locks etc in the area. Calling evh__die_mem
4150 and then evh__new_mem is a bit inefficient; probably just
4151 the latter would do. */
sewardjb4112022007-11-09 22:49:28 +00004152 if (args[2] > 0) { /* length */
4153 evh__die_mem(args[1], args[2]);
4154 /* and then set it to New */
4155 evh__new_mem(args[1], args[2]);
4156 }
4157 break;
4158
4159 /* --- --- Client requests for Helgrind's use only --- --- */
4160
4161 /* Some thread is telling us its pthread_t value. Record the
4162 binding between that and the associated Thread*, so we can
4163 later find the Thread* again when notified of a join by the
4164 thread. */
4165 case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
4166 Thread* my_thr = NULL;
4167 if (0)
4168 VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
4169 (void*)args[1]);
4170 map_pthread_t_to_Thread_INIT();
4171 my_thr = map_threads_maybe_lookup( tid );
4172 /* This assertion should hold because the map_threads (tid to
4173 Thread*) binding should have been made at the point of
4174 low-level creation of this thread, which should have
4175 happened prior to us getting this client request for it.
4176 That's because this client request is sent from
4177 client-world from the 'thread_wrapper' function, which
4178 only runs once the thread has been low-level created. */
4179 tl_assert(my_thr != NULL);
4180 /* So now we know that (pthread_t)args[1] is associated with
4181 (Thread*)my_thr. Note that down. */
4182 if (0)
4183 VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
4184 (void*)args[1], (void*)my_thr );
sewardj896f6f92008-08-19 08:38:52 +00004185 VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
sewardjb4112022007-11-09 22:49:28 +00004186 break;
4187 }
4188
4189 case _VG_USERREQ__HG_PTH_API_ERROR: {
4190 Thread* my_thr = NULL;
4191 map_pthread_t_to_Thread_INIT();
4192 my_thr = map_threads_maybe_lookup( tid );
4193 tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
sewardjf98e1c02008-10-25 16:22:41 +00004194 HG_(record_error_PthAPIerror)(
4195 my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
sewardjb4112022007-11-09 22:49:28 +00004196 break;
4197 }
4198
4199 /* This thread (tid) has completed a join with the quitting
4200 thread whose pthread_t is in args[1]. */
4201 case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
4202 Thread* thr_q = NULL; /* quitter Thread* */
4203 Bool found = False;
4204 if (0)
4205 VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
4206 (void*)args[1]);
4207 map_pthread_t_to_Thread_INIT();
sewardj896f6f92008-08-19 08:38:52 +00004208 found = VG_(lookupFM)( map_pthread_t_to_Thread,
sewardjb5f29642007-11-16 12:02:43 +00004209 NULL, (Word*)&thr_q, (Word)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004210 /* Can this fail? It would mean that our pthread_join
4211 wrapper observed a successful join on args[1] yet that
4212 thread never existed (or at least, it never lodged an
4213 entry in the mapping (via SET_MY_PTHREAD_T)). Which
4214 sounds like a bug in the threads library. */
4215 // FIXME: get rid of this assertion; handle properly
4216 tl_assert(found);
4217 if (found) {
4218 if (0)
4219 VG_(printf)(".................... quitter Thread* = %p\n",
4220 thr_q);
4221 evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
4222 }
4223 break;
4224 }
4225
4226 /* EXPOSITION only: by intercepting lock init events we can show
4227 the user where the lock was initialised, rather than only
4228 being able to show where it was first locked. Intercepting
4229 lock initialisations is not necessary for the basic operation
4230 of the race checker. */
4231 case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
4232 evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
4233 break;
4234
4235 case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
4236 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
4237 break;
4238
4239 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE: // pth_mx_t*
4240 evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
4241 break;
4242
4243 case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST: // pth_mx_t*
4244 evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
4245 break;
4246
4247 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE: // pth_mx_t*, Word
4248 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
4249 break;
4250
4251 case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST: // pth_mx_t*
4252 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
4253 break;
4254
4255 /* This thread is about to do pthread_cond_signal on the
4256 pthread_cond_t* in arg[1]. Ditto pthread_cond_broadcast. */
4257 case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
4258 case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
4259 evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
4260 break;
4261
4262 /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
4263 Returns a flag indicating whether or not the mutex is believed to be
4264 valid for this operation. */
4265 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
4266 Bool mutex_is_valid
4267 = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
4268 (void*)args[2] );
4269 *ret = mutex_is_valid ? 1 : 0;
4270 break;
4271 }
4272
sewardjf98e1c02008-10-25 16:22:41 +00004273 /* cond=arg[1] */
4274 case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
4275 evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
4276 break;
4277
sewardjb4112022007-11-09 22:49:28 +00004278 /* Thread successfully completed pthread_cond_wait, cond=arg[1],
4279 mutex=arg[2] */
4280 case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
4281 evh__HG_PTHREAD_COND_WAIT_POST( tid,
4282 (void*)args[1], (void*)args[2] );
4283 break;
4284
4285 case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
4286 evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
4287 break;
4288
4289 case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
4290 evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
4291 break;
4292
sewardj789c3c52008-02-25 12:10:07 +00004293 /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
sewardjb4112022007-11-09 22:49:28 +00004294 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
sewardj789c3c52008-02-25 12:10:07 +00004295 evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
4296 args[2], args[3] );
sewardjb4112022007-11-09 22:49:28 +00004297 break;
4298
4299 /* rwlock=arg[1], isW=arg[2] */
4300 case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
4301 evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
4302 break;
4303
4304 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
4305 evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
4306 break;
4307
4308 case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
4309 evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
4310 break;
4311
sewardj11e352f2007-11-30 11:11:02 +00004312 case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
4313 evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
sewardjb4112022007-11-09 22:49:28 +00004314 break;
4315
sewardj11e352f2007-11-30 11:11:02 +00004316 case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
4317 evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004318 break;
4319
sewardj11e352f2007-11-30 11:11:02 +00004320 case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
4321 evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
4322 break;
4323
4324 case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
4325 evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
sewardjb4112022007-11-09 22:49:28 +00004326 break;
4327
sewardj9f569b72008-11-13 13:33:09 +00004328 case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
4329 /* pth_bar_t*, ulong */
4330 evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1], args[2] );
4331 break;
4332
4333 case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
4334 /* pth_bar_t* */
4335 evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
4336 break;
4337
4338 case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
4339 /* pth_bar_t* */
4340 evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
4341 break;
sewardjb4112022007-11-09 22:49:28 +00004342
sewardj5a644da2009-08-11 10:35:58 +00004343 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
4344 /* pth_spinlock_t* */
4345 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
4346 break;
4347
4348 case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
4349 /* pth_spinlock_t* */
4350 evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
4351 break;
4352
4353 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
4354 /* pth_spinlock_t*, Word */
4355 evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
4356 break;
4357
4358 case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
4359 /* pth_spinlock_t* */
4360 evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
4361 break;
4362
4363 case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
4364 /* pth_spinlock_t* */
4365 evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
4366 break;
4367
sewardjed2e72e2009-08-14 11:08:24 +00004368 case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
4369 /* char* who */
4370 HChar* who = (HChar*)args[1];
4371 HChar buf[50 + 50];
4372 Thread* thr = map_threads_maybe_lookup( tid );
4373 tl_assert( thr ); /* I must be mapped */
4374 tl_assert( who );
4375 tl_assert( VG_(strlen)(who) <= 50 );
4376 VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
4377 /* record_error_Misc strdup's buf, so this is safe: */
4378 HG_(record_error_Misc)( thr, buf );
4379 break;
4380 }
4381
4382 case _VG_USERREQ__HG_USERSO_SEND_PRE:
4383 /* UWord arbitrary-SO-tag */
4384 evh__HG_USERSO_SEND_PRE( tid, args[1] );
4385 break;
4386
4387 case _VG_USERREQ__HG_USERSO_RECV_POST:
4388 /* UWord arbitrary-SO-tag */
4389 evh__HG_USERSO_RECV_POST( tid, args[1] );
4390 break;
4391
sewardjb4112022007-11-09 22:49:28 +00004392 default:
4393 /* Unhandled Helgrind client request! */
sewardjf98e1c02008-10-25 16:22:41 +00004394 tl_assert2(0, "unhandled Helgrind client request 0x%lx",
4395 args[0]);
sewardjb4112022007-11-09 22:49:28 +00004396 }
4397
4398 return True;
4399}
4400
4401
4402/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004403/*--- Setup ---*/
4404/*----------------------------------------------------------------*/
4405
/* Process one Helgrind-specific command line option in 'arg'.
   Recognised options update the corresponding HG_(clo_*) variable;
   anything unrecognised is passed on to the replacement-malloc
   option handler.  Returns False if the option was recognised but
   its argument was malformed, True otherwise. */
static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   /* --history-level is a three-way exact-match choice mapped onto
      the integers 0 (none) / 1 (approx) / 2 (full). */
   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      /* Must be exactly six characters, each '0' or '1'. */
      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      /* Not ours; maybe it belongs to the malloc replacement. */
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4453
/* Print the user-visible command line options (shown by --help).
   Keep this in sync with the options handled in
   hg_process_cmd_line_option. */
static void hg_print_usage ( void )
{
   VG_(printf)(
" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
" --history-level=none|approx|full [full]\n"
" full: show both stack traces for a data race (can be very slow)\n"
" approx: full trace for one thread, approx for the other (faster)\n"
" none: only show trace for one thread in a race (fastest)\n"
" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
   );
}
4465
/* Print the debugging-only command line options (shown by
   --help-debug).  Keep this in sync with the options handled in
   hg_process_cmd_line_option. */
static void hg_print_debug_usage ( void )
{
   VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
               "race errors significant? [no]\n");
   VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
               " at events (X = 0|1) [000000]\n");
   /* One line per bit of the 6-bit sanity-flags mask, most
      significant first. */
   VG_(printf)(" --hg-sanity-flags values:\n");
   VG_(printf)(" 010000 after changes to "
               "lock-order-acquisition-graph\n");
   VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
   VG_(printf)(" 000100 at mem permission setting for "
               "ranges >= %d bytes\n", SCE_BIGRANGE_T);
   VG_(printf)(" 000010 at lock/unlock events\n");
   VG_(printf)(" 000001 at thread create/join events\n");
}
4481
/* Called by the core after command line processing.  Helgrind has
   nothing to do at this point; the hook exists only because
   VG_(basic_tool_funcs) requires one. */
static void hg_post_clo_init ( void )
{
}
4485
/* Tool finalisation: print end-of-run advice, optionally dump data
   structures and run sanity checks, and emit statistics when
   --stats=yes.  'exitcode' is the client's exit code (unused here). */
static void hg_fini ( Int exitcode )
{
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   /* History level >= 2 means 'full'; suggest the cheaper settings. */
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog, "univ_laog" );
      }

      /* Stats for the retired happens-before machinery; kept for
         reference. */
      //zz VG_(printf)("\n");
      //zz VG_(printf)(" hbefore: %'10lu queries\n", stats__hbefore_queries);
      //zz VG_(printf)(" hbefore: %'10lu cache 0 hits\n", stats__hbefore_cache0s);
      //zz VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz VG_(printf)(" hbefore: %'10lu of which slow\n",
      //zz stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz stats__hbefore_stk_hwm);
      //zz VG_(printf)(" hbefore: %'10lu cache invals\n", stats__hbefore_invals);
      //zz VG_(printf)(" hbefore: %'10lu probes\n", stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)(" locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)(" threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)(" univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      // stats__ga_LL_adds,
      // (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)(" LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );
      VG_(printf)(" LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)(" locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)(" sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      /* Let the happens-before library print its own stats too. */
      libhb_shutdown(True);
   }
}
4564
sewardjf98e1c02008-10-25 16:22:41 +00004565/* FIXME: move these somewhere sane */
4566
4567static
4568void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4569{
4570 Thread* thr;
4571 ThreadId tid;
4572 UWord nActual;
4573 tl_assert(hbt);
4574 thr = libhb_get_Thr_opaque( hbt );
4575 tl_assert(thr);
4576 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4577 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4578 NULL, NULL, 0 );
4579 tl_assert(nActual <= nRequest);
4580 for (; nActual < nRequest; nActual++)
4581 frames[nActual] = 0;
4582}
4583
4584static
sewardj23f12002009-07-24 08:45:08 +00004585ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00004586{
4587 Thread* thr;
4588 ThreadId tid;
4589 ExeContext* ec;
4590 tl_assert(hbt);
4591 thr = libhb_get_Thr_opaque( hbt );
4592 tl_assert(thr);
4593 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00004594 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00004595 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00004596 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00004597}
4598
4599
sewardjb4112022007-11-09 22:49:28 +00004600static void hg_pre_clo_init ( void )
4601{
sewardjf98e1c02008-10-25 16:22:41 +00004602 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00004603
sewardjb4112022007-11-09 22:49:28 +00004604 VG_(details_name) ("Helgrind");
4605 VG_(details_version) (NULL);
4606 VG_(details_description) ("a thread error detector");
4607 VG_(details_copyright_author)(
njn9f207462009-03-10 22:02:09 +00004608 "Copyright (C) 2007-2009, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00004609 VG_(details_bug_reports_to) (VG_BUGS_TO);
4610 VG_(details_avg_translation_sizeB) ( 200 );
4611
4612 VG_(basic_tool_funcs) (hg_post_clo_init,
4613 hg_instrument,
4614 hg_fini);
4615
4616 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00004617 VG_(needs_tool_errors) (HG_(eq_Error),
sewardj24118492009-07-15 14:50:02 +00004618 HG_(before_pp_Error),
sewardjf98e1c02008-10-25 16:22:41 +00004619 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00004620 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00004621 HG_(update_extra),
4622 HG_(recognised_suppression),
4623 HG_(read_extra_suppression_info),
4624 HG_(error_matches_suppression),
4625 HG_(get_error_name),
sewardj588adef2009-08-15 22:41:51 +00004626 HG_(get_extra_suppression_info));
sewardjb4112022007-11-09 22:49:28 +00004627
sewardj24118492009-07-15 14:50:02 +00004628 VG_(needs_xml_output) ();
4629
sewardjb4112022007-11-09 22:49:28 +00004630 VG_(needs_command_line_options)(hg_process_cmd_line_option,
4631 hg_print_usage,
4632 hg_print_debug_usage);
4633 VG_(needs_client_requests) (hg_handle_client_request);
4634
4635 // FIXME?
4636 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
4637 // hg_expensive_sanity_check);
4638
4639 VG_(needs_malloc_replacement) (hg_cli__malloc,
4640 hg_cli____builtin_new,
4641 hg_cli____builtin_vec_new,
4642 hg_cli__memalign,
4643 hg_cli__calloc,
4644 hg_cli__free,
4645 hg_cli____builtin_delete,
4646 hg_cli____builtin_vec_delete,
4647 hg_cli__realloc,
njn8b140de2009-02-17 04:31:18 +00004648 hg_cli_malloc_usable_size,
sewardjb4112022007-11-09 22:49:28 +00004649 HG_CLI__MALLOC_REDZONE_SZB );
4650
sewardj849b0ed2008-12-21 10:43:10 +00004651 /* 21 Dec 08: disabled this; it mostly causes H to start more
4652 slowly and use significantly more memory, without very often
4653 providing useful results. The user can request to load this
4654 information manually with --read-var-info=yes. */
4655 if (0) VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00004656
4657 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00004658 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
4659 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00004660 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
4661 VG_(track_new_mem_stack) ( evh__new_mem );
4662
4663 // FIXME: surely this isn't thread-aware
sewardj23f12002009-07-24 08:45:08 +00004664 VG_(track_copy_mem_remap) ( evh__copy_mem );
sewardjb4112022007-11-09 22:49:28 +00004665
4666 VG_(track_change_mem_mprotect) ( evh__set_perms );
4667
4668 VG_(track_die_mem_stack_signal)( evh__die_mem );
4669 VG_(track_die_mem_brk) ( evh__die_mem );
4670 VG_(track_die_mem_munmap) ( evh__die_mem );
4671 VG_(track_die_mem_stack) ( evh__die_mem );
4672
4673 // FIXME: what is this for?
4674 VG_(track_ban_mem_stack) (NULL);
4675
4676 VG_(track_pre_mem_read) ( evh__pre_mem_read );
4677 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
4678 VG_(track_pre_mem_write) ( evh__pre_mem_write );
4679 VG_(track_post_mem_write) (NULL);
4680
4681 /////////////////
4682
4683 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
4684 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
4685
4686 VG_(track_start_client_code)( evh__start_client_code );
4687 VG_(track_stop_client_code)( evh__stop_client_code );
4688
sewardjf98e1c02008-10-25 16:22:41 +00004689 /////////////////////////////////////////////
4690 hbthr_root = libhb_init( for_libhb__get_stacktrace,
sewardjf98e1c02008-10-25 16:22:41 +00004691 for_libhb__get_EC );
4692 /////////////////////////////////////////////
4693
4694 initialise_data_structures(hbthr_root);
sewardjb4112022007-11-09 22:49:28 +00004695
4696 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
4697 as described in comments at the top of pub_tool_hashtable.h, are
4698 met. Blargh. */
4699 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
4700 tl_assert( sizeof(UWord) == sizeof(Addr) );
4701 hg_mallocmeta_table
4702 = VG_(HT_construct)( "hg_malloc_metadata_table" );
4703
sewardjb4112022007-11-09 22:49:28 +00004704}
4705
/* Core/tool interface handshake: exports hg_pre_clo_init as this
   tool's registration hook and embeds the interface version the core
   checks at load time. */
VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
4707
4708/*--------------------------------------------------------------------*/
4709/*--- end hg_main.c ---*/
4710/*--------------------------------------------------------------------*/