blob: 1be4f1382b37e854eb9cf592965e31c4d81f2022 [file] [log] [blame]
sewardjb4112022007-11-09 22:49:28 +00001
2/*--------------------------------------------------------------------*/
3/*--- Helgrind: a Valgrind tool for detecting errors ---*/
4/*--- in threaded programs. hg_main.c ---*/
5/*--------------------------------------------------------------------*/
6
7/*
8 This file is part of Helgrind, a Valgrind tool for detecting errors
9 in threaded programs.
10
sewardj9eecbbb2010-05-03 21:37:12 +000011 Copyright (C) 2007-2010 OpenWorks LLP
sewardjb4112022007-11-09 22:49:28 +000012 info@open-works.co.uk
13
sewardj9eecbbb2010-05-03 21:37:12 +000014 Copyright (C) 2007-2010 Apple, Inc.
njnf76d27a2009-05-28 01:53:07 +000015
sewardjb4112022007-11-09 22:49:28 +000016 This program is free software; you can redistribute it and/or
17 modify it under the terms of the GNU General Public License as
18 published by the Free Software Foundation; either version 2 of the
19 License, or (at your option) any later version.
20
21 This program is distributed in the hope that it will be useful, but
22 WITHOUT ANY WARRANTY; without even the implied warranty of
23 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
24 General Public License for more details.
25
26 You should have received a copy of the GNU General Public License
27 along with this program; if not, write to the Free Software
28 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
29 02111-1307, USA.
30
31 The GNU General Public License is contained in the file COPYING.
32
33 Neither the names of the U.S. Department of Energy nor the
34 University of California nor the names of its contributors may be
35 used to endorse or promote products derived from this software
36 without prior written permission.
37*/
38
39#include "pub_tool_basics.h"
sewardjb4112022007-11-09 22:49:28 +000040#include "pub_tool_libcassert.h"
41#include "pub_tool_libcbase.h"
42#include "pub_tool_libcprint.h"
sewardjb4112022007-11-09 22:49:28 +000043#include "pub_tool_threadstate.h"
44#include "pub_tool_tooliface.h"
45#include "pub_tool_hashtable.h"
46#include "pub_tool_replacemalloc.h"
47#include "pub_tool_machine.h"
48#include "pub_tool_options.h"
49#include "pub_tool_xarray.h"
50#include "pub_tool_stacktrace.h"
sewardj896f6f92008-08-19 08:38:52 +000051#include "pub_tool_wordfm.h"
sewardja0eee322009-07-31 08:46:35 +000052#include "pub_tool_debuginfo.h" // VG_(find_seginfo), VG_(seginfo_soname)
53#include "pub_tool_redir.h" // sonames for the dynamic linkers
54#include "pub_tool_vki.h" // VKI_PAGE_SIZE
sewardjb4112022007-11-09 22:49:28 +000055
sewardjf98e1c02008-10-25 16:22:41 +000056#include "hg_basics.h"
57#include "hg_wordset.h"
58#include "hg_lock_n_thread.h"
59#include "hg_errors.h"
60
61#include "libhb.h"
62
sewardjb4112022007-11-09 22:49:28 +000063#include "helgrind.h"
64
sewardjf98e1c02008-10-25 16:22:41 +000065
66// FIXME: new_mem_w_tid ignores the supplied tid. (wtf?!)
67
68// FIXME: when client destroys a lock or a CV, remove these
69// from our mappings, so that the associated SO can be freed up
sewardjb4112022007-11-09 22:49:28 +000070
71/*----------------------------------------------------------------*/
72/*--- ---*/
73/*----------------------------------------------------------------*/
74
sewardj11e352f2007-11-30 11:11:02 +000075/* Note this needs to be compiled with -fno-strict-aliasing, since it
76 contains a whole bunch of calls to lookupFM etc which cast between
77 Word and pointer types. gcc rightly complains this breaks ANSI C
78 strict aliasing rules, at -O2. No complaints at -O, but -O2 gives
79 worthwhile performance benefits over -O.
sewardjc17be792007-11-10 22:50:13 +000080*/
sewardjb4112022007-11-09 22:49:28 +000081
sewardjefd3b4d2007-12-02 02:05:23 +000082// FIXME catch sync signals (SEGV, basically) and unlock BHL,
83// if held. Otherwise a LOCK-prefixed insn which segfaults
84// gets Helgrind into a total muddle as the BHL will not be
85// released after the insn.
86
sewardjb4112022007-11-09 22:49:28 +000087// FIXME what is supposed to happen to locks in memory which
88// is relocated as a result of client realloc?
89
sewardjb4112022007-11-09 22:49:28 +000090// FIXME put referencing ThreadId into Thread and get
91// rid of the slow reverse mapping function.
92
93// FIXME accesses to NoAccess areas: change state to Excl?
94
95// FIXME report errors for accesses of NoAccess memory?
96
97// FIXME pth_cond_wait/timedwait wrappers. Even if these fail,
98// the thread still holds the lock.
99
100/* ------------ Debug/trace options ------------ */
101
102// this is:
103// shadow_mem_make_NoAccess: 29156 SMs, 1728 scanned
104// happens_before_wrk: 1000
105// ev__post_thread_join: 3360 SMs, 29 scanned, 252 re-Excls
106#define SHOW_EXPENSIVE_STUFF 0
107
108// 0 for silent, 1 for some stuff, 2 for lots of stuff
109#define SHOW_EVENTS 0
110
sewardjb4112022007-11-09 22:49:28 +0000111
112static void all__sanity_check ( Char* who ); /* fwds */
113
114#define HG_CLI__MALLOC_REDZONE_SZB 16 /* let's say */
115
116// 0 for none, 1 for dump at end of run
117#define SHOW_DATA_STRUCTURES 0
118
119
sewardjb4112022007-11-09 22:49:28 +0000120/* ------------ Misc comments ------------ */
121
122// FIXME: don't hardwire initial entries for root thread.
123// Instead, let the pre_thread_ll_create handler do this.
124
sewardjb4112022007-11-09 22:49:28 +0000125
126/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000127/*--- Primary data structures ---*/
sewardjb4112022007-11-09 22:49:28 +0000128/*----------------------------------------------------------------*/
129
sewardjb4112022007-11-09 22:49:28 +0000130/* Admin linked list of Threads */
131static Thread* admin_threads = NULL;
132
133/* Admin linked list of Locks */
134static Lock* admin_locks = NULL;
135
sewardjb4112022007-11-09 22:49:28 +0000136/* Mapping table for core ThreadIds to Thread* */
137static Thread** map_threads = NULL; /* Array[VG_N_THREADS] of Thread* */
138
sewardjb4112022007-11-09 22:49:28 +0000139/* Mapping table for lock guest addresses to Lock* */
140static WordFM* map_locks = NULL; /* WordFM LockAddr Lock* */
141
142/* The word-set universes for thread sets and lock sets. */
143static WordSetU* univ_tsets = NULL; /* sets of Thread* */
144static WordSetU* univ_lsets = NULL; /* sets of Lock* */
145static WordSetU* univ_laog = NULL; /* sets of Lock*, for LAOG */
146
147/* never changed; we only care about its address. Is treated as if it
148 was a standard userspace lock. Also we have a Lock* describing it
149 so it can participate in lock sets in the usual way. */
150static Int __bus_lock = 0;
151static Lock* __bus_lock_Lock = NULL;
152
153
154/*----------------------------------------------------------------*/
155/*--- Simple helpers for the data structures ---*/
156/*----------------------------------------------------------------*/
157
158static UWord stats__lockN_acquires = 0;
159static UWord stats__lockN_releases = 0;
160
sewardjf98e1c02008-10-25 16:22:41 +0000161static
162ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr ); /*fwds*/
sewardjb4112022007-11-09 22:49:28 +0000163
164/* --------- Constructors --------- */
165
sewardjf98e1c02008-10-25 16:22:41 +0000166static Thread* mk_Thread ( Thr* hbthr ) {
sewardjb4112022007-11-09 22:49:28 +0000167 static Int indx = 1;
sewardjf98e1c02008-10-25 16:22:41 +0000168 Thread* thread = HG_(zalloc)( "hg.mk_Thread.1", sizeof(Thread) );
sewardjb4112022007-11-09 22:49:28 +0000169 thread->locksetA = HG_(emptyWS)( univ_lsets );
170 thread->locksetW = HG_(emptyWS)( univ_lsets );
sewardjb4112022007-11-09 22:49:28 +0000171 thread->magic = Thread_MAGIC;
sewardjf98e1c02008-10-25 16:22:41 +0000172 thread->hbthr = hbthr;
173 thread->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +0000174 thread->created_at = NULL;
175 thread->announced = False;
176 thread->errmsg_index = indx++;
177 thread->admin = admin_threads;
178 admin_threads = thread;
179 return thread;
180}
sewardjf98e1c02008-10-25 16:22:41 +0000181
sewardjb4112022007-11-09 22:49:28 +0000182// Make a new lock which is unlocked (hence ownerless)
183static Lock* mk_LockN ( LockKind kind, Addr guestaddr ) {
184 static ULong unique = 0;
sewardjf98e1c02008-10-25 16:22:41 +0000185 Lock* lock = HG_(zalloc)( "hg.mk_Lock.1", sizeof(Lock) );
sewardjb4112022007-11-09 22:49:28 +0000186 lock->admin = admin_locks;
187 lock->unique = unique++;
188 lock->magic = LockN_MAGIC;
189 lock->appeared_at = NULL;
190 lock->acquired_at = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000191 lock->hbso = libhb_so_alloc();
sewardjb4112022007-11-09 22:49:28 +0000192 lock->guestaddr = guestaddr;
193 lock->kind = kind;
194 lock->heldW = False;
195 lock->heldBy = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000196 tl_assert(HG_(is_sane_LockN)(lock));
sewardjb4112022007-11-09 22:49:28 +0000197 admin_locks = lock;
198 return lock;
199}
sewardjb4112022007-11-09 22:49:28 +0000200
201/* Release storage for a Lock. Also release storage in .heldBy, if
202 any. */
203static void del_LockN ( Lock* lk )
204{
sewardjf98e1c02008-10-25 16:22:41 +0000205 tl_assert(HG_(is_sane_LockN)(lk));
206 tl_assert(lk->hbso);
207 libhb_so_dealloc(lk->hbso);
sewardjb4112022007-11-09 22:49:28 +0000208 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000209 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000210 VG_(memset)(lk, 0xAA, sizeof(*lk));
sewardjf98e1c02008-10-25 16:22:41 +0000211 HG_(free)(lk);
sewardjb4112022007-11-09 22:49:28 +0000212}
213
/* Update 'lk' to reflect that 'thr' now has a write-acquisition of
   it.  This is done strictly: only combinations resulting from
   correct program and libpthread behaviour are allowed.  Anything
   else fails an assertion. */
static void lockN_acquire_writer ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      /* lock is transitioning held -> unheld; snapshot the acquire
         point now, while we still know who is acquiring it */
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   switch (lk->kind) {
      case LK_nonRec:
      /* the LK_mbRec first-acquire and LK_rdwr w-acquire cases jump
         here, since a first write-acquisition is the same for all
         three kinds */
      case_LK_nonRec:
         tl_assert(lk->heldBy == NULL); /* can't w-lock recursively */
         tl_assert(!lk->heldW);
         lk->heldW = True;
         lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNaw.1", HG_(free) );
         VG_(addToBag)( lk->heldBy, (Word)thr );
         break;
      case LK_mbRec:
         if (lk->heldBy == NULL)
            goto case_LK_nonRec;
         /* 2nd and subsequent locking of a lock by its owner */
         tl_assert(lk->heldW);
         /* assert: lk is only held by one thread .. */
         tl_assert(VG_(sizeUniqueBag(lk->heldBy)) == 1);
         /* assert: .. and that thread is 'thr'. */
         tl_assert(VG_(elemBag)(lk->heldBy, (Word)thr)
                   == VG_(sizeTotalBag)(lk->heldBy));
         /* bump the bag multiplicity to record the recursion depth */
         VG_(addToBag)(lk->heldBy, (Word)thr);
         break;
      case LK_rdwr:
         tl_assert(lk->heldBy == NULL && !lk->heldW); /* must be unheld */
         goto case_LK_nonRec;
      default:
         tl_assert(0);
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
267
/* Update 'lk' to reflect that 'thr' now has a read-acquisition of
   it.  Only LK_rdwr locks may be read-acquired, and only while free
   or already read-held; anything else fails an assertion. */
static void lockN_acquire_reader ( Lock* lk, Thread* thr )
{
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* can only add reader to a reader-writer lock. */
   tl_assert(lk->kind == LK_rdwr);
   /* lk must be free or already r-held. */
   tl_assert(lk->heldBy == NULL
             || (lk->heldBy != NULL && !lk->heldW));

   stats__lockN_acquires++;

   /* EXPOSITION only */
   /* We need to keep recording snapshots of where the lock was
      acquired, so as to produce better lock-order error messages. */
   if (lk->acquired_at == NULL) {
      /* first acquisition since last being free: record where */
      ThreadId tid;
      tl_assert(lk->heldBy == NULL);
      tid = map_threads_maybe_reverse_lookup_SLOW(thr);
      lk->acquired_at
         = VG_(record_ExeContext)(tid, 0/*first_ip_delta*/);
   } else {
      tl_assert(lk->heldBy != NULL);
   }
   /* end EXPOSITION only */

   if (lk->heldBy) {
      /* already r-held: just add 'thr' to the holder bag */
      VG_(addToBag)(lk->heldBy, (Word)thr);
   } else {
      /* first reader: create the holder bag */
      lk->heldW = False;
      lk->heldBy = VG_(newBag)( HG_(zalloc), "hg.lNar.1", HG_(free) );
      VG_(addToBag)( lk->heldBy, (Word)thr );
   }
   tl_assert(!lk->heldW);
   tl_assert(HG_(is_sane_LockN)(lk));
}
304
/* Update 'lk' to reflect a release of it by 'thr'.  This is done
   strictly: only combinations resulting from correct program and
   libpthread behaviour are allowed.  For a recursively-held lock
   this removes one level of holding; the lock only becomes free
   when the holder bag empties. */
static void lockN_release ( Lock* lk, Thread* thr )
{
   Bool b;
   tl_assert(HG_(is_sane_LockN)(lk));
   tl_assert(HG_(is_sane_Thread)(thr));
   /* lock must be held by someone */
   tl_assert(lk->heldBy);
   stats__lockN_releases++;
   /* Remove it from the holder set */
   b = VG_(delFromBag)(lk->heldBy, (Word)thr);
   /* thr must actually have been a holder of lk */
   tl_assert(b);
   /* normalise */
   tl_assert(lk->acquired_at);
   if (VG_(isEmptyBag)(lk->heldBy)) {
      /* last holder gone: return the lock to the canonical unheld
         state (heldBy == NULL, no acquire snapshot) */
      VG_(deleteBag)(lk->heldBy);
      lk->heldBy = NULL;
      lk->heldW = False;
      lk->acquired_at = NULL;
   }
   tl_assert(HG_(is_sane_LockN)(lk));
}
331
/* Remove 'lk' from the locksetA (and, if write-held, locksetW) of
   every thread currently holding it.  No-op if the lock is unheld.
   Asserts that each holder's locksets actually contained 'lk'. */
static void remove_Lock_from_locksets_of_all_owning_Threads( Lock* lk )
{
   Thread* thr;
   if (!lk->heldBy) {
      tl_assert(!lk->heldW);
      return;
   }
   /* for each thread that holds this lock do ... */
   VG_(initIterBag)( lk->heldBy );
   while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, NULL )) {
      tl_assert(HG_(is_sane_Thread)(thr));
      tl_assert(HG_(elemWS)( univ_lsets,
                             thr->locksetA, (Word)lk ));
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lk );

      if (lk->heldW) {
         /* write-held locks additionally appear in locksetW */
         tl_assert(HG_(elemWS)( univ_lsets,
                                thr->locksetW, (Word)lk ));
         thr->locksetW
            = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lk );
      }
   }
   VG_(doneIterBag)( lk->heldBy );
}
357
sewardjb4112022007-11-09 22:49:28 +0000358
359/*----------------------------------------------------------------*/
360/*--- Print out the primary data structures ---*/
361/*----------------------------------------------------------------*/
362
sewardjd52392d2008-11-08 20:36:26 +0000363//static WordSetID del_BHL ( WordSetID lockset ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +0000364
365#define PP_THREADS (1<<1)
366#define PP_LOCKS (1<<2)
sewardjf98e1c02008-10-25 16:22:41 +0000367#define PP_ALL (PP_THREADS | PP_LOCKS)
sewardjb4112022007-11-09 22:49:28 +0000368
369
370static const Int sHOW_ADMIN = 0;
371
372static void space ( Int n )
373{
374 Int i;
375 Char spaces[128+1];
376 tl_assert(n >= 0 && n < 128);
377 if (n == 0)
378 return;
379 for (i = 0; i < n; i++)
380 spaces[i] = ' ';
381 spaces[i] = 0;
382 tl_assert(i < 128+1);
383 VG_(printf)("%s", spaces);
384}
385
386static void pp_Thread ( Int d, Thread* t )
387{
388 space(d+0); VG_(printf)("Thread %p {\n", t);
389 if (sHOW_ADMIN) {
390 space(d+3); VG_(printf)("admin %p\n", t->admin);
391 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)t->magic);
392 }
393 space(d+3); VG_(printf)("locksetA %d\n", (Int)t->locksetA);
394 space(d+3); VG_(printf)("locksetW %d\n", (Int)t->locksetW);
sewardjb4112022007-11-09 22:49:28 +0000395 space(d+0); VG_(printf)("}\n");
396}
397
398static void pp_admin_threads ( Int d )
399{
400 Int i, n;
401 Thread* t;
402 for (n = 0, t = admin_threads; t; n++, t = t->admin) {
403 /* nothing */
404 }
405 space(d); VG_(printf)("admin_threads (%d records) {\n", n);
406 for (i = 0, t = admin_threads; t; i++, t = t->admin) {
407 if (0) {
408 space(n);
409 VG_(printf)("admin_threads record %d of %d:\n", i, n);
410 }
411 pp_Thread(d+3, t);
412 }
barta0b6b2c2008-07-07 06:49:24 +0000413 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000414}
415
416static void pp_map_threads ( Int d )
417{
njn4c245e52009-03-15 23:25:38 +0000418 Int i, n = 0;
sewardjb4112022007-11-09 22:49:28 +0000419 space(d); VG_(printf)("map_threads ");
sewardjb4112022007-11-09 22:49:28 +0000420 for (i = 0; i < VG_N_THREADS; i++) {
421 if (map_threads[i] != NULL)
422 n++;
423 }
424 VG_(printf)("(%d entries) {\n", n);
425 for (i = 0; i < VG_N_THREADS; i++) {
426 if (map_threads[i] == NULL)
427 continue;
428 space(d+3);
429 VG_(printf)("coretid %d -> Thread %p\n", i, map_threads[i]);
430 }
431 space(d); VG_(printf)("}\n");
432}
433
434static const HChar* show_LockKind ( LockKind lkk ) {
435 switch (lkk) {
436 case LK_mbRec: return "mbRec";
437 case LK_nonRec: return "nonRec";
438 case LK_rdwr: return "rdwr";
439 default: tl_assert(0);
440 }
441}
442
443static void pp_Lock ( Int d, Lock* lk )
444{
barta0b6b2c2008-07-07 06:49:24 +0000445 space(d+0); VG_(printf)("Lock %p (ga %#lx) {\n", lk, lk->guestaddr);
sewardjb4112022007-11-09 22:49:28 +0000446 if (sHOW_ADMIN) {
447 space(d+3); VG_(printf)("admin %p\n", lk->admin);
448 space(d+3); VG_(printf)("magic 0x%x\n", (UInt)lk->magic);
449 }
450 space(d+3); VG_(printf)("unique %llu\n", lk->unique);
451 space(d+3); VG_(printf)("kind %s\n", show_LockKind(lk->kind));
452 space(d+3); VG_(printf)("heldW %s\n", lk->heldW ? "yes" : "no");
453 space(d+3); VG_(printf)("heldBy %p", lk->heldBy);
454 if (lk->heldBy) {
455 Thread* thr;
456 Word count;
457 VG_(printf)(" { ");
sewardj896f6f92008-08-19 08:38:52 +0000458 VG_(initIterBag)( lk->heldBy );
459 while (VG_(nextIterBag)( lk->heldBy, (Word*)&thr, &count ))
sewardjb4112022007-11-09 22:49:28 +0000460 VG_(printf)("%lu:%p ", count, thr);
sewardj896f6f92008-08-19 08:38:52 +0000461 VG_(doneIterBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +0000462 VG_(printf)("}");
463 }
464 VG_(printf)("\n");
465 space(d+0); VG_(printf)("}\n");
466}
467
468static void pp_admin_locks ( Int d )
469{
470 Int i, n;
471 Lock* lk;
472 for (n = 0, lk = admin_locks; lk; n++, lk = lk->admin) {
473 /* nothing */
474 }
475 space(d); VG_(printf)("admin_locks (%d records) {\n", n);
476 for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin) {
477 if (0) {
478 space(n);
479 VG_(printf)("admin_locks record %d of %d:\n", i, n);
480 }
481 pp_Lock(d+3, lk);
482 }
barta0b6b2c2008-07-07 06:49:24 +0000483 space(d); VG_(printf)("}\n");
sewardjb4112022007-11-09 22:49:28 +0000484}
485
486static void pp_map_locks ( Int d )
487{
488 void* gla;
489 Lock* lk;
490 space(d); VG_(printf)("map_locks (%d entries) {\n",
sewardj896f6f92008-08-19 08:38:52 +0000491 (Int)VG_(sizeFM)( map_locks ));
492 VG_(initIterFM)( map_locks );
493 while (VG_(nextIterFM)( map_locks, (Word*)&gla,
sewardjb5f29642007-11-16 12:02:43 +0000494 (Word*)&lk )) {
sewardjb4112022007-11-09 22:49:28 +0000495 space(d+3);
496 VG_(printf)("guest %p -> Lock %p\n", gla, lk);
497 }
sewardj896f6f92008-08-19 08:38:52 +0000498 VG_(doneIterFM)( map_locks );
sewardjb4112022007-11-09 22:49:28 +0000499 space(d); VG_(printf)("}\n");
500}
501
sewardjb4112022007-11-09 22:49:28 +0000502static void pp_everything ( Int flags, Char* caller )
503{
504 Int d = 0;
505 VG_(printf)("\n");
506 VG_(printf)("All_Data_Structures (caller = \"%s\") {\n", caller);
507 if (flags & PP_THREADS) {
508 VG_(printf)("\n");
509 pp_admin_threads(d+3);
510 VG_(printf)("\n");
511 pp_map_threads(d+3);
512 }
513 if (flags & PP_LOCKS) {
514 VG_(printf)("\n");
515 pp_admin_locks(d+3);
516 VG_(printf)("\n");
517 pp_map_locks(d+3);
518 }
sewardjb4112022007-11-09 22:49:28 +0000519
520 VG_(printf)("\n");
521 VG_(printf)("}\n");
522 VG_(printf)("\n");
523}
524
525#undef SHOW_ADMIN
526
527
528/*----------------------------------------------------------------*/
529/*--- Initialise the primary data structures ---*/
530/*----------------------------------------------------------------*/
531
/* One-time setup of all the primary data structures: the thread and
   lock admin lists, the tid->Thread and addr->Lock maps, the three
   word-set universes, the bus-hardware-lock pseudo-lock, and the
   Thread record for the root thread (which wraps 'hbthr_root').
   Must be called exactly once, before any events are processed. */
static void initialise_data_structures ( Thr* hbthr_root )
{
   Thread* thr;

   /* Get everything initialised and zeroed. */
   tl_assert(admin_threads == NULL);
   tl_assert(admin_locks == NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));

   tl_assert(map_threads == NULL);
   map_threads = HG_(zalloc)( "hg.ids.1", VG_N_THREADS * sizeof(Thread*) );
   tl_assert(map_threads != NULL);

   tl_assert(sizeof(Addr) == sizeof(Word));
   tl_assert(map_locks == NULL);
   map_locks = VG_(newFM)( HG_(zalloc), "hg.ids.2", HG_(free),
                           NULL/*unboxed Word cmp*/);
   tl_assert(map_locks != NULL);

   /* pseudo-lock modelling the hardware bus lock; keyed in map_locks
      by the address of the __bus_lock variable */
   __bus_lock_Lock = mk_LockN( LK_nonRec, (Addr)&__bus_lock );
   tl_assert(HG_(is_sane_LockN)(__bus_lock_Lock));
   VG_(addToFM)( map_locks, (Word)&__bus_lock, (Word)__bus_lock_Lock );

   tl_assert(univ_tsets == NULL);
   univ_tsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.3", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_tsets != NULL);

   tl_assert(univ_lsets == NULL);
   univ_lsets = HG_(newWordSetU)( HG_(zalloc), "hg.ids.4", HG_(free),
                                  8/*cacheSize*/ );
   tl_assert(univ_lsets != NULL);

   tl_assert(univ_laog == NULL);
   univ_laog = HG_(newWordSetU)( HG_(zalloc), "hg.ids.5 (univ_laog)",
                                 HG_(free), 24/*cacheSize*/ );
   tl_assert(univ_laog != NULL);

   /* Set up entries for the root thread */
   // FIXME: this assumes that the first real ThreadId is 1

   /* a Thread for the new thread ... */
   thr = mk_Thread(hbthr_root);
   thr->coretid = 1; /* FIXME: hardwires an assumption about the
                        identity of the root thread. */
   /* the libhb thread must not already be bound to a Thread */
   tl_assert( libhb_get_Thr_opaque(hbthr_root) == NULL );
   libhb_set_Thr_opaque(hbthr_root, thr);

   /* and bind it in the thread-map table. */
   tl_assert(HG_(is_sane_ThreadId)(thr->coretid));
   tl_assert(thr->coretid != VG_INVALID_THREADID);

   map_threads[thr->coretid] = thr;

   /* slot 0 is reserved for the invalid ThreadId */
   tl_assert(VG_INVALID_THREADID == 0);

   /* Mark the new bus lock correctly (to stop the sanity checks
      complaining) */
   tl_assert( sizeof(__bus_lock) == 4 );

   all__sanity_check("initialise_data_structures");
}
595
596
597/*----------------------------------------------------------------*/
sewardjf98e1c02008-10-25 16:22:41 +0000598/*--- map_threads :: array[core-ThreadId] of Thread* ---*/
sewardjb4112022007-11-09 22:49:28 +0000599/*----------------------------------------------------------------*/
600
601/* Doesn't assert if the relevant map_threads entry is NULL. */
602static Thread* map_threads_maybe_lookup ( ThreadId coretid )
603{
604 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000605 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000606 thr = map_threads[coretid];
607 return thr;
608}
609
610/* Asserts if the relevant map_threads entry is NULL. */
611static inline Thread* map_threads_lookup ( ThreadId coretid )
612{
613 Thread* thr;
sewardjf98e1c02008-10-25 16:22:41 +0000614 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000615 thr = map_threads[coretid];
616 tl_assert(thr);
617 return thr;
618}
619
/* Do a reverse lookup.  Does not assert if 'thr' is not found in
   map_threads. */
static ThreadId map_threads_maybe_reverse_lookup_SLOW ( Thread* thr )
{
   ThreadId tid;
   tl_assert(HG_(is_sane_Thread)(thr));
   /* Check nobody used the invalid-threadid slot */
   tl_assert(VG_INVALID_THREADID >= 0 && VG_INVALID_THREADID < VG_N_THREADS);
   tl_assert(map_threads[VG_INVALID_THREADID] == NULL);
   /* Despite the _SLOW name, this is now cheap: each Thread caches
      its own coretid, so no scan of map_threads is needed. */
   tid = thr->coretid;
   tl_assert(HG_(is_sane_ThreadId)(tid));
   return tid;
}
633
634/* Do a reverse lookup. Warning: POTENTIALLY SLOW. Asserts if 'thr'
635 is not found in map_threads. */
636static ThreadId map_threads_reverse_lookup_SLOW ( Thread* thr )
637{
638 ThreadId tid = map_threads_maybe_reverse_lookup_SLOW( thr );
639 tl_assert(tid != VG_INVALID_THREADID);
sewardjf98e1c02008-10-25 16:22:41 +0000640 tl_assert(map_threads[tid]);
641 tl_assert(map_threads[tid]->coretid == tid);
sewardjb4112022007-11-09 22:49:28 +0000642 return tid;
643}
644
645static void map_threads_delete ( ThreadId coretid )
646{
647 Thread* thr;
648 tl_assert(coretid != 0);
sewardjf98e1c02008-10-25 16:22:41 +0000649 tl_assert( HG_(is_sane_ThreadId)(coretid) );
sewardjb4112022007-11-09 22:49:28 +0000650 thr = map_threads[coretid];
651 tl_assert(thr);
652 map_threads[coretid] = NULL;
653}
654
655
656/*----------------------------------------------------------------*/
657/*--- map_locks :: WordFM guest-Addr-of-lock Lock* ---*/
658/*----------------------------------------------------------------*/
659
660/* Make sure there is a lock table entry for the given (lock) guest
661 address. If not, create one of the stated 'kind' in unheld state.
662 In any case, return the address of the existing or new Lock. */
663static
664Lock* map_locks_lookup_or_create ( LockKind lkk, Addr ga, ThreadId tid )
665{
666 Bool found;
667 Lock* oldlock = NULL;
sewardjf98e1c02008-10-25 16:22:41 +0000668 tl_assert(HG_(is_sane_ThreadId)(tid));
sewardj896f6f92008-08-19 08:38:52 +0000669 found = VG_(lookupFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000670 NULL, (Word*)&oldlock, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000671 if (!found) {
672 Lock* lock = mk_LockN(lkk, ga);
673 lock->appeared_at = VG_(record_ExeContext)( tid, 0 );
sewardjf98e1c02008-10-25 16:22:41 +0000674 tl_assert(HG_(is_sane_LockN)(lock));
sewardj896f6f92008-08-19 08:38:52 +0000675 VG_(addToFM)( map_locks, (Word)ga, (Word)lock );
sewardjb4112022007-11-09 22:49:28 +0000676 tl_assert(oldlock == NULL);
sewardjb4112022007-11-09 22:49:28 +0000677 return lock;
678 } else {
679 tl_assert(oldlock != NULL);
sewardjf98e1c02008-10-25 16:22:41 +0000680 tl_assert(HG_(is_sane_LockN)(oldlock));
sewardjb4112022007-11-09 22:49:28 +0000681 tl_assert(oldlock->guestaddr == ga);
sewardjb4112022007-11-09 22:49:28 +0000682 return oldlock;
683 }
684}
685
686static Lock* map_locks_maybe_lookup ( Addr ga )
687{
688 Bool found;
689 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000690 found = VG_(lookupFM)( map_locks, NULL, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000691 tl_assert(found ? lk != NULL : lk == NULL);
sewardjb4112022007-11-09 22:49:28 +0000692 return lk;
693}
694
695static void map_locks_delete ( Addr ga )
696{
697 Addr ga2 = 0;
698 Lock* lk = NULL;
sewardj896f6f92008-08-19 08:38:52 +0000699 VG_(delFromFM)( map_locks,
sewardjb5f29642007-11-16 12:02:43 +0000700 (Word*)&ga2, (Word*)&lk, (Word)ga );
sewardjb4112022007-11-09 22:49:28 +0000701 /* delFromFM produces the val which is being deleted, if it is
702 found. So assert it is non-null; that in effect asserts that we
703 are deleting a (ga, Lock) pair which actually exists. */
704 tl_assert(lk != NULL);
705 tl_assert(ga2 == ga);
706}
707
708
sewardjb4112022007-11-09 22:49:28 +0000709
710/*----------------------------------------------------------------*/
711/*--- Sanity checking the data structures ---*/
712/*----------------------------------------------------------------*/
713
714static UWord stats__sanity_checks = 0;
715
sewardjb4112022007-11-09 22:49:28 +0000716static void laog__sanity_check ( Char* who ); /* fwds */
717
718/* REQUIRED INVARIANTS:
719
720 Thread vs Segment/Lock/SecMaps
721
722 for each t in Threads {
723
724 // Thread.lockset: each element is really a valid Lock
725
726 // Thread.lockset: each Lock in set is actually held by that thread
727 for lk in Thread.lockset
728 lk == LockedBy(t)
729
730 // Thread.csegid is a valid SegmentID
731 // and the associated Segment has .thr == t
732
733 }
734
735 all thread Locksets are pairwise empty under intersection
736 (that is, no lock is claimed to be held by more than one thread)
737 -- this is guaranteed if all locks in locksets point back to their
738 owner threads
739
740 Lock vs Thread/Segment/SecMaps
741
742 for each entry (gla, la) in map_locks
743 gla == la->guest_addr
744
745 for each lk in Locks {
746
747 lk->tag is valid
748 lk->guest_addr does not have shadow state NoAccess
749 if lk == LockedBy(t), then t->lockset contains lk
750 if lk == UnlockedBy(segid) then segid is valid SegmentID
751 and can be mapped to a valid Segment(seg)
752 and seg->thr->lockset does not contain lk
753 if lk == UnlockedNew then (no lockset contains lk)
754
755 secmaps for lk has .mbHasLocks == True
756
757 }
758
759 Segment vs Thread/Lock/SecMaps
760
761 the Segment graph is a dag (no cycles)
762 all of the Segment graph must be reachable from the segids
763 mentioned in the Threads
764
765 for seg in Segments {
766
767 seg->thr is a sane Thread
768
769 }
770
771 SecMaps vs Segment/Thread/Lock
772
773 for sm in SecMaps {
774
775 sm properly aligned
776 if any shadow word is ShR or ShM then .mbHasShared == True
777
778 for each Excl(segid) state
779 map_segments_lookup maps to a sane Segment(seg)
780 for each ShM/ShR(tsetid,lsetid) state
781 each lk in lset is a valid Lock
782 each thr in tset is a valid thread, which is non-dead
783
784 }
785*/
786
787
788/* Return True iff 'thr' holds 'lk' in some mode. */
789static Bool thread_is_a_holder_of_Lock ( Thread* thr, Lock* lk )
790{
791 if (lk->heldBy)
sewardj896f6f92008-08-19 08:38:52 +0000792 return VG_(elemBag)( lk->heldBy, (Word)thr ) > 0;
sewardjb4112022007-11-09 22:49:28 +0000793 else
794 return False;
795}
796
/* Sanity check Threads, as far as possible: every Thread on the
   admin list must be sane, its write-lockset must be a subset of its
   full lockset, and every Lock in its lockset must be sane and must
   actually record this thread as a holder.  On failure, prints the
   caller tag 'who' and the failing check's code, then aborts. */
__attribute__((noinline))
static void threads__sanity_check ( Char* who )
{
   /* record which check failed and bail to the report code */
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Thread* thr;
   WordSetID wsA, wsW;
   UWord* ls_words;
   Word ls_size, i;
   Lock* lk;
   for (thr = admin_threads; thr; thr = thr->admin) {
      if (!HG_(is_sane_Thread)(thr)) BAD("1");
      wsA = thr->locksetA;
      wsW = thr->locksetW;
      // locks held in W mode are a subset of all locks held
      if (!HG_(isSubsetOf)( univ_lsets, wsW, wsA )) BAD("7");
      HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, wsA );
      for (i = 0; i < ls_size; i++) {
         lk = (Lock*)ls_words[i];
         // Thread.lockset: each element is really a valid Lock
         if (!HG_(is_sane_LockN)(lk)) BAD("2");
         // Thread.lockset: each Lock in set is actually held by that
         // thread
         if (!thread_is_a_holder_of_Lock(thr,lk)) BAD("3");
      }
   }
   return;
  bad:
   VG_(printf)("threads__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
830
831
/* Sanity check Locks, as far as possible */
/* Verifies the Lock-related invariants described in the big comment
   earlier in this file: admin_locks and map_locks must agree in size
   and contents, each Lock must be internally sane, and the holder
   information recorded in each Lock must be mirrored in the holding
   threads' locksets.  On the first violation, prints 'who' (the
   caller's tag) plus a short numeric code identifying the failed
   check, then asserts. */
__attribute__((noinline))
static void locks__sanity_check ( Char* who )
{
#define BAD(_str) do { how = (_str); goto bad; } while (0)
   Char* how = "no error";
   Addr gla;
   Lock* lk;
   Int i;
   // # entries in admin_locks == # entries in map_locks
   for (i = 0, lk = admin_locks; lk; i++, lk = lk->admin)
      ;
   if (i != VG_(sizeFM)(map_locks)) BAD("1");
   // for each entry (gla, lk) in map_locks
   //      gla == lk->guest_addr
   VG_(initIterFM)( map_locks );
   while (VG_(nextIterFM)( map_locks,
                           (Word*)&gla, (Word*)&lk )) {
      if (lk->guestaddr != gla) BAD("2");
   }
   VG_(doneIterFM)( map_locks );
   // scan through admin_locks ...
   for (lk = admin_locks; lk; lk = lk->admin) {
      // lock is sane.  Quite comprehensive, also checks that
      // referenced (holder) threads are sane.
      if (!HG_(is_sane_LockN)(lk)) BAD("3");
      // map_locks binds guest address back to this lock
      if (lk != map_locks_maybe_lookup(lk->guestaddr)) BAD("4");
      // look at all threads mentioned as holders of this lock.  Ensure
      // this lock is mentioned in their locksets.
      if (lk->heldBy) {
         Thread* thr;
         Word    count;
         VG_(initIterBag)( lk->heldBy );
         while (VG_(nextIterBag)( lk->heldBy,
                                  (Word*)&thr, &count )) {
            // HG_(is_sane_LockN) above ensures these
            tl_assert(count >= 1);
            tl_assert(HG_(is_sane_Thread)(thr));
            // each holder's all-held lockset must mention this lock
            if (!HG_(elemWS)(univ_lsets, thr->locksetA, (Word)lk))
               BAD("6");
            // also check the w-only lockset: it must mention the lock
            // iff the lock is currently w-held
            if (lk->heldW
                && !HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("7");
            if ((!lk->heldW)
                && HG_(elemWS)(univ_lsets, thr->locksetW, (Word)lk))
               BAD("8");
         }
         VG_(doneIterBag)( lk->heldBy );
      } else {
         /* lock not held by anybody */
         if (lk->heldW) BAD("9"); /* should be False if !heldBy */
         // since lk is unheld, then (no lockset contains lk)
         // hmm, this is really too expensive to check.  Hmm.
      }
   }

   return;
  bad:
   VG_(printf)("locks__sanity_check: who=\"%s\", bad=\"%s\"\n", who, how);
   tl_assert(0);
#undef BAD
}
896
897
sewardjb4112022007-11-09 22:49:28 +0000898static void all_except_Locks__sanity_check ( Char* who ) {
899 stats__sanity_checks++;
900 if (0) VG_(printf)("all_except_Locks__sanity_check(%s)\n", who);
901 threads__sanity_check(who);
sewardjb4112022007-11-09 22:49:28 +0000902 laog__sanity_check(who);
903}
904static void all__sanity_check ( Char* who ) {
905 all_except_Locks__sanity_check(who);
906 locks__sanity_check(who);
907}
908
909
910/*----------------------------------------------------------------*/
911/*--- the core memory state machine (msm__* functions) ---*/
912/*----------------------------------------------------------------*/
913
sewardjd52392d2008-11-08 20:36:26 +0000914//static WordSetID add_BHL ( WordSetID lockset ) {
915// return HG_(addToWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
916//}
917//static WordSetID del_BHL ( WordSetID lockset ) {
918// return HG_(delFromWS)( univ_lsets, lockset, (Word)__bus_lock_Lock );
919//}
sewardjb4112022007-11-09 22:49:28 +0000920
921
sewardjd52392d2008-11-08 20:36:26 +0000922///* Last-lock-lossage records. This mechanism exists to help explain
923// to programmers why we are complaining about a race. The idea is to
924// monitor all lockset transitions. When a previously nonempty
925// lockset becomes empty, the lock(s) that just disappeared (the
926// "lossage") are the locks that have consistently protected the
927// location (ga_of_access) in question for the longest time. Most of
928// the time the lossage-set is a single lock. Because the
// lossage-lock is the one that has survived longest, there is
// a good chance that it is indeed the lock that the programmer
931// intended to use to protect the location.
932//
933// Note that we cannot in general just look at the lossage set when we
934// see a transition to ShM(...,empty-set), because a transition to an
935// empty lockset can happen arbitrarily far before the point where we
936// want to report an error. This is in the case where there are many
937// transitions ShR -> ShR, all with an empty lockset, and only later
938// is there a transition to ShM. So what we want to do is note the
939// lossage lock at the point where a ShR -> ShR transition empties out
940// the lockset, so we can present it later if there should be a
941// transition to ShM.
942//
943// So this function finds such transitions. For each, it associates
944// in ga_to_lastlock, the guest address and the lossage lock. In fact
945// we do not record the Lock* directly as that may disappear later,
946// but instead the ExeContext inside the Lock which says where it was
947// initialised or first locked. ExeContexts are permanent so keeping
948// them indefinitely is safe.
949//
950// A boring detail: the hardware bus lock is not interesting in this
951// respect, so we first remove that from the pre/post locksets.
952//*/
953//
954//static UWord stats__ga_LL_adds = 0;
955//
956//static WordFM* ga_to_lastlock = NULL; /* GuestAddr -> ExeContext* */
957//
958//static
959//void record_last_lock_lossage ( Addr ga_of_access,
960// WordSetID lset_old, WordSetID lset_new )
961//{
962// Lock* lk;
963// Int card_old, card_new;
964//
965// tl_assert(lset_old != lset_new);
966//
967// if (0) VG_(printf)("XX1: %d (card %ld) -> %d (card %ld) %#lx\n",
968// (Int)lset_old,
969// HG_(cardinalityWS)(univ_lsets,lset_old),
970// (Int)lset_new,
971// HG_(cardinalityWS)(univ_lsets,lset_new),
972// ga_of_access );
973//
974// /* This is slow, but at least it's simple. The bus hardware lock
975// just confuses the logic, so remove it from the locksets we're
976// considering before doing anything else. */
977// lset_new = del_BHL( lset_new );
978//
979// if (!HG_(isEmptyWS)( univ_lsets, lset_new )) {
980// /* The post-transition lock set is not empty. So we are not
981// interested. We're only interested in spotting transitions
982// that make locksets become empty. */
983// return;
984// }
985//
986// /* lset_new is now empty */
987// card_new = HG_(cardinalityWS)( univ_lsets, lset_new );
988// tl_assert(card_new == 0);
989//
990// lset_old = del_BHL( lset_old );
991// card_old = HG_(cardinalityWS)( univ_lsets, lset_old );
992//
993// if (0) VG_(printf)(" X2: %d (card %d) -> %d (card %d)\n",
994// (Int)lset_old, card_old, (Int)lset_new, card_new );
995//
996// if (card_old == 0) {
997// /* The old lockset was also empty. Not interesting. */
998// return;
999// }
1000//
1001// tl_assert(card_old > 0);
1002// tl_assert(!HG_(isEmptyWS)( univ_lsets, lset_old ));
1003//
1004// /* Now we know we've got a transition from a nonempty lockset to an
1005// empty one. So lset_old must be the set of locks lost. Record
1006// some details. If there is more than one element in the lossage
1007// set, just choose one arbitrarily -- not the best, but at least
1008// it's simple. */
1009//
1010// lk = (Lock*)HG_(anyElementOfWS)( univ_lsets, lset_old );
1011// if (0) VG_(printf)("lossage %ld %p\n",
1012// HG_(cardinalityWS)( univ_lsets, lset_old), lk );
1013// if (lk->appeared_at) {
1014// if (ga_to_lastlock == NULL)
1015// ga_to_lastlock = VG_(newFM)( HG_(zalloc), "hg.rlll.1", HG_(free), NULL );
1016// VG_(addToFM)( ga_to_lastlock, ga_of_access, (Word)lk->appeared_at );
1017// stats__ga_LL_adds++;
1018// }
1019//}
1020//
1021///* This queries the table (ga_to_lastlock) made by
1022// record_last_lock_lossage, when constructing error messages. It
1023// attempts to find the ExeContext of the allocation or initialisation
1024// point for the lossage lock associated with 'ga'. */
1025//
1026//static ExeContext* maybe_get_lastlock_initpoint ( Addr ga )
1027//{
1028// ExeContext* ec_hint = NULL;
1029// if (ga_to_lastlock != NULL
1030// && VG_(lookupFM)(ga_to_lastlock,
1031// NULL, (Word*)&ec_hint, ga)) {
1032// tl_assert(ec_hint != NULL);
1033// return ec_hint;
1034// } else {
1035// return NULL;
1036// }
1037//}
sewardjb4112022007-11-09 22:49:28 +00001038
1039
sewardjb4112022007-11-09 22:49:28 +00001040/*----------------------------------------------------------------*/
1041/*--- Shadow value and address range handlers ---*/
1042/*----------------------------------------------------------------*/
1043
1044static void laog__pre_thread_acquires_lock ( Thread*, Lock* ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001045//static void laog__handle_lock_deletions ( WordSetID ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001046static inline Thread* get_current_Thread ( void ); /* fwds */
sewardj1cbc12f2008-11-10 16:16:46 +00001047__attribute__((noinline))
1048static void laog__handle_one_lock_deletion ( Lock* lk ); /* fwds */
sewardjb4112022007-11-09 22:49:28 +00001049
sewardjb4112022007-11-09 22:49:28 +00001050
1051/* Block-copy states (needed for implementing realloc()). */
sewardj23f12002009-07-24 08:45:08 +00001052/* FIXME this copies shadow memory; it doesn't apply the MSM to it.
1053 Is that a problem? (hence 'scopy' rather than 'ccopy') */
1054static void shadow_mem_scopy_range ( Thread* thr,
1055 Addr src, Addr dst, SizeT len )
sewardjf98e1c02008-10-25 16:22:41 +00001056{
1057 Thr* hbthr = thr->hbthr;
1058 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001059 libhb_copy_shadow_state( hbthr, src, dst, len );
sewardjb4112022007-11-09 22:49:28 +00001060}
1061
sewardj23f12002009-07-24 08:45:08 +00001062static void shadow_mem_cread_range ( Thread* thr, Addr a, SizeT len )
1063{
sewardjf98e1c02008-10-25 16:22:41 +00001064 Thr* hbthr = thr->hbthr;
1065 tl_assert(hbthr);
sewardj23f12002009-07-24 08:45:08 +00001066 LIBHB_CREAD_N(hbthr, a, len);
1067}
1068
1069static void shadow_mem_cwrite_range ( Thread* thr, Addr a, SizeT len ) {
1070 Thr* hbthr = thr->hbthr;
1071 tl_assert(hbthr);
1072 LIBHB_CWRITE_N(hbthr, a, len);
sewardjb4112022007-11-09 22:49:28 +00001073}
1074
1075static void shadow_mem_make_New ( Thread* thr, Addr a, SizeT len )
1076{
sewardj23f12002009-07-24 08:45:08 +00001077 libhb_srange_new( thr->hbthr, a, len );
sewardjb4112022007-11-09 22:49:28 +00001078}
1079
sewardjb4112022007-11-09 22:49:28 +00001080static void shadow_mem_make_NoAccess ( Thread* thr, Addr aIN, SizeT len )
1081{
sewardjb4112022007-11-09 22:49:28 +00001082 if (0 && len > 500)
barta0b6b2c2008-07-07 06:49:24 +00001083 VG_(printf)("make NoAccess ( %#lx, %ld )\n", aIN, len );
sewardj23f12002009-07-24 08:45:08 +00001084 libhb_srange_noaccess( thr->hbthr, aIN, len );
sewardjb4112022007-11-09 22:49:28 +00001085}
1086
sewardj406bac82010-03-03 23:03:40 +00001087static void shadow_mem_make_Untracked ( Thread* thr, Addr aIN, SizeT len )
1088{
1089 if (0 && len > 500)
1090 VG_(printf)("make Untracked ( %#lx, %ld )\n", aIN, len );
1091 libhb_srange_untrack( thr->hbthr, aIN, len );
1092}
1093
sewardjb4112022007-11-09 22:49:28 +00001094
1095/*----------------------------------------------------------------*/
1096/*--- Event handlers (evh__* functions) ---*/
1097/*--- plus helpers (evhH__* functions) ---*/
1098/*----------------------------------------------------------------*/
1099
1100/*--------- Event handler helpers (evhH__* functions) ---------*/
1101
1102/* Create a new segment for 'thr', making it depend (.prev) on its
1103 existing segment, bind together the SegmentID and Segment, and
1104 return both of them. Also update 'thr' so it references the new
1105 Segment. */
sewardjf98e1c02008-10-25 16:22:41 +00001106//zz static
1107//zz void evhH__start_new_segment_for_thread ( /*OUT*/SegmentID* new_segidP,
1108//zz /*OUT*/Segment** new_segP,
1109//zz Thread* thr )
1110//zz {
1111//zz Segment* cur_seg;
1112//zz tl_assert(new_segP);
1113//zz tl_assert(new_segidP);
1114//zz tl_assert(HG_(is_sane_Thread)(thr));
1115//zz cur_seg = map_segments_lookup( thr->csegid );
1116//zz tl_assert(cur_seg);
1117//zz tl_assert(cur_seg->thr == thr); /* all sane segs should point back
1118//zz at their owner thread. */
1119//zz *new_segP = mk_Segment( thr, cur_seg, NULL/*other*/ );
1120//zz *new_segidP = alloc_SegmentID();
1121//zz map_segments_add( *new_segidP, *new_segP );
1122//zz thr->csegid = *new_segidP;
1123//zz }
sewardjb4112022007-11-09 22:49:28 +00001124
1125
/* The lock at 'lock_ga' has acquired a writer.  Make all necessary
   updates, and also do all possible error checks. */
/* Called after the client's lock-acquire has already succeeded.
   'thr' is the acquiring thread, 'lkk' the expected lock kind, and
   'lock_ga' the lock's guest address.  Detected inconsistencies are
   reported via HG_(record_error_Misc) and, on those error paths, the
   thread's locksets are deliberately left unchanged. */
static
void evhH__post_thread_w_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_writer.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'. */
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_writer( lk, thr );
      /* acquire a dependency from the lock's VCs */
      libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a r-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (!lk->heldW) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on rwlock which is currently rd-held");
      goto error;
   }

   /* So the lock is held in w-mode.  If it's held by some other
      thread, then libpthread must be buggy. */
   tl_assert(VG_(sizeUniqueBag)(lk->heldBy) == 1); /* from precondition */

   if (thr != (Thread*)VG_(anyElementOfBag)(lk->heldBy)) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: write lock "
              "granted on mutex/rwlock which is currently "
              "wr-held by a different thread");
      goto error;
   }

   /* So the lock is already held in w-mode by 'thr'.  That means this
      is an attempt to lock it recursively, which is only allowable
      for LK_mbRec kinded locks.  Since this routine is called only
      once the lock has been acquired, this must also be a libpthread
      bug. */
   if (lk->kind != LK_mbRec) {
      HG_(record_error_Misc)(
         thr, "Bug in libpthread: recursive write lock "
              "granted on mutex/wrlock which does not "
              "support recursion");
      goto error;
   }

   /* So we are recursively re-locking a lock we already w-hold. */
   lockN_acquire_writer( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, True/*strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   thr->locksetW = HG_(addToWS)( univ_lsets, thr->locksetW, (Word)lk );
   /* fall through */

  error:
   /* Note: on the error paths above, the locksets are intentionally
      not updated; only the sanity postcondition is re-checked. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1219
1220
/* The lock at 'lock_ga' has acquired a reader.  Make all necessary
   updates, and also do all possible error checks. */
/* Called after the client's read-lock acquire has already succeeded.
   Only LK_rdwr locks can be read-locked (asserted below).  Detected
   inconsistencies are reported via HG_(record_error_Misc) and, on
   those error paths, the thread's locksets are deliberately left
   unchanged. */
static
void evhH__post_thread_r_acquires_lock ( Thread* thr,
                                         LockKind lkk, Addr lock_ga )
{
   Lock* lk;

   /* Basically what we need to do is call lockN_acquire_reader.
      However, that will barf if any 'invalid' lock states would
      result.  Therefore check before calling.  Side effect is that
      'HG_(is_sane_LockN)(lk)' is both a pre- and post-condition of this
      routine.

      Because this routine is only called after successful lock
      acquisition, we should not be asked to move the lock into any
      invalid states.  Requests to do so are bugs in libpthread, since
      that should have rejected any such requests. */

   tl_assert(HG_(is_sane_Thread)(thr));
   /* Try to find the lock.  If we can't, then create a new one with
      kind 'lkk'.  Only a reader-writer lock can be read-locked,
      hence the first assertion. */
   tl_assert(lkk == LK_rdwr);
   lk = map_locks_lookup_or_create( 
           lkk, lock_ga, map_threads_reverse_lookup_SLOW(thr) );
   tl_assert( HG_(is_sane_LockN)(lk) );

   /* check libhb level entities exist */
   tl_assert(thr->hbthr);
   tl_assert(lk->hbso);

   if (lk->heldBy == NULL) {
      /* the lock isn't held.  Simple. */
      tl_assert(!lk->heldW);
      lockN_acquire_reader( lk, thr );
      /* acquire a dependency from the lock's VC */
      libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
      goto noerror;
   }

   /* So the lock is already held.  If held as a w-lock then
      libpthread must be buggy. */
   tl_assert(lk->heldBy);
   if (lk->heldW) {
      HG_(record_error_Misc)( thr, "Bug in libpthread: read lock "
                                   "granted on rwlock which is "
                                   "currently wr-held");
      goto error;
   }

   /* Easy enough.  In short anybody can get a read-lock on a rwlock
      provided it is either unlocked or already in rd-held. */
   lockN_acquire_reader( lk, thr );
   /* acquire a dependency from the lock's VC.  Probably pointless,
      but also harmless. */
   libhb_so_recv( thr->hbthr, lk->hbso, False/*!strong_recv*/ );
   goto noerror;

  noerror:
   /* check lock order acquisition graph, and update.  This has to
      happen before the lock is added to the thread's locksetA/W. */
   laog__pre_thread_acquires_lock( thr, lk );
   /* update the thread's held-locks set */
   thr->locksetA = HG_(addToWS)( univ_lsets, thr->locksetA, (Word)lk );
   /* but don't update thr->locksetW, since lk is only rd-held */
   /* fall through */

  error:
   /* On error paths the locksets are intentionally left untouched. */
   tl_assert(HG_(is_sane_LockN)(lk));
}
1292
1293
/* The lock at 'lock_ga' is just about to be unlocked.  Make all
   necessary updates, and also do all possible error checks. */
/* Unlike the acquire handlers, this runs BEFORE libpthread has
   validated the unlock, so bogus unlocks (unknown lock, unheld lock,
   lock held by another thread) must be detected here, reported, and
   then ignored.  On a genuine final release, the thread's locksets
   are updated and the thread's vector clock is pushed into the
   lock's SO (strong send if it was w-held, weak if r-held). */
static
void evhH__pre_thread_releases_lock ( Thread* thr,
                                      Addr lock_ga, Bool isRDWR )
{
   Lock* lock;
   Word  n;
   Bool  was_heldW;

   /* This routine is called prior to a lock release, before
      libpthread has had a chance to validate the call.  Hence we need
      to detect and reject any attempts to move the lock into an
      invalid state.  Such attempts are bugs in the client.

      isRDWR is True if we know from the wrapper context that lock_ga
      should refer to a reader-writer lock, and is False if [ditto]
      lock_ga should refer to a standard mutex. */

   tl_assert(HG_(is_sane_Thread)(thr));
   lock = map_locks_maybe_lookup( lock_ga );

   if (!lock) {
      /* We know nothing about a lock at 'lock_ga'.  Nevertheless
         the client is trying to unlock it.  So complain, then ignore
         the attempt. */
      HG_(record_error_UnlockBogus)( thr, lock_ga );
      return;
   }

   tl_assert(lock->guestaddr == lock_ga);
   tl_assert(HG_(is_sane_LockN)(lock));

   /* Complain (but continue) if the wrapper context and the lock's
      recorded kind disagree about rwlock-ness. */
   if (isRDWR && lock->kind != LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_rwlock_unlock with a "
                                   "pthread_mutex_t* argument " );
   }
   if ((!isRDWR) && lock->kind == LK_rdwr) {
      HG_(record_error_Misc)( thr, "pthread_mutex_unlock with a "
                                   "pthread_rwlock_t* argument " );
   }

   if (!lock->heldBy) {
      /* The lock is not held.  This indicates a serious bug in the
         client. */
      tl_assert(!lock->heldW);
      HG_(record_error_UnlockUnlocked)( thr, lock );
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      goto error;
   }

   /* test just above dominates */
   tl_assert(lock->heldBy);
   /* remember the held-mode now; lockN_release below may change it */
   was_heldW = lock->heldW;

   /* The lock is held.  Is this thread one of the holders?  If not,
      report a bug in the client. */
   n = VG_(elemBag)( lock->heldBy, (Word)thr );
   tl_assert(n >= 0);
   if (n == 0) {
      /* We are not a current holder of the lock.  This is a bug in
         the guest, and (per POSIX pthread rules) the unlock
         attempt will fail.  So just complain and do nothing
         else. */
      Thread* realOwner = (Thread*)VG_(anyElementOfBag)( lock->heldBy );
      tl_assert(HG_(is_sane_Thread)(realOwner));
      tl_assert(realOwner != thr);
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      HG_(record_error_UnlockForeign)( thr, realOwner, lock );
      goto error;
   }

   /* Ok, we hold the lock 'n' times. */
   tl_assert(n >= 1);

   lockN_release( lock, thr );

   n--;
   tl_assert(n >= 0);

   if (n > 0) {
      tl_assert(lock->heldBy);
      tl_assert(n == VG_(elemBag)( lock->heldBy, (Word)thr )); 
      /* We still hold the lock.  So either it's a recursive lock
         or a rwlock which is currently r-held. */
      tl_assert(lock->kind == LK_mbRec
                || (lock->kind == LK_rdwr && !lock->heldW));
      tl_assert(HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lock ));
      if (lock->heldW)
         tl_assert(HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
      else
         tl_assert(!HG_(elemWS)( univ_lsets, thr->locksetW, (Word)lock ));
   } else {
      /* n is zero.  This means we don't hold the lock any more.  But
         if it's a rwlock held in r-mode, someone else could still
         hold it.  Just do whatever sanity checks we can. */
      if (lock->kind == LK_rdwr && lock->heldBy) {
         /* It's a rwlock.  We no longer hold it but we used to;
            nevertheless it still appears to be held by someone else.
            The implication is that, prior to this release, it must
            have been shared by us and whoever else is holding it;
            which in turn implies it must be r-held, since a lock
            can't be w-held by more than one thread. */
         /* The lock is now R-held by somebody else: */
         tl_assert(lock->heldW == False);
      } else {
         /* Normal case.  It's either not a rwlock, or it's a rwlock
            that we used to hold in w-mode (which is pretty much the
            same thing as a non-rwlock.)  Since this transaction is
            atomic (V does not allow multiple threads to run
            simultaneously), it must mean the lock is now not held by
            anybody.  Hence assert for it. */
         /* The lock is now not held by anybody: */
         tl_assert(!lock->heldBy);
         tl_assert(lock->heldW == False);
      }
      //if (lock->heldBy) {
      //   tl_assert(0 == VG_(elemBag)( lock->heldBy, (Word)thr ));
      //}
      /* update this thread's lockset accordingly. */
      thr->locksetA
         = HG_(delFromWS)( univ_lsets, thr->locksetA, (Word)lock );
      thr->locksetW
         = HG_(delFromWS)( univ_lsets, thr->locksetW, (Word)lock );
      /* push our VC into the lock */
      tl_assert(thr->hbthr);
      tl_assert(lock->hbso);
      /* If the lock was previously W-held, then we want to do a
         strong send, and if previously R-held, then a weak send. */
      libhb_so_send( thr->hbthr, lock->hbso, was_heldW );
   }
   /* fall through */

  error:
   tl_assert(HG_(is_sane_LockN)(lock));
}
1432
1433
sewardj9f569b72008-11-13 13:33:09 +00001434/* ---------------------------------------------------------- */
1435/* -------- Event handlers proper (evh__* functions) -------- */
1436/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001437
1438/* What is the Thread* for the currently running thread? This is
1439 absolutely performance critical. We receive notifications from the
1440 core for client code starts/stops, and cache the looked-up result
1441 in 'current_Thread'. Hence, for the vast majority of requests,
1442 finding the current thread reduces to a read of a global variable,
1443 provided get_current_Thread_in_C_C is inlined.
1444
1445 Outside of client code, current_Thread is NULL, and presumably
1446 any uses of it will cause a segfault. Hence:
1447
1448 - for uses definitely within client code, use
1449 get_current_Thread_in_C_C.
1450
1451 - for all other uses, use get_current_Thread.
1452*/
1453
/* Cache of the Thread* currently running in client code (NULL when
   outside client code), plus the previously-seen value, used by
   evh__start_client_code to call libhb_Thr_resumes only when the
   running thread actually changes. */
static Thread *current_Thread      = NULL,
              *current_Thread_prev = NULL;
sewardjb4112022007-11-09 22:49:28 +00001456
1457static void evh__start_client_code ( ThreadId tid, ULong nDisp ) {
1458 if (0) VG_(printf)("start %d %llu\n", (Int)tid, nDisp);
1459 tl_assert(current_Thread == NULL);
1460 current_Thread = map_threads_lookup( tid );
1461 tl_assert(current_Thread != NULL);
sewardj23f12002009-07-24 08:45:08 +00001462 if (current_Thread != current_Thread_prev) {
1463 libhb_Thr_resumes( current_Thread->hbthr );
1464 current_Thread_prev = current_Thread;
1465 }
sewardjb4112022007-11-09 22:49:28 +00001466}
1467static void evh__stop_client_code ( ThreadId tid, ULong nDisp ) {
1468 if (0) VG_(printf)(" stop %d %llu\n", (Int)tid, nDisp);
1469 tl_assert(current_Thread != NULL);
1470 current_Thread = NULL;
sewardjf98e1c02008-10-25 16:22:41 +00001471 libhb_maybe_GC();
sewardjb4112022007-11-09 22:49:28 +00001472}
1473static inline Thread* get_current_Thread_in_C_C ( void ) {
1474 return current_Thread;
1475}
1476static inline Thread* get_current_Thread ( void ) {
1477 ThreadId coretid;
1478 Thread* thr;
1479 thr = get_current_Thread_in_C_C();
1480 if (LIKELY(thr))
1481 return thr;
1482 /* evidently not in client code. Do it the slow way. */
1483 coretid = VG_(get_running_tid)();
1484 /* FIXME: get rid of the following kludge. It exists because
sewardjf98e1c02008-10-25 16:22:41 +00001485 evh__new_mem is called during initialisation (as notification
sewardjb4112022007-11-09 22:49:28 +00001486 of initial memory layout) and VG_(get_running_tid)() returns
1487 VG_INVALID_THREADID at that point. */
1488 if (coretid == VG_INVALID_THREADID)
1489 coretid = 1; /* KLUDGE */
1490 thr = map_threads_lookup( coretid );
1491 return thr;
1492}
1493
1494static
1495void evh__new_mem ( Addr a, SizeT len ) {
1496 if (SHOW_EVENTS >= 2)
1497 VG_(printf)("evh__new_mem(%p, %lu)\n", (void*)a, len );
1498 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001499 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001500 all__sanity_check("evh__new_mem-post");
1501}
1502
1503static
sewardj1f77fec2010-04-12 19:51:04 +00001504void evh__new_mem_stack ( Addr a, SizeT len ) {
1505 if (SHOW_EVENTS >= 2)
1506 VG_(printf)("evh__new_mem_stack(%p, %lu)\n", (void*)a, len );
1507 shadow_mem_make_New( get_current_Thread(),
1508 -VG_STACK_REDZONE_SZB + a, len );
1509 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1510 all__sanity_check("evh__new_mem_stack-post");
1511}
1512
1513static
sewardj7cf4e6b2008-05-01 20:24:26 +00001514void evh__new_mem_w_tid ( Addr a, SizeT len, ThreadId tid ) {
1515 if (SHOW_EVENTS >= 2)
1516 VG_(printf)("evh__new_mem_w_tid(%p, %lu)\n", (void*)a, len );
1517 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001518 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardj7cf4e6b2008-05-01 20:24:26 +00001519 all__sanity_check("evh__new_mem_w_tid-post");
1520}
1521
1522static
sewardjb4112022007-11-09 22:49:28 +00001523void evh__new_mem_w_perms ( Addr a, SizeT len,
sewardj9c606bd2008-09-18 18:12:50 +00001524 Bool rr, Bool ww, Bool xx, ULong di_handle ) {
sewardjb4112022007-11-09 22:49:28 +00001525 if (SHOW_EVENTS >= 1)
1526 VG_(printf)("evh__new_mem_w_perms(%p, %lu, %d,%d,%d)\n",
1527 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1528 if (rr || ww || xx)
1529 shadow_mem_make_New( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001530 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001531 all__sanity_check("evh__new_mem_w_perms-post");
1532}
1533
1534static
1535void evh__set_perms ( Addr a, SizeT len,
1536 Bool rr, Bool ww, Bool xx ) {
1537 if (SHOW_EVENTS >= 1)
1538 VG_(printf)("evh__set_perms(%p, %lu, %d,%d,%d)\n",
1539 (void*)a, len, (Int)rr, (Int)ww, (Int)xx );
1540 /* Hmm. What should we do here, that actually makes any sense?
1541 Let's say: if neither readable nor writable, then declare it
1542 NoAccess, else leave it alone. */
1543 if (!(rr || ww))
1544 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001545 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001546 all__sanity_check("evh__set_perms-post");
1547}
1548
1549static
1550void evh__die_mem ( Addr a, SizeT len ) {
sewardj406bac82010-03-03 23:03:40 +00001551 // urr, libhb ignores this.
sewardjb4112022007-11-09 22:49:28 +00001552 if (SHOW_EVENTS >= 2)
1553 VG_(printf)("evh__die_mem(%p, %lu)\n", (void*)a, len );
1554 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001555 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001556 all__sanity_check("evh__die_mem-post");
1557}
1558
1559static
sewardj406bac82010-03-03 23:03:40 +00001560void evh__untrack_mem ( Addr a, SizeT len ) {
1561 // whereas it doesn't ignore this
1562 if (SHOW_EVENTS >= 2)
1563 VG_(printf)("evh__untrack_mem(%p, %lu)\n", (void*)a, len );
1564 shadow_mem_make_Untracked( get_current_Thread(), a, len );
1565 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1566 all__sanity_check("evh__untrack_mem-post");
1567}
1568
1569static
sewardj23f12002009-07-24 08:45:08 +00001570void evh__copy_mem ( Addr src, Addr dst, SizeT len ) {
1571 if (SHOW_EVENTS >= 2)
1572 VG_(printf)("evh__copy_mem(%p, %p, %lu)\n", (void*)src, (void*)dst, len );
1573 shadow_mem_scopy_range( get_current_Thread(), src, dst, len );
1574 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
1575 all__sanity_check("evh__copy_mem-post");
1576}
1577
1578static
sewardjb4112022007-11-09 22:49:28 +00001579void evh__pre_thread_ll_create ( ThreadId parent, ThreadId child )
1580{
1581 if (SHOW_EVENTS >= 1)
1582 VG_(printf)("evh__pre_thread_ll_create(p=%d, c=%d)\n",
1583 (Int)parent, (Int)child );
1584
1585 if (parent != VG_INVALID_THREADID) {
sewardjf98e1c02008-10-25 16:22:41 +00001586 Thread* thr_p;
1587 Thread* thr_c;
1588 Thr* hbthr_p;
1589 Thr* hbthr_c;
sewardjb4112022007-11-09 22:49:28 +00001590
sewardjf98e1c02008-10-25 16:22:41 +00001591 tl_assert(HG_(is_sane_ThreadId)(parent));
1592 tl_assert(HG_(is_sane_ThreadId)(child));
sewardjb4112022007-11-09 22:49:28 +00001593 tl_assert(parent != child);
1594
1595 thr_p = map_threads_maybe_lookup( parent );
1596 thr_c = map_threads_maybe_lookup( child );
1597
1598 tl_assert(thr_p != NULL);
1599 tl_assert(thr_c == NULL);
1600
sewardjf98e1c02008-10-25 16:22:41 +00001601 hbthr_p = thr_p->hbthr;
1602 tl_assert(hbthr_p != NULL);
1603 tl_assert( libhb_get_Thr_opaque(hbthr_p) == thr_p );
sewardjb4112022007-11-09 22:49:28 +00001604
sewardjf98e1c02008-10-25 16:22:41 +00001605 hbthr_c = libhb_create ( hbthr_p );
1606
1607 /* Create a new thread record for the child. */
sewardjb4112022007-11-09 22:49:28 +00001608 /* a Thread for the new thread ... */
sewardjf98e1c02008-10-25 16:22:41 +00001609 thr_c = mk_Thread( hbthr_c );
1610 tl_assert( libhb_get_Thr_opaque(hbthr_c) == NULL );
1611 libhb_set_Thr_opaque(hbthr_c, thr_c);
sewardjb4112022007-11-09 22:49:28 +00001612
1613 /* and bind it in the thread-map table */
1614 map_threads[child] = thr_c;
sewardjf98e1c02008-10-25 16:22:41 +00001615 tl_assert(thr_c->coretid == VG_INVALID_THREADID);
1616 thr_c->coretid = child;
sewardjb4112022007-11-09 22:49:28 +00001617
1618 /* Record where the parent is so we can later refer to this in
1619 error messages.
1620
1621 On amd64-linux, this entails a nasty glibc-2.5 specific hack.
1622 The stack snapshot is taken immediately after the parent has
1623 returned from its sys_clone call. Unfortunately there is no
1624 unwind info for the insn following "syscall" - reading the
1625 glibc sources confirms this. So we ask for a snapshot to be
1626 taken as if RIP was 3 bytes earlier, in a place where there
1627 is unwind info. Sigh.
1628 */
1629 { Word first_ip_delta = 0;
1630# if defined(VGP_amd64_linux)
1631 first_ip_delta = -3;
1632# endif
1633 thr_c->created_at = VG_(record_ExeContext)(parent, first_ip_delta);
1634 }
sewardjb4112022007-11-09 22:49:28 +00001635 }
1636
sewardjf98e1c02008-10-25 16:22:41 +00001637 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001638 all__sanity_check("evh__pre_thread_create-post");
1639}
1640
1641static
1642void evh__pre_thread_ll_exit ( ThreadId quit_tid )
1643{
1644 Int nHeld;
1645 Thread* thr_q;
1646 if (SHOW_EVENTS >= 1)
1647 VG_(printf)("evh__pre_thread_ll_exit(thr=%d)\n",
1648 (Int)quit_tid );
1649
1650 /* quit_tid has disappeared without joining to any other thread.
1651 Therefore there is no synchronisation event associated with its
1652 exit and so we have to pretty much treat it as if it was still
1653 alive but mysteriously making no progress. That is because, if
1654 we don't know when it really exited, then we can never say there
1655 is a point in time when we're sure the thread really has
1656 finished, and so we need to consider the possibility that it
1657 lingers indefinitely and continues to interact with other
1658 threads. */
1659 /* However, it might have rendezvous'd with a thread that called
1660 pthread_join with this one as arg, prior to this point (that's
1661 how NPTL works). In which case there has already been a prior
1662 sync event. So in any case, just let the thread exit. On NPTL,
1663 all thread exits go through here. */
sewardjf98e1c02008-10-25 16:22:41 +00001664 tl_assert(HG_(is_sane_ThreadId)(quit_tid));
sewardjb4112022007-11-09 22:49:28 +00001665 thr_q = map_threads_maybe_lookup( quit_tid );
1666 tl_assert(thr_q != NULL);
1667
1668 /* Complain if this thread holds any locks. */
1669 nHeld = HG_(cardinalityWS)( univ_lsets, thr_q->locksetA );
1670 tl_assert(nHeld >= 0);
1671 if (nHeld > 0) {
1672 HChar buf[80];
1673 VG_(sprintf)(buf, "Exiting thread still holds %d lock%s",
1674 nHeld, nHeld > 1 ? "s" : "");
sewardjf98e1c02008-10-25 16:22:41 +00001675 HG_(record_error_Misc)( thr_q, buf );
sewardjb4112022007-11-09 22:49:28 +00001676 }
1677
sewardj23f12002009-07-24 08:45:08 +00001678 /* Not much to do here:
1679 - tell libhb the thread is gone
1680 - clear the map_threads entry, in order that the Valgrind core
1681 can re-use it. */
1682 tl_assert(thr_q->hbthr);
1683 libhb_async_exit(thr_q->hbthr);
sewardjf98e1c02008-10-25 16:22:41 +00001684 tl_assert(thr_q->coretid == quit_tid);
1685 thr_q->coretid = VG_INVALID_THREADID;
sewardjb4112022007-11-09 22:49:28 +00001686 map_threads_delete( quit_tid );
1687
sewardjf98e1c02008-10-25 16:22:41 +00001688 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001689 all__sanity_check("evh__pre_thread_ll_exit-post");
1690}
1691
sewardjf98e1c02008-10-25 16:22:41 +00001692
sewardjb4112022007-11-09 22:49:28 +00001693static
1694void evh__HG_PTHREAD_JOIN_POST ( ThreadId stay_tid, Thread* quit_thr )
1695{
sewardjb4112022007-11-09 22:49:28 +00001696 Thread* thr_s;
1697 Thread* thr_q;
sewardjf98e1c02008-10-25 16:22:41 +00001698 Thr* hbthr_s;
1699 Thr* hbthr_q;
1700 SO* so;
sewardjb4112022007-11-09 22:49:28 +00001701
1702 if (SHOW_EVENTS >= 1)
1703 VG_(printf)("evh__post_thread_join(stayer=%d, quitter=%p)\n",
1704 (Int)stay_tid, quit_thr );
1705
sewardjf98e1c02008-10-25 16:22:41 +00001706 tl_assert(HG_(is_sane_ThreadId)(stay_tid));
sewardjb4112022007-11-09 22:49:28 +00001707
1708 thr_s = map_threads_maybe_lookup( stay_tid );
1709 thr_q = quit_thr;
1710 tl_assert(thr_s != NULL);
1711 tl_assert(thr_q != NULL);
1712 tl_assert(thr_s != thr_q);
1713
sewardjf98e1c02008-10-25 16:22:41 +00001714 hbthr_s = thr_s->hbthr;
1715 hbthr_q = thr_q->hbthr;
1716 tl_assert(hbthr_s != hbthr_q);
1717 tl_assert( libhb_get_Thr_opaque(hbthr_s) == thr_s );
1718 tl_assert( libhb_get_Thr_opaque(hbthr_q) == thr_q );
sewardjb4112022007-11-09 22:49:28 +00001719
sewardjf98e1c02008-10-25 16:22:41 +00001720 /* Allocate a temporary synchronisation object and use it to send
1721 an imaginary message from the quitter to the stayer, the purpose
1722 being to generate a dependence from the quitter to the
1723 stayer. */
1724 so = libhb_so_alloc();
1725 tl_assert(so);
sewardj23f12002009-07-24 08:45:08 +00001726 /* Send last arg of _so_send as False, since the sending thread
1727 doesn't actually exist any more, so we don't want _so_send to
1728 try taking stack snapshots of it. */
sewardjf98e1c02008-10-25 16:22:41 +00001729 libhb_so_send(hbthr_q, so, True/*strong_send*/);
1730 libhb_so_recv(hbthr_s, so, True/*strong_recv*/);
1731 libhb_so_dealloc(so);
sewardjb4112022007-11-09 22:49:28 +00001732
sewardjf98e1c02008-10-25 16:22:41 +00001733 /* evh__pre_thread_ll_exit issues an error message if the exiting
1734 thread holds any locks. No need to check here. */
sewardjb4112022007-11-09 22:49:28 +00001735
1736 /* This holds because, at least when using NPTL as the thread
1737 library, we should be notified the low level thread exit before
1738 we hear of any join event on it. The low level exit
1739 notification feeds through into evh__pre_thread_ll_exit,
1740 which should clear the map_threads entry for it. Hence we
1741 expect there to be no map_threads entry at this point. */
1742 tl_assert( map_threads_maybe_reverse_lookup_SLOW(thr_q)
1743 == VG_INVALID_THREADID);
1744
sewardjf98e1c02008-10-25 16:22:41 +00001745 if (HG_(clo_sanity_flags) & SCE_THREADS)
sewardjb4112022007-11-09 22:49:28 +00001746 all__sanity_check("evh__post_thread_join-post");
1747}
1748
1749static
1750void evh__pre_mem_read ( CorePart part, ThreadId tid, Char* s,
1751 Addr a, SizeT size) {
1752 if (SHOW_EVENTS >= 2
1753 || (SHOW_EVENTS >= 1 && size != 1))
1754 VG_(printf)("evh__pre_mem_read(ctid=%d, \"%s\", %p, %lu)\n",
1755 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001756 shadow_mem_cread_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001757 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001758 all__sanity_check("evh__pre_mem_read-post");
1759}
1760
1761static
1762void evh__pre_mem_read_asciiz ( CorePart part, ThreadId tid,
1763 Char* s, Addr a ) {
1764 Int len;
1765 if (SHOW_EVENTS >= 1)
1766 VG_(printf)("evh__pre_mem_asciiz(ctid=%d, \"%s\", %p)\n",
1767 (Int)tid, s, (void*)a );
1768 // FIXME: think of a less ugly hack
1769 len = VG_(strlen)( (Char*) a );
sewardj23f12002009-07-24 08:45:08 +00001770 shadow_mem_cread_range( map_threads_lookup(tid), a, len+1 );
sewardjf98e1c02008-10-25 16:22:41 +00001771 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001772 all__sanity_check("evh__pre_mem_read_asciiz-post");
1773}
1774
1775static
1776void evh__pre_mem_write ( CorePart part, ThreadId tid, Char* s,
1777 Addr a, SizeT size ) {
1778 if (SHOW_EVENTS >= 1)
1779 VG_(printf)("evh__pre_mem_write(ctid=%d, \"%s\", %p, %lu)\n",
1780 (Int)tid, s, (void*)a, size );
sewardj23f12002009-07-24 08:45:08 +00001781 shadow_mem_cwrite_range( map_threads_lookup(tid), a, size);
sewardjf98e1c02008-10-25 16:22:41 +00001782 if (size >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001783 all__sanity_check("evh__pre_mem_write-post");
1784}
1785
1786static
1787void evh__new_mem_heap ( Addr a, SizeT len, Bool is_inited ) {
1788 if (SHOW_EVENTS >= 1)
1789 VG_(printf)("evh__new_mem_heap(%p, %lu, inited=%d)\n",
1790 (void*)a, len, (Int)is_inited );
1791 // FIXME: this is kinda stupid
1792 if (is_inited) {
1793 shadow_mem_make_New(get_current_Thread(), a, len);
1794 } else {
1795 shadow_mem_make_New(get_current_Thread(), a, len);
1796 }
sewardjf98e1c02008-10-25 16:22:41 +00001797 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001798 all__sanity_check("evh__pre_mem_read-post");
1799}
1800
1801static
1802void evh__die_mem_heap ( Addr a, SizeT len ) {
1803 if (SHOW_EVENTS >= 1)
1804 VG_(printf)("evh__die_mem_heap(%p, %lu)\n", (void*)a, len );
1805 shadow_mem_make_NoAccess( get_current_Thread(), a, len );
sewardjf98e1c02008-10-25 16:22:41 +00001806 if (len >= SCE_BIGRANGE_T && (HG_(clo_sanity_flags) & SCE_BIGRANGE))
sewardjb4112022007-11-09 22:49:28 +00001807 all__sanity_check("evh__pre_mem_read-post");
1808}
1809
sewardj23f12002009-07-24 08:45:08 +00001810/* --- Event handlers called from generated code --- */
1811
sewardjb4112022007-11-09 22:49:28 +00001812static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001813void evh__mem_help_cread_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001814 Thread* thr = get_current_Thread_in_C_C();
1815 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001816 LIBHB_CREAD_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001817}
sewardjf98e1c02008-10-25 16:22:41 +00001818
sewardjb4112022007-11-09 22:49:28 +00001819static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001820void evh__mem_help_cread_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001821 Thread* thr = get_current_Thread_in_C_C();
1822 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001823 LIBHB_CREAD_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001824}
sewardjf98e1c02008-10-25 16:22:41 +00001825
sewardjb4112022007-11-09 22:49:28 +00001826static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001827void evh__mem_help_cread_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001828 Thread* thr = get_current_Thread_in_C_C();
1829 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001830 LIBHB_CREAD_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001831}
sewardjf98e1c02008-10-25 16:22:41 +00001832
sewardjb4112022007-11-09 22:49:28 +00001833static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001834void evh__mem_help_cread_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001835 Thread* thr = get_current_Thread_in_C_C();
1836 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001837 LIBHB_CREAD_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001838}
sewardjf98e1c02008-10-25 16:22:41 +00001839
sewardjb4112022007-11-09 22:49:28 +00001840static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001841void evh__mem_help_cread_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001842 Thread* thr = get_current_Thread_in_C_C();
1843 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001844 LIBHB_CREAD_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001845}
1846
1847static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001848void evh__mem_help_cwrite_1(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001849 Thread* thr = get_current_Thread_in_C_C();
1850 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001851 LIBHB_CWRITE_1(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001852}
sewardjf98e1c02008-10-25 16:22:41 +00001853
sewardjb4112022007-11-09 22:49:28 +00001854static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001855void evh__mem_help_cwrite_2(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001856 Thread* thr = get_current_Thread_in_C_C();
1857 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001858 LIBHB_CWRITE_2(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001859}
sewardjf98e1c02008-10-25 16:22:41 +00001860
sewardjb4112022007-11-09 22:49:28 +00001861static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001862void evh__mem_help_cwrite_4(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001863 Thread* thr = get_current_Thread_in_C_C();
1864 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001865 LIBHB_CWRITE_4(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001866}
sewardjf98e1c02008-10-25 16:22:41 +00001867
sewardjb4112022007-11-09 22:49:28 +00001868static VG_REGPARM(1)
sewardj23f12002009-07-24 08:45:08 +00001869void evh__mem_help_cwrite_8(Addr a) {
sewardjf98e1c02008-10-25 16:22:41 +00001870 Thread* thr = get_current_Thread_in_C_C();
1871 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001872 LIBHB_CWRITE_8(hbthr, a);
sewardjb4112022007-11-09 22:49:28 +00001873}
sewardjf98e1c02008-10-25 16:22:41 +00001874
sewardjb4112022007-11-09 22:49:28 +00001875static VG_REGPARM(2)
sewardj23f12002009-07-24 08:45:08 +00001876void evh__mem_help_cwrite_N(Addr a, SizeT size) {
sewardjf98e1c02008-10-25 16:22:41 +00001877 Thread* thr = get_current_Thread_in_C_C();
1878 Thr* hbthr = thr->hbthr;
sewardj23f12002009-07-24 08:45:08 +00001879 LIBHB_CWRITE_N(hbthr, a, size);
sewardjb4112022007-11-09 22:49:28 +00001880}
1881
sewardjb4112022007-11-09 22:49:28 +00001882
sewardj9f569b72008-11-13 13:33:09 +00001883/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001884/* -------------- events to do with mutexes -------------- */
sewardj9f569b72008-11-13 13:33:09 +00001885/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00001886
1887/* EXPOSITION only: by intercepting lock init events we can show the
1888 user where the lock was initialised, rather than only being able to
1889 show where it was first locked. Intercepting lock initialisations
1890 is not necessary for the basic operation of the race checker. */
1891static
1892void evh__HG_PTHREAD_MUTEX_INIT_POST( ThreadId tid,
1893 void* mutex, Word mbRec )
1894{
1895 if (SHOW_EVENTS >= 1)
1896 VG_(printf)("evh__hg_PTHREAD_MUTEX_INIT_POST(ctid=%d, mbRec=%ld, %p)\n",
1897 (Int)tid, mbRec, (void*)mutex );
1898 tl_assert(mbRec == 0 || mbRec == 1);
1899 map_locks_lookup_or_create( mbRec ? LK_mbRec : LK_nonRec,
1900 (Addr)mutex, tid );
sewardjf98e1c02008-10-25 16:22:41 +00001901 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001902 all__sanity_check("evh__hg_PTHREAD_MUTEX_INIT_POST");
1903}
1904
1905static
1906void evh__HG_PTHREAD_MUTEX_DESTROY_PRE( ThreadId tid, void* mutex )
1907{
1908 Thread* thr;
1909 Lock* lk;
1910 if (SHOW_EVENTS >= 1)
1911 VG_(printf)("evh__hg_PTHREAD_MUTEX_DESTROY_PRE(ctid=%d, %p)\n",
1912 (Int)tid, (void*)mutex );
1913
1914 thr = map_threads_maybe_lookup( tid );
1915 /* cannot fail - Thread* must already exist */
sewardjf98e1c02008-10-25 16:22:41 +00001916 tl_assert( HG_(is_sane_Thread)(thr) );
sewardjb4112022007-11-09 22:49:28 +00001917
1918 lk = map_locks_maybe_lookup( (Addr)mutex );
1919
1920 if (lk == NULL || (lk->kind != LK_nonRec && lk->kind != LK_mbRec)) {
sewardjf98e1c02008-10-25 16:22:41 +00001921 HG_(record_error_Misc)(
1922 thr, "pthread_mutex_destroy with invalid argument" );
sewardjb4112022007-11-09 22:49:28 +00001923 }
1924
1925 if (lk) {
sewardjf98e1c02008-10-25 16:22:41 +00001926 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00001927 tl_assert( lk->guestaddr == (Addr)mutex );
1928 if (lk->heldBy) {
1929 /* Basically act like we unlocked the lock */
sewardjf98e1c02008-10-25 16:22:41 +00001930 HG_(record_error_Misc)(
1931 thr, "pthread_mutex_destroy of a locked mutex" );
sewardjb4112022007-11-09 22:49:28 +00001932 /* remove lock from locksets of all owning threads */
1933 remove_Lock_from_locksets_of_all_owning_Threads( lk );
sewardj896f6f92008-08-19 08:38:52 +00001934 VG_(deleteBag)( lk->heldBy );
sewardjb4112022007-11-09 22:49:28 +00001935 lk->heldBy = NULL;
1936 lk->heldW = False;
1937 lk->acquired_at = NULL;
1938 }
1939 tl_assert( !lk->heldBy );
sewardjf98e1c02008-10-25 16:22:41 +00001940 tl_assert( HG_(is_sane_LockN)(lk) );
1941
sewardj1cbc12f2008-11-10 16:16:46 +00001942 laog__handle_one_lock_deletion(lk);
sewardjf98e1c02008-10-25 16:22:41 +00001943 map_locks_delete( lk->guestaddr );
1944 del_LockN( lk );
sewardjb4112022007-11-09 22:49:28 +00001945 }
1946
sewardjf98e1c02008-10-25 16:22:41 +00001947 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00001948 all__sanity_check("evh__hg_PTHREAD_MUTEX_DESTROY_PRE");
1949}
1950
1951static void evh__HG_PTHREAD_MUTEX_LOCK_PRE ( ThreadId tid,
1952 void* mutex, Word isTryLock )
1953{
1954 /* Just check the mutex is sane; nothing else to do. */
1955 // 'mutex' may be invalid - not checked by wrapper
1956 Thread* thr;
1957 Lock* lk;
1958 if (SHOW_EVENTS >= 1)
1959 VG_(printf)("evh__hg_PTHREAD_MUTEX_LOCK_PRE(ctid=%d, mutex=%p)\n",
1960 (Int)tid, (void*)mutex );
1961
1962 tl_assert(isTryLock == 0 || isTryLock == 1);
1963 thr = map_threads_maybe_lookup( tid );
1964 tl_assert(thr); /* cannot fail - Thread* must already exist */
1965
1966 lk = map_locks_maybe_lookup( (Addr)mutex );
1967
1968 if (lk && (lk->kind == LK_rdwr)) {
sewardjf98e1c02008-10-25 16:22:41 +00001969 HG_(record_error_Misc)( thr, "pthread_mutex_lock with a "
1970 "pthread_rwlock_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00001971 }
1972
1973 if ( lk
1974 && isTryLock == 0
1975 && (lk->kind == LK_nonRec || lk->kind == LK_rdwr)
1976 && lk->heldBy
1977 && lk->heldW
sewardj896f6f92008-08-19 08:38:52 +00001978 && VG_(elemBag)( lk->heldBy, (Word)thr ) > 0 ) {
sewardjb4112022007-11-09 22:49:28 +00001979 /* uh, it's a non-recursive lock and we already w-hold it, and
1980 this is a real lock operation (not a speculative "tryLock"
1981 kind of thing). Duh. Deadlock coming up; but at least
1982 produce an error message. */
sewardj8fef6252010-07-29 05:28:02 +00001983 HChar* errstr = "Attempt to re-lock a "
1984 "non-recursive lock I already hold";
1985 HChar* auxstr = "Lock was previously acquired";
1986 if (lk->acquired_at) {
1987 HG_(record_error_Misc_w_aux)( thr, errstr, auxstr, lk->acquired_at );
1988 } else {
1989 HG_(record_error_Misc)( thr, errstr );
1990 }
sewardjb4112022007-11-09 22:49:28 +00001991 }
1992}
1993
1994static void evh__HG_PTHREAD_MUTEX_LOCK_POST ( ThreadId tid, void* mutex )
1995{
1996 // only called if the real library call succeeded - so mutex is sane
1997 Thread* thr;
1998 if (SHOW_EVENTS >= 1)
1999 VG_(printf)("evh__HG_PTHREAD_MUTEX_LOCK_POST(ctid=%d, mutex=%p)\n",
2000 (Int)tid, (void*)mutex );
2001
2002 thr = map_threads_maybe_lookup( tid );
2003 tl_assert(thr); /* cannot fail - Thread* must already exist */
2004
2005 evhH__post_thread_w_acquires_lock(
2006 thr,
2007 LK_mbRec, /* if not known, create new lock with this LockKind */
2008 (Addr)mutex
2009 );
2010}
2011
2012static void evh__HG_PTHREAD_MUTEX_UNLOCK_PRE ( ThreadId tid, void* mutex )
2013{
2014 // 'mutex' may be invalid - not checked by wrapper
2015 Thread* thr;
2016 if (SHOW_EVENTS >= 1)
2017 VG_(printf)("evh__HG_PTHREAD_MUTEX_UNLOCK_PRE(ctid=%d, mutex=%p)\n",
2018 (Int)tid, (void*)mutex );
2019
2020 thr = map_threads_maybe_lookup( tid );
2021 tl_assert(thr); /* cannot fail - Thread* must already exist */
2022
2023 evhH__pre_thread_releases_lock( thr, (Addr)mutex, False/*!isRDWR*/ );
2024}
2025
2026static void evh__HG_PTHREAD_MUTEX_UNLOCK_POST ( ThreadId tid, void* mutex )
2027{
2028 // only called if the real library call succeeded - so mutex is sane
2029 Thread* thr;
2030 if (SHOW_EVENTS >= 1)
2031 VG_(printf)("evh__hg_PTHREAD_MUTEX_UNLOCK_POST(ctid=%d, mutex=%p)\n",
2032 (Int)tid, (void*)mutex );
2033 thr = map_threads_maybe_lookup( tid );
2034 tl_assert(thr); /* cannot fail - Thread* must already exist */
2035
2036 // anything we should do here?
2037}
2038
2039
sewardj5a644da2009-08-11 10:35:58 +00002040/* ------------------------------------------------------- */
sewardj1f77fec2010-04-12 19:51:04 +00002041/* -------------- events to do with spinlocks ------------ */
sewardj5a644da2009-08-11 10:35:58 +00002042/* ------------------------------------------------------- */
2043
2044/* All a bit of a kludge. Pretend we're really dealing with ordinary
2045 pthread_mutex_t's instead, for the most part. */
2046
2047static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( ThreadId tid,
2048 void* slock )
2049{
2050 Thread* thr;
2051 Lock* lk;
2052 /* In glibc's kludgey world, we're either initialising or unlocking
2053 it. Since this is the pre-routine, if it is locked, unlock it
2054 and take a dependence edge. Otherwise, do nothing. */
2055
2056 if (SHOW_EVENTS >= 1)
2057 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE"
2058 "(ctid=%d, slock=%p)\n",
2059 (Int)tid, (void*)slock );
2060
2061 thr = map_threads_maybe_lookup( tid );
2062 /* cannot fail - Thread* must already exist */;
2063 tl_assert( HG_(is_sane_Thread)(thr) );
2064
2065 lk = map_locks_maybe_lookup( (Addr)slock );
2066 if (lk && lk->heldBy) {
2067 /* it's held. So do the normal pre-unlock actions, as copied
2068 from evh__HG_PTHREAD_MUTEX_UNLOCK_PRE. This stupidly
2069 duplicates the map_locks_maybe_lookup. */
2070 evhH__pre_thread_releases_lock( thr, (Addr)slock,
2071 False/*!isRDWR*/ );
2072 }
2073}
2074
2075static void evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( ThreadId tid,
2076 void* slock )
2077{
2078 Lock* lk;
2079 /* More kludgery. If the lock has never been seen before, do
2080 actions as per evh__HG_PTHREAD_MUTEX_INIT_POST. Else do
2081 nothing. */
2082
2083 if (SHOW_EVENTS >= 1)
2084 VG_(printf)("evh__hg_PTHREAD_SPIN_INIT_OR_UNLOCK_POST"
2085 "(ctid=%d, slock=%p)\n",
2086 (Int)tid, (void*)slock );
2087
2088 lk = map_locks_maybe_lookup( (Addr)slock );
2089 if (!lk) {
2090 map_locks_lookup_or_create( LK_nonRec, (Addr)slock, tid );
2091 }
2092}
2093
2094static void evh__HG_PTHREAD_SPIN_LOCK_PRE( ThreadId tid,
2095 void* slock, Word isTryLock )
2096{
2097 evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, slock, isTryLock );
2098}
2099
2100static void evh__HG_PTHREAD_SPIN_LOCK_POST( ThreadId tid,
2101 void* slock )
2102{
2103 evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, slock );
2104}
2105
2106static void evh__HG_PTHREAD_SPIN_DESTROY_PRE( ThreadId tid,
2107 void* slock )
2108{
2109 evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, slock );
2110}
2111
2112
sewardj9f569b72008-11-13 13:33:09 +00002113/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002114/* --------------- events to do with CVs --------------- */
sewardj9f569b72008-11-13 13:33:09 +00002115/* ----------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002116
sewardj02114542009-07-28 20:52:36 +00002117/* A mapping from CV to (the SO associated with it, plus some
2118 auxiliary data for error checking). When the CV is
sewardjf98e1c02008-10-25 16:22:41 +00002119 signalled/broadcasted upon, we do a 'send' into the SO, and when a
2120 wait on it completes, we do a 'recv' from the SO. This is believed
2121 to give the correct happens-before events arising from CV
sewardjb4112022007-11-09 22:49:28 +00002122 signallings/broadcasts.
2123*/
2124
sewardj02114542009-07-28 20:52:36 +00002125/* .so is the SO for this CV.
2126 .mx_ga is the associated mutex, when .nWaiters > 0
sewardjb4112022007-11-09 22:49:28 +00002127
sewardj02114542009-07-28 20:52:36 +00002128 POSIX says effectively that the first pthread_cond_{timed}wait call
2129 causes a dynamic binding between the CV and the mutex, and that
2130 lasts until such time as the waiter count falls to zero. Hence
2131 need to keep track of the number of waiters in order to do
2132 consistency tracking. */
2133typedef
2134 struct {
2135 SO* so; /* libhb-allocated SO */
2136 void* mx_ga; /* addr of associated mutex, if any */
2137 UWord nWaiters; /* # threads waiting on the CV */
2138 }
2139 CVInfo;
2140
2141
2142/* pthread_cond_t* -> CVInfo* */
2143static WordFM* map_cond_to_CVInfo = NULL;
2144
2145static void map_cond_to_CVInfo_INIT ( void ) {
2146 if (UNLIKELY(map_cond_to_CVInfo == NULL)) {
2147 map_cond_to_CVInfo = VG_(newFM)( HG_(zalloc),
2148 "hg.mctCI.1", HG_(free), NULL );
2149 tl_assert(map_cond_to_CVInfo != NULL);
sewardjf98e1c02008-10-25 16:22:41 +00002150 }
2151}
2152
sewardj02114542009-07-28 20:52:36 +00002153static CVInfo* map_cond_to_CVInfo_lookup_or_alloc ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002154 UWord key, val;
sewardj02114542009-07-28 20:52:36 +00002155 map_cond_to_CVInfo_INIT();
2156 if (VG_(lookupFM)( map_cond_to_CVInfo, &key, &val, (UWord)cond )) {
sewardjf98e1c02008-10-25 16:22:41 +00002157 tl_assert(key == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002158 return (CVInfo*)val;
sewardjf98e1c02008-10-25 16:22:41 +00002159 } else {
sewardj02114542009-07-28 20:52:36 +00002160 SO* so = libhb_so_alloc();
2161 CVInfo* cvi = HG_(zalloc)("hg.mctCloa.1", sizeof(CVInfo));
2162 cvi->so = so;
2163 cvi->mx_ga = 0;
2164 VG_(addToFM)( map_cond_to_CVInfo, (UWord)cond, (UWord)cvi );
2165 return cvi;
sewardjf98e1c02008-10-25 16:22:41 +00002166 }
2167}
2168
sewardj02114542009-07-28 20:52:36 +00002169static void map_cond_to_CVInfo_delete ( void* cond ) {
sewardjf98e1c02008-10-25 16:22:41 +00002170 UWord keyW, valW;
sewardj02114542009-07-28 20:52:36 +00002171 map_cond_to_CVInfo_INIT();
2172 if (VG_(delFromFM)( map_cond_to_CVInfo, &keyW, &valW, (UWord)cond )) {
2173 CVInfo* cvi = (CVInfo*)valW;
sewardjf98e1c02008-10-25 16:22:41 +00002174 tl_assert(keyW == (UWord)cond);
sewardj02114542009-07-28 20:52:36 +00002175 tl_assert(cvi);
2176 tl_assert(cvi->so);
2177 libhb_so_dealloc(cvi->so);
2178 cvi->mx_ga = 0;
2179 HG_(free)(cvi);
sewardjb4112022007-11-09 22:49:28 +00002180 }
2181}
2182
2183static void evh__HG_PTHREAD_COND_SIGNAL_PRE ( ThreadId tid, void* cond )
2184{
sewardjf98e1c02008-10-25 16:22:41 +00002185 /* 'tid' has signalled on 'cond'. As per the comment above, bind
2186 cond to a SO if it is not already so bound, and 'send' on the
2187 SO. This is later used by other thread(s) which successfully
2188 exit from a pthread_cond_wait on the same cv; then they 'recv'
2189 from the SO, thereby acquiring a dependency on this signalling
2190 event. */
sewardjb4112022007-11-09 22:49:28 +00002191 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002192 CVInfo* cvi;
2193 //Lock* lk;
sewardjb4112022007-11-09 22:49:28 +00002194
2195 if (SHOW_EVENTS >= 1)
2196 VG_(printf)("evh__HG_PTHREAD_COND_SIGNAL_PRE(ctid=%d, cond=%p)\n",
2197 (Int)tid, (void*)cond );
2198
sewardjb4112022007-11-09 22:49:28 +00002199 thr = map_threads_maybe_lookup( tid );
2200 tl_assert(thr); /* cannot fail - Thread* must already exist */
2201
sewardj02114542009-07-28 20:52:36 +00002202 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2203 tl_assert(cvi);
2204 tl_assert(cvi->so);
2205
sewardjb4112022007-11-09 22:49:28 +00002206 // error-if: mutex is bogus
2207 // error-if: mutex is not locked
sewardj02114542009-07-28 20:52:36 +00002208 // Hmm. POSIX doesn't actually say that it's an error to call
2209 // pthread_cond_signal with the associated mutex being unlocked.
2210 // Although it does say that it should be "if consistent scheduling
2211 // is desired."
2212 //
2213 // For the moment, disable these checks.
2214 //lk = map_locks_maybe_lookup(cvi->mx_ga);
2215 //if (lk == NULL || cvi->mx_ga == 0) {
2216 // HG_(record_error_Misc)( thr,
2217 // "pthread_cond_{signal,broadcast}: "
2218 // "no or invalid mutex associated with cond");
2219 //}
2220 ///* note: lk could be NULL. Be careful. */
2221 //if (lk) {
2222 // if (lk->kind == LK_rdwr) {
2223 // HG_(record_error_Misc)(thr,
2224 // "pthread_cond_{signal,broadcast}: associated lock is a rwlock");
2225 // }
2226 // if (lk->heldBy == NULL) {
2227 // HG_(record_error_Misc)(thr,
2228 // "pthread_cond_{signal,broadcast}: "
2229 // "associated lock is not held by any thread");
2230 // }
2231 // if (lk->heldBy != NULL && 0 == VG_(elemBag)(lk->heldBy, (Word)thr)) {
2232 // HG_(record_error_Misc)(thr,
2233 // "pthread_cond_{signal,broadcast}: "
2234 // "associated lock is not held by calling thread");
2235 // }
2236 //}
sewardjb4112022007-11-09 22:49:28 +00002237
sewardj02114542009-07-28 20:52:36 +00002238 libhb_so_send( thr->hbthr, cvi->so, True/*strong_send*/ );
sewardjb4112022007-11-09 22:49:28 +00002239}
2240
2241/* returns True if it reckons 'mutex' is valid and held by this
2242 thread, else False */
2243static Bool evh__HG_PTHREAD_COND_WAIT_PRE ( ThreadId tid,
2244 void* cond, void* mutex )
2245{
2246 Thread* thr;
2247 Lock* lk;
2248 Bool lk_valid = True;
sewardj02114542009-07-28 20:52:36 +00002249 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002250
2251 if (SHOW_EVENTS >= 1)
2252 VG_(printf)("evh__hg_PTHREAD_COND_WAIT_PRE"
2253 "(ctid=%d, cond=%p, mutex=%p)\n",
2254 (Int)tid, (void*)cond, (void*)mutex );
2255
sewardjb4112022007-11-09 22:49:28 +00002256 thr = map_threads_maybe_lookup( tid );
2257 tl_assert(thr); /* cannot fail - Thread* must already exist */
2258
2259 lk = map_locks_maybe_lookup( (Addr)mutex );
2260
2261 /* Check for stupid mutex arguments. There are various ways to be
2262 a bozo. Only complain once, though, even if more than one thing
2263 is wrong. */
2264 if (lk == NULL) {
2265 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002266 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002267 thr,
2268 "pthread_cond_{timed}wait called with invalid mutex" );
2269 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002270 tl_assert( HG_(is_sane_LockN)(lk) );
sewardjb4112022007-11-09 22:49:28 +00002271 if (lk->kind == LK_rdwr) {
2272 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002273 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002274 thr, "pthread_cond_{timed}wait called with mutex "
2275 "of type pthread_rwlock_t*" );
2276 } else
2277 if (lk->heldBy == NULL) {
2278 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002279 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002280 thr, "pthread_cond_{timed}wait called with un-held mutex");
2281 } else
2282 if (lk->heldBy != NULL
sewardj896f6f92008-08-19 08:38:52 +00002283 && VG_(elemBag)( lk->heldBy, (Word)thr ) == 0) {
sewardjb4112022007-11-09 22:49:28 +00002284 lk_valid = False;
sewardjf98e1c02008-10-25 16:22:41 +00002285 HG_(record_error_Misc)(
sewardjb4112022007-11-09 22:49:28 +00002286 thr, "pthread_cond_{timed}wait called with mutex "
2287 "held by a different thread" );
2288 }
2289 }
2290
2291 // error-if: cond is also associated with a different mutex
sewardj02114542009-07-28 20:52:36 +00002292 cvi = map_cond_to_CVInfo_lookup_or_alloc(cond);
2293 tl_assert(cvi);
2294 tl_assert(cvi->so);
2295 if (cvi->nWaiters == 0) {
2296 /* form initial (CV,MX) binding */
2297 cvi->mx_ga = mutex;
2298 }
2299 else /* check existing (CV,MX) binding */
2300 if (cvi->mx_ga != mutex) {
2301 HG_(record_error_Misc)(
2302 thr, "pthread_cond_{timed}wait: cond is associated "
2303 "with a different mutex");
2304 }
2305 cvi->nWaiters++;
sewardjb4112022007-11-09 22:49:28 +00002306
2307 return lk_valid;
2308}
2309
2310static void evh__HG_PTHREAD_COND_WAIT_POST ( ThreadId tid,
2311 void* cond, void* mutex )
2312{
sewardjf98e1c02008-10-25 16:22:41 +00002313 /* A pthread_cond_wait(cond, mutex) completed successfully. Find
2314 the SO for this cond, and 'recv' from it so as to acquire a
2315 dependency edge back to the signaller/broadcaster. */
2316 Thread* thr;
sewardj02114542009-07-28 20:52:36 +00002317 CVInfo* cvi;
sewardjb4112022007-11-09 22:49:28 +00002318
2319 if (SHOW_EVENTS >= 1)
2320 VG_(printf)("evh__HG_PTHREAD_COND_WAIT_POST"
2321 "(ctid=%d, cond=%p, mutex=%p)\n",
2322 (Int)tid, (void*)cond, (void*)mutex );
2323
sewardjb4112022007-11-09 22:49:28 +00002324 thr = map_threads_maybe_lookup( tid );
2325 tl_assert(thr); /* cannot fail - Thread* must already exist */
2326
2327 // error-if: cond is also associated with a different mutex
2328
sewardj02114542009-07-28 20:52:36 +00002329 cvi = map_cond_to_CVInfo_lookup_or_alloc( cond );
2330 tl_assert(cvi);
2331 tl_assert(cvi->so);
2332 tl_assert(cvi->nWaiters > 0);
sewardjb4112022007-11-09 22:49:28 +00002333
sewardj02114542009-07-28 20:52:36 +00002334 if (!libhb_so_everSent(cvi->so)) {
sewardjf98e1c02008-10-25 16:22:41 +00002335 /* Hmm. How can a wait on 'cond' succeed if nobody signalled
2336 it? If this happened it would surely be a bug in the threads
2337 library. Or one of those fabled "spurious wakeups". */
2338 HG_(record_error_Misc)( thr, "Bug in libpthread: pthread_cond_wait "
2339 "succeeded on"
2340 " without prior pthread_cond_post");
sewardjb4112022007-11-09 22:49:28 +00002341 }
sewardjf98e1c02008-10-25 16:22:41 +00002342
2343 /* anyway, acquire a dependency on it. */
sewardj02114542009-07-28 20:52:36 +00002344 libhb_so_recv( thr->hbthr, cvi->so, True/*strong_recv*/ );
2345
2346 cvi->nWaiters--;
sewardjf98e1c02008-10-25 16:22:41 +00002347}
2348
2349static void evh__HG_PTHREAD_COND_DESTROY_PRE ( ThreadId tid,
2350 void* cond )
2351{
2352 /* Deal with destroy events. The only purpose is to free storage
2353 associated with the CV, so as to avoid any possible resource
2354 leaks. */
2355 if (SHOW_EVENTS >= 1)
2356 VG_(printf)("evh__HG_PTHREAD_COND_DESTROY_PRE"
2357 "(ctid=%d, cond=%p)\n",
2358 (Int)tid, (void*)cond );
2359
sewardj02114542009-07-28 20:52:36 +00002360 map_cond_to_CVInfo_delete( cond );
sewardjb4112022007-11-09 22:49:28 +00002361}
2362
2363
sewardj9f569b72008-11-13 13:33:09 +00002364/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002365/* -------------- events to do with rwlocks -------------- */
sewardj9f569b72008-11-13 13:33:09 +00002366/* ------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002367
2368/* EXPOSITION only */
2369static
2370void evh__HG_PTHREAD_RWLOCK_INIT_POST( ThreadId tid, void* rwl )
2371{
2372 if (SHOW_EVENTS >= 1)
2373 VG_(printf)("evh__hg_PTHREAD_RWLOCK_INIT_POST(ctid=%d, %p)\n",
2374 (Int)tid, (void*)rwl );
2375 map_locks_lookup_or_create( LK_rdwr, (Addr)rwl, tid );
sewardjf98e1c02008-10-25 16:22:41 +00002376 if (HG_(clo_sanity_flags) & SCE_LOCKS)
sewardjb4112022007-11-09 22:49:28 +00002377 all__sanity_check("evh__hg_PTHREAD_RWLOCK_INIT_POST");
2378}
2379
static
void evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( ThreadId tid, void* rwl )
{
   /* Handle pthread_rwlock_destroy: complain about dodgy usage, then
      discard the shadow Lock record bound to 'rwl'. */
   Thread* thr;
   Lock* lk;
   if (SHOW_EVENTS >= 1)
      VG_(printf)("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE(ctid=%d, %p)\n",
                  (Int)tid, (void*)rwl );

   thr = map_threads_maybe_lookup( tid );
   /* cannot fail - Thread* must already exist */
   tl_assert( HG_(is_sane_Thread)(thr) );

   lk = map_locks_maybe_lookup( (Addr)rwl );

   /* either we never saw this address, or it is bound to some other
      kind of lock (e.g. a plain mutex) */
   if (lk == NULL || lk->kind != LK_rdwr) {
      HG_(record_error_Misc)(
         thr, "pthread_rwlock_destroy with invalid argument" );
   }

   if (lk) {
      tl_assert( HG_(is_sane_LockN)(lk) );
      tl_assert( lk->guestaddr == (Addr)rwl );
      if (lk->heldBy) {
         /* Basically act like we unlocked the lock */
         /* NOTE(review): the message says "mutex" although this is a
            rwlock; left as-is because error text may be matched by
            suppressions/regression expectations -- confirm before
            changing. */
         HG_(record_error_Misc)(
            thr, "pthread_rwlock_destroy of a locked mutex" );
         /* remove lock from locksets of all owning threads */
         remove_Lock_from_locksets_of_all_owning_Threads( lk );
         VG_(deleteBag)( lk->heldBy );
         lk->heldBy = NULL;
         lk->heldW = False;
         lk->acquired_at = NULL;
      }
      tl_assert( !lk->heldBy );
      tl_assert( HG_(is_sane_LockN)(lk) );

      /* keep the lock-order graph consistent, then forget the lock
         entirely */
      laog__handle_one_lock_deletion(lk);
      map_locks_delete( lk->guestaddr );
      del_LockN( lk );
   }

   if (HG_(clo_sanity_flags) & SCE_LOCKS)
      all__sanity_check("evh__hg_PTHREAD_RWLOCK_DESTROY_PRE");
}
2425
2426static
sewardj789c3c52008-02-25 12:10:07 +00002427void evh__HG_PTHREAD_RWLOCK_LOCK_PRE ( ThreadId tid,
2428 void* rwl,
2429 Word isW, Word isTryLock )
sewardjb4112022007-11-09 22:49:28 +00002430{
2431 /* Just check the rwl is sane; nothing else to do. */
2432 // 'rwl' may be invalid - not checked by wrapper
2433 Thread* thr;
2434 Lock* lk;
2435 if (SHOW_EVENTS >= 1)
2436 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_PRE(ctid=%d, isW=%d, %p)\n",
2437 (Int)tid, (Int)isW, (void*)rwl );
2438
2439 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
sewardj789c3c52008-02-25 12:10:07 +00002440 tl_assert(isTryLock == 0 || isTryLock == 1); /* assured us by wrapper */
sewardjb4112022007-11-09 22:49:28 +00002441 thr = map_threads_maybe_lookup( tid );
2442 tl_assert(thr); /* cannot fail - Thread* must already exist */
2443
2444 lk = map_locks_maybe_lookup( (Addr)rwl );
2445 if ( lk
2446 && (lk->kind == LK_nonRec || lk->kind == LK_mbRec) ) {
2447 /* Wrong kind of lock. Duh. */
sewardjf98e1c02008-10-25 16:22:41 +00002448 HG_(record_error_Misc)(
2449 thr, "pthread_rwlock_{rd,rw}lock with a "
2450 "pthread_mutex_t* argument " );
sewardjb4112022007-11-09 22:49:28 +00002451 }
2452}
2453
2454static
2455void evh__HG_PTHREAD_RWLOCK_LOCK_POST ( ThreadId tid, void* rwl, Word isW )
2456{
2457 // only called if the real library call succeeded - so mutex is sane
2458 Thread* thr;
2459 if (SHOW_EVENTS >= 1)
2460 VG_(printf)("evh__hg_PTHREAD_RWLOCK_LOCK_POST(ctid=%d, isW=%d, %p)\n",
2461 (Int)tid, (Int)isW, (void*)rwl );
2462
2463 tl_assert(isW == 0 || isW == 1); /* assured us by wrapper */
2464 thr = map_threads_maybe_lookup( tid );
2465 tl_assert(thr); /* cannot fail - Thread* must already exist */
2466
2467 (isW ? evhH__post_thread_w_acquires_lock
2468 : evhH__post_thread_r_acquires_lock)(
2469 thr,
2470 LK_rdwr, /* if not known, create new lock with this LockKind */
2471 (Addr)rwl
2472 );
2473}
2474
2475static void evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE ( ThreadId tid, void* rwl )
2476{
2477 // 'rwl' may be invalid - not checked by wrapper
2478 Thread* thr;
2479 if (SHOW_EVENTS >= 1)
2480 VG_(printf)("evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE(ctid=%d, rwl=%p)\n",
2481 (Int)tid, (void*)rwl );
2482
2483 thr = map_threads_maybe_lookup( tid );
2484 tl_assert(thr); /* cannot fail - Thread* must already exist */
2485
2486 evhH__pre_thread_releases_lock( thr, (Addr)rwl, True/*isRDWR*/ );
2487}
2488
2489static void evh__HG_PTHREAD_RWLOCK_UNLOCK_POST ( ThreadId tid, void* rwl )
2490{
2491 // only called if the real library call succeeded - so mutex is sane
2492 Thread* thr;
2493 if (SHOW_EVENTS >= 1)
2494 VG_(printf)("evh__hg_PTHREAD_RWLOCK_UNLOCK_POST(ctid=%d, rwl=%p)\n",
2495 (Int)tid, (void*)rwl );
2496 thr = map_threads_maybe_lookup( tid );
2497 tl_assert(thr); /* cannot fail - Thread* must already exist */
2498
2499 // anything we should do here?
2500}
2501
2502
sewardj9f569b72008-11-13 13:33:09 +00002503/* ---------------------------------------------------------- */
2504/* -------------- events to do with semaphores -------------- */
2505/* ---------------------------------------------------------- */
sewardjb4112022007-11-09 22:49:28 +00002506
sewardj11e352f2007-11-30 11:11:02 +00002507/* This is similar to but not identical to the handling for condition
sewardjb4112022007-11-09 22:49:28 +00002508 variables. */
2509
sewardjf98e1c02008-10-25 16:22:41 +00002510/* For each semaphore, we maintain a stack of SOs. When a 'post'
2511 operation is done on a semaphore (unlocking, essentially), a new SO
2512 is created for the posting thread, the posting thread does a strong
2513 send to it (which merely installs the posting thread's VC in the
2514 SO), and the SO is pushed on the semaphore's stack.
sewardjb4112022007-11-09 22:49:28 +00002515
2516 Later, when a (probably different) thread completes 'wait' on the
sewardjf98e1c02008-10-25 16:22:41 +00002517 semaphore, we pop a SO off the semaphore's stack (which should be
2518 nonempty), and do a strong recv from it. This mechanism creates
sewardjb4112022007-11-09 22:49:28 +00002519 dependencies between posters and waiters of the semaphore.
2520
sewardjf98e1c02008-10-25 16:22:41 +00002521 It may not be necessary to use a stack - perhaps a bag of SOs would
2522 do. But we do need to keep track of how many unused-up posts have
2523 happened for the semaphore.
sewardjb4112022007-11-09 22:49:28 +00002524
sewardjf98e1c02008-10-25 16:22:41 +00002525 Imagine T1 and T2 both post once on a semaphore S, and T3 waits
sewardjb4112022007-11-09 22:49:28 +00002526 twice on S. T3 cannot complete its waits without both T1 and T2
2527 posting. The above mechanism will ensure that T3 acquires
2528 dependencies on both T1 and T2.
sewardj11e352f2007-11-30 11:11:02 +00002529
sewardjf98e1c02008-10-25 16:22:41 +00002530 When a semaphore is initialised with value N, we do as if we'd
2531 posted N times on the semaphore: basically create N SOs and do a
   strong send to all of them.  This allows up to N waits on the
2533 semaphore to acquire a dependency on the initialisation point,
2534 which AFAICS is the correct behaviour.
sewardj11e352f2007-11-30 11:11:02 +00002535
2536 We don't emit an error for DESTROY_PRE on a semaphore we don't know
2537 about. We should.
sewardjb4112022007-11-09 22:49:28 +00002538*/
2539
sewardjf98e1c02008-10-25 16:22:41 +00002540/* sem_t* -> XArray* SO* */
2541static WordFM* map_sem_to_SO_stack = NULL;
sewardjb4112022007-11-09 22:49:28 +00002542
sewardjf98e1c02008-10-25 16:22:41 +00002543static void map_sem_to_SO_stack_INIT ( void ) {
2544 if (map_sem_to_SO_stack == NULL) {
2545 map_sem_to_SO_stack = VG_(newFM)( HG_(zalloc), "hg.mstSs.1",
2546 HG_(free), NULL );
2547 tl_assert(map_sem_to_SO_stack != NULL);
sewardjb4112022007-11-09 22:49:28 +00002548 }
2549}
2550
sewardjf98e1c02008-10-25 16:22:41 +00002551static void push_SO_for_sem ( void* sem, SO* so ) {
2552 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002553 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002554 tl_assert(so);
2555 map_sem_to_SO_stack_INIT();
2556 if (VG_(lookupFM)( map_sem_to_SO_stack,
2557 &keyW, (UWord*)&xa, (UWord)sem )) {
2558 tl_assert(keyW == (UWord)sem);
sewardjb4112022007-11-09 22:49:28 +00002559 tl_assert(xa);
sewardjf98e1c02008-10-25 16:22:41 +00002560 VG_(addToXA)( xa, &so );
sewardjb4112022007-11-09 22:49:28 +00002561 } else {
sewardjf98e1c02008-10-25 16:22:41 +00002562 xa = VG_(newXA)( HG_(zalloc), "hg.pSfs.1", HG_(free), sizeof(SO*) );
2563 VG_(addToXA)( xa, &so );
2564 VG_(addToFM)( map_sem_to_SO_stack, (Word)sem, (Word)xa );
sewardjb4112022007-11-09 22:49:28 +00002565 }
2566}
2567
sewardjf98e1c02008-10-25 16:22:41 +00002568static SO* mb_pop_SO_for_sem ( void* sem ) {
2569 UWord keyW;
sewardjb4112022007-11-09 22:49:28 +00002570 XArray* xa;
sewardjf98e1c02008-10-25 16:22:41 +00002571 SO* so;
2572 map_sem_to_SO_stack_INIT();
2573 if (VG_(lookupFM)( map_sem_to_SO_stack,
2574 &keyW, (UWord*)&xa, (UWord)sem )) {
sewardjb4112022007-11-09 22:49:28 +00002575 /* xa is the stack for this semaphore. */
sewardjf98e1c02008-10-25 16:22:41 +00002576 Word sz;
2577 tl_assert(keyW == (UWord)sem);
2578 sz = VG_(sizeXA)( xa );
sewardjb4112022007-11-09 22:49:28 +00002579 tl_assert(sz >= 0);
2580 if (sz == 0)
2581 return NULL; /* odd, the stack is empty */
sewardjf98e1c02008-10-25 16:22:41 +00002582 so = *(SO**)VG_(indexXA)( xa, sz-1 );
2583 tl_assert(so);
sewardjb4112022007-11-09 22:49:28 +00002584 VG_(dropTailXA)( xa, 1 );
sewardjf98e1c02008-10-25 16:22:41 +00002585 return so;
sewardjb4112022007-11-09 22:49:28 +00002586 } else {
2587 /* hmm, that's odd. No stack for this semaphore. */
2588 return NULL;
2589 }
2590}
2591
sewardj11e352f2007-11-30 11:11:02 +00002592static void evh__HG_POSIX_SEM_DESTROY_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002593{
sewardjf98e1c02008-10-25 16:22:41 +00002594 UWord keyW, valW;
2595 SO* so;
sewardjb4112022007-11-09 22:49:28 +00002596
sewardjb4112022007-11-09 22:49:28 +00002597 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002598 VG_(printf)("evh__HG_POSIX_SEM_DESTROY_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002599 (Int)tid, (void*)sem );
2600
sewardjf98e1c02008-10-25 16:22:41 +00002601 map_sem_to_SO_stack_INIT();
sewardjb4112022007-11-09 22:49:28 +00002602
sewardjf98e1c02008-10-25 16:22:41 +00002603 /* Empty out the semaphore's SO stack. This way of doing it is
2604 stupid, but at least it's easy. */
2605 while (1) {
2606 so = mb_pop_SO_for_sem( sem );
2607 if (!so) break;
2608 libhb_so_dealloc(so);
2609 }
2610
2611 if (VG_(delFromFM)( map_sem_to_SO_stack, &keyW, &valW, (UWord)sem )) {
2612 XArray* xa = (XArray*)valW;
2613 tl_assert(keyW == (UWord)sem);
2614 tl_assert(xa);
2615 tl_assert(VG_(sizeXA)(xa) == 0); /* preceding loop just emptied it */
2616 VG_(deleteXA)(xa);
2617 }
sewardjb4112022007-11-09 22:49:28 +00002618}
2619
sewardj11e352f2007-11-30 11:11:02 +00002620static
2621void evh__HG_POSIX_SEM_INIT_POST ( ThreadId tid, void* sem, UWord value )
2622{
sewardjf98e1c02008-10-25 16:22:41 +00002623 SO* so;
2624 Thread* thr;
sewardj11e352f2007-11-30 11:11:02 +00002625
2626 if (SHOW_EVENTS >= 1)
2627 VG_(printf)("evh__HG_POSIX_SEM_INIT_POST(ctid=%d, sem=%p, value=%lu)\n",
2628 (Int)tid, (void*)sem, value );
2629
sewardjf98e1c02008-10-25 16:22:41 +00002630 thr = map_threads_maybe_lookup( tid );
2631 tl_assert(thr); /* cannot fail - Thread* must already exist */
sewardj11e352f2007-11-30 11:11:02 +00002632
sewardjf98e1c02008-10-25 16:22:41 +00002633 /* Empty out the semaphore's SO stack. This way of doing it is
2634 stupid, but at least it's easy. */
2635 while (1) {
2636 so = mb_pop_SO_for_sem( sem );
2637 if (!so) break;
2638 libhb_so_dealloc(so);
2639 }
sewardj11e352f2007-11-30 11:11:02 +00002640
sewardjf98e1c02008-10-25 16:22:41 +00002641 /* If we don't do this check, the following while loop runs us out
2642 of memory for stupid initial values of 'value'. */
2643 if (value > 10000) {
2644 HG_(record_error_Misc)(
2645 thr, "sem_init: initial value exceeds 10000; using 10000" );
2646 value = 10000;
2647 }
sewardj11e352f2007-11-30 11:11:02 +00002648
sewardjf98e1c02008-10-25 16:22:41 +00002649 /* Now create 'valid' new SOs for the thread, do a strong send to
2650 each of them, and push them all on the stack. */
2651 for (; value > 0; value--) {
2652 Thr* hbthr = thr->hbthr;
2653 tl_assert(hbthr);
sewardj11e352f2007-11-30 11:11:02 +00002654
sewardjf98e1c02008-10-25 16:22:41 +00002655 so = libhb_so_alloc();
2656 libhb_so_send( hbthr, so, True/*strong send*/ );
2657 push_SO_for_sem( sem, so );
sewardj11e352f2007-11-30 11:11:02 +00002658 }
2659}
2660
2661static void evh__HG_POSIX_SEM_POST_PRE ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002662{
sewardjf98e1c02008-10-25 16:22:41 +00002663 /* 'tid' has posted on 'sem'. Create a new SO, do a strong send to
2664 it (iow, write our VC into it, then tick ours), and push the SO
2665 on on a stack of SOs associated with 'sem'. This is later used
2666 by other thread(s) which successfully exit from a sem_wait on
2667 the same sem; by doing a strong recv from SOs popped of the
2668 stack, they acquire dependencies on the posting thread
2669 segment(s). */
sewardjb4112022007-11-09 22:49:28 +00002670
sewardjf98e1c02008-10-25 16:22:41 +00002671 Thread* thr;
2672 SO* so;
2673 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002674
2675 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002676 VG_(printf)("evh__HG_POSIX_SEM_POST_PRE(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002677 (Int)tid, (void*)sem );
2678
2679 thr = map_threads_maybe_lookup( tid );
2680 tl_assert(thr); /* cannot fail - Thread* must already exist */
2681
2682 // error-if: sem is bogus
2683
sewardjf98e1c02008-10-25 16:22:41 +00002684 hbthr = thr->hbthr;
2685 tl_assert(hbthr);
sewardjb4112022007-11-09 22:49:28 +00002686
sewardjf98e1c02008-10-25 16:22:41 +00002687 so = libhb_so_alloc();
2688 libhb_so_send( hbthr, so, True/*strong send*/ );
2689 push_SO_for_sem( sem, so );
sewardjb4112022007-11-09 22:49:28 +00002690}
2691
sewardj11e352f2007-11-30 11:11:02 +00002692static void evh__HG_POSIX_SEM_WAIT_POST ( ThreadId tid, void* sem )
sewardjb4112022007-11-09 22:49:28 +00002693{
sewardjf98e1c02008-10-25 16:22:41 +00002694 /* A sem_wait(sem) completed successfully. Pop the posting-SO for
2695 the 'sem' from this semaphore's SO-stack, and do a strong recv
2696 from it. This creates a dependency back to one of the post-ers
2697 for the semaphore. */
sewardjb4112022007-11-09 22:49:28 +00002698
sewardjf98e1c02008-10-25 16:22:41 +00002699 Thread* thr;
2700 SO* so;
2701 Thr* hbthr;
sewardjb4112022007-11-09 22:49:28 +00002702
2703 if (SHOW_EVENTS >= 1)
sewardj11e352f2007-11-30 11:11:02 +00002704 VG_(printf)("evh__HG_POSIX_SEM_WAIT_POST(ctid=%d, sem=%p)\n",
sewardjb4112022007-11-09 22:49:28 +00002705 (Int)tid, (void*)sem );
2706
2707 thr = map_threads_maybe_lookup( tid );
2708 tl_assert(thr); /* cannot fail - Thread* must already exist */
2709
2710 // error-if: sem is bogus
2711
sewardjf98e1c02008-10-25 16:22:41 +00002712 so = mb_pop_SO_for_sem( sem );
sewardjb4112022007-11-09 22:49:28 +00002713
sewardjf98e1c02008-10-25 16:22:41 +00002714 if (so) {
2715 hbthr = thr->hbthr;
2716 tl_assert(hbthr);
2717
2718 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2719 libhb_so_dealloc(so);
2720 } else {
2721 /* Hmm. How can a wait on 'sem' succeed if nobody posted to it?
2722 If this happened it would surely be a bug in the threads
2723 library. */
2724 HG_(record_error_Misc)(
2725 thr, "Bug in libpthread: sem_wait succeeded on"
2726 " semaphore without prior sem_post");
sewardjb4112022007-11-09 22:49:28 +00002727 }
2728}
2729
2730
sewardj9f569b72008-11-13 13:33:09 +00002731/* -------------------------------------------------------- */
2732/* -------------- events to do with barriers -------------- */
2733/* -------------------------------------------------------- */
2734
2735typedef
2736 struct {
2737 Bool initted; /* has it yet been initted by guest? */
sewardj406bac82010-03-03 23:03:40 +00002738 Bool resizable; /* is resizing allowed? */
sewardj9f569b72008-11-13 13:33:09 +00002739 UWord size; /* declared size */
2740 XArray* waiting; /* XA of Thread*. # present is 0 .. .size */
2741 }
2742 Bar;
2743
2744static Bar* new_Bar ( void ) {
2745 Bar* bar = HG_(zalloc)( "hg.nB.1 (new_Bar)", sizeof(Bar) );
2746 tl_assert(bar);
2747 /* all fields are zero */
2748 tl_assert(bar->initted == False);
2749 return bar;
2750}
2751
2752static void delete_Bar ( Bar* bar ) {
2753 tl_assert(bar);
2754 if (bar->waiting)
2755 VG_(deleteXA)(bar->waiting);
2756 HG_(free)(bar);
2757}
2758
2759/* A mapping which stores auxiliary data for barriers. */
2760
2761/* pthread_barrier_t* -> Bar* */
2762static WordFM* map_barrier_to_Bar = NULL;
2763
2764static void map_barrier_to_Bar_INIT ( void ) {
2765 if (UNLIKELY(map_barrier_to_Bar == NULL)) {
2766 map_barrier_to_Bar = VG_(newFM)( HG_(zalloc),
2767 "hg.mbtBI.1", HG_(free), NULL );
2768 tl_assert(map_barrier_to_Bar != NULL);
2769 }
2770}
2771
2772static Bar* map_barrier_to_Bar_lookup_or_alloc ( void* barrier ) {
2773 UWord key, val;
2774 map_barrier_to_Bar_INIT();
2775 if (VG_(lookupFM)( map_barrier_to_Bar, &key, &val, (UWord)barrier )) {
2776 tl_assert(key == (UWord)barrier);
2777 return (Bar*)val;
2778 } else {
2779 Bar* bar = new_Bar();
2780 VG_(addToFM)( map_barrier_to_Bar, (UWord)barrier, (UWord)bar );
2781 return bar;
2782 }
2783}
2784
2785static void map_barrier_to_Bar_delete ( void* barrier ) {
2786 UWord keyW, valW;
2787 map_barrier_to_Bar_INIT();
2788 if (VG_(delFromFM)( map_barrier_to_Bar, &keyW, &valW, (UWord)barrier )) {
2789 Bar* bar = (Bar*)valW;
2790 tl_assert(keyW == (UWord)barrier);
2791 delete_Bar(bar);
2792 }
2793}
2794
2795
2796static void evh__HG_PTHREAD_BARRIER_INIT_PRE ( ThreadId tid,
2797 void* barrier,
sewardj406bac82010-03-03 23:03:40 +00002798 UWord count,
2799 UWord resizable )
sewardj9f569b72008-11-13 13:33:09 +00002800{
2801 Thread* thr;
2802 Bar* bar;
2803
2804 if (SHOW_EVENTS >= 1)
2805 VG_(printf)("evh__HG_PTHREAD_BARRIER_INIT_PRE"
sewardj406bac82010-03-03 23:03:40 +00002806 "(tid=%d, barrier=%p, count=%lu, resizable=%lu)\n",
2807 (Int)tid, (void*)barrier, count, resizable );
sewardj9f569b72008-11-13 13:33:09 +00002808
2809 thr = map_threads_maybe_lookup( tid );
2810 tl_assert(thr); /* cannot fail - Thread* must already exist */
2811
2812 if (count == 0) {
2813 HG_(record_error_Misc)(
2814 thr, "pthread_barrier_init: 'count' argument is zero"
2815 );
2816 }
2817
sewardj406bac82010-03-03 23:03:40 +00002818 if (resizable != 0 && resizable != 1) {
2819 HG_(record_error_Misc)(
2820 thr, "pthread_barrier_init: invalid 'resizable' argument"
2821 );
2822 }
2823
sewardj9f569b72008-11-13 13:33:09 +00002824 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2825 tl_assert(bar);
2826
2827 if (bar->initted) {
2828 HG_(record_error_Misc)(
2829 thr, "pthread_barrier_init: barrier is already initialised"
2830 );
2831 }
2832
2833 if (bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2834 tl_assert(bar->initted);
2835 HG_(record_error_Misc)(
sewardj553655c2008-11-14 19:41:19 +00002836 thr, "pthread_barrier_init: threads are waiting at barrier"
sewardj9f569b72008-11-13 13:33:09 +00002837 );
2838 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2839 }
2840 if (!bar->waiting) {
2841 bar->waiting = VG_(newXA)( HG_(zalloc), "hg.eHPBIP.1", HG_(free),
2842 sizeof(Thread*) );
2843 }
2844
2845 tl_assert(bar->waiting);
2846 tl_assert(VG_(sizeXA)(bar->waiting) == 0);
sewardj406bac82010-03-03 23:03:40 +00002847 bar->initted = True;
2848 bar->resizable = resizable == 1 ? True : False;
2849 bar->size = count;
sewardj9f569b72008-11-13 13:33:09 +00002850}
2851
2852
2853static void evh__HG_PTHREAD_BARRIER_DESTROY_PRE ( ThreadId tid,
2854 void* barrier )
2855{
sewardj553655c2008-11-14 19:41:19 +00002856 Thread* thr;
2857 Bar* bar;
2858
sewardj9f569b72008-11-13 13:33:09 +00002859 /* Deal with destroy events. The only purpose is to free storage
2860 associated with the barrier, so as to avoid any possible
2861 resource leaks. */
2862 if (SHOW_EVENTS >= 1)
2863 VG_(printf)("evh__HG_PTHREAD_BARRIER_DESTROY_PRE"
2864 "(tid=%d, barrier=%p)\n",
2865 (Int)tid, (void*)barrier );
2866
sewardj553655c2008-11-14 19:41:19 +00002867 thr = map_threads_maybe_lookup( tid );
2868 tl_assert(thr); /* cannot fail - Thread* must already exist */
2869
2870 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2871 tl_assert(bar);
2872
2873 if (!bar->initted) {
2874 HG_(record_error_Misc)(
2875 thr, "pthread_barrier_destroy: barrier was never initialised"
2876 );
2877 }
2878
2879 if (bar->initted && bar->waiting && VG_(sizeXA)(bar->waiting) > 0) {
2880 HG_(record_error_Misc)(
2881 thr, "pthread_barrier_destroy: threads are waiting at barrier"
2882 );
2883 }
2884
sewardj9f569b72008-11-13 13:33:09 +00002885 /* Maybe we shouldn't do this; just let it persist, so that when it
2886 is reinitialised we don't need to do any dynamic memory
2887 allocation? The downside is a potentially unlimited space leak,
2888 if the client creates (in turn) a large number of barriers all
2889 at different locations. Note that if we do later move to the
2890 don't-delete-it scheme, we need to mark the barrier as
2891 uninitialised again since otherwise a later _init call will
sewardj553655c2008-11-14 19:41:19 +00002892 elicit a duplicate-init error. */
sewardj9f569b72008-11-13 13:33:09 +00002893 map_barrier_to_Bar_delete( barrier );
2894}
2895
2896
sewardj406bac82010-03-03 23:03:40 +00002897/* All the threads have arrived. Now do the Interesting Bit. Get a
2898 new synchronisation object and do a weak send to it from all the
2899 participating threads. This makes its vector clocks be the join of
2900 all the individual threads' vector clocks. Then do a strong
2901 receive from it back to all threads, so that their VCs are a copy
2902 of it (hence are all equal to the join of their original VCs.) */
2903static void do_barrier_cross_sync_and_empty ( Bar* bar )
2904{
2905 /* XXX check bar->waiting has no duplicates */
2906 UWord i;
2907 SO* so = libhb_so_alloc();
2908
2909 tl_assert(bar->waiting);
2910 tl_assert(VG_(sizeXA)(bar->waiting) == bar->size);
2911
2912 /* compute the join ... */
2913 for (i = 0; i < bar->size; i++) {
2914 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2915 Thr* hbthr = t->hbthr;
2916 libhb_so_send( hbthr, so, False/*weak send*/ );
2917 }
2918 /* ... and distribute to all threads */
2919 for (i = 0; i < bar->size; i++) {
2920 Thread* t = *(Thread**)VG_(indexXA)(bar->waiting, i);
2921 Thr* hbthr = t->hbthr;
2922 libhb_so_recv( hbthr, so, True/*strong recv*/ );
2923 }
2924
2925 /* finally, we must empty out the waiting vector */
2926 VG_(dropTailXA)(bar->waiting, VG_(sizeXA)(bar->waiting));
2927
2928 /* and we don't need this any more. Perhaps a stack-allocated
2929 SO would be better? */
2930 libhb_so_dealloc(so);
2931}
2932
2933
sewardj9f569b72008-11-13 13:33:09 +00002934static void evh__HG_PTHREAD_BARRIER_WAIT_PRE ( ThreadId tid,
2935 void* barrier )
2936{
sewardj1c466b72008-11-19 11:52:14 +00002937 /* This function gets called after a client thread calls
2938 pthread_barrier_wait but before it arrives at the real
2939 pthread_barrier_wait.
2940
2941 Why is the following correct? It's a bit subtle.
2942
2943 If this is not the last thread arriving at the barrier, we simply
2944 note its presence and return. Because valgrind (at least as of
2945 Nov 08) is single threaded, we are guaranteed safe from any race
2946 conditions when in this function -- no other client threads are
2947 running.
2948
2949 If this is the last thread, then we are again the only running
2950 thread. All the other threads will have either arrived at the
2951 real pthread_barrier_wait or are on their way to it, but in any
2952 case are guaranteed not to be able to move past it, because this
2953 thread is currently in this function and so has not yet arrived
2954 at the real pthread_barrier_wait. That means that:
2955
2956 1. While we are in this function, none of the other threads
2957 waiting at the barrier can move past it.
2958
2959 2. When this function returns (and simulated execution resumes),
2960 this thread and all other waiting threads will be able to move
2961 past the real barrier.
2962
2963 Because of this, it is now safe to update the vector clocks of
2964 all threads, to represent the fact that they all arrived at the
2965 barrier and have all moved on. There is no danger of any
2966 complications to do with some threads leaving the barrier and
2967 racing back round to the front, whilst others are still leaving
2968 (which is the primary source of complication in correct handling/
2969 implementation of barriers). That can't happen because we update
2970 here our data structures so as to indicate that the threads have
2971 passed the barrier, even though, as per (2) above, they are
2972 guaranteed not to pass the barrier until we return.
2973
2974 This relies crucially on Valgrind being single threaded. If that
2975 changes, this will need to be reconsidered.
2976 */
sewardj9f569b72008-11-13 13:33:09 +00002977 Thread* thr;
2978 Bar* bar;
sewardj406bac82010-03-03 23:03:40 +00002979 UWord present;
sewardj9f569b72008-11-13 13:33:09 +00002980
2981 if (SHOW_EVENTS >= 1)
2982 VG_(printf)("evh__HG_PTHREAD_BARRIER_WAIT_PRE"
2983 "(tid=%d, barrier=%p)\n",
2984 (Int)tid, (void*)barrier );
2985
2986 thr = map_threads_maybe_lookup( tid );
2987 tl_assert(thr); /* cannot fail - Thread* must already exist */
2988
2989 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
2990 tl_assert(bar);
2991
2992 if (!bar->initted) {
2993 HG_(record_error_Misc)(
2994 thr, "pthread_barrier_wait: barrier is uninitialised"
2995 );
2996 return; /* client is broken .. avoid assertions below */
2997 }
2998
2999 /* guaranteed by _INIT_PRE above */
3000 tl_assert(bar->size > 0);
3001 tl_assert(bar->waiting);
3002
3003 VG_(addToXA)( bar->waiting, &thr );
3004
3005 /* guaranteed by this function */
3006 present = VG_(sizeXA)(bar->waiting);
3007 tl_assert(present > 0 && present <= bar->size);
3008
3009 if (present < bar->size)
3010 return;
3011
sewardj406bac82010-03-03 23:03:40 +00003012 do_barrier_cross_sync_and_empty(bar);
3013}
sewardj9f569b72008-11-13 13:33:09 +00003014
sewardj9f569b72008-11-13 13:33:09 +00003015
sewardj406bac82010-03-03 23:03:40 +00003016static void evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( ThreadId tid,
3017 void* barrier,
3018 UWord newcount )
3019{
3020 Thread* thr;
3021 Bar* bar;
3022 UWord present;
3023
3024 if (SHOW_EVENTS >= 1)
3025 VG_(printf)("evh__HG_PTHREAD_BARRIER_RESIZE_PRE"
3026 "(tid=%d, barrier=%p, newcount=%lu)\n",
3027 (Int)tid, (void*)barrier, newcount );
3028
3029 thr = map_threads_maybe_lookup( tid );
3030 tl_assert(thr); /* cannot fail - Thread* must already exist */
3031
3032 bar = map_barrier_to_Bar_lookup_or_alloc(barrier);
3033 tl_assert(bar);
3034
3035 if (!bar->initted) {
3036 HG_(record_error_Misc)(
3037 thr, "pthread_barrier_resize: barrier is uninitialised"
3038 );
3039 return; /* client is broken .. avoid assertions below */
3040 }
3041
3042 if (!bar->resizable) {
3043 HG_(record_error_Misc)(
3044 thr, "pthread_barrier_resize: barrier is may not be resized"
3045 );
3046 return; /* client is broken .. avoid assertions below */
3047 }
3048
3049 if (newcount == 0) {
3050 HG_(record_error_Misc)(
3051 thr, "pthread_barrier_resize: 'newcount' argument is zero"
3052 );
3053 return; /* client is broken .. avoid assertions below */
3054 }
3055
3056 /* guaranteed by _INIT_PRE above */
3057 tl_assert(bar->size > 0);
sewardj9f569b72008-11-13 13:33:09 +00003058 tl_assert(bar->waiting);
sewardj406bac82010-03-03 23:03:40 +00003059 /* Guaranteed by this fn */
3060 tl_assert(newcount > 0);
sewardj9f569b72008-11-13 13:33:09 +00003061
sewardj406bac82010-03-03 23:03:40 +00003062 if (newcount >= bar->size) {
3063 /* Increasing the capacity. There's no possibility of threads
3064 moving on from the barrier in this situation, so just note
3065 the fact and do nothing more. */
3066 bar->size = newcount;
3067 } else {
3068 /* Decreasing the capacity. If we decrease it to be equal or
3069 below the number of waiting threads, they will now move past
3070 the barrier, so need to mess with dep edges in the same way
3071 as if the barrier had filled up normally. */
3072 present = VG_(sizeXA)(bar->waiting);
3073 tl_assert(present >= 0 && present <= bar->size);
3074 if (newcount <= present) {
3075 bar->size = present; /* keep the cross_sync call happy */
3076 do_barrier_cross_sync_and_empty(bar);
3077 }
3078 bar->size = newcount;
sewardj9f569b72008-11-13 13:33:09 +00003079 }
sewardj9f569b72008-11-13 13:33:09 +00003080}
3081
3082
sewardjed2e72e2009-08-14 11:08:24 +00003083/* ----------------------------------------------------- */
3084/* ----- events to do with user-specified HB edges ----- */
3085/* ----------------------------------------------------- */
3086
3087/* A mapping from arbitrary UWord tag to the SO associated with it.
3088 The UWord tags are meaningless to us, interpreted only by the
3089 user. */
3090
3091
3092
3093/* UWord -> SO* */
3094static WordFM* map_usertag_to_SO = NULL;
3095
3096static void map_usertag_to_SO_INIT ( void ) {
3097 if (UNLIKELY(map_usertag_to_SO == NULL)) {
3098 map_usertag_to_SO = VG_(newFM)( HG_(zalloc),
3099 "hg.mutS.1", HG_(free), NULL );
3100 tl_assert(map_usertag_to_SO != NULL);
3101 }
3102}
3103
3104static SO* map_usertag_to_SO_lookup_or_alloc ( UWord usertag ) {
3105 UWord key, val;
3106 map_usertag_to_SO_INIT();
3107 if (VG_(lookupFM)( map_usertag_to_SO, &key, &val, usertag )) {
3108 tl_assert(key == (UWord)usertag);
3109 return (SO*)val;
3110 } else {
3111 SO* so = libhb_so_alloc();
3112 VG_(addToFM)( map_usertag_to_SO, usertag, (UWord)so );
3113 return so;
3114 }
3115}
3116
3117// If it's ever needed (XXX check before use)
3118//static void map_usertag_to_SO_delete ( UWord usertag ) {
3119// UWord keyW, valW;
3120// map_usertag_to_SO_INIT();
3121// if (VG_(delFromFM)( map_usertag_to_SO, &keyW, &valW, usertag )) {
3122// SO* so = (SO*)valW;
3123// tl_assert(keyW == usertag);
3124// tl_assert(so);
3125// libhb_so_dealloc(so);
3126// }
3127//}
3128
3129
3130static
3131void evh__HG_USERSO_SEND_PRE ( ThreadId tid, UWord usertag )
3132{
3133 /* TID is just about to notionally sent a message on a notional
3134 abstract synchronisation object whose identity is given by
3135 USERTAG. Bind USERTAG to a real SO if it is not already so
3136 bound, and do a 'strong send' on the SO. This is later used by
3137 other thread(s) which successfully 'receive' from the SO,
3138 thereby acquiring a dependency on this signalling event. */
3139 Thread* thr;
3140 SO* so;
3141
3142 if (SHOW_EVENTS >= 1)
3143 VG_(printf)("evh__HG_USERSO_SEND_PRE(ctid=%d, usertag=%#lx)\n",
3144 (Int)tid, usertag );
3145
3146 thr = map_threads_maybe_lookup( tid );
3147 tl_assert(thr); /* cannot fail - Thread* must already exist */
3148
3149 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3150 tl_assert(so);
3151
3152 libhb_so_send( thr->hbthr, so, True/*strong_send*/ );
3153}
3154
3155static
3156void evh__HG_USERSO_RECV_POST ( ThreadId tid, UWord usertag )
3157{
3158 /* TID has just notionally received a message from a notional
3159 abstract synchronisation object whose identity is given by
3160 USERTAG. Bind USERTAG to a real SO if it is not already so
3161 bound. If the SO has at some point in the past been 'sent' on,
3162 to a 'strong receive' on it, thereby acquiring a dependency on
3163 the sender. */
3164 Thread* thr;
3165 SO* so;
3166
3167 if (SHOW_EVENTS >= 1)
3168 VG_(printf)("evh__HG_USERSO_RECV_POST(ctid=%d, usertag=%#lx)\n",
3169 (Int)tid, usertag );
3170
3171 thr = map_threads_maybe_lookup( tid );
3172 tl_assert(thr); /* cannot fail - Thread* must already exist */
3173
3174 so = map_usertag_to_SO_lookup_or_alloc( usertag );
3175 tl_assert(so);
3176
3177 /* Acquire a dependency on it. If the SO has never so far been
3178 sent on, then libhb_so_recv will do nothing. So we're safe
3179 regardless of SO's history. */
3180 libhb_so_recv( thr->hbthr, so, True/*strong_recv*/ );
3181}
3182
3183
sewardjb4112022007-11-09 22:49:28 +00003184/*--------------------------------------------------------------*/
3185/*--- Lock acquisition order monitoring ---*/
3186/*--------------------------------------------------------------*/
3187
3188/* FIXME: here are some optimisations still to do in
3189 laog__pre_thread_acquires_lock.
3190
3191 The graph is structured so that if L1 --*--> L2 then L1 must be
3192 acquired before L2.
3193
3194 The common case is that some thread T holds (eg) L1 L2 and L3 and
3195 is repeatedly acquiring and releasing Ln, and there is no ordering
   error in what it is doing. Hence it repeatedly:
3197
3198 (1) searches laog to see if Ln --*--> {L1,L2,L3}, which always
3199 produces the answer No (because there is no error).
3200
3201 (2) adds edges {L1,L2,L3} --> Ln to laog, which are already present
3202 (because they already got added the first time T acquired Ln).
3203
3204 Hence cache these two events:
3205
3206 (1) Cache result of the query from last time. Invalidate the cache
3207 any time any edges are added to or deleted from laog.
3208
3209 (2) Cache these add-edge requests and ignore them if said edges
3210 have already been added to laog. Invalidate the cache any time
3211 any edges are deleted from laog.
3212*/
3213
/* Per-lock node of the lock acquisition order graph: the sets of
   locks observed to be acquired before (inns) and after (outs) this
   lock.  Both sets live in the univ_laog word-set universe. */
typedef
   struct {
      WordSetID inns; /* in univ_laog */
      WordSetID outs; /* in univ_laog */
   }
   LAOGLinks;
3220
3221/* lock order acquisition graph */
3222static WordFM* laog = NULL; /* WordFM Lock* LAOGLinks* */
3223
3224/* EXPOSITION ONLY: for each edge in 'laog', record the two places
3225 where that edge was created, so that we can show the user later if
3226 we need to. */
/* Keyed by the (src_ga, dst_ga) pair -- see cmp_LAOGLinkExposition;
   the two ExeContexts record where that ordering was first seen. */
typedef
   struct {
      Addr        src_ga; /* Lock guest addresses for */
      Addr        dst_ga; /* src/dst of the edge */
      ExeContext* src_ec; /* And corresponding places where that */
      ExeContext* dst_ec; /* ordering was established */
   }
   LAOGLinkExposition;
3235
sewardj250ec2e2008-02-15 22:02:30 +00003236static Word cmp_LAOGLinkExposition ( UWord llx1W, UWord llx2W ) {
sewardjb4112022007-11-09 22:49:28 +00003237 /* Compare LAOGLinkExposition*s by (src_ga,dst_ga) field pair. */
3238 LAOGLinkExposition* llx1 = (LAOGLinkExposition*)llx1W;
3239 LAOGLinkExposition* llx2 = (LAOGLinkExposition*)llx2W;
3240 if (llx1->src_ga < llx2->src_ga) return -1;
3241 if (llx1->src_ga > llx2->src_ga) return 1;
3242 if (llx1->dst_ga < llx2->dst_ga) return -1;
3243 if (llx1->dst_ga > llx2->dst_ga) return 1;
3244 return 0;
3245}
3246
3247static WordFM* laog_exposition = NULL; /* WordFM LAOGLinkExposition* NULL */
3248/* end EXPOSITION ONLY */
3249
3250
sewardja65db102009-01-26 10:45:16 +00003251__attribute__((noinline))
3252static void laog__init ( void )
3253{
3254 tl_assert(!laog);
3255 tl_assert(!laog_exposition);
3256
3257 laog = VG_(newFM)( HG_(zalloc), "hg.laog__init.1",
3258 HG_(free), NULL/*unboxedcmp*/ );
3259
3260 laog_exposition = VG_(newFM)( HG_(zalloc), "hg.laog__init.2", HG_(free),
3261 cmp_LAOGLinkExposition );
3262 tl_assert(laog);
3263 tl_assert(laog_exposition);
3264}
3265
sewardjb4112022007-11-09 22:49:28 +00003266static void laog__show ( Char* who ) {
3267 Word i, ws_size;
sewardj250ec2e2008-02-15 22:02:30 +00003268 UWord* ws_words;
sewardjb4112022007-11-09 22:49:28 +00003269 Lock* me;
3270 LAOGLinks* links;
3271 VG_(printf)("laog (requested by %s) {\n", who);
sewardj896f6f92008-08-19 08:38:52 +00003272 VG_(initIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003273 me = NULL;
3274 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003275 while (VG_(nextIterFM)( laog, (Word*)&me,
sewardjb5f29642007-11-16 12:02:43 +00003276 (Word*)&links )) {
sewardjb4112022007-11-09 22:49:28 +00003277 tl_assert(me);
3278 tl_assert(links);
3279 VG_(printf)(" node %p:\n", me);
3280 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
3281 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003282 VG_(printf)(" inn %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003283 HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
3284 for (i = 0; i < ws_size; i++)
barta0b6b2c2008-07-07 06:49:24 +00003285 VG_(printf)(" out %#lx\n", ws_words[i] );
sewardjb4112022007-11-09 22:49:28 +00003286 me = NULL;
3287 links = NULL;
3288 }
sewardj896f6f92008-08-19 08:38:52 +00003289 VG_(doneIterFM)( laog );
sewardjb4112022007-11-09 22:49:28 +00003290 VG_(printf)("}\n");
3291}
3292
/* Add the edge src --> dst to the graph, maintaining both the forward
   (outs) and backward (inns) adjacency sets, and -- if the edge is
   new and acquisition contexts are available -- record where the
   ordering was established, for later error reporting. */
__attribute__((noinline))
static void laog__add_edge ( Lock* src, Lock* dst ) {
   Word keyW;
   LAOGLinks* links;
   Bool presentF, presentR;
   if (0) VG_(printf)("laog__add_edge %p %p\n", src, dst);

   /* Take the opportunity to sanity check the graph.  Record in
      presentF if there is already a src->dst mapping in this node's
      forwards links, and presentR if there is already a src->dst
      mapping in this node's backwards links.  They should agree!
      Also, we need to know whether the edge was already present so as
      to decide whether or not to update the link details mapping.  We
      can compute presentF and presentR essentially for free, so may
      as well do this always. */
   presentF = presentR = False;

   /* Update the out edges for src */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
      WordSetID outs_new;
      tl_assert(links);
      tl_assert(keyW == (Word)src);
      outs_new = HG_(addToWS)( univ_laog, links->outs, (Word)dst );
      /* addToWS returns the same set id iff dst was already present. */
      presentF = outs_new == links->outs;
      links->outs = outs_new;
   } else {
      /* src has no node yet: create one with a single out-edge. */
      links = HG_(zalloc)("hg.lae.1", sizeof(LAOGLinks));
      links->inns = HG_(emptyWS)( univ_laog );
      links->outs = HG_(singletonWS)( univ_laog, (Word)dst );
      VG_(addToFM)( laog, (Word)src, (Word)links );
   }
   /* Update the in edges for dst */
   keyW = 0;
   links = NULL;
   if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
      WordSetID inns_new;
      tl_assert(links);
      tl_assert(keyW == (Word)dst);
      inns_new = HG_(addToWS)( univ_laog, links->inns, (Word)src );
      presentR = inns_new == links->inns;
      links->inns = inns_new;
   } else {
      /* dst has no node yet: create one with a single in-edge. */
      links = HG_(zalloc)("hg.lae.2", sizeof(LAOGLinks));
      links->inns = HG_(singletonWS)( univ_laog, (Word)src );
      links->outs = HG_(emptyWS)( univ_laog );
      VG_(addToFM)( laog, (Word)dst, (Word)links );
   }

   /* Forward and backward views of the edge must agree. */
   tl_assert( (presentF && presentR) || (!presentF && !presentR) );

   if (!presentF && src->acquired_at && dst->acquired_at) {
      LAOGLinkExposition expo;
      /* If this edge is entering the graph, and we have acquired_at
         information for both src and dst, record those acquisition
         points.  Hence, if there is later a violation of this
         ordering, we can show the user the two places in which the
         required src-dst ordering was previously established. */
      if (0) VG_(printf)("acquire edge %#lx %#lx\n",
                         src->guestaddr, dst->guestaddr);
      expo.src_ga = src->guestaddr;
      expo.dst_ga = dst->guestaddr;
      expo.src_ec = NULL;
      expo.dst_ec = NULL;
      tl_assert(laog_exposition);
      if (VG_(lookupFM)( laog_exposition, NULL, NULL, (Word)&expo )) {
         /* we already have it; do nothing */
      } else {
         LAOGLinkExposition* expo2 = HG_(zalloc)("hg.lae.3",
                                                 sizeof(LAOGLinkExposition));
         expo2->src_ga = src->guestaddr;
         expo2->dst_ga = dst->guestaddr;
         expo2->src_ec = src->acquired_at;
         expo2->dst_ec = dst->acquired_at;
         VG_(addToFM)( laog_exposition, (Word)expo2, (Word)NULL );
      }
   }
}
3372
3373__attribute__((noinline))
3374static void laog__del_edge ( Lock* src, Lock* dst ) {
3375 Word keyW;
3376 LAOGLinks* links;
3377 if (0) VG_(printf)("laog__del_edge %p %p\n", src, dst);
3378 /* Update the out edges for src */
3379 keyW = 0;
3380 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003381 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)src )) {
sewardjb4112022007-11-09 22:49:28 +00003382 tl_assert(links);
3383 tl_assert(keyW == (Word)src);
3384 links->outs = HG_(delFromWS)( univ_laog, links->outs, (Word)dst );
3385 }
3386 /* Update the in edges for dst */
3387 keyW = 0;
3388 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003389 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)dst )) {
sewardjb4112022007-11-09 22:49:28 +00003390 tl_assert(links);
3391 tl_assert(keyW == (Word)dst);
3392 links->inns = HG_(delFromWS)( univ_laog, links->inns, (Word)src );
3393 }
3394}
3395
3396__attribute__((noinline))
3397static WordSetID /* in univ_laog */ laog__succs ( Lock* lk ) {
3398 Word keyW;
3399 LAOGLinks* links;
3400 keyW = 0;
3401 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003402 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003403 tl_assert(links);
3404 tl_assert(keyW == (Word)lk);
3405 return links->outs;
3406 } else {
3407 return HG_(emptyWS)( univ_laog );
3408 }
3409}
3410
3411__attribute__((noinline))
3412static WordSetID /* in univ_laog */ laog__preds ( Lock* lk ) {
3413 Word keyW;
3414 LAOGLinks* links;
3415 keyW = 0;
3416 links = NULL;
sewardj896f6f92008-08-19 08:38:52 +00003417 if (VG_(lookupFM)( laog, &keyW, (Word*)&links, (Word)lk )) {
sewardjb4112022007-11-09 22:49:28 +00003418 tl_assert(links);
3419 tl_assert(keyW == (Word)lk);
3420 return links->inns;
3421 } else {
3422 return HG_(emptyWS)( univ_laog );
3423 }
3424}
3425
/* Consistency check: for every node, each in-edge must be mirrored by
   an out-edge on the other endpoint, and vice versa.  Aborts (with a
   dump of the graph) on any mismatch.  'who' names the caller for the
   failure message. */
__attribute__((noinline))
static void laog__sanity_check ( Char* who ) {
   Word i, ws_size;
   UWord* ws_words;
   Lock* me;
   LAOGLinks* links;
   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();
   VG_(initIterFM)( laog );
   me = NULL;
   links = NULL;
   if (0) VG_(printf)("laog sanity check\n");
   while (VG_(nextIterFM)( laog, (Word*)&me,
                           (Word*)&links )) {
      tl_assert(me);
      tl_assert(links);
      /* every predecessor must list 'me' among its successors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->inns );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__succs( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      /* every successor must list 'me' among its predecessors */
      HG_(getPayloadWS)( &ws_words, &ws_size, univ_laog, links->outs );
      for (i = 0; i < ws_size; i++) {
         if ( ! HG_(elemWS)( univ_laog,
                             laog__preds( (Lock*)ws_words[i] ),
                             (Word)me ))
            goto bad;
      }
      me = NULL;
      links = NULL;
   }
   VG_(doneIterFM)( laog );
   return;

  bad:
   VG_(printf)("laog__sanity_check(%s) FAILED\n", who);
   laog__show(who);
   tl_assert(0);
}
3467
3468/* If there is a path in laog from 'src' to any of the elements in
3469 'dst', return an arbitrarily chosen element of 'dst' reachable from
3470 'src'. If no path exist from 'src' to any element in 'dst', return
3471 NULL. */
/* Iterative depth-first search over the LAOG.  Uses an explicit stack
   (XArray of Lock*) plus a 'visited' set to cope with cycles. */
__attribute__((noinline))
static
Lock* laog__do_dfs_from_to ( Lock* src, WordSetID dsts /* univ_lsets */ )
{
   Lock* ret;
   Word i, ssz;
   XArray* stack; /* of Lock* */
   WordFM* visited; /* Lock* -> void, iow, Set(Lock*) */
   Lock* here;
   WordSetID succs;
   Word succs_size;
   UWord* succs_words;
   //laog__sanity_check();

   /* If the destination set is empty, we can never get there from
      'src' :-), so don't bother to try */
   if (HG_(isEmptyWS)( univ_lsets, dsts ))
      return NULL;

   ret = NULL;
   stack = VG_(newXA)( HG_(zalloc), "hg.lddft.1", HG_(free), sizeof(Lock*) );
   visited = VG_(newFM)( HG_(zalloc), "hg.lddft.2", HG_(free), NULL/*unboxedcmp*/ );

   (void) VG_(addToXA)( stack, &src );

   while (True) {

      ssz = VG_(sizeXA)( stack );

      /* stack exhausted: no path exists */
      if (ssz == 0) { ret = NULL; break; }

      /* pop the top of the stack */
      here = *(Lock**) VG_(indexXA)( stack, ssz-1 );
      VG_(dropTailXA)( stack, 1 );

      /* reached a destination lock: done */
      if (HG_(elemWS)( univ_lsets, dsts, (Word)here )) { ret = here; break; }

      /* skip nodes we've already expanded (cycle/diamond protection) */
      if (VG_(lookupFM)( visited, NULL, NULL, (Word)here ))
         continue;

      VG_(addToFM)( visited, (Word)here, 0 );

      /* push all successors of 'here' */
      succs = laog__succs( here );
      HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
      for (i = 0; i < succs_size; i++)
         (void) VG_(addToXA)( stack, &succs_words[i] );
   }

   VG_(deleteFM)( visited, NULL, NULL );
   VG_(deleteXA)( stack );
   return ret;
}
3523
3524
3525/* Thread 'thr' is acquiring 'lk'. Check for inconsistent ordering
3526 between 'lk' and the locks already held by 'thr' and issue a
3527 complaint if so. Also, update the ordering graph appropriately.
3528*/
/* Core lock-order check: called just before 'thr' acquires 'lk'.
   Reports a lock-order error if 'lk' is already ordered before any
   lock 'thr' currently holds, then records the new orderings
   (held-lock --> lk) in the graph. */
__attribute__((noinline))
static void laog__pre_thread_acquires_lock (
               Thread* thr, /* NB: BEFORE lock is added */
               Lock*   lk
            )
{
   UWord* ls_words;
   Word ls_size, i;
   Lock* other;

   /* It may be that 'thr' already holds 'lk' and is recursively
      relocking in.  In this case we just ignore the call. */
   /* NB: univ_lsets really is correct here */
   if (HG_(elemWS)( univ_lsets, thr->locksetA, (Word)lk ))
      return;

   /* lazily initialise the graph on first use */
   if (UNLIKELY(!laog || !laog_exposition))
      laog__init();

   /* First, the check.  Complain if there is any path in laog from lk
      to any of the locks already held by thr, since if any such path
      existed, it would mean that previously lk was acquired before
      (rather than after, as we are doing here) at least one of those
      locks.
   */
   other = laog__do_dfs_from_to(lk, thr->locksetA);
   if (other) {
      LAOGLinkExposition key, *found;
      /* So we managed to find a path lk --*--> other in the graph,
         which implies that 'lk' should have been acquired before
         'other' but is in fact being acquired afterwards.  We present
         the lk/other arguments to record_error_LockOrder in the order
         in which they should have been acquired. */
      /* Go look in the laog_exposition mapping, to find the allocation
         points for this edge, so we can show the user. */
      key.src_ga = lk->guestaddr;
      key.dst_ga = other->guestaddr;
      key.src_ec = NULL;
      key.dst_ec = NULL;
      found = NULL;
      if (VG_(lookupFM)( laog_exposition,
                         (Word*)&found, NULL, (Word)&key )) {
         tl_assert(found != &key);
         tl_assert(found->src_ga == key.src_ga);
         tl_assert(found->dst_ga == key.dst_ga);
         tl_assert(found->src_ec);
         tl_assert(found->dst_ec);
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 found->src_ec, found->dst_ec );
      } else {
         /* Hmm.  This can't happen (can it?) */
         HG_(record_error_LockOrder)(
            thr, lk->guestaddr, other->guestaddr,
                 NULL, NULL );
      }
   }

   /* Second, add to laog the pairs
        (old, lk)  |  old <- locks already held by thr
      Since both old and lk are currently held by thr, their acquired_at
      fields must be non-NULL.
   */
   tl_assert(lk->acquired_at);
   HG_(getPayloadWS)( &ls_words, &ls_size, univ_lsets, thr->locksetA );
   for (i = 0; i < ls_size; i++) {
      Lock* old = (Lock*)ls_words[i];
      tl_assert(old->acquired_at);
      laog__add_edge( old, lk );
   }

   /* Why "except_Locks" ?  We're here because a lock is being
      acquired by a thread, and we're in an inconsistent state here.
      See the call points in evhH__post_thread_{r,w}_acquires_lock.
      When called in this inconsistent state, locks__sanity_check duly
      barfs. */
   if (HG_(clo_sanity_flags) & SCE_LAOG)
      all_except_Locks__sanity_check("laog__pre_thread_acquires_lock-post");
}
3608
3609
3610/* Delete from 'laog' any pair mentioning a lock in locksToDelete */
3611
3612__attribute__((noinline))
3613static void laog__handle_one_lock_deletion ( Lock* lk )
3614{
3615 WordSetID preds, succs;
3616 Word preds_size, succs_size, i, j;
sewardj250ec2e2008-02-15 22:02:30 +00003617 UWord *preds_words, *succs_words;
sewardjb4112022007-11-09 22:49:28 +00003618
sewardja65db102009-01-26 10:45:16 +00003619 if (UNLIKELY(!laog || !laog_exposition))
3620 laog__init();
3621
sewardjb4112022007-11-09 22:49:28 +00003622 preds = laog__preds( lk );
3623 succs = laog__succs( lk );
3624
3625 HG_(getPayloadWS)( &preds_words, &preds_size, univ_laog, preds );
3626 for (i = 0; i < preds_size; i++)
3627 laog__del_edge( (Lock*)preds_words[i], lk );
3628
3629 HG_(getPayloadWS)( &succs_words, &succs_size, univ_laog, succs );
3630 for (j = 0; j < succs_size; j++)
3631 laog__del_edge( lk, (Lock*)succs_words[j] );
3632
3633 for (i = 0; i < preds_size; i++) {
3634 for (j = 0; j < succs_size; j++) {
3635 if (preds_words[i] != succs_words[j]) {
3636 /* This can pass unlocked locks to laog__add_edge, since
3637 we're deleting stuff. So their acquired_at fields may
3638 be NULL. */
3639 laog__add_edge( (Lock*)preds_words[i], (Lock*)succs_words[j] );
3640 }
3641 }
3642 }
3643}
3644
sewardj1cbc12f2008-11-10 16:16:46 +00003645//__attribute__((noinline))
3646//static void laog__handle_lock_deletions (
3647// WordSetID /* in univ_laog */ locksToDelete
3648// )
3649//{
3650// Word i, ws_size;
3651// UWord* ws_words;
3652//
sewardja65db102009-01-26 10:45:16 +00003653// if (UNLIKELY(!laog || !laog_exposition))
3654// laog__init();
sewardj1cbc12f2008-11-10 16:16:46 +00003655//
3656// HG_(getPayloadWS)( &ws_words, &ws_size, univ_lsets, locksToDelete );
3657// for (i = 0; i < ws_size; i++)
3658// laog__handle_one_lock_deletion( (Lock*)ws_words[i] );
3659//
3660// if (HG_(clo_sanity_flags) & SCE_LAOG)
3661// all__sanity_check("laog__handle_lock_deletions-post");
3662//}
sewardjb4112022007-11-09 22:49:28 +00003663
3664
3665/*--------------------------------------------------------------*/
3666/*--- Malloc/free replacements ---*/
3667/*--------------------------------------------------------------*/
3668
/* Metadata kept for every live client heap block, keyed in the hash
   table by the block's payload address. */
typedef
   struct {
      void*       next;    /* required by m_hashtable */
      Addr        payload; /* ptr to actual block */
      SizeT       szB;     /* size requested */
      ExeContext* where;   /* where it was allocated */
      Thread*     thr;     /* allocating thread */
   }
   MallocMeta;
3678
3679/* A hash table of MallocMetas, used to track malloc'd blocks
3680 (obviously). */
3681static VgHashTable hg_mallocmeta_table = NULL;
3682
3683
/* Allocate a zero-filled MallocMeta record. */
static MallocMeta* new_MallocMeta ( void ) {
   MallocMeta* md = HG_(zalloc)( "hg.new_MallocMeta.1", sizeof(MallocMeta) );
   tl_assert(md);
   return md;
}
/* Release a MallocMeta previously obtained from new_MallocMeta. */
static void delete_MallocMeta ( MallocMeta* md ) {
   HG_(free)(md);
}
3692
3693
3694/* Allocate a client block and set up the metadata for it. */
3695
/* Common worker for all the client allocation entry points: allocate
   szB bytes at alignment alignB (zero-filled if is_zeroed), record
   the block's metadata, and inform the lower memory layers.  Returns
   NULL if the underlying allocation fails. */
static
void* handle_alloc ( ThreadId tid,
                     SizeT szB, SizeT alignB, Bool is_zeroed )
{
   Addr p;
   MallocMeta* md;

   tl_assert( ((SSizeT)szB) >= 0 );
   p = (Addr)VG_(cli_malloc)(alignB, szB);
   if (!p) {
      return NULL;
   }
   if (is_zeroed)
      VG_(memset)((void*)p, 0, szB);

   /* Note that map_threads_lookup must succeed (cannot assert), since
      memory can only be allocated by currently alive threads, hence
      they must have an entry in map_threads. */
   md = new_MallocMeta();
   md->payload = p;
   md->szB = szB;
   md->where = VG_(record_ExeContext)( tid, 0 );
   md->thr = map_threads_lookup( tid );

   VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md );

   /* Tell the lower level memory wranglers. */
   evh__new_mem_heap( p, szB, is_zeroed );

   return (void*)p;
}
3727
3728/* Re the checks for less-than-zero (also in hg_cli__realloc below):
3729 Cast to a signed type to catch any unexpectedly negative args.
3730 We're assuming here that the size asked for is not greater than
3731 2^31 bytes (for 32-bit platforms) or 2^63 bytes (for 64-bit
3732 platforms). */
/* Client malloc(): default alignment, contents uninitialised. */
static void* hg_cli__malloc ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client operator new: same behaviour as malloc here. */
static void* hg_cli____builtin_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client operator new[]: same behaviour as malloc here. */
static void* hg_cli____builtin_vec_new ( ThreadId tid, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, VG_(clo_alignment),
                         /*is_zeroed*/False );
}
/* Client memalign(): caller-specified alignment, uninitialised. */
static void* hg_cli__memalign ( ThreadId tid, SizeT align, SizeT n ) {
   if (((SSizeT)n) < 0) return NULL;
   return handle_alloc ( tid, n, align,
                         /*is_zeroed*/False );
}
3753static void* hg_cli__calloc ( ThreadId tid, SizeT nmemb, SizeT size1 ) {
3754 if ( ((SSizeT)nmemb) < 0 || ((SSizeT)size1) < 0 ) return NULL;
3755 return handle_alloc ( tid, nmemb*size1, VG_(clo_alignment),
3756 /*is_zeroed*/True );
3757}
3758
3759
3760/* Free a client block, including getting rid of the relevant
3761 metadata. */
3762
/* Common worker for the client free entry points: look up and remove
   the block's metadata, release the block, and inform the lower
   memory layers.  Frees of unknown addresses are silently ignored. */
static void handle_free ( ThreadId tid, void* p )
{
   MallocMeta *md, *old_md;
   SizeT szB;

   /* First see if we can find the metadata for 'p'. */
   md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
   if (!md)
      return; /* apparently freeing a bogus address. Oh well. */

   tl_assert(md->payload == (Addr)p);
   szB = md->szB;

   /* Nuke the metadata block */
   old_md = (MallocMeta*)
            VG_(HT_remove)( hg_mallocmeta_table, (UWord)p );
   tl_assert(old_md); /* it must be present - we just found it */
   tl_assert(old_md == md);
   tl_assert(old_md->payload == (Addr)p);

   VG_(cli_free)((void*)old_md->payload);
   delete_MallocMeta(old_md);

   /* Tell the lower level memory wranglers. */
   evh__die_mem_heap( (Addr)p, szB );
}
3789
/* Client free(). */
static void hg_cli__free ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete. */
static void hg_cli____builtin_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
/* Client operator delete[]. */
static void hg_cli____builtin_vec_delete ( ThreadId tid, void* p ) {
   handle_free(tid, p);
}
3799
3800
3801static void* hg_cli__realloc ( ThreadId tid, void* payloadV, SizeT new_size )
3802{
3803 MallocMeta *md, *md_new, *md_tmp;
3804 SizeT i;
3805
3806 Addr payload = (Addr)payloadV;
3807
3808 if (((SSizeT)new_size) < 0) return NULL;
3809
3810 md = (MallocMeta*) VG_(HT_lookup)( hg_mallocmeta_table, (UWord)payload );
3811 if (!md)
3812 return NULL; /* apparently realloc-ing a bogus address. Oh well. */
3813
3814 tl_assert(md->payload == payload);
3815
3816 if (md->szB == new_size) {
3817 /* size unchanged */
3818 md->where = VG_(record_ExeContext)(tid, 0);
3819 return payloadV;
3820 }
3821
3822 if (md->szB > new_size) {
3823 /* new size is smaller */
3824 md->szB = new_size;
3825 md->where = VG_(record_ExeContext)(tid, 0);
3826 evh__die_mem_heap( md->payload + new_size, md->szB - new_size );
3827 return payloadV;
3828 }
3829
3830 /* else */ {
3831 /* new size is bigger */
3832 Addr p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
3833
3834 /* First half kept and copied, second half new */
3835 // FIXME: shouldn't we use a copier which implements the
3836 // memory state machine?
sewardj23f12002009-07-24 08:45:08 +00003837 evh__copy_mem( payload, p_new, md->szB );
sewardjb4112022007-11-09 22:49:28 +00003838 evh__new_mem_heap ( p_new + md->szB, new_size - md->szB,
sewardjf98e1c02008-10-25 16:22:41 +00003839 /*inited*/False );
sewardjb4112022007-11-09 22:49:28 +00003840 /* FIXME: can anything funny happen here? specifically, if the
3841 old range contained a lock, then die_mem_heap will complain.
3842 Is that the correct behaviour? Not sure. */
3843 evh__die_mem_heap( payload, md->szB );
3844
3845 /* Copy from old to new */
3846 for (i = 0; i < md->szB; i++)
3847 ((UChar*)p_new)[i] = ((UChar*)payload)[i];
3848
3849 /* Because the metadata hash table is index by payload address,
3850 we have to get rid of the old hash table entry and make a new
3851 one. We can't just modify the existing metadata in place,
3852 because then it would (almost certainly) be in the wrong hash
3853 chain. */
3854 md_new = new_MallocMeta();
3855 *md_new = *md;
3856
3857 md_tmp = VG_(HT_remove)( hg_mallocmeta_table, payload );
3858 tl_assert(md_tmp);
3859 tl_assert(md_tmp == md);
3860
3861 VG_(cli_free)((void*)md->payload);
3862 delete_MallocMeta(md);
3863
3864 /* Update fields */
3865 md_new->where = VG_(record_ExeContext)( tid, 0 );
3866 md_new->szB = new_size;
3867 md_new->payload = p_new;
3868 md_new->thr = map_threads_lookup( tid );
3869
3870 /* and add */
3871 VG_(HT_add_node)( hg_mallocmeta_table, (VgHashNode*)md_new );
3872
3873 return (void*)p_new;
3874 }
3875}
3876
njn8b140de2009-02-17 04:31:18 +00003877static SizeT hg_cli_malloc_usable_size ( ThreadId tid, void* p )
3878{
3879 MallocMeta *md = VG_(HT_lookup)( hg_mallocmeta_table, (UWord)p );
3880
3881 // There may be slop, but pretend there isn't because only the asked-for
3882 // area will have been shadowed properly.
3883 return ( md ? md->szB : 0 );
3884}
3885
sewardjb4112022007-11-09 22:49:28 +00003886
sewardj095d61e2010-03-11 13:43:18 +00003887/* For error creation: map 'data_addr' to a malloc'd chunk, if any.
sewardjc8028ad2010-05-05 09:34:42 +00003888 Slow linear search. With a bit of hash table help if 'data_addr'
3889 is either the start of a block or up to 15 word-sized steps along
3890 from the start of a block. */
sewardj095d61e2010-03-11 13:43:18 +00003891
3892static inline Bool addr_is_in_MM_Chunk( MallocMeta* mm, Addr a )
3893{
sewardjc8028ad2010-05-05 09:34:42 +00003894 /* Accept 'a' as within 'mm' if 'mm's size is zero and 'a' points
3895 right at it. */
3896 if (UNLIKELY(mm->szB == 0 && a == mm->payload))
3897 return True;
3898 /* else normal interval rules apply */
3899 if (LIKELY(a < mm->payload)) return False;
3900 if (LIKELY(a >= mm->payload + mm->szB)) return False;
3901 return True;
sewardj095d61e2010-03-11 13:43:18 +00003902}
3903
sewardjc8028ad2010-05-05 09:34:42 +00003904Bool HG_(mm_find_containing_block)( /*OUT*/ExeContext** where,
sewardj095d61e2010-03-11 13:43:18 +00003905 /*OUT*/Addr* payload,
3906 /*OUT*/SizeT* szB,
3907 Addr data_addr )
3908{
3909 MallocMeta* mm;
sewardjc8028ad2010-05-05 09:34:42 +00003910 Int i;
3911 const Int n_fast_check_words = 16;
3912
3913 /* First, do a few fast searches on the basis that data_addr might
3914 be exactly the start of a block or up to 15 words inside. This
3915 can happen commonly via the creq
3916 _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK. */
3917 for (i = 0; i < n_fast_check_words; i++) {
3918 mm = VG_(HT_lookup)( hg_mallocmeta_table,
3919 data_addr - (UWord)(UInt)i * sizeof(UWord) );
3920 if (UNLIKELY(mm && addr_is_in_MM_Chunk(mm, data_addr)))
3921 goto found;
3922 }
3923
sewardj095d61e2010-03-11 13:43:18 +00003924 /* Well, this totally sucks. But without using an interval tree or
sewardjc8028ad2010-05-05 09:34:42 +00003925 some such, it's hard to see how to do better. We have to check
3926 every block in the entire table. */
sewardj095d61e2010-03-11 13:43:18 +00003927 VG_(HT_ResetIter)(hg_mallocmeta_table);
3928 while ( (mm = VG_(HT_Next)(hg_mallocmeta_table)) ) {
sewardjc8028ad2010-05-05 09:34:42 +00003929 if (UNLIKELY(addr_is_in_MM_Chunk(mm, data_addr)))
3930 goto found;
sewardj095d61e2010-03-11 13:43:18 +00003931 }
sewardjc8028ad2010-05-05 09:34:42 +00003932
3933 /* Not found. Bah. */
3934 return False;
3935 /*NOTREACHED*/
3936
3937 found:
3938 tl_assert(mm);
3939 tl_assert(addr_is_in_MM_Chunk(mm, data_addr));
3940 if (where) *where = mm->where;
3941 if (payload) *payload = mm->payload;
3942 if (szB) *szB = mm->szB;
3943 return True;
sewardj095d61e2010-03-11 13:43:18 +00003944}
3945
3946
sewardjb4112022007-11-09 22:49:28 +00003947/*--------------------------------------------------------------*/
3948/*--- Instrumentation ---*/
3949/*--------------------------------------------------------------*/
3950
3951static void instrument_mem_access ( IRSB* bbOut,
3952 IRExpr* addr,
3953 Int szB,
3954 Bool isStore,
3955 Int hWordTy_szB )
3956{
3957 IRType tyAddr = Ity_INVALID;
3958 HChar* hName = NULL;
3959 void* hAddr = NULL;
3960 Int regparms = 0;
3961 IRExpr** argv = NULL;
3962 IRDirty* di = NULL;
3963
3964 tl_assert(isIRAtom(addr));
3965 tl_assert(hWordTy_szB == 4 || hWordTy_szB == 8);
3966
3967 tyAddr = typeOfIRExpr( bbOut->tyenv, addr );
3968 tl_assert(tyAddr == Ity_I32 || tyAddr == Ity_I64);
3969
3970 /* So the effective address is in 'addr' now. */
3971 regparms = 1; // unless stated otherwise
3972 if (isStore) {
3973 switch (szB) {
3974 case 1:
sewardj23f12002009-07-24 08:45:08 +00003975 hName = "evh__mem_help_cwrite_1";
3976 hAddr = &evh__mem_help_cwrite_1;
sewardjb4112022007-11-09 22:49:28 +00003977 argv = mkIRExprVec_1( addr );
3978 break;
3979 case 2:
sewardj23f12002009-07-24 08:45:08 +00003980 hName = "evh__mem_help_cwrite_2";
3981 hAddr = &evh__mem_help_cwrite_2;
sewardjb4112022007-11-09 22:49:28 +00003982 argv = mkIRExprVec_1( addr );
3983 break;
3984 case 4:
sewardj23f12002009-07-24 08:45:08 +00003985 hName = "evh__mem_help_cwrite_4";
3986 hAddr = &evh__mem_help_cwrite_4;
sewardjb4112022007-11-09 22:49:28 +00003987 argv = mkIRExprVec_1( addr );
3988 break;
3989 case 8:
sewardj23f12002009-07-24 08:45:08 +00003990 hName = "evh__mem_help_cwrite_8";
3991 hAddr = &evh__mem_help_cwrite_8;
sewardjb4112022007-11-09 22:49:28 +00003992 argv = mkIRExprVec_1( addr );
3993 break;
3994 default:
3995 tl_assert(szB > 8 && szB <= 512); /* stay sane */
3996 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00003997 hName = "evh__mem_help_cwrite_N";
3998 hAddr = &evh__mem_help_cwrite_N;
sewardjb4112022007-11-09 22:49:28 +00003999 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4000 break;
4001 }
4002 } else {
4003 switch (szB) {
4004 case 1:
sewardj23f12002009-07-24 08:45:08 +00004005 hName = "evh__mem_help_cread_1";
4006 hAddr = &evh__mem_help_cread_1;
sewardjb4112022007-11-09 22:49:28 +00004007 argv = mkIRExprVec_1( addr );
4008 break;
4009 case 2:
sewardj23f12002009-07-24 08:45:08 +00004010 hName = "evh__mem_help_cread_2";
4011 hAddr = &evh__mem_help_cread_2;
sewardjb4112022007-11-09 22:49:28 +00004012 argv = mkIRExprVec_1( addr );
4013 break;
4014 case 4:
sewardj23f12002009-07-24 08:45:08 +00004015 hName = "evh__mem_help_cread_4";
4016 hAddr = &evh__mem_help_cread_4;
sewardjb4112022007-11-09 22:49:28 +00004017 argv = mkIRExprVec_1( addr );
4018 break;
4019 case 8:
sewardj23f12002009-07-24 08:45:08 +00004020 hName = "evh__mem_help_cread_8";
4021 hAddr = &evh__mem_help_cread_8;
sewardjb4112022007-11-09 22:49:28 +00004022 argv = mkIRExprVec_1( addr );
4023 break;
4024 default:
4025 tl_assert(szB > 8 && szB <= 512); /* stay sane */
4026 regparms = 2;
sewardj23f12002009-07-24 08:45:08 +00004027 hName = "evh__mem_help_cread_N";
4028 hAddr = &evh__mem_help_cread_N;
sewardjb4112022007-11-09 22:49:28 +00004029 argv = mkIRExprVec_2( addr, mkIRExpr_HWord( szB ));
4030 break;
4031 }
4032 }
4033
4034 /* Add the helper. */
4035 tl_assert(hName);
4036 tl_assert(hAddr);
4037 tl_assert(argv);
4038 di = unsafeIRDirty_0_N( regparms,
4039 hName, VG_(fnptr_to_fnentry)( hAddr ),
4040 argv );
4041 addStmtToIRSB( bbOut, IRStmt_Dirty(di) );
4042}
4043
4044
sewardja0eee322009-07-31 08:46:35 +00004045/* Figure out if GA is a guest code address in the dynamic linker, and
4046 if so return True. Otherwise (and in case of any doubt) return
4047 False. (sidedly safe w/ False as the safe value) */
4048static Bool is_in_dynamic_linker_shared_object( Addr64 ga )
4049{
4050 DebugInfo* dinfo;
4051 const UChar* soname;
4052 if (0) return False;
4053
sewardje3f1e592009-07-31 09:41:29 +00004054 dinfo = VG_(find_DebugInfo)( (Addr)ga );
sewardja0eee322009-07-31 08:46:35 +00004055 if (!dinfo) return False;
4056
sewardje3f1e592009-07-31 09:41:29 +00004057 soname = VG_(DebugInfo_get_soname)(dinfo);
sewardja0eee322009-07-31 08:46:35 +00004058 tl_assert(soname);
4059 if (0) VG_(printf)("%s\n", soname);
4060
4061# if defined(VGO_linux)
sewardj651cfa42010-01-11 13:02:19 +00004062 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_3)) return True;
sewardja0eee322009-07-31 08:46:35 +00004063 if (VG_STREQ(soname, VG_U_LD_LINUX_SO_2)) return True;
4064 if (VG_STREQ(soname, VG_U_LD_LINUX_X86_64_SO_2)) return True;
4065 if (VG_STREQ(soname, VG_U_LD64_SO_1)) return True;
4066 if (VG_STREQ(soname, VG_U_LD_SO_1)) return True;
4067# elif defined(VGO_darwin)
4068 if (VG_STREQ(soname, VG_U_DYLD)) return True;
4069# else
4070# error "Unsupported OS"
4071# endif
4072 return False;
4073}
4074
sewardjb4112022007-11-09 22:49:28 +00004075static
4076IRSB* hg_instrument ( VgCallbackClosure* closure,
4077 IRSB* bbIn,
4078 VexGuestLayout* layout,
4079 VexGuestExtents* vge,
4080 IRType gWordTy, IRType hWordTy )
4081{
sewardj1c0ce7a2009-07-01 08:10:49 +00004082 Int i;
4083 IRSB* bbOut;
4084 Addr64 cia; /* address of current insn */
4085 IRStmt* st;
sewardja0eee322009-07-31 08:46:35 +00004086 Bool inLDSO = False;
4087 Addr64 inLDSOmask4K = 1; /* mismatches on first check */
sewardjb4112022007-11-09 22:49:28 +00004088
4089 if (gWordTy != hWordTy) {
4090 /* We don't currently support this case. */
4091 VG_(tool_panic)("host/guest word size mismatch");
4092 }
4093
sewardja0eee322009-07-31 08:46:35 +00004094 if (VKI_PAGE_SIZE < 4096 || VG_(log2)(VKI_PAGE_SIZE) == -1) {
4095 VG_(tool_panic)("implausible or too-small VKI_PAGE_SIZE");
4096 }
4097
sewardjb4112022007-11-09 22:49:28 +00004098 /* Set up BB */
4099 bbOut = emptyIRSB();
4100 bbOut->tyenv = deepCopyIRTypeEnv(bbIn->tyenv);
4101 bbOut->next = deepCopyIRExpr(bbIn->next);
4102 bbOut->jumpkind = bbIn->jumpkind;
4103
4104 // Copy verbatim any IR preamble preceding the first IMark
4105 i = 0;
4106 while (i < bbIn->stmts_used && bbIn->stmts[i]->tag != Ist_IMark) {
4107 addStmtToIRSB( bbOut, bbIn->stmts[i] );
4108 i++;
4109 }
4110
sewardj1c0ce7a2009-07-01 08:10:49 +00004111 // Get the first statement, and initial cia from it
4112 tl_assert(bbIn->stmts_used > 0);
4113 tl_assert(i < bbIn->stmts_used);
4114 st = bbIn->stmts[i];
4115 tl_assert(Ist_IMark == st->tag);
4116 cia = st->Ist.IMark.addr;
4117 st = NULL;
4118
sewardjb4112022007-11-09 22:49:28 +00004119 for (/*use current i*/; i < bbIn->stmts_used; i++) {
sewardj1c0ce7a2009-07-01 08:10:49 +00004120 st = bbIn->stmts[i];
sewardjb4112022007-11-09 22:49:28 +00004121 tl_assert(st);
4122 tl_assert(isFlatIRStmt(st));
4123 switch (st->tag) {
4124 case Ist_NoOp:
4125 case Ist_AbiHint:
4126 case Ist_Put:
4127 case Ist_PutI:
sewardjb4112022007-11-09 22:49:28 +00004128 case Ist_Exit:
4129 /* None of these can contain any memory references. */
4130 break;
4131
sewardj1c0ce7a2009-07-01 08:10:49 +00004132 case Ist_IMark:
4133 /* no mem refs, but note the insn address. */
4134 cia = st->Ist.IMark.addr;
sewardja0eee322009-07-31 08:46:35 +00004135 /* Don't instrument the dynamic linker. It generates a
4136 lot of races which we just expensively suppress, so
4137 it's pointless.
4138
4139 Avoid flooding is_in_dynamic_linker_shared_object with
4140 requests by only checking at transitions between 4K
4141 pages. */
4142 if ((cia & ~(Addr64)0xFFF) != inLDSOmask4K) {
4143 if (0) VG_(printf)("NEW %#lx\n", (Addr)cia);
4144 inLDSOmask4K = cia & ~(Addr64)0xFFF;
4145 inLDSO = is_in_dynamic_linker_shared_object(cia);
4146 } else {
4147 if (0) VG_(printf)("old %#lx\n", (Addr)cia);
4148 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004149 break;
4150
sewardjb4112022007-11-09 22:49:28 +00004151 case Ist_MBE:
sewardjf98e1c02008-10-25 16:22:41 +00004152 switch (st->Ist.MBE.event) {
4153 case Imbe_Fence:
4154 break; /* not interesting */
sewardjf98e1c02008-10-25 16:22:41 +00004155 default:
4156 goto unhandled;
4157 }
sewardjb4112022007-11-09 22:49:28 +00004158 break;
4159
sewardj1c0ce7a2009-07-01 08:10:49 +00004160 case Ist_CAS: {
4161 /* Atomic read-modify-write cycle. Just pretend it's a
4162 read. */
4163 IRCAS* cas = st->Ist.CAS.details;
sewardj23f12002009-07-24 08:45:08 +00004164 Bool isDCAS = cas->oldHi != IRTemp_INVALID;
4165 if (isDCAS) {
4166 tl_assert(cas->expdHi);
4167 tl_assert(cas->dataHi);
4168 } else {
4169 tl_assert(!cas->expdHi);
4170 tl_assert(!cas->dataHi);
4171 }
4172 /* Just be boring about it. */
sewardja0eee322009-07-31 08:46:35 +00004173 if (!inLDSO) {
4174 instrument_mem_access(
4175 bbOut,
4176 cas->addr,
4177 (isDCAS ? 2 : 1)
4178 * sizeofIRType(typeOfIRExpr(bbIn->tyenv, cas->dataLo)),
4179 False/*!isStore*/,
4180 sizeofIRType(hWordTy)
4181 );
4182 }
sewardj1c0ce7a2009-07-01 08:10:49 +00004183 break;
4184 }
4185
sewardjdb5907d2009-11-26 17:20:21 +00004186 case Ist_LLSC: {
4187 /* We pretend store-conditionals don't exist, viz, ignore
4188 them. Whereas load-linked's are treated the same as
4189 normal loads. */
4190 IRType dataTy;
4191 if (st->Ist.LLSC.storedata == NULL) {
4192 /* LL */
4193 dataTy = typeOfIRTemp(bbIn->tyenv, st->Ist.LLSC.result);
sewardja0eee322009-07-31 08:46:35 +00004194 if (!inLDSO) {
sewardjdb5907d2009-11-26 17:20:21 +00004195 instrument_mem_access(
4196 bbOut,
4197 st->Ist.LLSC.addr,
4198 sizeofIRType(dataTy),
4199 False/*!isStore*/,
sewardja0eee322009-07-31 08:46:35 +00004200 sizeofIRType(hWordTy)
4201 );
4202 }
sewardjdb5907d2009-11-26 17:20:21 +00004203 } else {
4204 /* SC */
4205 /*ignore */
4206 }
4207 break;
4208 }
4209
4210 case Ist_Store:
4211 /* It seems we pretend that store-conditionals don't
4212 exist, viz, just ignore them ... */
4213 if (!inLDSO) {
4214 instrument_mem_access(
4215 bbOut,
4216 st->Ist.Store.addr,
4217 sizeofIRType(typeOfIRExpr(bbIn->tyenv, st->Ist.Store.data)),
4218 True/*isStore*/,
4219 sizeofIRType(hWordTy)
4220 );
sewardj1c0ce7a2009-07-01 08:10:49 +00004221 }
njnb83caf22009-05-25 01:47:56 +00004222 break;
sewardjb4112022007-11-09 22:49:28 +00004223
4224 case Ist_WrTmp: {
sewardj1c0ce7a2009-07-01 08:10:49 +00004225 /* ... whereas here we don't care whether a load is a
4226 vanilla one or a load-linked. */
sewardjb4112022007-11-09 22:49:28 +00004227 IRExpr* data = st->Ist.WrTmp.data;
4228 if (data->tag == Iex_Load) {
sewardja0eee322009-07-31 08:46:35 +00004229 if (!inLDSO) {
4230 instrument_mem_access(
4231 bbOut,
4232 data->Iex.Load.addr,
4233 sizeofIRType(data->Iex.Load.ty),
4234 False/*!isStore*/,
4235 sizeofIRType(hWordTy)
4236 );
4237 }
sewardjb4112022007-11-09 22:49:28 +00004238 }
4239 break;
4240 }
4241
4242 case Ist_Dirty: {
4243 Int dataSize;
4244 IRDirty* d = st->Ist.Dirty.details;
4245 if (d->mFx != Ifx_None) {
4246 /* This dirty helper accesses memory. Collect the
4247 details. */
4248 tl_assert(d->mAddr != NULL);
4249 tl_assert(d->mSize != 0);
4250 dataSize = d->mSize;
4251 if (d->mFx == Ifx_Read || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004252 if (!inLDSO) {
4253 instrument_mem_access(
4254 bbOut, d->mAddr, dataSize, False/*!isStore*/,
4255 sizeofIRType(hWordTy)
4256 );
4257 }
sewardjb4112022007-11-09 22:49:28 +00004258 }
4259 if (d->mFx == Ifx_Write || d->mFx == Ifx_Modify) {
sewardja0eee322009-07-31 08:46:35 +00004260 if (!inLDSO) {
4261 instrument_mem_access(
4262 bbOut, d->mAddr, dataSize, True/*isStore*/,
4263 sizeofIRType(hWordTy)
4264 );
4265 }
sewardjb4112022007-11-09 22:49:28 +00004266 }
4267 } else {
4268 tl_assert(d->mAddr == NULL);
4269 tl_assert(d->mSize == 0);
4270 }
4271 break;
4272 }
4273
4274 default:
sewardjf98e1c02008-10-25 16:22:41 +00004275 unhandled:
4276 ppIRStmt(st);
sewardjb4112022007-11-09 22:49:28 +00004277 tl_assert(0);
4278
4279 } /* switch (st->tag) */
4280
4281 addStmtToIRSB( bbOut, st );
4282 } /* iterate over bbIn->stmts */
4283
4284 return bbOut;
4285}
4286
4287
4288/*----------------------------------------------------------------*/
4289/*--- Client requests ---*/
4290/*----------------------------------------------------------------*/
4291
4292/* Sheesh. Yet another goddam finite map. */
4293static WordFM* map_pthread_t_to_Thread = NULL; /* pthread_t -> Thread* */
4294
4295static void map_pthread_t_to_Thread_INIT ( void ) {
4296 if (UNLIKELY(map_pthread_t_to_Thread == NULL)) {
sewardjf98e1c02008-10-25 16:22:41 +00004297 map_pthread_t_to_Thread = VG_(newFM)( HG_(zalloc), "hg.mpttT.1",
4298 HG_(free), NULL );
sewardjb4112022007-11-09 22:49:28 +00004299 tl_assert(map_pthread_t_to_Thread != NULL);
4300 }
4301}
4302
4303
/* Handle a Helgrind client request: first the user-visible requests
   (memory cleaning/tracking), then the internal notifications sent by
   Helgrind's own pthread intercepts.  Returns False for requests that
   are not ours; asserts on a Helgrind request we don't recognise.
   '*ret' is the value returned to the client (default 0). */
static
Bool hg_handle_client_request ( ThreadId tid, UWord* args, UWord* ret)
{
   if (!VG_IS_TOOL_USERREQ('H','G',args[0]))
      return False;

   /* Anything that gets past the above check is one of ours, so we
      should be able to handle it. */

   /* default, meaningless return value, unless otherwise set */
   *ret = 0;

   switch (args[0]) {

      /* --- --- User-visible client requests --- --- */

      case VG_USERREQ__HG_CLEAN_MEMORY:
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY(%#lx,%ld)\n",
                            args[1], args[2]);
         /* Call die_mem to (expensively) tidy up properly, if there
            are any held locks etc in the area.  Calling evh__die_mem
            and then evh__new_mem is a bit inefficient; probably just
            the latter would do. */
         if (args[2] > 0) { /* length */
            evh__die_mem(args[1], args[2]);
            /* and then set it to New */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* Clean the heap block containing args[1]; returns its size in
         *ret, or (UWord)-1 if no block contains that address. */
      case _VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK: {
         Addr payload = 0;
         SizeT pszB = 0;
         if (0) VG_(printf)("VG_USERREQ__HG_CLEAN_MEMORY_HEAPBLOCK(%#lx)\n",
                            args[1]);
         if (HG_(mm_find_containing_block)(NULL, &payload, &pszB, args[1])) {
            if (pszB > 0) {
               evh__die_mem(payload, pszB);
               evh__new_mem(payload, pszB);
            }
            *ret = pszB;
         } else {
            *ret = (UWord)-1;
         }
         break;
      }

      case _VG_USERREQ__HG_ARANGE_MAKE_UNTRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_UNTRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__untrack_mem(args[1], args[2]);
         }
         break;

      case _VG_USERREQ__HG_ARANGE_MAKE_TRACKED:
         if (0) VG_(printf)("HG_ARANGE_MAKE_TRACKED(%#lx,%ld)\n",
                            args[1], args[2]);
         if (args[2] > 0) { /* length */
            evh__new_mem(args[1], args[2]);
         }
         break;

      /* --- --- Client requests for Helgrind's use only --- --- */

      /* Some thread is telling us its pthread_t value.  Record the
         binding between that and the associated Thread*, so we can
         later find the Thread* again when notified of a join by the
         thread. */
      case _VG_USERREQ__HG_SET_MY_PTHREAD_T: {
         Thread* my_thr = NULL;
         if (0)
         VG_(printf)("SET_MY_PTHREAD_T (tid %d): pthread_t = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         /* This assertion should hold because the map_threads (tid to
            Thread*) binding should have been made at the point of
            low-level creation of this thread, which should have
            happened prior to us getting this client request for it.
            That's because this client request is sent from
            client-world from the 'thread_wrapper' function, which
            only runs once the thread has been low-level created. */
         tl_assert(my_thr != NULL);
         /* So now we know that (pthread_t)args[1] is associated with
            (Thread*)my_thr.  Note that down. */
         if (0)
         VG_(printf)("XXXX: bind pthread_t %p to Thread* %p\n",
                     (void*)args[1], (void*)my_thr );
         VG_(addToFM)( map_pthread_t_to_Thread, (Word)args[1], (Word)my_thr );
         break;
      }

      case _VG_USERREQ__HG_PTH_API_ERROR: {
         Thread* my_thr = NULL;
         map_pthread_t_to_Thread_INIT();
         my_thr = map_threads_maybe_lookup( tid );
         tl_assert(my_thr); /* See justification above in SET_MY_PTHREAD_T */
         /* args: fnname, error code, error string */
         HG_(record_error_PthAPIerror)(
            my_thr, (HChar*)args[1], (Word)args[2], (HChar*)args[3] );
         break;
      }

      /* This thread (tid) has completed a join with the quitting
         thread whose pthread_t is in args[1]. */
      case _VG_USERREQ__HG_PTHREAD_JOIN_POST: {
         Thread* thr_q = NULL; /* quitter Thread* */
         Bool    found = False;
         if (0)
         VG_(printf)("NOTIFY_JOIN_COMPLETE (tid %d): quitter = %p\n", (Int)tid,
                     (void*)args[1]);
         map_pthread_t_to_Thread_INIT();
         found = VG_(lookupFM)( map_pthread_t_to_Thread,
                                NULL, (Word*)&thr_q, (Word)args[1] );
         /* Can this fail?  It would mean that our pthread_join
            wrapper observed a successful join on args[1] yet that
            thread never existed (or at least, it never lodged an
            entry in the mapping (via SET_MY_PTHREAD_T)).  Which
            sounds like a bug in the threads library. */
         // FIXME: get rid of this assertion; handle properly
         tl_assert(found);
         if (found) {
            if (0)
            VG_(printf)(".................... quitter Thread* = %p\n",
                        thr_q);
            evh__HG_PTHREAD_JOIN_POST( tid, thr_q );
         }
         break;
      }

      /* EXPOSITION only: by intercepting lock init events we can show
         the user where the lock was initialised, rather than only
         being able to show where it was first locked.  Intercepting
         lock initialisations is not necessary for the basic operation
         of the race checker. */
      case _VG_USERREQ__HG_PTHREAD_MUTEX_INIT_POST:
         evh__HG_PTHREAD_MUTEX_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_DESTROY_PRE:
         evh__HG_PTHREAD_MUTEX_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_PRE:   // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_UNLOCK_POST:  // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_PRE:     // pth_mx_t*, Word
         evh__HG_PTHREAD_MUTEX_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_MUTEX_LOCK_POST:    // pth_mx_t*
         evh__HG_PTHREAD_MUTEX_LOCK_POST( tid, (void*)args[1] );
         break;

      /* This thread is about to do pthread_cond_signal on the
         pthread_cond_t* in arg[1].  Ditto pthread_cond_broadcast. */
      case _VG_USERREQ__HG_PTHREAD_COND_SIGNAL_PRE:
      case _VG_USERREQ__HG_PTHREAD_COND_BROADCAST_PRE:
         evh__HG_PTHREAD_COND_SIGNAL_PRE( tid, (void*)args[1] );
         break;

      /* Entry into pthread_cond_wait, cond=arg[1], mutex=arg[2].
         Returns a flag indicating whether or not the mutex is believed to be
         valid for this operation. */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_PRE: {
         Bool mutex_is_valid
            = evh__HG_PTHREAD_COND_WAIT_PRE( tid, (void*)args[1],
                                                  (void*)args[2] );
         *ret = mutex_is_valid ? 1 : 0;
         break;
      }

      /* cond=arg[1] */
      case _VG_USERREQ__HG_PTHREAD_COND_DESTROY_PRE:
         evh__HG_PTHREAD_COND_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* Thread successfully completed pthread_cond_wait, cond=arg[1],
         mutex=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_COND_WAIT_POST:
         evh__HG_PTHREAD_COND_WAIT_POST( tid,
                                         (void*)args[1], (void*)args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_INIT_POST:
         evh__HG_PTHREAD_RWLOCK_INIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_DESTROY_PRE:
         evh__HG_PTHREAD_RWLOCK_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* rwlock=arg[1], isW=arg[2], isTryLock=arg[3] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_LOCK_PRE( tid, (void*)args[1],
                                               args[2], args[3] );
         break;

      /* rwlock=arg[1], isW=arg[2] */
      case _VG_USERREQ__HG_PTHREAD_RWLOCK_LOCK_POST:
         evh__HG_PTHREAD_RWLOCK_LOCK_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_PRE:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_RWLOCK_UNLOCK_POST:
         evh__HG_PTHREAD_RWLOCK_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_INIT_POST: /* sem_t*, unsigned long */
         evh__HG_POSIX_SEM_INIT_POST( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_DESTROY_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_POST_PRE: /* sem_t* */
         evh__HG_POSIX_SEM_POST_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_POSIX_SEM_WAIT_POST: /* sem_t* */
         evh__HG_POSIX_SEM_WAIT_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_INIT_PRE:
         /* pth_bar_t*, ulong count, ulong resizable */
         evh__HG_PTHREAD_BARRIER_INIT_PRE( tid, (void*)args[1],
                                                args[2], args[3] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_RESIZE_PRE:
         /* pth_bar_t*, ulong newcount */
         evh__HG_PTHREAD_BARRIER_RESIZE_PRE ( tid, (void*)args[1],
                                              args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_WAIT_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_WAIT_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_BARRIER_DESTROY_PRE:
         /* pth_bar_t* */
         evh__HG_PTHREAD_BARRIER_DESTROY_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_PRE( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_INIT_OR_UNLOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_PRE:
         /* pth_spinlock_t*, Word */
         evh__HG_PTHREAD_SPIN_LOCK_PRE( tid, (void*)args[1], args[2] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_LOCK_POST:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_LOCK_POST( tid, (void*)args[1] );
         break;

      case _VG_USERREQ__HG_PTHREAD_SPIN_DESTROY_PRE:
         /* pth_spinlock_t* */
         evh__HG_PTHREAD_SPIN_DESTROY_PRE( tid, (void*)args[1] );
         break;

      /* A wrapper macro the client used is not implemented; report it
         as a (non-fatal) Misc error. */
      case _VG_USERREQ__HG_CLIENTREQ_UNIMP: {
         /* char* who */
         HChar* who = (HChar*)args[1];
         HChar buf[50 + 50];
         Thread* thr = map_threads_maybe_lookup( tid );
         tl_assert( thr ); /* I must be mapped */
         tl_assert( who );
         tl_assert( VG_(strlen)(who) <= 50 );
         VG_(sprintf)(buf, "Unimplemented client request macro \"%s\"", who );
         /* record_error_Misc strdup's buf, so this is safe: */
         HG_(record_error_Misc)( thr, buf );
         break;
      }

      case _VG_USERREQ__HG_USERSO_SEND_PRE:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_SEND_PRE( tid, args[1] );
         break;

      case _VG_USERREQ__HG_USERSO_RECV_POST:
         /* UWord arbitrary-SO-tag */
         evh__HG_USERSO_RECV_POST( tid, args[1] );
         break;

      default:
         /* Unhandled Helgrind client request! */
         tl_assert2(0, "unhandled Helgrind client request 0x%lx",
                       args[0]);
   }

   return True;
}
4615
4616
4617/*----------------------------------------------------------------*/
sewardjb4112022007-11-09 22:49:28 +00004618/*--- Setup ---*/
4619/*----------------------------------------------------------------*/
4620
/* Parse one Helgrind-specific command line option.  Returns True if
   the option was recognised (by us or by the replacement-malloc
   machinery), False on a malformed argument.  NB: the VG_*_CLO macros
   expand to if-conditions; the trailing ';' vs '{}' after each one is
   significant and deliberate. */
static Bool hg_process_cmd_line_option ( Char* arg )
{
   Char* tmp_str;

   if VG_BOOL_CLO(arg, "--track-lockorders",
                       HG_(clo_track_lockorders)) {}
   else if VG_BOOL_CLO(arg, "--cmp-race-err-addrs",
                            HG_(clo_cmp_race_err_addrs)) {}

   /* --history-level maps to 0 (none), 1 (approx), 2 (full). */
   else if VG_XACT_CLO(arg, "--history-level=none",
                            HG_(clo_history_level), 0);
   else if VG_XACT_CLO(arg, "--history-level=approx",
                            HG_(clo_history_level), 1);
   else if VG_XACT_CLO(arg, "--history-level=full",
                            HG_(clo_history_level), 2);

   /* If you change the 10k/30mill limits, remember to also change
      them in assertions at the top of event_map_maybe_GC. */
   else if VG_BINT_CLO(arg, "--conflict-cache-size",
                       HG_(clo_conflict_cache_size), 10*1000, 30*1000*1000) {}

   /* "stuvwx" --> stuvwx (binary) */
   else if VG_STR_CLO(arg, "--hg-sanity-flags", tmp_str) {
      Int j;

      /* Exactly 6 characters, each '0' or '1'; bit j of the flags
         word corresponds to position j of the string (MSB first). */
      if (6 != VG_(strlen)(tmp_str)) {
         VG_(message)(Vg_UserMsg,
                      "--hg-sanity-flags argument must have 6 digits\n");
         return False;
      }
      for (j = 0; j < 6; j++) {
         if ('0' == tmp_str[j]) { /* do nothing */ }
         else if ('1' == tmp_str[j]) HG_(clo_sanity_flags) |= (1 << (6-1-j));
         else {
            VG_(message)(Vg_UserMsg, "--hg-sanity-flags argument can "
                                     "only contain 0s and 1s\n");
            return False;
         }
      }
      if (0) VG_(printf)("XXX sanity flags: 0x%lx\n", HG_(clo_sanity_flags));
   }

   else
      /* Not one of ours; maybe it belongs to the malloc replacement. */
      return VG_(replacement_malloc_process_cmd_line_option)(arg);

   return True;
}
4668
4669static void hg_print_usage ( void )
4670{
4671 VG_(printf)(
sewardj849b0ed2008-12-21 10:43:10 +00004672" --track-lockorders=no|yes show lock ordering errors? [yes]\n"
njnf6e8ca92009-08-07 02:18:00 +00004673" --history-level=none|approx|full [full]\n"
sewardjf3861392009-08-02 10:16:03 +00004674" full: show both stack traces for a data race (can be very slow)\n"
4675" approx: full trace for one thread, approx for the other (faster)\n"
4676" none: only show trace for one thread in a race (fastest)\n"
sewardj23f12002009-07-24 08:45:08 +00004677" --conflict-cache-size=N size of 'full' history cache [1000000]\n"
sewardjb4112022007-11-09 22:49:28 +00004678 );
sewardjb4112022007-11-09 22:49:28 +00004679}
4680
4681static void hg_print_debug_usage ( void )
4682{
sewardjb4112022007-11-09 22:49:28 +00004683 VG_(printf)(" --cmp-race-err-addrs=no|yes are data addresses in "
4684 "race errors significant? [no]\n");
sewardj849b0ed2008-12-21 10:43:10 +00004685 VG_(printf)(" --hg-sanity-flags=<XXXXXX> sanity check "
sewardj11e352f2007-11-30 11:11:02 +00004686 " at events (X = 0|1) [000000]\n");
4687 VG_(printf)(" --hg-sanity-flags values:\n");
sewardj11e352f2007-11-30 11:11:02 +00004688 VG_(printf)(" 010000 after changes to "
sewardjb4112022007-11-09 22:49:28 +00004689 "lock-order-acquisition-graph\n");
sewardj11e352f2007-11-30 11:11:02 +00004690 VG_(printf)(" 001000 at memory accesses (NB: not currently used)\n");
4691 VG_(printf)(" 000100 at mem permission setting for "
sewardjb4112022007-11-09 22:49:28 +00004692 "ranges >= %d bytes\n", SCE_BIGRANGE_T);
sewardj11e352f2007-11-30 11:11:02 +00004693 VG_(printf)(" 000010 at lock/unlock events\n");
4694 VG_(printf)(" 000001 at thread create/join events\n");
sewardjb4112022007-11-09 22:49:28 +00004695}
4696
/* Called by the core once command line processing is complete (hook
   registered via VG_(basic_tool_funcs) below).  Helgrind currently has
   no option-dependent initialisation to do, but the hook must exist. */
static void hg_post_clo_init ( void )
{
}
4700
/* Tool finalisation, run at client exit.  Prints advisory messages at
   default verbosity, optionally dumps Helgrind's data structures and
   runs a final sanity check, and emits statistics when the core's
   stats flag is set.  'exitcode' is currently unused. */
static void hg_fini ( Int exitcode )
{
   /* At verbosity 1 (the default, non-XML), remind the user how to see
      error counts. */
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)) {
      VG_(message)(Vg_UserMsg,
                   "For counts of detected and suppressed errors, "
                   "rerun with: -v\n");
   }

   /* Full conflict history (--history-level=full, level 2) is the slow
      path; suggest the cheaper settings. */
   if (VG_(clo_verbosity) == 1 && !VG_(clo_xml)
       && HG_(clo_history_level) >= 2) {
      VG_(umsg)(
         "Use --history-level=approx or =none to gain increased speed, at\n" );
      VG_(umsg)(
         "the cost of reduced accuracy of conflicting-access information\n");
   }

   /* Debug aids, enabled at compile time / via --hg-sanity-flags. */
   if (SHOW_DATA_STRUCTURES)
      pp_everything( PP_ALL, "SK_(fini)" );
   if (HG_(clo_sanity_flags))
      all__sanity_check("SK_(fini)");

   if (VG_(clo_stats)) {

      if (1) {
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_tsets, "univ_tsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_lsets, "univ_lsets" );
         VG_(printf)("\n");
         HG_(ppWSUstats)( univ_laog,  "univ_laog" );
      }

      //zz       VG_(printf)("\n");
      //zz       VG_(printf)(" hbefore: %'10lu queries\n",        stats__hbefore_queries);
      //zz       VG_(printf)(" hbefore: %'10lu cache 0 hits\n",   stats__hbefore_cache0s);
      //zz       VG_(printf)(" hbefore: %'10lu cache > 0 hits\n", stats__hbefore_cacheNs);
      //zz       VG_(printf)(" hbefore: %'10lu graph searches\n", stats__hbefore_gsearches);
      //zz       VG_(printf)(" hbefore: %'10lu   of which slow\n",
      //zz                   stats__hbefore_gsearches - stats__hbefore_gsearchFs);
      //zz       VG_(printf)(" hbefore: %'10lu stack high water mark\n",
      //zz                   stats__hbefore_stk_hwm);
      //zz       VG_(printf)(" hbefore: %'10lu cache invals\n",   stats__hbefore_invals);
      //zz       VG_(printf)(" hbefore: %'10lu probes\n",         stats__hbefore_probes);

      VG_(printf)("\n");
      VG_(printf)("   locksets: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_lsets ));
      VG_(printf)(" threadsets: %'8d unique thread sets\n",
                  (Int)HG_(cardinalityWSU)( univ_tsets ));
      VG_(printf)("  univ_laog: %'8d unique lock sets\n",
                  (Int)HG_(cardinalityWSU)( univ_laog ));

      //VG_(printf)("L(ast)L(ock) map: %'8lu inserts (%d map size)\n",
      //            stats__ga_LL_adds,
      //            (Int)(ga_to_lastlock ? VG_(sizeFM)( ga_to_lastlock ) : 0) );

      VG_(printf)("  LockN-to-P map: %'8llu queries (%llu map size)\n",
                  HG_(stats__LockN_to_P_queries),
                  HG_(stats__LockN_to_P_get_map_size)() );

      VG_(printf)("string table map: %'8llu queries (%llu map size)\n",
                  HG_(stats__string_table_queries),
                  HG_(stats__string_table_get_map_size)() );

      VG_(printf)("            LAOG: %'8d map size\n",
                  (Int)(laog ? VG_(sizeFM)( laog ) : 0));
      VG_(printf)(" LAOG exposition: %'8d map size\n",
                  (Int)(laog_exposition ? VG_(sizeFM)( laog_exposition ) : 0));
      VG_(printf)("           locks: %'8lu acquires, "
                  "%'lu releases\n",
                  stats__lockN_acquires,
                  stats__lockN_releases
                 );
      VG_(printf)("   sanity checks: %'8lu\n", stats__sanity_checks);

      VG_(printf)("\n");
      /* Let the happens-before engine print its own stats and shut down. */
      libhb_shutdown(True);
   }
}
4779
sewardjf98e1c02008-10-25 16:22:41 +00004780/* FIXME: move these somewhere sane */
4781
4782static
4783void for_libhb__get_stacktrace ( Thr* hbt, Addr* frames, UWord nRequest )
4784{
4785 Thread* thr;
4786 ThreadId tid;
4787 UWord nActual;
4788 tl_assert(hbt);
4789 thr = libhb_get_Thr_opaque( hbt );
4790 tl_assert(thr);
4791 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
4792 nActual = (UWord)VG_(get_StackTrace)( tid, frames, (UInt)nRequest,
4793 NULL, NULL, 0 );
4794 tl_assert(nActual <= nRequest);
4795 for (; nActual < nRequest; nActual++)
4796 frames[nActual] = 0;
4797}
4798
4799static
sewardj23f12002009-07-24 08:45:08 +00004800ExeContext* for_libhb__get_EC ( Thr* hbt )
sewardjf98e1c02008-10-25 16:22:41 +00004801{
4802 Thread* thr;
4803 ThreadId tid;
4804 ExeContext* ec;
4805 tl_assert(hbt);
4806 thr = libhb_get_Thr_opaque( hbt );
4807 tl_assert(thr);
4808 tid = map_threads_maybe_reverse_lookup_SLOW(thr);
sewardj23f12002009-07-24 08:45:08 +00004809 /* this will assert if tid is invalid */
sewardjf98e1c02008-10-25 16:22:41 +00004810 ec = VG_(record_ExeContext)( tid, 0 );
sewardjd52392d2008-11-08 20:36:26 +00004811 return ec;
sewardjf98e1c02008-10-25 16:22:41 +00004812}
4813
4814
sewardjb4112022007-11-09 22:49:28 +00004815static void hg_pre_clo_init ( void )
4816{
sewardjf98e1c02008-10-25 16:22:41 +00004817 Thr* hbthr_root;
njnf76d27a2009-05-28 01:53:07 +00004818
sewardjb4112022007-11-09 22:49:28 +00004819 VG_(details_name) ("Helgrind");
4820 VG_(details_version) (NULL);
4821 VG_(details_description) ("a thread error detector");
4822 VG_(details_copyright_author)(
sewardj9eecbbb2010-05-03 21:37:12 +00004823 "Copyright (C) 2007-2010, and GNU GPL'd, by OpenWorks LLP et al.");
sewardjb4112022007-11-09 22:49:28 +00004824 VG_(details_bug_reports_to) (VG_BUGS_TO);
4825 VG_(details_avg_translation_sizeB) ( 200 );
4826
4827 VG_(basic_tool_funcs) (hg_post_clo_init,
4828 hg_instrument,
4829 hg_fini);
4830
4831 VG_(needs_core_errors) ();
sewardjf98e1c02008-10-25 16:22:41 +00004832 VG_(needs_tool_errors) (HG_(eq_Error),
sewardj24118492009-07-15 14:50:02 +00004833 HG_(before_pp_Error),
sewardjf98e1c02008-10-25 16:22:41 +00004834 HG_(pp_Error),
sewardjb4112022007-11-09 22:49:28 +00004835 False,/*show TIDs for errors*/
sewardjf98e1c02008-10-25 16:22:41 +00004836 HG_(update_extra),
4837 HG_(recognised_suppression),
4838 HG_(read_extra_suppression_info),
4839 HG_(error_matches_suppression),
4840 HG_(get_error_name),
sewardj588adef2009-08-15 22:41:51 +00004841 HG_(get_extra_suppression_info));
sewardjb4112022007-11-09 22:49:28 +00004842
sewardj24118492009-07-15 14:50:02 +00004843 VG_(needs_xml_output) ();
4844
sewardjb4112022007-11-09 22:49:28 +00004845 VG_(needs_command_line_options)(hg_process_cmd_line_option,
4846 hg_print_usage,
4847 hg_print_debug_usage);
4848 VG_(needs_client_requests) (hg_handle_client_request);
4849
4850 // FIXME?
4851 //VG_(needs_sanity_checks) (hg_cheap_sanity_check,
4852 // hg_expensive_sanity_check);
4853
4854 VG_(needs_malloc_replacement) (hg_cli__malloc,
4855 hg_cli____builtin_new,
4856 hg_cli____builtin_vec_new,
4857 hg_cli__memalign,
4858 hg_cli__calloc,
4859 hg_cli__free,
4860 hg_cli____builtin_delete,
4861 hg_cli____builtin_vec_delete,
4862 hg_cli__realloc,
njn8b140de2009-02-17 04:31:18 +00004863 hg_cli_malloc_usable_size,
sewardjb4112022007-11-09 22:49:28 +00004864 HG_CLI__MALLOC_REDZONE_SZB );
4865
sewardj849b0ed2008-12-21 10:43:10 +00004866 /* 21 Dec 08: disabled this; it mostly causes H to start more
4867 slowly and use significantly more memory, without very often
4868 providing useful results. The user can request to load this
4869 information manually with --read-var-info=yes. */
4870 if (0) VG_(needs_var_info)(); /* optional */
sewardjb4112022007-11-09 22:49:28 +00004871
4872 VG_(track_new_mem_startup) ( evh__new_mem_w_perms );
sewardj7cf4e6b2008-05-01 20:24:26 +00004873 VG_(track_new_mem_stack_signal)( evh__new_mem_w_tid );
4874 VG_(track_new_mem_brk) ( evh__new_mem_w_tid );
sewardjb4112022007-11-09 22:49:28 +00004875 VG_(track_new_mem_mmap) ( evh__new_mem_w_perms );
sewardj1f77fec2010-04-12 19:51:04 +00004876 VG_(track_new_mem_stack) ( evh__new_mem_stack );
sewardjb4112022007-11-09 22:49:28 +00004877
4878 // FIXME: surely this isn't thread-aware
sewardj23f12002009-07-24 08:45:08 +00004879 VG_(track_copy_mem_remap) ( evh__copy_mem );
sewardjb4112022007-11-09 22:49:28 +00004880
4881 VG_(track_change_mem_mprotect) ( evh__set_perms );
4882
4883 VG_(track_die_mem_stack_signal)( evh__die_mem );
4884 VG_(track_die_mem_brk) ( evh__die_mem );
4885 VG_(track_die_mem_munmap) ( evh__die_mem );
4886 VG_(track_die_mem_stack) ( evh__die_mem );
4887
4888 // FIXME: what is this for?
4889 VG_(track_ban_mem_stack) (NULL);
4890
4891 VG_(track_pre_mem_read) ( evh__pre_mem_read );
4892 VG_(track_pre_mem_read_asciiz) ( evh__pre_mem_read_asciiz );
4893 VG_(track_pre_mem_write) ( evh__pre_mem_write );
4894 VG_(track_post_mem_write) (NULL);
4895
4896 /////////////////
4897
4898 VG_(track_pre_thread_ll_create)( evh__pre_thread_ll_create );
4899 VG_(track_pre_thread_ll_exit) ( evh__pre_thread_ll_exit );
4900
4901 VG_(track_start_client_code)( evh__start_client_code );
4902 VG_(track_stop_client_code)( evh__stop_client_code );
4903
sewardjf98e1c02008-10-25 16:22:41 +00004904 /////////////////////////////////////////////
4905 hbthr_root = libhb_init( for_libhb__get_stacktrace,
sewardjf98e1c02008-10-25 16:22:41 +00004906 for_libhb__get_EC );
4907 /////////////////////////////////////////////
4908
4909 initialise_data_structures(hbthr_root);
sewardjb4112022007-11-09 22:49:28 +00004910
4911 /* Ensure that requirements for "dodgy C-as-C++ style inheritance"
4912 as described in comments at the top of pub_tool_hashtable.h, are
4913 met. Blargh. */
4914 tl_assert( sizeof(void*) == sizeof(struct _MallocMeta*) );
4915 tl_assert( sizeof(UWord) == sizeof(Addr) );
4916 hg_mallocmeta_table
4917 = VG_(HT_construct)( "hg_malloc_metadata_table" );
4918
sewardjb4112022007-11-09 22:49:28 +00004919}
4920
4921VG_DETERMINE_INTERFACE_VERSION(hg_pre_clo_init)
4922
4923/*--------------------------------------------------------------------*/
4924/*--- end hg_main.c ---*/
4925/*--------------------------------------------------------------------*/