blob: f16cf630b1e46cdb9b9a07242d6c6f1dd9988445 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- Helgrind: checking for data races in threaded programs. ---*/
njn25cac76cb2002-09-23 11:21:57 +00004/*--- hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00005/*--------------------------------------------------------------------*/
6
7/*
njnc9539842002-10-02 13:26:35 +00008 This file is part of Helgrind, a Valgrind skin for detecting
9 data races in threaded programs.
njn25e49d8e72002-09-23 09:36:25 +000010
11 Copyright (C) 2000-2002 Nicholas Nethercote
12 njn25@cam.ac.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "vg_skin.h"
sewardj7f3ad222002-11-13 22:11:53 +000033#include "helgrind.h"
njn25e49d8e72002-09-23 09:36:25 +000034
njn27f1a382002-11-08 15:48:16 +000035VG_DETERMINE_INTERFACE_VERSION
njn25e49d8e72002-09-23 09:36:25 +000036
37static UInt n_eraser_warnings = 0;
sewardjff2c9232002-11-13 21:44:39 +000038static UInt n_lockorder_warnings = 0;
njn25e49d8e72002-09-23 09:36:25 +000039
40/*------------------------------------------------------------*/
41/*--- Debug guff ---*/
42/*------------------------------------------------------------*/
43
sewardje11d6c82002-12-15 02:00:41 +000044#define DEBUG_LOCK_TABLE 0 /* Print lock table at end */
njn25e49d8e72002-09-23 09:36:25 +000045
46#define DEBUG_MAKE_ACCESSES 0 /* Print make_access() calls */
47#define DEBUG_LOCKS 0 /* Print lock()/unlock() calls and locksets */
48#define DEBUG_NEW_LOCKSETS 0 /* Print new locksets when created */
49#define DEBUG_ACCESSES 0 /* Print reads, writes */
50#define DEBUG_MEM_LOCKSET_CHANGES 0
51 /* Print when an address's lockset
52 changes; only useful with
53 DEBUG_ACCESSES */
sewardj8fac99a2002-11-13 22:31:26 +000054#define SLOW_ASSERTS 0 /* do expensive asserts */
njn25e49d8e72002-09-23 09:36:25 +000055#define DEBUG_VIRGIN_READS 0 /* Dump around address on VIRGIN reads */
56
sewardj8fac99a2002-11-13 22:31:26 +000057#if SLOW_ASSERTS
58#define SK_ASSERT(x) sk_assert(x)
59#else
60#define SK_ASSERT(x)
61#endif
62
njn25e49d8e72002-09-23 09:36:25 +000063/* heavyweight LockSet sanity checking:
64 0 == never
65 1 == after important ops
66 2 == As 1 and also after pthread_mutex_* ops (excessively slow)
67 */
68#define LOCKSET_SANITY 0
69
sewardj8fac99a2002-11-13 22:31:26 +000070/* Rotate an unsigned quantity left */
71#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x)*8)-(n))))
72
73/* round a up to the next multiple of N. N must be a power of 2 */
74#define ROUNDUP(a, N) ((a + N - 1) & ~(N-1))
75
76/* Round a down to the next multiple of N. N must be a power of 2 */
77#define ROUNDDN(a, N) ((a) & ~(N-1))
njn25e49d8e72002-09-23 09:36:25 +000078
79/*------------------------------------------------------------*/
sewardjf6374322002-11-13 22:35:55 +000080/*--- Command line options ---*/
81/*------------------------------------------------------------*/
82
83static enum {
84 EC_None,
85 EC_Some,
86 EC_All
87} clo_execontext = EC_None;
88
sewardje1a39f42002-12-15 01:56:17 +000089static Bool clo_priv_stacks = False;
sewardjf6374322002-11-13 22:35:55 +000090
91/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000092/*--- Crude profiling machinery. ---*/
93/*------------------------------------------------------------*/
94
95// PPP: work out if I want this
96
97#define PROF_EVENT(x)
98#if 0
99#ifdef VG_PROFILE_MEMORY
100
101#define N_PROF_EVENTS 150
102
103static UInt event_ctr[N_PROF_EVENTS];
104
105void VGE_(done_prof_mem) ( void )
106{
107 Int i;
108 for (i = 0; i < N_PROF_EVENTS; i++) {
109 if ((i % 10) == 0)
110 VG_(printf)("\n");
111 if (event_ctr[i] > 0)
112 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
113 }
114 VG_(printf)("\n");
115}
116
117#define PROF_EVENT(ev) \
njne427a662002-10-02 11:08:25 +0000118 do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
njn25e49d8e72002-09-23 09:36:25 +0000119 event_ctr[ev]++; \
120 } while (False);
121
122#else
123
124//static void init_prof_mem ( void ) { }
125// void VG_(done_prof_mem) ( void ) { }
126
127#define PROF_EVENT(ev) /* */
128
129#endif /* VG_PROFILE_MEMORY */
130
131/* Event index. If just the name of the fn is given, this means the
132 number of calls to the fn. Otherwise it is the specified event.
133
134 [PPP: snip event numbers...]
135*/
136#endif /* 0 */
137
138
139/*------------------------------------------------------------*/
140/*--- Data defns. ---*/
141/*------------------------------------------------------------*/
142
njn3e884182003-04-15 13:03:23 +0000143typedef
144 struct _HG_Chunk {
145 struct _HG_Chunk* next;
146 Addr data; /* ptr to actual block */
147 UInt size; /* size requested */
148 ExeContext* where; /* where it was allocated */
149 ThreadId tid; /* allocating thread */
150 }
151 HG_Chunk;
152
njn25e49d8e72002-09-23 09:36:25 +0000153typedef enum
sewardj7f3ad222002-11-13 22:11:53 +0000154 { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit, Vge_Error }
njn25e49d8e72002-09-23 09:36:25 +0000155 VgeInitStatus;
156
sewardjc808ef52002-11-13 22:43:26 +0000157
njn25e49d8e72002-09-23 09:36:25 +0000158/* Should add up to 32 to fit in one word */
159#define OTHER_BITS 30
160#define STATE_BITS 2
161
162#define ESEC_MAP_WORDS 16384 /* Words per secondary map */
163
164/* This is for indicating that a memory block has been initialised but not
165 * really directly by a particular thread... (eg. text/data initialised
166 * automatically at startup).
167 * Must be different to virgin_word.other */
168#define TID_INDICATING_NONVIRGIN 1
169
sewardjc4a810d2002-11-13 22:25:51 +0000170/* Magic packed TLS used for error suppression; if word state is Excl
171 and tid is this, then it means all access are OK without changing
172 state and without raising any more errors */
173#define TLSP_INDICATING_ALL ((1 << OTHER_BITS) - 1)
sewardj16748af2002-10-22 04:55:54 +0000174
njn25e49d8e72002-09-23 09:36:25 +0000175/* Number of entries must fit in STATE_BITS bits */
176typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;
177
sewardjc808ef52002-11-13 22:43:26 +0000178static inline const Char *pp_state(pth_state st)
179{
180 const Char *ret;
181
182 switch(st) {
183 case Vge_Virgin: ret = "virgin"; break;
184 case Vge_Excl: ret = "exclusive"; break;
185 case Vge_Shar: ret = "shared RO"; break;
186 case Vge_SharMod: ret = "shared RW"; break;
187 default: ret = "???";
188 }
189 return ret;
190}
191
njn25e49d8e72002-09-23 09:36:25 +0000192typedef
193 struct {
sewardj8fac99a2002-11-13 22:31:26 +0000194 /* gcc arranges this bitfield with state in the 2LSB and other
195 in the 30MSB, which is what we want */
njn25e49d8e72002-09-23 09:36:25 +0000196 UInt state:STATE_BITS;
sewardj8fac99a2002-11-13 22:31:26 +0000197 UInt other:OTHER_BITS;
njn25e49d8e72002-09-23 09:36:25 +0000198 } shadow_word;
199
sewardj8fac99a2002-11-13 22:31:26 +0000200#define SW(st, other) ((shadow_word) { st, other })
201
njn25e49d8e72002-09-23 09:36:25 +0000202typedef
203 struct {
204 shadow_word swords[ESEC_MAP_WORDS];
205 }
206 ESecMap;
207
208static ESecMap* primary_map[ 65536 ];
209static ESecMap distinguished_secondary_map;
210
sewardj8fac99a2002-11-13 22:31:26 +0000211static const shadow_word virgin_sword = SW(Vge_Virgin, 0);
212static const shadow_word error_sword = SW(Vge_Excl, TLSP_INDICATING_ALL);
njn25e49d8e72002-09-23 09:36:25 +0000213
214#define VGE_IS_DISTINGUISHED_SM(smap) \
215 ((smap) == &distinguished_secondary_map)
216
217#define ENSURE_MAPPABLE(addr,caller) \
218 do { \
219 if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
220 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
221 /*VG_(printf)("new 2map because of %p\n", addr);*/ \
222 } \
223 } while(0)
224
225
sewardjc808ef52002-11-13 22:43:26 +0000226/* Parallel map which contains execution contexts when words last
227 changed state (if required) */
sewardj499e3de2002-11-13 22:22:25 +0000228
sewardjc808ef52002-11-13 22:43:26 +0000229typedef struct EC_EIP {
230 union u_ec_eip {
231 Addr eip;
232 ExeContext *ec;
sewardj72baa7a2002-12-09 23:32:58 +0000233 } uu_ec_eip;
sewardjc808ef52002-11-13 22:43:26 +0000234 UInt state:STATE_BITS;
235 UInt tls:OTHER_BITS; /* packed TLS */
sewardj499e3de2002-11-13 22:22:25 +0000236} EC_EIP;
237
sewardjc808ef52002-11-13 22:43:26 +0000238#define NULL_EC_EIP ((EC_EIP){ { 0 }, 0, 0})
239
240#define EIP(eip, prev, tls) ((EC_EIP) { (union u_ec_eip)(eip), (prev).state, packTLS(tls) })
241#define EC(ec, prev, tls) ((EC_EIP) { (union u_ec_eip)(ec), (prev).state, packTLS(tls) })
242
/* Pack an ExeContext pointer into the 30-bit payload of an EC_EIP.
   Relies on ExeContexts being at least 4-byte aligned so the bottom
   STATE_BITS bits are free (checked by the assert). */
static inline UInt packEC(ExeContext *ec)
{
   SK_ASSERT(((UInt)ec & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)ec) >> STATE_BITS;
}
248
/* Inverse of packEC: recover the ExeContext pointer. */
static inline ExeContext *unpackEC(UInt i)
{
   return (ExeContext *)(i << STATE_BITS);
}
253
/* Lose 2 LSB of eip.  Unlike packEC there is no alignment assert, so
   the bottom STATE_BITS bits of a code address really are discarded. */
static inline UInt packEIP(Addr eip)
{
   return ((UInt)eip) >> STATE_BITS;
}
259
/* Inverse of packEIP; the discarded low bits come back as zero. */
static inline Addr unpackEIP(UInt i)
{
   return (Addr)(i << STATE_BITS);
}
sewardj499e3de2002-11-13 22:22:25 +0000264
265typedef struct {
266 EC_EIP execontext[ESEC_MAP_WORDS];
267} ExeContextMap;
268
269static ExeContextMap** execontext_map;
270
/* Record the execution context for the word containing address a.
   Secondary maps are allocated lazily and zero-filled on first touch. */
static inline void setExeContext(Addr a, EC_EIP ec)
{
   UInt idx = (a >> 16) & 0xffff;   /* primary map index */
   UInt off = (a >> 2) & 0x3fff;    /* word offset in secondary map */

   if (execontext_map[idx] == NULL) {
      execontext_map[idx] = VG_(malloc)(sizeof(ExeContextMap));
      VG_(memset)(execontext_map[idx], 0, sizeof(ExeContextMap));
   }

   execontext_map[idx]->execontext[off] = ec;
}
283
/* Fetch the execution context recorded for the word containing a.
   Returns NULL_EC_EIP if no secondary map has been allocated yet. */
static inline EC_EIP getExeContext(Addr a)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >> 2) & 0x3fff;
   EC_EIP ec = NULL_EC_EIP;

   if (execontext_map[idx] != NULL)
      ec = execontext_map[idx]->execontext[off];

   return ec;
}
295
njn25e49d8e72002-09-23 09:36:25 +0000296/*------------------------------------------------------------*/
sewardjc4a810d2002-11-13 22:25:51 +0000297/*--- Thread lifetime segments ---*/
298/*------------------------------------------------------------*/
299
300/*
301 * This mechanism deals with the common case of a parent thread
302 * creating a structure for a child thread, and then passing ownership
303 * of the structure to that thread. It similarly copes with a child
304 * thread passing information back to another thread waiting to join
305 * on it.
306 *
307 * Each thread's lifetime can be partitioned into segments. Those
308 * segments are arranged to form an interference graph which indicates
309 * whether two thread lifetime segments can possibly be concurrent.
310 * If not, then memory which is exclusively accessed by one TLS can be
311 * passed on to another TLS without an error occurring, and without
312 * moving it from Excl state.
313 *
314 * At present this only considers thread creation and join as
315 * synchronisation events for creating new lifetime segments, but
316 * others may be possible (like mutex operations).
317 */
318
319typedef struct _ThreadLifeSeg ThreadLifeSeg;
320
321struct _ThreadLifeSeg {
322 ThreadId tid;
323 ThreadLifeSeg *prior[2]; /* Previous lifetime segments */
324 UInt refcount; /* Number of memory locations pointing here */
325 UInt mark; /* mark used for graph traversal */
326 ThreadLifeSeg *next; /* list of all TLS */
327};
328
329static ThreadLifeSeg *all_tls;
330static UInt tls_since_gc;
331#define TLS_SINCE_GC 10000
332
333/* current mark used for TLS graph traversal */
334static UInt tlsmark;
335
336static ThreadLifeSeg *thread_seg[VG_N_THREADS];
337
338
/* Garbage-collect unreferenced ThreadLifeSegs.  Currently a stub that
   only announces itself; called from newTLS() once TLS_SINCE_GC
   segments have been created since the last collection. */
static void tls_gc(void)
{
   /* XXX later.  Walk through all TLSs and look for ones with 0
      refcount and remove them from the structure and free them.
      Could probably get rid of ThreadLifeSeg.refcount and simply use
      mark-sweep from the shadow table. */
   VG_(printf)("WRITEME: TLS GC\n");
}
347
/* Start a new lifetime segment for thread tid.  Three cases:
   - tid has never had a TLS: allocate a fresh one with no priors;
   - tid's current TLS has refcount 0 (no shadow word points at it):
     just recycle it in place;
   - otherwise: allocate a new TLS with the current one as prior[0].
   Every allocated segment goes on the all_tls list; tls_gc() is
   kicked off after TLS_SINCE_GC allocations. */
static void newTLS(ThreadId tid)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls;

   /* Initial NULL */
   if (thread_seg[tid] == NULL) {
      tls = VG_(malloc)(sizeof(*tls));
      tls->tid = tid;
      tls->prior[0] = tls->prior[1] = NULL;
      tls->refcount = 0;
      tls->mark = tlsmark-1;    /* anything != tlsmark reads as unvisited */

      tls->next = all_tls;
      all_tls = tls;
      tls_since_gc++;

      thread_seg[tid] = tls;
      return;
   }

   /* Previous TLS was unused, so just recycle */
   if (thread_seg[tid]->refcount == 0) {
      if (debug)
	 VG_(printf)("newTLS; recycling TLS %p for tid %u\n",
		     thread_seg[tid], tid);
      return;
   }

   /* Use existing TLS for this tid as a prior for new TLS */
   tls = VG_(malloc)(sizeof(*tls));
   tls->tid = tid;
   tls->prior[0] = thread_seg[tid];
   tls->prior[1] = NULL;
   tls->refcount = 0;
   tls->mark = tlsmark-1;       /* anything != tlsmark reads as unvisited */

   tls->next = all_tls;
   all_tls = tls;
   if (++tls_since_gc > TLS_SINCE_GC) {
      tls_gc();
      tls_since_gc = 0;
   }

   if (debug)
      VG_(printf)("newTLS: made new TLS %p for tid %u (prior %p(%u))\n",
		  tls, tid, tls->prior[0], tls->prior[0]->tid);

   thread_seg[tid] = tls;
}
398
/* Clear out a TLS for a thread that's died.  Reuses newTLS() to get a
   fresh (or recycled) segment, then severs its prior links: a dead
   thread's final segment cannot inherit any further ownership. */
static void clearTLS(ThreadId tid)
{
   newTLS(tid);

   thread_seg[tid]->prior[0] = NULL;
   thread_seg[tid]->prior[1] = NULL;
}
407
/* Record that thread 'prior's current lifetime segment happens-before
   'tid's current segment (used at thread create/join points).  Each
   segment has at most two prior slots; asserts if both are taken. */
static void addPriorTLS(ThreadId tid, ThreadId prior)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls = thread_seg[tid];

   if (debug)
      VG_(printf)("making TLS %p(%u) prior to TLS %p(%u)\n",
		  thread_seg[prior], prior, tls, tid);

   sk_assert(thread_seg[tid] != NULL);
   sk_assert(thread_seg[prior] != NULL);

   if (tls->prior[0] == NULL)
      tls->prior[0] = thread_seg[prior];
   else {
      sk_assert(tls->prior[1] == NULL);
      tls->prior[1] = thread_seg[prior];
   }
}
427
428/* Return True if prior is definitely not concurrent with tls */
429static Bool tlsIsDisjoint(const ThreadLifeSeg *tls,
430 const ThreadLifeSeg *prior)
431{
432 Bool isPrior(const ThreadLifeSeg *t) {
433 if (t == NULL || t->mark == tlsmark)
434 return False;
435
436 if (t == prior)
437 return True;
438
439 ((ThreadLifeSeg *)t)->mark = tlsmark;
440
441 return isPrior(t->prior[0]) || isPrior(t->prior[1]);
442 }
443 tlsmark++; /* new traversal mark */
444
445 return isPrior(tls);
446}
447
/* Pack a ThreadLifeSeg pointer into the 30-bit 'other' field of a
   shadow word; the assert checks the low STATE_BITS bits are free
   (i.e. the pointer is at least 4-byte aligned). */
static inline UInt packTLS(ThreadLifeSeg *tls)
{
   SK_ASSERT(((UInt)tls & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)tls) >> STATE_BITS;
}
453
/* Inverse of packTLS. */
static inline ThreadLifeSeg *unpackTLS(UInt i)
{
   return (ThreadLifeSeg *)(i << STATE_BITS);
}
458
459/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000460/*--- Low-level support for memory tracking. ---*/
461/*------------------------------------------------------------*/
462
463/*
464 All reads and writes are recorded in the memory map, which
465 records the state of all memory in the process. The memory map is
466 organised like that for normal Valgrind, except that everything
467 is done at word-level instead of byte-level, and each word has only
468 one word of shadow (instead of 36 bits).
469
470 As for normal Valgrind there is a distinguished secondary map. But we're
471 working at word-granularity, so it has 16k word entries instead of 64k byte
472 entries. Lookup is done as follows:
473
474 bits 31..16: primary map lookup
475 bits 15.. 2: secondary map lookup
476 bits 1.. 0: ignored
477*/
478
479
480/*------------------------------------------------------------*/
481/*--- Basic bitmap management, reading and writing. ---*/
482/*------------------------------------------------------------*/
483
484/* Allocate and initialise a secondary map, marking all words as virgin. */
485
486/* Just a value that isn't a real pointer */
487#define SEC_MAP_ACCESS (shadow_word*)0x99
488
489
/* Allocate a fresh secondary map and mark every word in it virgin.
   'caller' is only used as a tag for the mmap allocator. */
static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt i;
   //PROF_EVENT(10); PPP

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. (SSS: not true for ESecMaps -- they're 16 pages) */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(ESecMap), caller );

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}
508
509
/* Set a word.  The byte given by 'a' could be anywhere in the word -- the
 * whole word gets set.  Also keeps the ThreadLifeSeg refcounts in step:
 * the word's old owner (if in Excl state) loses a reference and the new
 * owner gains one, so unreferenced segments can later be GC'd. */
static __inline__
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;
   shadow_word *oldsw;

   //PROF_EVENT(23); PPP
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   oldsw = &sm->swords[(a & 0xFFFC) >> 2];
   /* drop the old owner's reference (TLSP_INDICATING_ALL is not a
      real packed TLS, so it carries no refcount) */
   if (oldsw->state == Vge_Excl && oldsw->other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(oldsw->other);
      tls->refcount--;
   }

   /* and take a reference for the new owner */
   if (sword.state == Vge_Excl && sword.other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(sword.other);
      tls->refcount++;
   }

   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}
543
544
/* Return the address of the shadow word covering 'a'.  If 'a' still
   maps to the distinguished (all-virgin) secondary there is no
   writable shadow; the sentinel SEC_MAP_ACCESS is returned instead. */
static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm = primary_map[a >> 16];
   UInt sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}
562
563
564// SSS: rename these so they're not so similar to memcheck, unless it's
565// appropriate of course
566
/* Reset a word to virgin, clearing any recorded execution context. */
static __inline__
void init_virgin_sword(Addr a)
{
   if (clo_execontext != EC_None)
      setExeContext(a, NULL_EC_EIP);
   set_sword(a, virgin_sword);
}
574
/* Mark a word with the error-suppression state (Excl +
   TLSP_INDICATING_ALL): further accesses are accepted without
   state change or reports. */
static __inline__
void init_error_sword(Addr a)
{
   set_sword(a, error_sword);
}
njn25e49d8e72002-09-23 09:36:25 +0000580
/* Mark a word as exclusively owned by the current thread's lifetime
   segment (the state for freshly initialised, never-shared memory). */
static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();
   ThreadLifeSeg *tls;

   sk_assert(tid != VG_INVALID_THREADID);
   tls = thread_seg[tid];

   sword = SW(Vge_Excl, packTLS(tls));
   set_sword(a, sword);
}
594
595
596/* In this case, we treat it for Eraser's sake like virgin (it hasn't
597 * been inited by a particular thread, it's just done automatically upon
598 * startup), but we mark its .state specially so it doesn't look like an
599 * uninited read. */
600static __inline__
601void init_magically_inited_sword(Addr a)
602{
603 shadow_word sword;
604
sewardjb52a1b02002-10-23 21:38:22 +0000605 sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());
sewardj8fac99a2002-11-13 22:31:26 +0000606
607 sword = SW(Vge_Virgin, TID_INDICATING_NONVIRGIN);
608
njn25e49d8e72002-09-23 09:36:25 +0000609 set_sword(a, virgin_sword);
610}
611
sewardjc26cc252002-10-23 21:58:55 +0000612
sewardj274c6012002-10-22 04:54:55 +0000613/*------------------------------------------------------------*/
sewardjc26cc252002-10-23 21:58:55 +0000614/*--- Implementation of lock sets. ---*/
sewardj274c6012002-10-22 04:54:55 +0000615/*------------------------------------------------------------*/
616
sewardj39a4d842002-11-13 22:14:30 +0000617typedef struct _Mutex Mutex; /* forward decl */
sewardj4bffb232002-11-13 21:46:34 +0000618typedef struct _LockSet LockSet;
619
sewardj16748af2002-10-22 04:55:54 +0000620typedef enum MutexState {
621 MxUnknown, /* don't know */
622 MxUnlocked, /* unlocked */
623 MxLocked, /* locked */
624 MxDead /* destroyed */
625} MutexState;
626
sewardj39a4d842002-11-13 22:14:30 +0000627struct _Mutex {
sewardjdac0a442002-11-13 22:08:40 +0000628 Addr mutexp;
sewardj39a4d842002-11-13 22:14:30 +0000629 Mutex *next;
sewardj16748af2002-10-22 04:55:54 +0000630
631 MutexState state; /* mutex state */
632 ThreadId tid; /* owner */
633 ExeContext *location; /* where the last change happened */
sewardj274c6012002-10-22 04:54:55 +0000634
sewardj4bffb232002-11-13 21:46:34 +0000635 const LockSet *lockdep; /* set of locks we depend on */
sewardjc26cc252002-10-23 21:58:55 +0000636 UInt mark; /* mark for graph traversal */
637};
sewardj16748af2002-10-22 04:55:54 +0000638
sewardj39a4d842002-11-13 22:14:30 +0000639static inline Int mutex_cmp(const Mutex *a, const Mutex *b)
sewardj4bffb232002-11-13 21:46:34 +0000640{
sewardjdac0a442002-11-13 22:08:40 +0000641 return a->mutexp - b->mutexp;
sewardj4bffb232002-11-13 21:46:34 +0000642}
njn25e49d8e72002-09-23 09:36:25 +0000643
sewardj274c6012002-10-22 04:54:55 +0000644struct _LockSet {
sewardj4bffb232002-11-13 21:46:34 +0000645 UInt setsize; /* number of members */
646 UInt hash; /* hash code */
647 LockSet *next; /* next in hash chain */
sewardj39a4d842002-11-13 22:14:30 +0000648 const Mutex *mutex[0]; /* locks */
sewardj274c6012002-10-22 04:54:55 +0000649};
sewardj4bffb232002-11-13 21:46:34 +0000650
651static const LockSet *emptyset;
njn25e49d8e72002-09-23 09:36:25 +0000652
653/* Each one is an index into the lockset table. */
sewardj4bffb232002-11-13 21:46:34 +0000654static const LockSet *thread_locks[VG_N_THREADS];
njn25e49d8e72002-09-23 09:36:25 +0000655
sewardjdac0a442002-11-13 22:08:40 +0000656#define LOCKSET_HASH_SZ 1021
njn25e49d8e72002-09-23 09:36:25 +0000657
sewardj4bffb232002-11-13 21:46:34 +0000658static LockSet *lockset_hash[LOCKSET_HASH_SZ];
njn25e49d8e72002-09-23 09:36:25 +0000659
sewardj4bffb232002-11-13 21:46:34 +0000660/* Pack and unpack a LockSet pointer into shadow_word.other */
sewardj8fac99a2002-11-13 22:31:26 +0000661static inline UInt packLockSet(const LockSet *p)
njn25e49d8e72002-09-23 09:36:25 +0000662{
sewardj4bffb232002-11-13 21:46:34 +0000663 UInt id;
664
sewardj8fac99a2002-11-13 22:31:26 +0000665 SK_ASSERT(((UInt)p & ((1 << STATE_BITS)-1)) == 0);
sewardj4bffb232002-11-13 21:46:34 +0000666 id = ((UInt)p) >> STATE_BITS;
667
668 return id;
njn25e49d8e72002-09-23 09:36:25 +0000669}
670
sewardj8fac99a2002-11-13 22:31:26 +0000671static inline const LockSet *unpackLockSet(UInt id)
njn25e49d8e72002-09-23 09:36:25 +0000672{
sewardj4bffb232002-11-13 21:46:34 +0000673 return (LockSet *)(id << STATE_BITS);
njn25e49d8e72002-09-23 09:36:25 +0000674}
675
/* Print a lockset as "{ addr(sym) ... }".  (%(y appears to be
   Valgrind's print-symbol-at-address format -- see vg_skin.h.) */
static
void pp_LockSet(const LockSet* p)
{
   int i;

   VG_(printf)("{ ");
   for(i = 0; i < p->setsize; i++) {
      const Mutex *mx = p->mutex[i];

      VG_(printf)("%p%(y ", mx->mutexp, mx->mutexp);
   }
   VG_(printf)("}\n");
}
689
690
/* Print a labelled lockset: "<s>: { ... }". */
static void print_LockSet(const Char *s, const LockSet *ls)
{
   VG_(printf)("%s: ", s);
   pp_LockSet(ls);
}
696
/* Compute the hash of a LockSet, optionally as if 'with' had been
   inserted and/or 'without' removed, without building that set.
   Walks the sorted mutex vector, merging 'with' in at its sorted
   position and skipping 'without', folding each mutex address into
   the hash with a rotate/xor.  Returns a bucket index
   (< LOCKSET_HASH_SZ).  NOTE(review): callers appear to pass at most
   one of with/without non-NULL; passing both could reach
   mutex_cmp(without, NULL) -- confirm against call sites. */
static inline UInt hash_LockSet_w_wo(const LockSet *ls,
				     const Mutex *with,
				     const Mutex *without)
{
   UInt i;
   /* seed with the effective set size */
   UInt hash = ls->setsize + (with != NULL) - (without != NULL);

   sk_assert(with == NULL || with != without);

   for(i = 0; with != NULL || i < ls->setsize; i++) {
      const Mutex *mx = i >= ls->setsize ? NULL : ls->mutex[i];

      if (without && mutex_cmp(without, mx) == 0)
	 continue;

      /* splice 'with' in at its sorted position (or at the end) */
      if (with && (mx == NULL || mutex_cmp(with, mx) < 0)) {
	 mx = with;
	 with = NULL;
	 i--;
      }

      hash = ROTL(hash, 17);
      hash ^= (UInt)mx->mutexp;
   }

   return hash % LOCKSET_HASH_SZ;
}
725
/* Hash of ls as if 'with' had been inserted. */
static inline UInt hash_LockSet_with(const LockSet *ls, const Mutex *with)
{
   UInt hash = hash_LockSet_w_wo(ls, with, NULL);

   if (0)
      VG_(printf)("hash_with %p+%p -> %d\n", ls, with->mutexp, hash);

   return hash;
}
735
sewardj39a4d842002-11-13 22:14:30 +0000736static inline UInt hash_LockSet_without(const LockSet *ls, const Mutex *without)
sewardj4bffb232002-11-13 21:46:34 +0000737{
738 UInt hash = hash_LockSet_w_wo(ls, NULL, without);
739
740 if (0)
741 VG_(printf)("hash_with %p-%p -> %d\n", ls, without->mutexp, hash);
742
743 return hash;
744}
745
/* Hash of ls exactly as it stands. */
static inline UInt hash_LockSet(const LockSet *ls)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, NULL);

   if (0)
      VG_(printf)("hash %p -> %d\n", ls, hash);

   return hash;
}
755
756static
757Bool structural_eq_LockSet(const LockSet* a, const LockSet* b)
njn25e49d8e72002-09-23 09:36:25 +0000758{
759 Int i;
njn25e49d8e72002-09-23 09:36:25 +0000760
sewardj4bffb232002-11-13 21:46:34 +0000761 if (a == b)
762 return True;
763 if (a->setsize != b->setsize)
764 return False;
njn25e49d8e72002-09-23 09:36:25 +0000765
sewardj4bffb232002-11-13 21:46:34 +0000766 for(i = 0; i < a->setsize; i++) {
767 if (mutex_cmp(a->mutex[i], b->mutex[i]) != 0)
njn25e49d8e72002-09-23 09:36:25 +0000768 return False;
njn25e49d8e72002-09-23 09:36:25 +0000769 }
770
sewardj4bffb232002-11-13 21:46:34 +0000771 return True;
njn25e49d8e72002-09-23 09:36:25 +0000772}
773
774
/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion.  Returns True if they match, i.e. b must
 * contain exactly the elements of a plus missing_mutex at its sorted
 * position.  Both vectors are walked in lockstep.
 */
static Bool
weird_LockSet_equals(const LockSet* a, const LockSet* b,
		     const Mutex *missing_mutex)
{
   static const Bool debug = False;
   Int ia, ib;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet("                     b", b);
      VG_(printf)( "               missing: %p%(y\n",
		   missing_mutex->mutexp, missing_mutex->mutexp);
   }

   /* fast path: sizes must differ by exactly one */
   if ((a->setsize + 1) != b->setsize) {
      if (debug)
	 VG_(printf)("   fastpath length mismatch -> 0\n");
      return False;
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing mutex itself
      3 the section after missing_mutex to the end of a
    */

   ia = 0;
   ib = 0;

   /* 1: up to missing_mutex */
   for(; ia < a->setsize && mutex_cmp(a->mutex[ia], missing_mutex) < 0; ia++, ib++) {
      if (debug) {
	 print_LockSet("     1:a", a);
	 print_LockSet("     1:b", b);
      }
      if (ib == b->setsize || mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
	 return False;
   }

   /* 2: missing_mutex itself */
   if (debug) {
      VG_(printf)( "         2:missing: %p%(y\n",
		   missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet("     2:      b", b);
   }

   sk_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);

   if (ib == b->setsize || mutex_cmp(missing_mutex, b->mutex[ib]) != 0)
      return False;

   ib++;

   /* 3: after missing_mutex to end */

   for(; ia < a->setsize && ib < b->setsize; ia++, ib++) {
      if (debug) {
	 print_LockSet("     3:a", a);
	 print_LockSet("     3:b", b);
      }
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
	 return False;
   }

   /* both vectors must be exhausted together */
   if (debug)
      VG_(printf)("  ia=%d ib=%d --> %d\n", ia, ib, ia == a->setsize && ib == b->setsize);

   return ia == a->setsize && ib == b->setsize;
}
850
851
852
/* Find a LockSet structurally equal to 'set' in the hash table, or
   NULL.  Assumes set->hash has already been computed. */
static const LockSet *lookup_LockSet(const LockSet *set)
{
   UInt bucket = set->hash;
   LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (set == ret || structural_eq_LockSet(set, ret))
	 return ret;

   return NULL;
}
864
/* Find the interned LockSet equal to (set + mutex), if any, without
   materialising the candidate set. */
static const LockSet *lookup_LockSet_with(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_with(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(set, ret, mutex))
	 return ret;

   return NULL;
}
876
/* Find the interned LockSet equal to (set - mutex), if any: a match
   'ret' is one where (ret + mutex) == set, hence the reversed
   argument order to weird_LockSet_equals. */
static const LockSet *lookup_LockSet_without(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_without(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(ret, set, mutex))
	 return ret;

   return NULL;
}
888
889static void insert_LockSet(LockSet *set)
890{
891 UInt hash = hash_LockSet(set);
892
893 set->hash = hash;
894
895 sk_assert(lookup_LockSet(set) == NULL);
896
897 set->next = lockset_hash[hash];
898 lockset_hash[hash] = set;
899}
900
901static inline
902LockSet *alloc_LockSet(UInt setsize)
903{
sewardj39a4d842002-11-13 22:14:30 +0000904 LockSet *ret = VG_(malloc)(sizeof(*ret) + sizeof(Mutex *) * setsize);
sewardj4bffb232002-11-13 21:46:34 +0000905 ret->setsize = setsize;
906 return ret;
907}
908
909static inline
910void free_LockSet(LockSet *p)
911{
912 /* assert: not present in hash */
913 VG_(free)(p);
914}
915
njnb4aee052003-04-15 14:09:58 +0000916static
sewardj4bffb232002-11-13 21:46:34 +0000917void pp_all_LockSets ( void )
918{
919 Int i;
920 Int sets, buckets;
921
922 sets = buckets = 0;
923 for (i = 0; i < LOCKSET_HASH_SZ; i++) {
924 const LockSet *ls = lockset_hash[i];
925 Bool first = True;
926
sewardj4bffb232002-11-13 21:46:34 +0000927 for(; ls != NULL; ls = ls->next) {
sewardjdac0a442002-11-13 22:08:40 +0000928 if (first) {
929 buckets++;
930 VG_(printf)("[%4d] = ", i);
931 } else
932 VG_(printf)(" ");
933
sewardj4bffb232002-11-13 21:46:34 +0000934 sets++;
935 first = False;
936 pp_LockSet(ls);
937 }
938 }
939
940 VG_(printf)("%d distinct LockSets in %d buckets\n", sets, buckets);
941}
942
943static inline Bool isempty(const LockSet *ls)
944{
945 return ls == NULL || ls->setsize == 0;
946}
947
sewardj39a4d842002-11-13 22:14:30 +0000948static Bool ismember(const LockSet *ls, const Mutex *mx)
sewardj4bffb232002-11-13 21:46:34 +0000949{
950 Int i;
951
952 /* XXX use binary search */
953 for(i = 0; i < ls->setsize; i++)
954 if (mutex_cmp(mx, ls->mutex[i]) == 0)
955 return True;
956
957 return False;
958}
959
960/* Check invariants:
961 - all locksets are unique
962 - each set is an array in strictly increasing order of mutex addr
963*/
964static
965void sanity_check_locksets ( const Char* caller )
966{
967 Int i;
968 const Char *badness;
969 LockSet *ls;
970
971 for(i = 0; i < LOCKSET_HASH_SZ; i++) {
972
973 for(ls = lockset_hash[i]; ls != NULL; ls = ls->next) {
sewardj39a4d842002-11-13 22:14:30 +0000974 const Mutex *prev;
sewardj4bffb232002-11-13 21:46:34 +0000975 Int j;
976
977 if (hash_LockSet(ls) != ls->hash) {
978 badness = "mismatched hash";
979 goto bad;
980 }
981 if (ls->hash != i) {
982 badness = "wrong bucket";
983 goto bad;
984 }
985 if (lookup_LockSet(ls) != ls) {
986 badness = "non-unique set";
987 goto bad;
988 }
989
990 prev = ls->mutex[0];
991 for(j = 1; j < ls->setsize; j++) {
992 if (mutex_cmp(prev, ls->mutex[j]) >= 0) {
993 badness = "mutexes out of order";
994 goto bad;
995 }
996 }
997 }
998 }
999 return;
1000
1001 bad:
1002 VG_(printf)("sanity_check_locksets: "
1003 "i = %d, ls=%p badness = %s, caller = %s\n",
1004 i, ls, badness, caller);
1005 pp_all_LockSets();
1006 VG_(skin_panic)("sanity_check_locksets");
1007}
1008
/* Build a new LockSet equal to ls + {mx}, preserving sorted order.
   mx must not already be in ls (checked assertion).  The result is a
   fresh allocation; it is NOT interned here -- caller is responsible
   for hashing/inserting it.  Trick: mx is written at its sorted slot
   during the copy loop and then nulled, so the trailing 'if (mx)'
   handles the case where mx sorts after every existing element. */
static
LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;   /* i indexes ls, j indexes ret (j runs one ahead once mx lands) */

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("add-IN mutex %p%(y\n", mx->mutexp, mx->mutexp);
      print_LockSet("add-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("add-IN");

   sk_assert(!ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize+1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (debug)
	 VG_(printf)("i=%d j=%d ls->mutex[i]=%p mx=%p\n",
		     i, j, ls->mutex[i]->mutexp, mx ? mx->mutexp : 0);
      /* first element of ls that sorts after mx: emit mx once, here */
      if (mx && mutex_cmp(mx, ls->mutex[i]) < 0) {
	 ret->mutex[j++] = mx;
	 mx = NULL;   /* consumed -- never emit twice */
      }
      ret->mutex[j++] = ls->mutex[i];
   }

   /* not added in loop - must be after (mx sorts last) */
   if (mx)
      ret->mutex[j++] = mx;

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("add-OUT", ret);
      sanity_check_locksets("add-OUT");
   }
   return ret;
}
1051
1052/* Builds ls with mx removed. mx should actually be in ls!
1053 (a checked assertion). Resulting set should not already
1054 exist in the table (unchecked).
1055*/
1056static
sewardj39a4d842002-11-13 22:14:30 +00001057LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
sewardj4bffb232002-11-13 21:46:34 +00001058{
1059 static const Bool debug = False;
1060 LockSet *ret = NULL;
1061 Int i, j;
1062
1063 if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
1064 print_LockSet("remove-IN", ls);
1065 }
1066
1067 if (debug || LOCKSET_SANITY)
1068 sanity_check_locksets("remove-IN");
1069
1070 sk_assert(ismember(ls, mx));
1071
1072 ret = alloc_LockSet(ls->setsize-1);
1073
1074 for(i = j = 0; i < ls->setsize; i++) {
1075 if (mutex_cmp(ls->mutex[i], mx) == 0)
1076 continue;
1077 ret->mutex[j++] = ls->mutex[i];
1078 }
1079
1080 sk_assert(j == ret->setsize);
1081
1082 if (debug || LOCKSET_SANITY) {
1083 print_LockSet("remove-OUT", ret);
1084 sanity_check_locksets("remove-OUT");
1085 }
1086 return ret;
njn25e49d8e72002-09-23 09:36:25 +00001087}
1088
1089
/* Builds the intersection, and then unbuilds it if it's already in the
   table.  Two merge-walks over the sorted inputs: the first counts the
   common elements so the result can be allocated exactly, the second
   fills it in.  The returned set is always the interned copy. */
static const LockSet *_intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("intersect-IN");

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("intersect a", a);
      print_LockSet("intersect b", b);
   }

   /* pass 1: count the size of the new set (sorted-merge walk) */
   size = 0;
   ia = ib = 0;
   for(size = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
	 size++;      /* in both -> in intersection */
	 ia++;
	 ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
	 ia++;
      } else {
	 sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
	 ib++;
      }
   }

   /* pass 2: build the intersection of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
	 sk_assert(iret < ret->setsize);
	 ret->mutex[iret++] = a->mutex[ia];
	 ia++;
	 ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
	 ia++;
      } else {
	 sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
	 ib++;
      }
   }

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      free_LockSet(ret);   /* already interned: discard our copy */
   } else {
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("intersect-OUT", found);
      sanity_check_locksets("intersect-OUT");
   }

   return found;
}
1160
sewardj4bffb232002-11-13 21:46:34 +00001161/* inline the fastpath */
1162static inline const LockSet *intersect(const LockSet *a, const LockSet *b)
sewardjc26cc252002-10-23 21:58:55 +00001163{
sewardj4bffb232002-11-13 21:46:34 +00001164 static const Bool debug = False;
sewardjc26cc252002-10-23 21:58:55 +00001165
1166 /* Fast case -- when the two are the same */
sewardj4bffb232002-11-13 21:46:34 +00001167 if (a == b) {
1168 if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
1169 print_LockSet("intersect-same fastpath", a);
sewardjc26cc252002-10-23 21:58:55 +00001170 }
sewardj4bffb232002-11-13 21:46:34 +00001171 return a;
sewardjc26cc252002-10-23 21:58:55 +00001172 }
1173
sewardj4bffb232002-11-13 21:46:34 +00001174 if (isempty(a) || isempty(b)) {
1175 if (debug)
1176 VG_(printf)("intersect empty fastpath\n");
1177 return emptyset;
1178 }
1179
1180 return _intersect(a, b);
1181}
1182
1183
/* Build the (interned) union of two sorted locksets.  Fast paths for
   identical and empty operands; otherwise two merge-walks: one to
   count, one to fill.  Returns the interned copy (freeing the fresh
   build if an equal set already exists). */
static const LockSet *ls_union(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("union-IN");

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
	 print_LockSet("union-same fastpath", a);
      }
      return a;
   }

   if (isempty(a)) {
      if (debug)
	 print_LockSet("union a=empty b", b);
      return b;
   }
   if (isempty(b)) {
      if (debug)
	 print_LockSet("union b=empty a", a);
      return a;
   }

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* pass 1: count the size of the new set.  cmp is forced to +1/-1
      once one input is exhausted so the other drains. */
   for(size = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;

      if ((ia < a->setsize) && (ib < b->setsize))
	 cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
	 cmp = 1;    /* a exhausted: consume from b */
      else
	 cmp = -1;   /* b exhausted: consume from a */

      if (cmp == 0) {
	 size++;
	 ia++;
	 ib++;
      } else if (cmp < 0) {
	 size++;
	 ia++;
      } else {
	 sk_assert(cmp > 0);
	 size++;
	 ib++;
      }
   }

   /* pass 2: build the union of the two sets
      (comment fixed: this builds the union, not the intersection) */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;
      sk_assert(iret < ret->setsize);

      if ((ia < a->setsize) && (ib < b->setsize))
	 cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
	 cmp = 1;
      else
	 cmp = -1;

      if (cmp == 0) {
	 ret->mutex[iret++] = a->mutex[ia];
	 ia++;
	 ib++;
      } else if (cmp < 0) {
	 ret->mutex[iret++] = a->mutex[ia];
	 ia++;
      } else {
	 sk_assert(cmp > 0);
	 ret->mutex[iret++] = b->mutex[ib];
	 ib++;
      }
   }

   sk_assert(iret == ret->setsize);

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      if (debug)
	 print_LockSet("union found existing set", found);
      free_LockSet(ret);
   } else {
      if (debug)
	 print_LockSet("union inserting new set", ret);
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("union-OUT", found);
      sanity_check_locksets("union-OUT");
   }

   return found;
}
1297
1298/*------------------------------------------------------------*/
sewardjdac0a442002-11-13 22:08:40 +00001299/*--- Implementation of mutex structure. ---*/
1300/*------------------------------------------------------------*/
sewardjc26cc252002-10-23 21:58:55 +00001301
1302static UInt graph_mark; /* current mark we're using for graph traversal */
1303
sewardj39a4d842002-11-13 22:14:30 +00001304static void record_mutex_error(ThreadId tid, Mutex *mutex,
sewardjc26cc252002-10-23 21:58:55 +00001305 Char *str, ExeContext *ec);
sewardj39a4d842002-11-13 22:14:30 +00001306static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
sewardj4bffb232002-11-13 21:46:34 +00001307 const LockSet *lockset_holding,
1308 const LockSet *lockset_prev);
sewardjc26cc252002-10-23 21:58:55 +00001309
sewardj39a4d842002-11-13 22:14:30 +00001310static void set_mutex_state(Mutex *mutex, MutexState state,
sewardjdac0a442002-11-13 22:08:40 +00001311 ThreadId tid, ThreadState *tst);
1312
1313#define M_MUTEX_HASHSZ 1021
1314
sewardj39a4d842002-11-13 22:14:30 +00001315static Mutex *mutex_hash[M_MUTEX_HASHSZ];
sewardjdac0a442002-11-13 22:08:40 +00001316static UInt total_mutexes;
1317
1318static const Char *pp_MutexState(MutexState st)
1319{
1320 switch(st) {
1321 case MxLocked: return "Locked";
1322 case MxUnlocked: return "Unlocked";
1323 case MxDead: return "Dead";
1324 case MxUnknown: return "Unknown";
1325 }
1326 return "???";
1327}
1328
1329static void pp_all_mutexes()
1330{
1331 Int i;
1332 Int locks, buckets;
1333
1334 locks = buckets = 0;
1335 for(i = 0; i < M_MUTEX_HASHSZ; i++) {
sewardj39a4d842002-11-13 22:14:30 +00001336 Mutex *mx;
sewardjdac0a442002-11-13 22:08:40 +00001337 Bool first = True;
1338
1339 for(mx = mutex_hash[i]; mx != NULL; mx = mx->next) {
1340 if (first) {
1341 buckets++;
1342 VG_(printf)("[%4d] = ", i);
1343 } else
1344 VG_(printf)(" ");
1345 locks++;
1346 first = False;
1347 VG_(printf)("%p [%8s] -> %p%(y\n",
1348 mx, pp_MutexState(mx->state), mx->mutexp, mx->mutexp);
1349 }
1350 }
1351
1352 VG_(printf)("%d locks in %d buckets (%d allocated)\n",
1353 locks, buckets, total_mutexes);
1354}
sewardjc26cc252002-10-23 21:58:55 +00001355
sewardj39a4d842002-11-13 22:14:30 +00001356/* find or create a Mutex for a program's mutex use */
1357static Mutex *get_mutex(Addr mutexp)
sewardjc26cc252002-10-23 21:58:55 +00001358{
1359 UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
sewardj39a4d842002-11-13 22:14:30 +00001360 Mutex *mp;
sewardjc26cc252002-10-23 21:58:55 +00001361
1362 for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
1363 if (mp->mutexp == mutexp)
1364 return mp;
1365
sewardjdac0a442002-11-13 22:08:40 +00001366 total_mutexes++;
1367
sewardjc26cc252002-10-23 21:58:55 +00001368 mp = VG_(malloc)(sizeof(*mp));
1369 mp->mutexp = mutexp;
1370 mp->next = mutex_hash[bucket];
1371 mutex_hash[bucket] = mp;
1372
1373 mp->state = MxUnknown;
1374 mp->tid = VG_INVALID_THREADID;
1375 mp->location = NULL;
1376
sewardj4bffb232002-11-13 21:46:34 +00001377 mp->lockdep = emptyset;
sewardjc26cc252002-10-23 21:58:55 +00001378 mp->mark = graph_mark - 1;
1379
1380 return mp;
1381}
1382
sewardjdac0a442002-11-13 22:08:40 +00001383/* Find all mutexes in a range of memory, and call the callback.
1384 Remove the mutex from the hash if the callback returns True (mutex
1385 structure itself is not freed, because it may be pointed to by a
1386 LockSet. */
sewardj39a4d842002-11-13 22:14:30 +00001387static void find_mutex_range(Addr start, Addr end, Bool (*action)(Mutex *))
sewardjc26cc252002-10-23 21:58:55 +00001388{
sewardjdac0a442002-11-13 22:08:40 +00001389 UInt first = start % M_MUTEX_HASHSZ;
1390 UInt last = (end+1) % M_MUTEX_HASHSZ;
1391 UInt i;
1392
1393 /* Single pass over the hash table, looking for likely hashes */
1394 for(i = first; i != last; ) {
sewardj39a4d842002-11-13 22:14:30 +00001395 Mutex *mx;
1396 Mutex **prev = &mutex_hash[i];
sewardjdac0a442002-11-13 22:08:40 +00001397
1398 for(mx = mutex_hash[i]; mx != NULL; prev = &mx->next, mx = mx->next) {
1399 if (mx->mutexp >= start && mx->mutexp < end && (*action)(mx))
1400 *prev = mx->next;
1401 }
1402
1403 if (++i == M_MUTEX_HASHSZ)
1404 i = 0;
sewardjc26cc252002-10-23 21:58:55 +00001405 }
sewardjc26cc252002-10-23 21:58:55 +00001406}
1407
1408#define MARK_LOOP (graph_mark+0)
1409#define MARK_DONE (graph_mark+1)
1410
/* Would locking 'start' while holding 'lockset' close a cycle in the
   lock-order graph?  Each Mutex's ->lockdep records sets of mutexes
   previously held when taking it; we DFS from 'lockset' along those
   edges looking for a node already on the current path.  Marks:
   MARK_LOOP = on current DFS path, MARK_DONE = fully explored.
   graph_mark is bumped by 2 per call, invalidating all old marks at
   once.  Uses a GCC nested function (GNU C extension). */
static Bool check_cycle(const Mutex *start, const LockSet* lockset)
{
   Bool check_cycle_inner(const Mutex *mutex, const LockSet *ls)
   {
      static const Bool debug = False;
      Int i;

      if (mutex->mark == MARK_LOOP)
	 return True;		/* found cycle */
      if (mutex->mark == MARK_DONE)
	 return False;		/* been here before, its OK */

      /* 'mark' is traversal bookkeeping; cast away const to set it */
      ((Mutex*)mutex)->mark = MARK_LOOP;

      if (debug)
	 VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
		     graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
      for(i = 0; i < ls->setsize; i++) {
	 const Mutex *mx = ls->mutex[i];

	 if (debug)
	    VG_(printf)("  %y ls=%p (ls->mutex=%p%(y)\n",
			mutex->mutexp, ls,
			mx->mutexp, mx->mutexp);
	 /* recurse along this mutex's recorded lock-order edges */
	 if (check_cycle_inner(mx, mx->lockdep))
	    return True;
      }
      ((Mutex*)mutex)->mark = MARK_DONE;

      return False;
   }

   graph_mark += 2;		/* clear all marks */

   return check_cycle_inner(start, lockset);
}
1447
/* test to see if a mutex state change would be problematic; this
   makes no changes to the mutex state.  This should be called before
   the locking thread has actually blocked.
   NOTE(review): despite the comment above, the MxLocked branch DOES
   update mutex->lockdep (the lock-order graph) when no cycle is found
   -- confirm that lockdep is not considered "mutex state" here. */
static void test_mutex_state(Mutex *mutex, MutexState state,
			     ThreadId tid, ThreadState *tst)
{
   static const Bool debug = False;

   /* any operation on a destroyed mutex is an error */
   if (mutex->state == MxDead) {
      Char *str;

      switch(state) {
      case MxLocked:	str = "lock dead mutex"; break;
      case MxUnlocked:	str = "unlock dead mutex"; break;
      default:		str = "operate on dead mutex"; break;
      }

      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex, str, mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      /* lockdep must not already contain a cycle through this mutex */
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
	 print_LockSet("thread holding", thread_locks[tid]);

      /* taking this mutex while holding thread_locks[tid]: report a
	 lock-order violation if that closes a cycle, otherwise record
	 the new ordering edges in lockdep */
      if (check_cycle(mutex, thread_locks[tid]))
	 record_lockgraph_error(tid, mutex, thread_locks[tid], mutex->lockdep);
      else {
	 mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

	 if (debug) {
	    VG_(printf)("giving mutex %p%(y lockdep = %p ",
			mutex->mutexp, mutex->mutexp, mutex->lockdep);
	    print_LockSet("lockdep", mutex->lockdep);
	 }
      }
      break;

   case MxUnlocked:
      if (debug)
	 print_LockSet("thread holding", thread_locks[tid]);

      /* unlocking is only legal on a mutex this thread holds */
      if (mutex->state != MxLocked) {
	 record_mutex_error(tid, mutex,
			    "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
	 record_mutex_error(tid, mutex,
			    "unlock someone else's mutex", mutex->location);
      }
      break;

   case MxDead:
      break;

   default:
      break;
   }
}
1511
/* Update a mutex state.  Expects most error testing and reporting to
   have happened in test_mutex_state().  The assumption is that no
   client code is run by thread tid between test and set, either
   because it is blocked or test and set are called together
   atomically.

   Setting state to MxDead is the exception, since that can happen as
   a result of any thread freeing memory; in this case set_mutex_state
   does all the error reporting as well.
*/
static void set_mutex_state(Mutex *mutex, MutexState state,
			    ThreadId tid, ThreadState *tst)
{
   static const Bool debug = False;

   if (debug)
      VG_(printf)("\ntid %d changing mutex (%p)->%p%(y state %s -> %s\n",
		  tid, mutex, mutex->mutexp, mutex->mutexp,
		  pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      return;
   }

   switch(state) {
   case MxLocked:
      /* relocking a held mutex should have been caught upstream; this
	 branch only reports and panics */
      if (mutex->state == MxLocked) {
	 if (mutex->tid != tid)
	    record_mutex_error(tid, mutex, "take lock held by someone else",
			       mutex->location);
	 else
	    record_mutex_error(tid, mutex, "take lock we already hold",
			       mutex->location);

	 VG_(skin_panic)("core should have checked this\n");
	 break;
      }

      /* test_mutex_state() already merged lockdep; it must be acyclic */
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      mutex->tid = tid;
      break;

   case MxUnlocked:
      if (debug)
	 print_LockSet("thread holding", thread_locks[tid]);

      /* bad unlocks were already reported by test_mutex_state();
	 silently ignore them here */
      if (mutex->state != MxLocked || mutex->tid != tid)
	 break;

      mutex->tid = VG_INVALID_THREADID;
      break;

   case MxDead:
      if (mutex->state == MxLocked) {
	 /* forcably remove offending lock from thread's lockset */
	 sk_assert(ismember(thread_locks[mutex->tid], mutex));
	 thread_locks[mutex->tid] = remove_LockSet(thread_locks[mutex->tid], mutex);
	 mutex->tid = VG_INVALID_THREADID;

	 record_mutex_error(tid, mutex,
			    "free locked mutex", mutex->location);
      }
      break;

   default:
      break;
   }

   /* remember where/when this transition happened, for error reports */
   mutex->location = VG_(get_ExeContext)(tst);
   mutex->state = state;
}
njn25e49d8e72002-09-23 09:36:25 +00001585
1586/*------------------------------------------------------------*/
1587/*--- Setting and checking permissions. ---*/
1588/*------------------------------------------------------------*/
1589
/* Set the shadow (Eraser) state of every 32-bit word overlapping
   [a, a+len) to the given initial status, first dropping any dead
   shadow mutexes whose client structures lived in that range (the
   memory is being recycled). */
static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr end;

   /* only clean up dead mutexes (GCC nested function, used as the
      find_mutex_range callback; returning True unhashes the mutex) */
   Bool cleanmx(Mutex *mx) {
      return mx->state == MxDead;
   }


#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
		   "Warning: set address range state: large range %d",
		   len);

   VGP_PUSHCC(VgpSARP);

   /* Remove mutexes in recycled memory range from hash */
   find_mutex_range(a, a+len, cleanmx);

   /* Memory block may not be aligned or a whole word multiple. In neat cases,
    * we have to init len/4 words (len is in bytes). In nasty cases, it's
    * len/4+1 words. This works out which it is by aligning the block and
    * seeing if the end byte is in the same word as it is for the unaligned
    * block; if not, it's the awkward case. */
   end = ROUNDUP(a + len, 4);
   a   = ROUNDDN(a, 4);

   /* Do it ... */
   switch (status) {
   case Vge_VirginInit:
      for ( ; a < end; a += 4) {
	 //PROF_EVENT(31); PPP
	 init_virgin_sword(a);
      }
      break;

   case Vge_NonVirginInit:
      for ( ; a < end; a += 4) {
	 //PROF_EVENT(31); PPP
	 init_nonvirgin_sword(a);
      }
      break;

   case Vge_SegmentInit:
      for ( ; a < end; a += 4) {
	 //PROF_EVENT(31); PPP
	 init_magically_inited_sword(a);
      }
      break;

   case Vge_Error:
      for ( ; a < end; a += 4) {
	 //PROF_EVENT(31); PPP
	 init_error_sword(a);
      }
      break;

   default:
      VG_(printf)("init_status = %u\n", status);
      VG_(skin_panic)("Unexpected Vge_InitStatus");
   }

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with
      __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSARP);
}
1670
1671
1672static void make_segment_readable ( Addr a, UInt len )
1673{
1674 //PROF_EVENT(??); PPP
1675 set_address_range_state ( a, len, Vge_SegmentInit );
1676}
1677
1678static void make_writable ( Addr a, UInt len )
1679{
1680 //PROF_EVENT(36); PPP
1681 set_address_range_state( a, len, Vge_VirginInit );
1682}
1683
static void make_readable ( Addr a, UInt len )
{
   //PROF_EVENT(37); PPP
   /* NOTE(review): this uses Vge_VirginInit, making make_readable
      identical to make_writable -- the blame history suggests this was
      a deliberate change (readable data is still virgin until first
      shared access), but confirm against the intended state machine. */
   set_address_range_state( a, len, Vge_VirginInit );
}
1689
1690
njn25e49d8e72002-09-23 09:36:25 +00001691/* Block-copy states (needed for implementing realloc()). */
1692static void copy_address_range_state(Addr src, Addr dst, UInt len)
1693{
1694 UInt i;
1695
1696 //PROF_EVENT(40); PPP
1697 for (i = 0; i < len; i += 4) {
1698 shadow_word sword = *(get_sword_addr ( src+i ));
1699 //PROF_EVENT(41); PPP
1700 set_sword ( dst+i, sword );
1701 }
1702}
1703
1704// SSS: put these somewhere better
sewardj0f811692002-10-22 04:59:26 +00001705static void eraser_mem_read (Addr a, UInt data_size, ThreadState *tst);
1706static void eraser_mem_write(Addr a, UInt data_size, ThreadState *tst);
sewardja5b3aec2002-10-22 05:09:36 +00001707
1708#define REGPARM(x) __attribute__((regparm (x)))
1709
1710static void eraser_mem_help_read_1(Addr a) REGPARM(1);
1711static void eraser_mem_help_read_2(Addr a) REGPARM(1);
1712static void eraser_mem_help_read_4(Addr a) REGPARM(1);
1713static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);
1714
1715static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
1716static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
1717static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
1718static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);
njn25e49d8e72002-09-23 09:36:25 +00001719
sewardj7a5ebcf2002-11-13 22:42:13 +00001720static void bus_lock(void);
1721static void bus_unlock(void);
1722
njn25e49d8e72002-09-23 09:36:25 +00001723static
1724void eraser_pre_mem_read(CorePart part, ThreadState* tst,
1725 Char* s, UInt base, UInt size )
1726{
sewardj0f811692002-10-22 04:59:26 +00001727 eraser_mem_read(base, size, tst);
njn25e49d8e72002-09-23 09:36:25 +00001728}
1729
1730static
1731void eraser_pre_mem_read_asciiz(CorePart part, ThreadState* tst,
1732 Char* s, UInt base )
1733{
sewardj0f811692002-10-22 04:59:26 +00001734 eraser_mem_read(base, VG_(strlen)((Char*)base), tst);
njn25e49d8e72002-09-23 09:36:25 +00001735}
1736
1737static
1738void eraser_pre_mem_write(CorePart part, ThreadState* tst,
1739 Char* s, UInt base, UInt size )
1740{
sewardj0f811692002-10-22 04:59:26 +00001741 eraser_mem_write(base, size, tst);
njn25e49d8e72002-09-23 09:36:25 +00001742}
1743
1744
1745
1746static
1747void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
1748{
njn1f3a9092002-10-04 09:22:30 +00001749 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +00001750 make_segment_readable(a, len);
1751}
1752
1753
1754static
1755void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
1756{
1757 if (is_inited) {
1758 make_readable(a, len);
1759 } else {
1760 make_writable(a, len);
1761 }
1762}
1763
1764static
1765void eraser_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +00001766 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +00001767{
1768 if (rr) make_readable(a, len);
1769 else if (ww) make_writable(a, len);
1770 /* else do nothing */
1771}
1772
sewardjf6374322002-11-13 22:35:55 +00001773static
1774void eraser_new_mem_stack_private(Addr a, UInt len)
1775{
1776 set_address_range_state(a, len, Vge_NonVirginInit);
1777}
1778
1779static
1780void eraser_new_mem_stack(Addr a, UInt len)
1781{
1782 set_address_range_state(a, len, Vge_VirginInit);
1783}
njn25e49d8e72002-09-23 09:36:25 +00001784
1785/*--------------------------------------------------------------*/
1786/*--- Initialise the memory audit system on program startup. ---*/
1787/*--------------------------------------------------------------*/
1788
1789static
1790void init_shadow_memory(void)
1791{
1792 Int i;
1793
1794 for (i = 0; i < ESEC_MAP_WORDS; i++)
1795 distinguished_secondary_map.swords[i] = virgin_sword;
1796
1797 /* These entries gradually get overwritten as the used address
1798 space expands. */
1799 for (i = 0; i < 65536; i++)
1800 primary_map[i] = &distinguished_secondary_map;
1801}
1802
1803
njn3e884182003-04-15 13:03:23 +00001804/*------------------------------------------------------------*/
1805/*--- malloc() et al replacements ---*/
1806/*------------------------------------------------------------*/
1807
njnb4aee052003-04-15 14:09:58 +00001808static VgHashTable hg_malloc_list = NULL;
njn3e884182003-04-15 13:03:23 +00001809
1810#define N_FREED_CHUNKS 2
1811static Int freechunkptr = 0;
1812static HG_Chunk *freechunks[N_FREED_CHUNKS];
1813
1814/* Use a small redzone (paranoia) */
1815UInt VG_(vg_malloc_redzone_szB) = 4;
1816
1817
1818/* Allocate a user-chunk of size bytes. Also allocate its shadow
1819 block, make the shadow block point at the user block. Put the
1820 shadow chunk on the appropriate list, and set all memory
1821 protections correctly. */
1822
1823static void add_HG_Chunk ( ThreadState* tst, Addr p, UInt size )
1824{
1825 HG_Chunk* hc;
1826
1827 hc = VG_(malloc)(sizeof(HG_Chunk));
1828 hc->data = p;
1829 hc->size = size;
1830 hc->where = VG_(get_ExeContext)(tst);
1831 hc->tid = VG_(get_tid_from_ThreadState)(tst);
1832
1833 VG_(HT_add_node)( hg_malloc_list, (VgHashNode*)hc );
1834}
1835
1836/* Allocate memory and note change in memory available */
1837static __inline__
1838void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
1839 Bool is_zeroed )
1840{
1841 Addr p;
1842
1843 p = (Addr)VG_(cli_malloc)(alignment, size);
1844 add_HG_Chunk ( tst, p, size );
1845 eraser_new_mem_heap( p, size, is_zeroed );
1846
1847 return (void*)p;
1848}
1849
1850void* SK_(malloc) ( ThreadState* tst, Int n )
1851{
1852 return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
1853}
1854
1855void* SK_(__builtin_new) ( ThreadState* tst, Int n )
1856{
1857 return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
1858}
1859
1860void* SK_(__builtin_vec_new) ( ThreadState* tst, Int n )
1861{
1862 return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
1863}
1864
1865void* SK_(memalign) ( ThreadState* tst, Int align, Int n )
1866{
1867 return alloc_and_new_mem ( tst, n, align, /*is_zeroed*/False );
1868}
1869
1870void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
1871{
1872 void* p;
1873 Int size, i;
1874
1875 size = nmemb * size1;
1876
1877 p = alloc_and_new_mem ( tst, size, VG_(clo_alignment), /*is_zeroed*/True );
1878 for (i = 0; i < size; i++) /* calloc() is zeroed */
1879 ((UChar*)p)[i] = 0;
1880 return p;
1881}
1882
/* Retire a client heap block: unlink it from the live-block table,
   stamp where it was freed, park it briefly in the freed-chunks ring
   (so errors on recently-freed memory can still be described), and
   mark any mutexes that lived inside the block as dead.  The actual
   cli_free of the client memory is deferred until the chunk falls out
   of the ring. */
static
void die_and_free_mem ( ThreadState* tst, HG_Chunk* hc,
                        HG_Chunk** prev_chunks_next_ptr )
{
   ThreadId tid = VG_(get_tid_from_ThreadState)(tst);
   Addr start = hc->data;
   Addr end = start + hc->size;

   /* GCC nested function (needs lexical access to tid/tst): invoked
      for each mutex found inside the freed range; forces it into the
      MxDead state.  Returns False to keep the search going. */
   Bool deadmx(Mutex *mx) {
      if (mx->state != MxDead)
	 set_mutex_state(mx, MxDead, tid, tst);
      
      return False;
   }

   /* Remove hc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup. Can't remove until at least
      after free and free_mismatch errors are done because they use
      describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = hc->next;

   /* Record where freed */
   hc->where = VG_(get_ExeContext) ( tst );

   /* maintain a small window so that the error reporting machinery
      knows about this memory */
   if (freechunks[freechunkptr] != NULL) {
      /* Ring slot occupied: the oldest parked chunk finally dies for
         real -- release both the client block and its shadow. */
      HG_Chunk* sc1 = freechunks[freechunkptr];
      VG_(cli_free) ( (void*)(sc1->data) );
      VG_(free) ( sc1 );
   }

   freechunks[freechunkptr] = hc;

   if (++freechunkptr == N_FREED_CHUNKS)
      freechunkptr = 0;

   /* mark all mutexes in range dead */
   find_mutex_range(start, end, deadmx);
}
1924
1925
1926static __inline__
1927void handle_free ( ThreadState* tst, void* p )
1928{
1929 HG_Chunk* hc;
1930 HG_Chunk** prev_chunks_next_ptr;
1931
1932 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1933 (VgHashNode***)&prev_chunks_next_ptr );
1934 if (hc == NULL) {
1935 return;
1936 }
1937 die_and_free_mem ( tst, hc, prev_chunks_next_ptr );
1938}
1939
/* Client free() replacement. */
void SK_(free) ( ThreadState* tst, void* p )
{
   handle_free(tst, p);
}
1944
/* C++ operator delete replacement. */
void SK_(__builtin_delete) ( ThreadState* tst, void* p )
{
   handle_free(tst, p);
}
1949
/* C++ operator delete[] replacement. */
void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p )
{
   handle_free(tst, p);
}
1954
1955void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
1956{
1957 HG_Chunk *hc;
1958 HG_Chunk **prev_chunks_next_ptr;
1959 UInt i;
1960
1961 /* First try and find the block. */
1962 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1963 (VgHashNode***)&prev_chunks_next_ptr );
1964
1965 if (hc == NULL) {
1966 return NULL;
1967 }
1968
1969 if (hc->size == new_size) {
1970 /* size unchanged */
1971 return p;
1972
1973 } else if (hc->size > new_size) {
1974 /* new size is smaller */
1975 hc->size = new_size;
1976 return p;
1977
1978 } else {
1979 /* new size is bigger */
1980 Addr p_new;
1981
1982 /* Get new memory */
1983 p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
1984
1985 /* First half kept and copied, second half new */
1986 copy_address_range_state( (Addr)p, p_new, hc->size );
1987 eraser_new_mem_heap ( p_new+hc->size, new_size-hc->size,
1988 /*inited*/False );
1989
1990 /* Copy from old to new */
1991 for (i = 0; i < hc->size; i++)
1992 ((UChar*)p_new)[i] = ((UChar*)p)[i];
1993
1994 /* Free old memory */
1995 die_and_free_mem ( tst, hc, prev_chunks_next_ptr );
1996
1997 /* this has to be after die_and_free_mem, otherwise the
1998 former succeeds in shorting out the new block, not the
1999 old, in the case when both are on the same list. */
2000 add_HG_Chunk ( tst, p_new, new_size );
2001
2002 return (void*)p_new;
2003 }
2004}
2005
njn25e49d8e72002-09-23 09:36:25 +00002006/*--------------------------------------------------------------*/
2007/*--- Machinery to support sanity checking ---*/
2008/*--------------------------------------------------------------*/
2009
2010/* Check that nobody has spuriously claimed that the first or last 16
2011 pages (64 KB) of address space have become accessible. Failure of
2012 the following do not per se indicate an internal consistency
2013 problem, but they are so likely to that we really want to know
2014 about it if so. */
2015
2016Bool SK_(cheap_sanity_check) ( void )
2017{
sewardjd5815ec2003-04-06 12:23:27 +00002018 if (VGE_IS_DISTINGUISHED_SM(primary_map[0])
2019 /* kludge: kernel drops a page up at top of address range for
2020 magic "optimized syscalls", so we can no longer check the
2021 highest page */
2022 /* && VGE_IS_DISTINGUISHED_SM(primary_map[65535]) */
2023 )
njn25e49d8e72002-09-23 09:36:25 +00002024 return True;
2025 else
2026 return False;
2027}
2028
2029
2030Bool SK_(expensive_sanity_check)(void)
2031{
2032 Int i;
2033
2034 /* Make sure nobody changed the distinguished secondary. */
2035 for (i = 0; i < ESEC_MAP_WORDS; i++)
2036 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
2037 distinguished_secondary_map.swords[i].state != virgin_sword.state)
2038 return False;
2039
2040 return True;
2041}
2042
2043
2044/*--------------------------------------------------------------*/
2045/*--- Instrumentation ---*/
2046/*--------------------------------------------------------------*/
2047
sewardjf6374322002-11-13 22:35:55 +00002048static UInt stk_ld, nonstk_ld, stk_st, nonstk_st;
2049
njn25e49d8e72002-09-23 09:36:25 +00002050/* Create and return an instrumented version of cb_in. Free cb_in
2051 before returning. */
2052UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
2053{
2054 UCodeBlock* cb;
2055 Int i;
2056 UInstr* u_in;
2057 Int t_size = INVALID_TEMPREG;
sewardjf6374322002-11-13 22:35:55 +00002058 Int ntemps;
2059 Bool *stackref = NULL;
sewardj7a5ebcf2002-11-13 22:42:13 +00002060 Bool locked = False; /* lock prefix */
njn25e49d8e72002-09-23 09:36:25 +00002061
njn810086f2002-11-14 12:42:47 +00002062 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002063
sewardjf6374322002-11-13 22:35:55 +00002064 /* stackref[] is used for super-simple value tracking to keep note
2065 of which tempregs currently hold a value which is derived from
2066 ESP or EBP, and is therefore likely stack-relative if used as
2067 the address for LOAD or STORE. */
njn810086f2002-11-14 12:42:47 +00002068 ntemps = VG_(get_num_temps)(cb);
sewardjf6374322002-11-13 22:35:55 +00002069 stackref = VG_(malloc)(sizeof(*stackref) * ntemps);
2070 VG_(memset)(stackref, 0, sizeof(*stackref) * ntemps);
2071
njn810086f2002-11-14 12:42:47 +00002072 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
2073 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +00002074
njn25e49d8e72002-09-23 09:36:25 +00002075 switch (u_in->opcode) {
2076
2077 case NOP: case CALLM_S: case CALLM_E:
2078 break;
sewardjf6374322002-11-13 22:35:55 +00002079
sewardj7a5ebcf2002-11-13 22:42:13 +00002080 case LOCK:
2081 locked = True;
2082 uInstr0(cb, CCALL, 0);
2083 uCCall(cb, (Addr)bus_lock, 0, 0, False);
2084 break;
2085
2086 case JMP: case INCEIP:
2087 if (locked) {
2088 uInstr0(cb, CCALL, 0);
2089 uCCall(cb, (Addr)bus_unlock, 0, 0, False);
2090 }
2091 locked = False;
2092 VG_(copy_UInstr)(cb, u_in);
2093 break;
2094
sewardjf6374322002-11-13 22:35:55 +00002095 case GET:
2096 sk_assert(u_in->tag1 == ArchReg);
2097 sk_assert(u_in->tag2 == TempReg);
2098 sk_assert(u_in->val2 < ntemps);
2099
2100 stackref[u_in->val2] = (u_in->size == 4 &&
2101 (u_in->val1 == R_ESP || u_in->val1 == R_EBP));
2102 VG_(copy_UInstr)(cb, u_in);
2103 break;
2104
2105 case MOV:
2106 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2107 sk_assert(u_in->tag2 == TempReg);
2108 stackref[u_in->val2] = stackref[u_in->val1];
2109 }
2110 VG_(copy_UInstr)(cb, u_in);
2111 break;
2112
2113 case LEA1:
2114 case ADD: case SUB:
2115 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2116 sk_assert(u_in->tag2 == TempReg);
2117 stackref[u_in->val2] |= stackref[u_in->val1];
2118 }
2119 VG_(copy_UInstr)(cb, u_in);
2120 break;
njn25e49d8e72002-09-23 09:36:25 +00002121
sewardja5b3aec2002-10-22 05:09:36 +00002122 case LOAD: {
2123 void (*help)(Addr);
2124 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002125 sk_assert(u_in->tag1 == TempReg);
2126
2127 if (!clo_priv_stacks || !stackref[u_in->val1]) {
2128 nonstk_ld++;
2129
2130 switch(u_in->size) {
2131 case 1: help = eraser_mem_help_read_1; break;
2132 case 2: help = eraser_mem_help_read_2; break;
2133 case 4: help = eraser_mem_help_read_4; break;
2134 default:
2135 VG_(skin_panic)("bad size");
2136 }
sewardja5b3aec2002-10-22 05:09:36 +00002137
sewardjf6374322002-11-13 22:35:55 +00002138 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
2139 uCCall(cb, (Addr)help, 1, 1, False);
2140 } else
2141 stk_ld++;
njn25e49d8e72002-09-23 09:36:25 +00002142
sewardja5b3aec2002-10-22 05:09:36 +00002143 VG_(copy_UInstr)(cb, u_in);
2144 t_size = INVALID_TEMPREG;
2145 break;
2146 }
2147
2148 case FPU_R: {
njne427a662002-10-02 11:08:25 +00002149 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00002150 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002151
2152 t_size = newTemp(cb);
2153 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2154 uLiteral(cb, (UInt)u_in->size);
njn25e49d8e72002-09-23 09:36:25 +00002155
sewardja5b3aec2002-10-22 05:09:36 +00002156 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2157 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
njn25e49d8e72002-09-23 09:36:25 +00002158
sewardja5b3aec2002-10-22 05:09:36 +00002159 VG_(copy_UInstr)(cb, u_in);
2160 t_size = INVALID_TEMPREG;
2161 break;
2162 }
2163
2164 case STORE: {
2165 void (*help)(Addr, UInt);
2166 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002167 sk_assert(u_in->tag2 == TempReg);
sewardja5b3aec2002-10-22 05:09:36 +00002168
sewardjf6374322002-11-13 22:35:55 +00002169 if (!clo_priv_stacks || !stackref[u_in->val2]) {
2170 nonstk_st++;
2171
2172 switch(u_in->size) {
2173 case 1: help = eraser_mem_help_write_1; break;
2174 case 2: help = eraser_mem_help_write_2; break;
2175 case 4: help = eraser_mem_help_write_4; break;
2176 default:
2177 VG_(skin_panic)("bad size");
2178 }
2179
2180 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
2181 uCCall(cb, (Addr)help, 2, 2, False);
2182 } else
2183 stk_st++;
sewardja5b3aec2002-10-22 05:09:36 +00002184
2185 VG_(copy_UInstr)(cb, u_in);
2186 t_size = INVALID_TEMPREG;
2187 break;
2188 }
2189
2190 case FPU_W: {
njne427a662002-10-02 11:08:25 +00002191 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00002192 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002193
2194 t_size = newTemp(cb);
2195 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2196 uLiteral(cb, (UInt)u_in->size);
2197 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2198 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
2199
2200 VG_(copy_UInstr)(cb, u_in);
2201 t_size = INVALID_TEMPREG;
2202 break;
2203 }
njn25e49d8e72002-09-23 09:36:25 +00002204
sewardj3d7c9c82003-03-26 21:08:13 +00002205 case MMX1: case MMX2: case MMX3:
2206 case MMX2_MemRd: case MMX2_MemWr:
2207 case MMX2_RegRd: case MMX2_RegWr:
2208 VG_(skin_panic)(
2209 "I don't know how to instrument MMXish stuff (yet)");
2210 break;
2211
njn25e49d8e72002-09-23 09:36:25 +00002212 default:
sewardjf6374322002-11-13 22:35:55 +00002213 /* conservative tromping */
2214 if (0 && u_in->tag1 == TempReg) /* can val1 ever be dest? */
2215 stackref[u_in->val1] = False;
2216 if (u_in->tag2 == TempReg)
2217 stackref[u_in->val2] = False;
2218 if (u_in->tag3 == TempReg)
2219 stackref[u_in->val3] = False;
njn4ba5a792002-09-30 10:23:54 +00002220 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00002221 break;
2222 }
2223 }
2224
sewardjf6374322002-11-13 22:35:55 +00002225 VG_(free)(stackref);
njn4ba5a792002-09-30 10:23:54 +00002226 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002227 return cb;
2228}
2229
2230
2231/*--------------------------------------------------------------------*/
2232/*--- Error and suppression handling ---*/
2233/*--------------------------------------------------------------------*/
2234
2235typedef
2236 enum {
2237 /* Possible data race */
2238 EraserSupp
2239 }
2240 EraserSuppKind;
2241
2242/* What kind of error it is. */
2243typedef
2244 enum {
sewardj16748af2002-10-22 04:55:54 +00002245 EraserErr, /* data-race */
2246 MutexErr, /* mutex operations */
sewardjff2c9232002-11-13 21:44:39 +00002247 LockGraphErr, /* mutex order error */
njn25e49d8e72002-09-23 09:36:25 +00002248 }
2249 EraserErrorKind;
2250
sewardj16748af2002-10-22 04:55:54 +00002251/* The classification of a faulting address. */
2252typedef
2253 enum { Undescribed, /* as-yet unclassified */
2254 Stack,
2255 Unknown, /* classification yielded nothing useful */
sewardjdac0a442002-11-13 22:08:40 +00002256 Mallocd,
2257 Freed,
sewardj16748af2002-10-22 04:55:54 +00002258 Segment
2259 }
2260 AddrKind;
2261/* Records info about a faulting address. */
2262typedef
2263 struct {
2264 /* ALL */
2265 AddrKind akind;
2266 /* Freed, Mallocd */
2267 Int blksize;
2268 /* Freed, Mallocd */
2269 Int rwoffset;
2270 /* Freed, Mallocd */
2271 ExeContext* lastchange;
2272 ThreadId lasttid;
2273 /* Stack */
2274 ThreadId stack_tid;
2275 /* Segment */
2276 const Char* filename;
2277 const Char* section;
2278 /* True if is just-below %esp -- could be a gcc bug. */
2279 Bool maybe_gcc;
2280 }
2281 AddrInfo;
njn25e49d8e72002-09-23 09:36:25 +00002282
sewardj16748af2002-10-22 04:55:54 +00002283/* What kind of memory access is involved in the error? */
2284typedef
2285 enum { ReadAxs, WriteAxs, ExecAxs }
2286 AxsKind;
2287
2288/* Extra context for memory errors */
2289typedef
2290 struct {
2291 AxsKind axskind;
2292 Int size;
2293 AddrInfo addrinfo;
2294 Bool isWrite;
2295 shadow_word prevstate;
sewardjff2c9232002-11-13 21:44:39 +00002296 /* MutexErr, LockGraphErr */
sewardj39a4d842002-11-13 22:14:30 +00002297 Mutex *mutex;
sewardj499e3de2002-11-13 22:22:25 +00002298 EC_EIP lasttouched;
sewardj16748af2002-10-22 04:55:54 +00002299 ThreadId lasttid;
sewardjff2c9232002-11-13 21:44:39 +00002300 /* LockGraphErr */
sewardj4bffb232002-11-13 21:46:34 +00002301 const LockSet *held_lockset;
2302 const LockSet *prev_lockset;
sewardj16748af2002-10-22 04:55:54 +00002303 }
2304 HelgrindError;
2305
2306static __inline__
2307void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002308{
sewardj16748af2002-10-22 04:55:54 +00002309 ai->akind = Unknown;
2310 ai->blksize = 0;
2311 ai->rwoffset = 0;
2312 ai->lastchange = NULL;
2313 ai->lasttid = VG_INVALID_THREADID;
2314 ai->filename = NULL;
2315 ai->section = "???";
2316 ai->stack_tid = VG_INVALID_THREADID;
2317 ai->maybe_gcc = False;
njn25e49d8e72002-09-23 09:36:25 +00002318}
2319
sewardj16748af2002-10-22 04:55:54 +00002320static __inline__
2321void clear_HelgrindError ( HelgrindError* err_extra )
2322{
2323 err_extra->axskind = ReadAxs;
2324 err_extra->size = 0;
2325 err_extra->mutex = NULL;
sewardj499e3de2002-11-13 22:22:25 +00002326 err_extra->lasttouched= NULL_EC_EIP;
sewardj16748af2002-10-22 04:55:54 +00002327 err_extra->lasttid = VG_INVALID_THREADID;
sewardjff2c9232002-11-13 21:44:39 +00002328 err_extra->prev_lockset = 0;
2329 err_extra->held_lockset = 0;
sewardj8fac99a2002-11-13 22:31:26 +00002330 err_extra->prevstate = SW(Vge_Virgin, 0);
sewardj16748af2002-10-22 04:55:54 +00002331 clear_AddrInfo ( &err_extra->addrinfo );
2332 err_extra->isWrite = False;
2333}
2334
2335
2336
/* Describe an address as best you can, for error messages,
   putting the result in ai.  Tries, in order: mapped segments,
   currently malloc'd blocks, recently freed blocks; otherwise
   reports Unknown. */

static void describe_addr ( Addr a, AddrInfo* ai )
{
   HG_Chunk* hc;
   Int i;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'. */

   /* Closure for searching thread stacks */
   /* NOTE(review): addr_is_in_bounds appears unused within this
      function -- possibly leftover from a removed stack search;
      confirm before deleting. */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(VgHashNode *node)
   {
      HG_Chunk* hc2 = (HG_Chunk*)node;
      return (hc2->data <= a && a < hc2->data + hc2->size);
   }

   /* Search for it in segments */
   {
      const SegInfo *seg;

      for(seg = VG_(next_seginfo)(NULL);
	  seg != NULL;
	  seg = VG_(next_seginfo)(seg)) {
	 Addr base = VG_(seg_start)(seg);
	 UInt size = VG_(seg_size)(seg);
	 const UChar *filename = VG_(seg_filename)(seg);

	 if (a >= base && a < base+size) {
	    ai->akind = Segment;
	    ai->blksize = size;
	    ai->rwoffset = a - base;
	    ai->filename = filename;

	    switch(VG_(seg_sect_kind)(a)) {
	    case Vg_SectText:	ai->section = "text"; break;
	    case Vg_SectData:	ai->section = "data"; break;
	    case Vg_SectBSS:	ai->section = "BSS"; break;
	    case Vg_SectGOT:	ai->section = "GOT"; break;
	    case Vg_SectPLT:	ai->section = "PLT"; break;
	    case Vg_SectUnknown:
	    default:
	       ai->section = "???"; break;
	    }

	    return;
	 }
      }
   }

   /* Search for a currently malloc'd block which might bracket it. */
   hc = (HG_Chunk*)VG_(HT_first_match)(hg_malloc_list, addr_is_in_block);
   if (NULL != hc) {
      ai->akind = Mallocd;
      ai->blksize = hc->size;
      ai->rwoffset = (Int)a - (Int)(hc->data);
      ai->lastchange = hc->where;
      ai->lasttid = hc->tid;
      return;
   }

   /* Look in recently freed memory (the freechunks ring kept by
      die_and_free_mem). */
   for(i = 0; i < N_FREED_CHUNKS; i++) {
      hc = freechunks[i];
      if (hc == NULL)
	 continue;

      if (a >= hc->data && a < hc->data + hc->size) {
	 ai->akind = Freed;
	 ai->blksize = hc->size;
	 ai->rwoffset = a - hc->data;
	 ai->lastchange = hc->where;
	 ai->lasttid = hc->tid;
	 return;
      }
   }

   /* Clueless ... */
   ai->akind = Unknown;
   return;
}
2423
2424
njn810086f2002-11-14 12:42:47 +00002425/* Creates a copy of the `extra' part, updates the copy with address info if
2426 necessary, and returns the copy. */
2427void* SK_(dup_extra_and_update)(Error* err)
sewardj16748af2002-10-22 04:55:54 +00002428{
njn810086f2002-11-14 12:42:47 +00002429 HelgrindError* new_extra;
sewardj16748af2002-10-22 04:55:54 +00002430
njn810086f2002-11-14 12:42:47 +00002431 new_extra = VG_(malloc)(sizeof(HelgrindError));
2432 *new_extra = *((HelgrindError*)VG_(get_error_extra)(err));
sewardj16748af2002-10-22 04:55:54 +00002433
njn810086f2002-11-14 12:42:47 +00002434 if (new_extra->addrinfo.akind == Undescribed)
2435 describe_addr ( VG_(get_error_address)(err), &(new_extra->addrinfo) );
sewardj16748af2002-10-22 04:55:54 +00002436
njn810086f2002-11-14 12:42:47 +00002437 return new_extra;
sewardj16748af2002-10-22 04:55:54 +00002438}
2439
sewardj0f811692002-10-22 04:59:26 +00002440static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write,
2441 shadow_word prevstate )
sewardj16748af2002-10-22 04:55:54 +00002442{
sewardjc4a810d2002-11-13 22:25:51 +00002443 shadow_word *sw;
sewardj16748af2002-10-22 04:55:54 +00002444 HelgrindError err_extra;
2445
sewardjff2c9232002-11-13 21:44:39 +00002446 n_eraser_warnings++;
2447
sewardj16748af2002-10-22 04:55:54 +00002448 clear_HelgrindError(&err_extra);
2449 err_extra.isWrite = is_write;
2450 err_extra.addrinfo.akind = Undescribed;
2451 err_extra.prevstate = prevstate;
sewardj499e3de2002-11-13 22:22:25 +00002452 if (clo_execontext)
2453 err_extra.lasttouched = getExeContext(a);
sewardj0f811692002-10-22 04:59:26 +00002454 VG_(maybe_record_error)( tst, EraserErr, a,
sewardj16748af2002-10-22 04:55:54 +00002455 (is_write ? "writing" : "reading"),
2456 &err_extra);
2457
sewardjc4a810d2002-11-13 22:25:51 +00002458 sw = get_sword_addr(a);
2459 if (sw->state == Vge_Excl && sw->other != TLSP_INDICATING_ALL) {
2460 ThreadLifeSeg *tls = unpackTLS(sw->other);
2461 tls->refcount--;
2462 }
2463
sewardj7f3ad222002-11-13 22:11:53 +00002464 set_sword(a, error_sword);
sewardj16748af2002-10-22 04:55:54 +00002465}
2466
sewardj39a4d842002-11-13 22:14:30 +00002467static void record_mutex_error(ThreadId tid, Mutex *mutex,
sewardj16748af2002-10-22 04:55:54 +00002468 Char *str, ExeContext *ec)
2469{
2470 HelgrindError err_extra;
2471
2472 clear_HelgrindError(&err_extra);
2473 err_extra.addrinfo.akind = Undescribed;
2474 err_extra.mutex = mutex;
sewardjc808ef52002-11-13 22:43:26 +00002475 err_extra.lasttouched = EC(ec, virgin_sword, thread_seg[tid]);
sewardj16748af2002-10-22 04:55:54 +00002476 err_extra.lasttid = tid;
2477
2478 VG_(maybe_record_error)(VG_(get_ThreadState)(tid), MutexErr,
2479 (Addr)mutex->mutexp, str, &err_extra);
2480}
njn25e49d8e72002-09-23 09:36:25 +00002481
sewardj39a4d842002-11-13 22:14:30 +00002482static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
sewardj4bffb232002-11-13 21:46:34 +00002483 const LockSet *lockset_holding,
2484 const LockSet *lockset_prev)
sewardjff2c9232002-11-13 21:44:39 +00002485{
2486 HelgrindError err_extra;
2487
2488 n_lockorder_warnings++;
2489
2490 clear_HelgrindError(&err_extra);
2491 err_extra.addrinfo.akind = Undescribed;
2492 err_extra.mutex = mutex;
2493
sewardjc808ef52002-11-13 22:43:26 +00002494 err_extra.lasttouched = EC(mutex->location, virgin_sword, 0);
sewardjff2c9232002-11-13 21:44:39 +00002495 err_extra.held_lockset = lockset_holding;
2496 err_extra.prev_lockset = lockset_prev;
2497
2498 VG_(maybe_record_error)(VG_(get_ThreadState)(tid), LockGraphErr,
sewardjdac0a442002-11-13 22:08:40 +00002499 mutex->mutexp, "", &err_extra);
sewardjff2c9232002-11-13 21:44:39 +00002500}
2501
njn810086f2002-11-14 12:42:47 +00002502Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
njn25e49d8e72002-09-23 09:36:25 +00002503{
njn810086f2002-11-14 12:42:47 +00002504 Char *e1s, *e2s;
sewardj16748af2002-10-22 04:55:54 +00002505
njn810086f2002-11-14 12:42:47 +00002506 sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
2507
2508 switch (VG_(get_error_kind)(e1)) {
sewardj16748af2002-10-22 04:55:54 +00002509 case EraserErr:
njn810086f2002-11-14 12:42:47 +00002510 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002511
2512 case MutexErr:
njn810086f2002-11-14 12:42:47 +00002513 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002514 }
2515
njn810086f2002-11-14 12:42:47 +00002516 e1s = VG_(get_error_string)(e1);
2517 e2s = VG_(get_error_string)(e2);
2518 if (e1s != e2s) return False;
2519 if (0 != VG_(strcmp)(e1s, e2s)) return False;
njn25e49d8e72002-09-23 09:36:25 +00002520 return True;
2521}
2522
sewardj16748af2002-10-22 04:55:54 +00002523static void pp_AddrInfo ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002524{
sewardj16748af2002-10-22 04:55:54 +00002525 switch (ai->akind) {
2526 case Stack:
2527 VG_(message)(Vg_UserMsg,
2528 " Address %p is on thread %d's stack",
2529 a, ai->stack_tid);
2530 break;
2531 case Unknown:
2532 if (ai->maybe_gcc) {
2533 VG_(message)(Vg_UserMsg,
2534 " Address %p is just below %%esp. Possibly a bug in GCC/G++",
2535 a);
2536 VG_(message)(Vg_UserMsg,
2537 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
2538 } else {
2539 VG_(message)(Vg_UserMsg,
2540 " Address %p is not stack'd, malloc'd or free'd", a);
2541 }
2542 break;
2543 case Segment:
2544 VG_(message)(Vg_UserMsg,
2545 " Address %p is in %s section of %s",
2546 a, ai->section, ai->filename);
2547 break;
sewardjdac0a442002-11-13 22:08:40 +00002548 case Mallocd:
2549 case Freed: {
sewardj16748af2002-10-22 04:55:54 +00002550 UInt delta;
2551 UChar* relative;
2552 if (ai->rwoffset < 0) {
2553 delta = (UInt)(- ai->rwoffset);
2554 relative = "before";
2555 } else if (ai->rwoffset >= ai->blksize) {
2556 delta = ai->rwoffset - ai->blksize;
2557 relative = "after";
2558 } else {
2559 delta = ai->rwoffset;
2560 relative = "inside";
2561 }
2562 VG_(message)(Vg_UserMsg,
sewardj499e3de2002-11-13 22:22:25 +00002563 " Address %p is %d bytes %s a block of size %d %s by thread %d",
sewardj16748af2002-10-22 04:55:54 +00002564 a, delta, relative,
2565 ai->blksize,
sewardjdac0a442002-11-13 22:08:40 +00002566 ai->akind == Mallocd ? "alloc'd" : "freed",
sewardj16748af2002-10-22 04:55:54 +00002567 ai->lasttid);
sewardj5481f8f2002-10-20 19:43:47 +00002568
sewardj16748af2002-10-22 04:55:54 +00002569 VG_(pp_ExeContext)(ai->lastchange);
2570 break;
2571 }
2572 default:
2573 VG_(skin_panic)("pp_AddrInfo");
2574 }
njn25e49d8e72002-09-23 09:36:25 +00002575}
2576
sewardj4bffb232002-11-13 21:46:34 +00002577static Char *lockset_str(const Char *prefix, const LockSet *lockset)
sewardjff2c9232002-11-13 21:44:39 +00002578{
sewardjff2c9232002-11-13 21:44:39 +00002579 Char *buf, *cp;
sewardj4bffb232002-11-13 21:46:34 +00002580 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002581
sewardj4bffb232002-11-13 21:46:34 +00002582 buf = VG_(malloc)((prefix == NULL ? 0 : VG_(strlen)(prefix)) +
2583 lockset->setsize * 120 +
2584 1);
sewardjff2c9232002-11-13 21:44:39 +00002585
2586 cp = buf;
2587 if (prefix)
2588 cp += VG_(sprintf)(cp, "%s", prefix);
2589
sewardj4bffb232002-11-13 21:46:34 +00002590 for(i = 0; i < lockset->setsize; i++)
2591 cp += VG_(sprintf)(cp, "%p%(y, ", lockset->mutex[i]->mutexp,
2592 lockset->mutex[i]->mutexp);
sewardjff2c9232002-11-13 21:44:39 +00002593
sewardj4bffb232002-11-13 21:46:34 +00002594 if (lockset->setsize)
sewardjff2c9232002-11-13 21:44:39 +00002595 cp[-2] = '\0';
2596 else
2597 *cp = '\0';
2598
2599 return buf;
2600}
njn25e49d8e72002-09-23 09:36:25 +00002601
njn43c799e2003-04-08 00:08:52 +00002602void SK_(pp_SkinError) ( Error* err )
njn25e49d8e72002-09-23 09:36:25 +00002603{
njn810086f2002-11-14 12:42:47 +00002604 HelgrindError *extra = (HelgrindError *)VG_(get_error_extra)(err);
sewardj16748af2002-10-22 04:55:54 +00002605 Char buf[100];
2606 Char *msg = buf;
sewardj4bffb232002-11-13 21:46:34 +00002607 const LockSet *ls;
sewardj16748af2002-10-22 04:55:54 +00002608
2609 *msg = '\0';
2610
njn810086f2002-11-14 12:42:47 +00002611 switch(VG_(get_error_kind)(err)) {
2612 case EraserErr: {
2613 Addr err_addr = VG_(get_error_address)(err);
2614
sewardj16748af2002-10-22 04:55:54 +00002615 VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
njn810086f2002-11-14 12:42:47 +00002616 VG_(get_error_string)(err), err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002617 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn810086f2002-11-14 12:42:47 +00002618 pp_AddrInfo(err_addr, &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002619
2620 switch(extra->prevstate.state) {
2621 case Vge_Virgin:
2622 /* shouldn't be possible to go directly from virgin -> error */
2623 VG_(sprintf)(buf, "virgin!?");
2624 break;
2625
sewardjc4a810d2002-11-13 22:25:51 +00002626 case Vge_Excl: {
2627 ThreadLifeSeg *tls = unpackTLS(extra->prevstate.other);
2628
2629 sk_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
2630 VG_(sprintf)(buf, "exclusively owned by thread %u", tls->tid);
sewardj16748af2002-10-22 04:55:54 +00002631 break;
sewardjc4a810d2002-11-13 22:25:51 +00002632 }
sewardj16748af2002-10-22 04:55:54 +00002633
2634 case Vge_Shar:
sewardjff2c9232002-11-13 21:44:39 +00002635 case Vge_SharMod:
sewardj8fac99a2002-11-13 22:31:26 +00002636 ls = unpackLockSet(extra->prevstate.other);
sewardj4bffb232002-11-13 21:46:34 +00002637
2638 if (isempty(ls)) {
sewardj16748af2002-10-22 04:55:54 +00002639 VG_(sprintf)(buf, "shared %s, no locks",
2640 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
2641 break;
2642 }
2643
sewardjff2c9232002-11-13 21:44:39 +00002644 msg = lockset_str(extra->prevstate.state == Vge_Shar ?
2645 "shared RO, locked by:" :
sewardj4bffb232002-11-13 21:46:34 +00002646 "shared RW, locked by:", ls);
sewardj16748af2002-10-22 04:55:54 +00002647
sewardj16748af2002-10-22 04:55:54 +00002648 break;
2649 }
sewardj16748af2002-10-22 04:55:54 +00002650
sewardj499e3de2002-11-13 22:22:25 +00002651 if (*msg)
sewardj16748af2002-10-22 04:55:54 +00002652 VG_(message)(Vg_UserMsg, " Previous state: %s", msg);
sewardj499e3de2002-11-13 22:22:25 +00002653
sewardj72baa7a2002-12-09 23:32:58 +00002654 if (clo_execontext == EC_Some
2655 && extra->lasttouched.uu_ec_eip.eip != 0) {
sewardj499e3de2002-11-13 22:22:25 +00002656 Char file[100];
2657 UInt line;
sewardj72baa7a2002-12-09 23:32:58 +00002658 Addr eip = extra->lasttouched.uu_ec_eip.eip;
sewardj499e3de2002-11-13 22:22:25 +00002659
sewardjc808ef52002-11-13 22:43:26 +00002660 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s by thread %u",
njn810086f2002-11-14 12:42:47 +00002661 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002662 pp_state(extra->lasttouched.state),
2663 unpackTLS(extra->lasttouched.tls)->tid);
sewardj499e3de2002-11-13 22:22:25 +00002664
2665 if (VG_(get_filename_linenum)(eip, file, sizeof(file), &line)) {
2666 VG_(message)(Vg_UserMsg, " at %p: %y (%s:%u)",
2667 eip, eip, file, line);
2668 } else if (VG_(get_objname)(eip, file, sizeof(file))) {
2669 VG_(message)(Vg_UserMsg, " at %p: %y (in %s)",
2670 eip, eip, file);
2671 } else {
2672 VG_(message)(Vg_UserMsg, " at %p: %y", eip, eip);
2673 }
sewardj72baa7a2002-12-09 23:32:58 +00002674 } else if (clo_execontext == EC_All
2675 && extra->lasttouched.uu_ec_eip.ec != NULL) {
sewardjc808ef52002-11-13 22:43:26 +00002676 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s in tid %u",
njn810086f2002-11-14 12:42:47 +00002677 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002678 pp_state(extra->lasttouched.state),
2679 unpackTLS(extra->lasttouched.tls)->tid);
sewardj72baa7a2002-12-09 23:32:58 +00002680 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj499e3de2002-11-13 22:22:25 +00002681 }
sewardj16748af2002-10-22 04:55:54 +00002682 break;
njn810086f2002-11-14 12:42:47 +00002683 }
sewardj16748af2002-10-22 04:55:54 +00002684
2685 case MutexErr:
sewardj499e3de2002-11-13 22:22:25 +00002686 VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s",
njn810086f2002-11-14 12:42:47 +00002687 VG_(get_error_address)(err),
2688 VG_(get_error_address)(err),
2689 VG_(get_error_string)(err));
njn43c799e2003-04-08 00:08:52 +00002690 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardj72baa7a2002-12-09 23:32:58 +00002691 if (extra->lasttouched.uu_ec_eip.ec != NULL) {
sewardj499e3de2002-11-13 22:22:25 +00002692 VG_(message)(Vg_UserMsg, " last touched by thread %d", extra->lasttid);
sewardj72baa7a2002-12-09 23:32:58 +00002693 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj16748af2002-10-22 04:55:54 +00002694 }
njn810086f2002-11-14 12:42:47 +00002695 pp_AddrInfo(VG_(get_error_address)(err), &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002696 break;
sewardjff2c9232002-11-13 21:44:39 +00002697
2698 case LockGraphErr: {
sewardj4bffb232002-11-13 21:46:34 +00002699 const LockSet *heldset = extra->held_lockset;
njn810086f2002-11-14 12:42:47 +00002700 Addr err_addr = VG_(get_error_address)(err);
sewardj4bffb232002-11-13 21:46:34 +00002701 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002702
2703 msg = lockset_str(NULL, heldset);
2704
2705 VG_(message)(Vg_UserMsg, "Mutex %p%(y locked in inconsistent order",
njn810086f2002-11-14 12:42:47 +00002706 err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002707 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardjff2c9232002-11-13 21:44:39 +00002708 VG_(message)(Vg_UserMsg, " while holding locks %s", msg);
2709
sewardj4bffb232002-11-13 21:46:34 +00002710 for(i = 0; i < heldset->setsize; i++) {
sewardj39a4d842002-11-13 22:14:30 +00002711 const Mutex *lsmx = heldset->mutex[i];
sewardjff2c9232002-11-13 21:44:39 +00002712
sewardj542494b2002-11-13 22:46:13 +00002713 /* needs to be a recursive search+display */
2714 if (0 && !ismember(lsmx->lockdep, extra->mutex))
sewardjff2c9232002-11-13 21:44:39 +00002715 continue;
2716
2717 VG_(message)(Vg_UserMsg, " %p%(y last locked at",
2718 lsmx->mutexp, lsmx->mutexp);
2719 VG_(pp_ExeContext)(lsmx->location);
2720 VG_(free)(msg);
sewardj4bffb232002-11-13 21:46:34 +00002721 msg = lockset_str(NULL, lsmx->lockdep);
sewardjff2c9232002-11-13 21:44:39 +00002722 VG_(message)(Vg_UserMsg, " while depending on locks %s", msg);
2723 }
2724
2725 break;
sewardj16748af2002-10-22 04:55:54 +00002726 }
sewardjff2c9232002-11-13 21:44:39 +00002727 }
2728
2729 if (msg != buf)
2730 VG_(free)(msg);
njn25e49d8e72002-09-23 09:36:25 +00002731}
2732
2733
njn810086f2002-11-14 12:42:47 +00002734Bool SK_(recognised_suppression) ( Char* name, Supp *su )
njn25e49d8e72002-09-23 09:36:25 +00002735{
2736 if (0 == VG_(strcmp)(name, "Eraser")) {
njn810086f2002-11-14 12:42:47 +00002737 VG_(set_supp_kind)(su, EraserSupp);
njn25e49d8e72002-09-23 09:36:25 +00002738 return True;
2739 } else {
2740 return False;
2741 }
2742}
2743
2744
njn810086f2002-11-14 12:42:47 +00002745Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +00002746{
2747 /* do nothing -- no extra suppression info present. Return True to
2748 indicate nothing bad happened. */
2749 return True;
2750}
2751
2752
njn810086f2002-11-14 12:42:47 +00002753Bool SK_(error_matches_suppression)(Error* err, Supp* su)
njn25e49d8e72002-09-23 09:36:25 +00002754{
njn810086f2002-11-14 12:42:47 +00002755 sk_assert(VG_(get_supp_kind) (su) == EraserSupp);
2756 sk_assert(VG_(get_error_kind)(err) == EraserErr);
njn25e49d8e72002-09-23 09:36:25 +00002757 return True;
2758}
2759
njn43c799e2003-04-08 00:08:52 +00002760extern Char* SK_(get_error_name) ( Error* err )
2761{
2762 if (EraserErr == VG_(get_error_kind)(err)) {
2763 return "Eraser";
2764 } else {
2765 return NULL; /* Other errors types can't be suppressed */
2766 }
2767}
2768
2769extern void SK_(print_extra_suppression_info) ( Error* err )
2770{
2771 /* Do nothing */
2772}
njn25e49d8e72002-09-23 09:36:25 +00002773
sewardjdca84112002-11-13 22:29:34 +00002774static void eraser_pre_mutex_lock(ThreadId tid, void* void_mutex)
2775{
2776 Mutex *mutex = get_mutex((Addr)void_mutex);
2777
2778 test_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));
2779}
2780
njn25e49d8e72002-09-23 09:36:25 +00002781static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
2782{
sewardj4bffb232002-11-13 21:46:34 +00002783 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002784 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002785 const LockSet* ls;
2786
sewardj16748af2002-10-22 04:55:54 +00002787 set_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));
2788
njn25e49d8e72002-09-23 09:36:25 +00002789# if DEBUG_LOCKS
sewardjdac0a442002-11-13 22:08:40 +00002790 VG_(printf)("lock (%u, %p)\n", tid, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002791# endif
2792
njn25e49d8e72002-09-23 09:36:25 +00002793 /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
2794# if LOCKSET_SANITY > 1
2795 sanity_check_locksets("eraser_post_mutex_lock-IN");
2796# endif
2797
sewardj4bffb232002-11-13 21:46:34 +00002798 ls = lookup_LockSet_with(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002799
sewardj4bffb232002-11-13 21:46:34 +00002800 if (ls == NULL) {
2801 LockSet *newset = add_LockSet(thread_locks[tid], mutex);
2802 insert_LockSet(newset);
2803 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002804 }
sewardj4bffb232002-11-13 21:46:34 +00002805 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002806
sewardj4bffb232002-11-13 21:46:34 +00002807 if (debug || DEBUG_LOCKS)
2808 VG_(printf)("tid %u now has lockset %p\n", tid, ls);
njn25e49d8e72002-09-23 09:36:25 +00002809
sewardj4bffb232002-11-13 21:46:34 +00002810 if (debug || LOCKSET_SANITY > 1)
2811 sanity_check_locksets("eraser_post_mutex_lock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002812}
2813
2814
2815static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
2816{
sewardjc26cc252002-10-23 21:58:55 +00002817 static const Bool debug = False;
njn25e49d8e72002-09-23 09:36:25 +00002818 Int i = 0;
sewardj39a4d842002-11-13 22:14:30 +00002819 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002820 const LockSet *ls;
2821
sewardjdca84112002-11-13 22:29:34 +00002822 test_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));
sewardj16748af2002-10-22 04:55:54 +00002823 set_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));
2824
sewardjdac0a442002-11-13 22:08:40 +00002825 if (!ismember(thread_locks[tid], mutex))
2826 return;
2827
sewardjc26cc252002-10-23 21:58:55 +00002828 if (debug || DEBUG_LOCKS)
2829 VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002830
sewardjc26cc252002-10-23 21:58:55 +00002831 if (debug || LOCKSET_SANITY > 1)
2832 sanity_check_locksets("eraser_post_mutex_unlock-IN");
njn25e49d8e72002-09-23 09:36:25 +00002833
sewardj4bffb232002-11-13 21:46:34 +00002834 ls = lookup_LockSet_without(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002835
sewardj4bffb232002-11-13 21:46:34 +00002836 if (ls == NULL) {
2837 LockSet *newset = remove_LockSet(thread_locks[tid], mutex);
2838 insert_LockSet(newset);
2839 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002840 }
2841
2842 /* Update the thread's lock vector */
sewardjc26cc252002-10-23 21:58:55 +00002843 if (debug || DEBUG_LOCKS)
sewardj4bffb232002-11-13 21:46:34 +00002844 VG_(printf)("tid %u reverts from %p to lockset %p\n",
sewardjc26cc252002-10-23 21:58:55 +00002845 tid, thread_locks[tid], i);
njn25e49d8e72002-09-23 09:36:25 +00002846
sewardj4bffb232002-11-13 21:46:34 +00002847 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002848
sewardjc26cc252002-10-23 21:58:55 +00002849 if (debug || LOCKSET_SANITY > 1)
2850 sanity_check_locksets("eraser_post_mutex_unlock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002851}
2852
2853
2854/* ---------------------------------------------------------------------
2855 Checking memory reads and writes
2856 ------------------------------------------------------------------ */
2857
2858/* Behaviour on reads and writes:
2859 *
2860 * VIR EXCL SHAR SH_MOD
2861 * ----------------------------------------------------------------
2862 * rd/wr, 1st thread | - EXCL - -
2863 * rd, new thread | - SHAR - -
2864 * wr, new thread | - SH_MOD - -
2865 * rd | error! - SHAR SH_MOD
2866 * wr | EXCL - SH_MOD SH_MOD
2867 * ----------------------------------------------------------------
2868 */
2869
sewardj8fac99a2002-11-13 22:31:26 +00002870static inline
njn25e49d8e72002-09-23 09:36:25 +00002871void dump_around_a(Addr a)
2872{
2873 UInt i;
2874 shadow_word* sword;
2875 VG_(printf)("NEARBY:\n");
2876 for (i = a - 12; i <= a + 12; i += 4) {
2877 sword = get_sword_addr(i);
2878 VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
2879 }
2880}
njn25e49d8e72002-09-23 09:36:25 +00002881
2882#if DEBUG_ACCESSES
2883 #define DEBUG_STATE(args...) \
2884 VG_(printf)("(%u) ", size), \
2885 VG_(printf)(args)
2886#else
2887 #define DEBUG_STATE(args...)
2888#endif
2889
sewardj18cd4a52002-11-13 22:37:41 +00002890static void eraser_mem_read_word(Addr a, ThreadId tid, ThreadState *tst)
2891{
sewardj72baa7a2002-12-09 23:32:58 +00002892 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00002893 shadow_word prevstate;
2894 ThreadLifeSeg *tls;
2895 const LockSet *ls;
2896 Bool statechange = False;
2897
2898 static const void *const states[4] = {
2899 [Vge_Virgin] &&st_virgin,
2900 [Vge_Excl] &&st_excl,
2901 [Vge_Shar] &&st_shar,
2902 [Vge_SharMod] &&st_sharmod,
2903 };
2904
2905 tls = thread_seg[tid];
2906 sk_assert(tls != NULL && tls->tid == tid);
2907
2908 sword = get_sword_addr(a);
2909 if (sword == SEC_MAP_ACCESS) {
2910 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
2911 return;
2912 }
2913
2914 prevstate = *sword;
2915
2916 goto *states[sword->state];
2917
2918 /* This looks like reading of unitialised memory, may be legit. Eg.
2919 * calloc() zeroes its values, so untouched memory may actually be
2920 * initialised. Leave that stuff to Valgrind. */
2921 st_virgin:
2922 if (TID_INDICATING_NONVIRGIN == sword->other) {
2923 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
2924 if (DEBUG_VIRGIN_READS)
2925 dump_around_a(a);
2926 } else {
2927 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
2928 }
2929 statechange = True;
2930 *sword = SW(Vge_Excl, packTLS(tls)); /* remember exclusive owner */
2931 tls->refcount++;
2932 goto done;
2933
2934 st_excl: {
2935 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
2936
2937 if (tls == sw_tls) {
2938 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
2939 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
2940 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
2941 } else if (tlsIsDisjoint(tls, sw_tls)) {
2942 DEBUG_STATE("Read EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
2943 statechange = True;
2944 sword->other = packTLS(tls);
2945 sw_tls->refcount--;
2946 tls->refcount++;
2947 } else {
2948 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sw_tls->tid, a, tid);
2949 sw_tls->refcount--;
2950 statechange = True;
2951 *sword = SW(Vge_Shar, packLockSet(thread_locks[tid]));
2952
2953 if (DEBUG_MEM_LOCKSET_CHANGES)
2954 print_LockSet("excl read locks", unpackLockSet(sword->other));
2955 }
2956 goto done;
2957 }
2958
2959 st_shar:
2960 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
2961 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
2962 thread_locks[tid]));
2963 statechange = sword->other != prevstate.other;
2964 goto done;
2965
2966 st_sharmod:
2967 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
2968 ls = intersect(unpackLockSet(sword->other),
2969 thread_locks[tid]);
2970 sword->other = packLockSet(ls);
2971
2972 statechange = sword->other != prevstate.other;
2973
2974 if (isempty(ls)) {
2975 record_eraser_error(tst, a, False /* !is_write */, prevstate);
2976 }
2977 goto done;
2978
2979 done:
2980 if (clo_execontext != EC_None && statechange) {
2981 EC_EIP eceip;
2982
2983 if (clo_execontext == EC_Some)
sewardjc808ef52002-11-13 22:43:26 +00002984 eceip = EIP(VG_(get_EIP)(tst), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002985 else
sewardjc808ef52002-11-13 22:43:26 +00002986 eceip = EC(VG_(get_ExeContext)(tst), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002987 setExeContext(a, eceip);
2988 }
2989}
njn25e49d8e72002-09-23 09:36:25 +00002990
sewardj0f811692002-10-22 04:59:26 +00002991static void eraser_mem_read(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00002992{
sewardj0f811692002-10-22 04:59:26 +00002993 ThreadId tid;
sewardj8fac99a2002-11-13 22:31:26 +00002994 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00002995
sewardj8fac99a2002-11-13 22:31:26 +00002996 end = ROUNDUP(a+size, 4);
2997 a = ROUNDDN(a, 4);
2998
sewardj499e3de2002-11-13 22:22:25 +00002999 if (tst == NULL)
3000 tid = VG_(get_current_tid)();
3001 else
3002 tid = VG_(get_tid_from_ThreadState)(tst);
sewardj0f811692002-10-22 04:59:26 +00003003
sewardj18cd4a52002-11-13 22:37:41 +00003004
3005 for ( ; a < end; a += 4)
3006 eraser_mem_read_word(a, tid, tst);
3007}
3008
/* Apply the Eraser write state machine to the aligned 4-byte word at
   address a, accessed by thread tid.  Like eraser_mem_read_word, but
   writes push words towards the SharMod state, where an empty lockset
   intersection is reported as a race. */
static void eraser_mem_write_word(Addr a, ThreadId tid, ThreadState *tst)
{
   ThreadLifeSeg *tls;
   shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
   shadow_word prevstate;
   Bool statechange = False;
   /* Jump table indexed by the word's Vge_* state. */
   static const void *const states[4] = {
      [Vge_Virgin] &&st_virgin,
      [Vge_Excl] &&st_excl,
      [Vge_Shar] &&st_shar,
      [Vge_SharMod] &&st_sharmod,
   };

   tls = thread_seg[tid];
   sk_assert(tls != NULL && tls->tid == tid);

   sword = get_sword_addr(a);
   if (sword == SEC_MAP_ACCESS) {
      VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
      return;
   }

   /* Snapshot the pre-access state for error reports / exe contexts. */
   prevstate = *sword;

   goto *states[sword->state];

   /* First access: becomes exclusively owned by this life segment. */
 st_virgin:
   if (TID_INDICATING_NONVIRGIN == sword->other)
      DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
   else
      DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
   statechange = True;
   *sword = SW(Vge_Excl, packTLS(tls));/* remember exclusive owner */
   tls->refcount++;
   goto done;

   /* Exclusive: stays exclusive for same/disjoint life segments, else
      goes straight to shared-modified. */
 st_excl: {
      ThreadLifeSeg *sw_tls = unpackTLS(sword->other);

      if (tls == sw_tls) {
         DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
         goto done;
      } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
         DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
         goto done;
      } else if (tlsIsDisjoint(tls, sw_tls)) {
         DEBUG_STATE("Write EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
         sword->other = packTLS(tls);
         sw_tls->refcount--;
         tls->refcount++;
         goto done;
      } else {
         DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sw_tls->tid, a, tid);
         statechange = True;
         sw_tls->refcount--;
         *sword = SW(Vge_SharMod, packLockSet(thread_locks[tid]));
         if(DEBUG_MEM_LOCKSET_CHANGES)
            print_LockSet("excl write locks", unpackLockSet(sword->other));
         goto SHARED_MODIFIED;
      }
   }

   /* Shared (read-only) written to: promote to shared-modified. */
 st_shar:
   DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
   sword->state = Vge_SharMod;
   sword->other = packLockSet(intersect(unpackLockSet(sword->other),
                                        thread_locks[tid]));
   statechange = True;
   goto SHARED_MODIFIED;

   /* Shared-modified: narrow the candidate lockset. */
 st_sharmod:
   DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
   sword->other = packLockSet(intersect(unpackLockSet(sword->other),
                                        thread_locks[tid]));
   statechange = sword->other != prevstate.other;

   /* Empty lockset on a shared-modified word means no common lock
      protects it: report a race. */
 SHARED_MODIFIED:
   if (isempty(unpackLockSet(sword->other))) {
      record_eraser_error(tst, a, True /* is_write */, prevstate);
   }
   goto done;

 done:
   /* Optionally remember where/when this word last changed state. */
   if (clo_execontext != EC_None && statechange) {
      EC_EIP eceip;

      if (clo_execontext == EC_Some)
         eceip = EIP(VG_(get_EIP)(tst), prevstate, tls);
      else
         eceip = EC(VG_(get_ExeContext)(tst), prevstate, tls);
      setExeContext(a, eceip);
   }
}
3102
sewardj0f811692002-10-22 04:59:26 +00003103static void eraser_mem_write(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00003104{
sewardj0f811692002-10-22 04:59:26 +00003105 ThreadId tid;
sewardj8fac99a2002-11-13 22:31:26 +00003106 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003107
sewardj8fac99a2002-11-13 22:31:26 +00003108 end = ROUNDUP(a+size, 4);
3109 a = ROUNDDN(a, 4);
3110
sewardj499e3de2002-11-13 22:22:25 +00003111 if (tst == NULL)
3112 tid = VG_(get_current_tid)();
3113 else
3114 tid = VG_(get_tid_from_ThreadState)(tst);
3115
sewardj18cd4a52002-11-13 22:37:41 +00003116 for ( ; a < end; a += 4)
3117 eraser_mem_write_word(a, tid, tst);
njn25e49d8e72002-09-23 09:36:25 +00003118}
3119
3120#undef DEBUG_STATE
3121
sewardja5b3aec2002-10-22 05:09:36 +00003122static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00003123{
sewardja5b3aec2002-10-22 05:09:36 +00003124 eraser_mem_read(a, 1, NULL);
sewardj7ab2aca2002-10-20 19:40:32 +00003125}
3126
sewardja5b3aec2002-10-22 05:09:36 +00003127static void eraser_mem_help_read_2(Addr a)
3128{
3129 eraser_mem_read(a, 2, NULL);
3130}
3131
/* JIT helper: record a 4-byte read at a by the current thread. */
static void eraser_mem_help_read_4(Addr a)
{
   eraser_mem_read(a, 4, NULL);
}
3136
/* JIT helper: record a size-byte read at a by the current thread. */
static void eraser_mem_help_read_N(Addr a, UInt size)
{
   eraser_mem_read(a, size, NULL);
}
3141
3142static void eraser_mem_help_write_1(Addr a, UInt val)
3143{
3144 if (*(UChar *)a != val)
3145 eraser_mem_write(a, 1, NULL);
3146}
3147static void eraser_mem_help_write_2(Addr a, UInt val)
3148{
3149 if (*(UShort *)a != val)
3150 eraser_mem_write(a, 2, NULL);
3151}
3152static void eraser_mem_help_write_4(Addr a, UInt val)
3153{
3154 if (*(UInt *)a != val)
3155 eraser_mem_write(a, 4, NULL);
3156}
/* JIT helper: record a size-byte write at a.  No old-value comparison
   is possible here (value not passed), so the write is always tracked. */
static void eraser_mem_help_write_N(Addr a, UInt size)
{
   eraser_mem_write(a, size, NULL);
}
njn25e49d8e72002-09-23 09:36:25 +00003161
sewardjc4a810d2002-11-13 22:25:51 +00003162static void hg_thread_create(ThreadId parent, ThreadId child)
3163{
3164 if (0)
3165 VG_(printf)("CREATE: %u creating %u\n", parent, child);
3166
3167 newTLS(child);
3168 addPriorTLS(child, parent);
3169
3170 newTLS(parent);
3171}
3172
/* Thread-join hook: the joiner starts a new life segment ordered after
   the joinee's final one; the joinee's segment is then cleared. */
static void hg_thread_join(ThreadId joiner, ThreadId joinee)
{
   if (0)
      VG_(printf)("JOIN: %u joining on %u\n", joiner, joinee);

   newTLS(joiner);
   addPriorTLS(joiner, joinee);

   clearTLS(joinee);
}
3183
sewardj7a5ebcf2002-11-13 22:42:13 +00003184static Int __BUS_HARDWARE_LOCK__;
3185
3186static void bus_lock(void)
3187{
3188 ThreadId tid = VG_(get_current_tid)();
3189 eraser_pre_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3190 eraser_post_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3191}
3192
3193static void bus_unlock(void)
3194{
3195 ThreadId tid = VG_(get_current_tid)();
3196 eraser_post_mutex_unlock(tid, &__BUS_HARDWARE_LOCK__);
3197}
3198
njn25e49d8e72002-09-23 09:36:25 +00003199/*--------------------------------------------------------------------*/
sewardj7f3ad222002-11-13 22:11:53 +00003200/*--- Client requests ---*/
3201/*--------------------------------------------------------------------*/
3202
3203Bool SK_(handle_client_request)(ThreadState *tst, UInt *args, UInt *ret)
3204{
3205 if (!VG_IS_SKIN_USERREQ('H','G',args[0]))
3206 return False;
3207
3208 switch(args[0]) {
3209 case VG_USERREQ__HG_CLEAN_MEMORY:
3210 set_address_range_state(args[1], args[2], Vge_VirginInit);
3211 *ret = 0; /* meaningless */
3212 break;
3213
3214 case VG_USERREQ__HG_KNOWN_RACE:
3215 set_address_range_state(args[1], args[2], Vge_Error);
3216 *ret = 0; /* meaningless */
3217 break;
3218
3219 default:
3220 return False;
3221 }
3222
3223 return True;
3224}
3225
3226
3227/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003228/*--- Setup ---*/
3229/*--------------------------------------------------------------------*/
3230
njn810086f2002-11-14 12:42:47 +00003231void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00003232{
3233 Int i;
sewardj4bffb232002-11-13 21:46:34 +00003234 LockSet *empty;
njn25e49d8e72002-09-23 09:36:25 +00003235
njn810086f2002-11-14 12:42:47 +00003236 VG_(details_name) ("Helgrind");
3237 VG_(details_version) (NULL);
3238 VG_(details_description) ("a data race detector");
3239 VG_(details_copyright_author)(
3240 "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote.");
3241 VG_(details_bug_reports_to) ("njn25@cam.ac.uk");
sewardj78210aa2002-12-01 02:55:46 +00003242 VG_(details_avg_translation_sizeB) ( 115 );
njn25e49d8e72002-09-23 09:36:25 +00003243
njn810086f2002-11-14 12:42:47 +00003244 VG_(needs_core_errors)();
3245 VG_(needs_skin_errors)();
3246 VG_(needs_data_syms)();
njn810086f2002-11-14 12:42:47 +00003247 VG_(needs_client_requests)();
3248 VG_(needs_command_line_options)();
njn25e49d8e72002-09-23 09:36:25 +00003249
njn810086f2002-11-14 12:42:47 +00003250 VG_(track_new_mem_startup) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003251
njn810086f2002-11-14 12:42:47 +00003252 /* stack ones not decided until VG_(post_clo_init)() */
njn25e49d8e72002-09-23 09:36:25 +00003253
njn810086f2002-11-14 12:42:47 +00003254 VG_(track_new_mem_brk) (& make_writable);
3255 VG_(track_new_mem_mmap) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003256
njn810086f2002-11-14 12:42:47 +00003257 VG_(track_change_mem_mprotect) (& eraser_set_perms);
njn25e49d8e72002-09-23 09:36:25 +00003258
njn810086f2002-11-14 12:42:47 +00003259 VG_(track_ban_mem_stack) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003260
njn810086f2002-11-14 12:42:47 +00003261 VG_(track_die_mem_stack) (NULL);
njn810086f2002-11-14 12:42:47 +00003262 VG_(track_die_mem_stack_signal) (NULL);
3263 VG_(track_die_mem_brk) (NULL);
3264 VG_(track_die_mem_munmap) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003265
njn810086f2002-11-14 12:42:47 +00003266 VG_(track_pre_mem_read) (& eraser_pre_mem_read);
3267 VG_(track_pre_mem_read_asciiz) (& eraser_pre_mem_read_asciiz);
3268 VG_(track_pre_mem_write) (& eraser_pre_mem_write);
3269 VG_(track_post_mem_write) (NULL);
3270
3271 VG_(track_post_thread_create) (& hg_thread_create);
3272 VG_(track_post_thread_join) (& hg_thread_join);
3273
3274 VG_(track_post_mutex_lock) (& eraser_pre_mutex_lock);
3275 VG_(track_post_mutex_lock) (& eraser_post_mutex_lock);
3276 VG_(track_post_mutex_unlock) (& eraser_post_mutex_unlock);
sewardjc4a810d2002-11-13 22:25:51 +00003277
sewardja5b3aec2002-10-22 05:09:36 +00003278 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
3279 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
3280 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
3281 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
3282
3283 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
3284 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
3285 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
3286 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00003287
sewardj7a5ebcf2002-11-13 22:42:13 +00003288 VG_(register_noncompact_helper)((Addr) & bus_lock);
3289 VG_(register_noncompact_helper)((Addr) & bus_unlock);
3290
sewardj4bffb232002-11-13 21:46:34 +00003291 for(i = 0; i < LOCKSET_HASH_SZ; i++)
3292 lockset_hash[i] = NULL;
3293
3294 empty = alloc_LockSet(0);
3295 insert_LockSet(empty);
3296 emptyset = empty;
3297
sewardjc4a810d2002-11-13 22:25:51 +00003298 /* Init lock table and thread segments */
3299 for (i = 0; i < VG_N_THREADS; i++) {
sewardjdac0a442002-11-13 22:08:40 +00003300 thread_locks[i] = empty;
njn25e49d8e72002-09-23 09:36:25 +00003301
sewardjc4a810d2002-11-13 22:25:51 +00003302 newTLS(i);
3303 }
3304
njn25e49d8e72002-09-23 09:36:25 +00003305 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00003306 hg_malloc_list = VG_(HT_construct)();
njn25e49d8e72002-09-23 09:36:25 +00003307}
3308
sewardjf6374322002-11-13 22:35:55 +00003309static Bool match_Bool(Char *arg, Char *argstr, Bool *ret)
3310{
3311 Int len = VG_(strlen)(argstr);
3312
3313 if (VG_(strncmp)(arg, argstr, len) == 0) {
3314 if (VG_(strcmp)(arg+len, "yes") == 0) {
3315 *ret = True;
3316 return True;
3317 } else if (VG_(strcmp)(arg+len, "no") == 0) {
3318 *ret = False;
3319 return True;
3320 } else
3321 VG_(bad_option)(arg);
3322 }
3323 return False;
3324}
3325
sewardj406270b2002-11-13 22:18:09 +00003326static Bool match_str(Char *arg, Char *argstr, Char **ret)
3327{
3328 Int len = VG_(strlen)(argstr);
3329
3330 if (VG_(strncmp)(arg, argstr, len) == 0) {
3331 *ret = VG_(strdup)(arg+len);
3332 return True;
3333 }
3334
3335 return False;
3336}
sewardj406270b2002-11-13 22:18:09 +00003337
3338Bool SK_(process_cmd_line_option)(Char* arg)
3339{
sewardj499e3de2002-11-13 22:22:25 +00003340 Char *str;
3341
3342 if (match_str(arg, "--show-last-access=", &str)) {
3343 Bool ok = True;
3344 if (VG_(strcmp)(str, "no") == 0)
3345 clo_execontext = EC_None;
3346 else if (VG_(strcmp)(str, "some") == 0)
3347 clo_execontext = EC_Some;
3348 else if (VG_(strcmp)(str, "all") == 0)
3349 clo_execontext = EC_All;
3350 else {
3351 ok = False;
3352 VG_(bad_option)(arg);
3353 }
3354
3355 VG_(free)(str);
3356 if (ok)
3357 return True;
3358 }
3359
sewardjf6374322002-11-13 22:35:55 +00003360 if (match_Bool(arg, "--private-stacks=", &clo_priv_stacks))
3361 return True;
3362
njn3e884182003-04-15 13:03:23 +00003363 return VG_(replacement_malloc_process_cmd_line_option)(arg);
sewardj406270b2002-11-13 22:18:09 +00003364}
3365
njn3e884182003-04-15 13:03:23 +00003366void SK_(print_usage)(void)
sewardj406270b2002-11-13 22:18:09 +00003367{
njn3e884182003-04-15 13:03:23 +00003368 VG_(printf)(
sewardje11d6c82002-12-15 02:00:41 +00003369" --private-stacks=yes|no assume thread stacks are used privately [no]\n"
3370" --show-last-access=no|some|all\n"
3371" show location of last word access on error [no]\n"
njn3e884182003-04-15 13:03:23 +00003372 );
3373 VG_(replacement_malloc_print_usage)();
sewardj406270b2002-11-13 22:18:09 +00003374}
3375
njn3e884182003-04-15 13:03:23 +00003376void SK_(print_debug_usage)(void)
3377{
3378 VG_(replacement_malloc_print_debug_usage)();
3379}
njn25e49d8e72002-09-23 09:36:25 +00003380
3381void SK_(post_clo_init)(void)
3382{
njn810086f2002-11-14 12:42:47 +00003383 void (*stack_tracker)(Addr a, UInt len);
3384
sewardj499e3de2002-11-13 22:22:25 +00003385 if (clo_execontext) {
3386 execontext_map = VG_(malloc)(sizeof(ExeContextMap *) * 65536);
3387 VG_(memset)(execontext_map, 0, sizeof(ExeContextMap *) * 65536);
3388 }
sewardjf6374322002-11-13 22:35:55 +00003389
njn810086f2002-11-14 12:42:47 +00003390 if (clo_priv_stacks)
3391 stack_tracker = & eraser_new_mem_stack_private;
3392 else
3393 stack_tracker = & eraser_new_mem_stack;
sewardjf6374322002-11-13 22:35:55 +00003394
njn810086f2002-11-14 12:42:47 +00003395 VG_(track_new_mem_stack) (stack_tracker);
njn810086f2002-11-14 12:42:47 +00003396 VG_(track_new_mem_stack_signal) (stack_tracker);
njn25e49d8e72002-09-23 09:36:25 +00003397}
3398
3399
3400void SK_(fini)(void)
3401{
sewardjdac0a442002-11-13 22:08:40 +00003402 if (DEBUG_LOCK_TABLE) {
sewardj4bffb232002-11-13 21:46:34 +00003403 pp_all_LockSets();
sewardjdac0a442002-11-13 22:08:40 +00003404 pp_all_mutexes();
3405 }
sewardj4bffb232002-11-13 21:46:34 +00003406
3407 if (LOCKSET_SANITY)
3408 sanity_check_locksets("SK_(fini)");
3409
sewardjff2c9232002-11-13 21:44:39 +00003410 VG_(message)(Vg_UserMsg, "%u possible data races found; %u lock order problems",
3411 n_eraser_warnings, n_lockorder_warnings);
sewardjf6374322002-11-13 22:35:55 +00003412
3413 if (0)
3414 VG_(printf)("stk_ld:%u+stk_st:%u = %u nonstk_ld:%u+nonstk_st:%u = %u %u%%\n",
3415 stk_ld, stk_st, stk_ld + stk_st,
3416 nonstk_ld, nonstk_st, nonstk_ld + nonstk_st,
3417 ((stk_ld+stk_st)*100) / (stk_ld + stk_st + nonstk_ld + nonstk_st));
njn25e49d8e72002-09-23 09:36:25 +00003418}
3419
3420/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00003421/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00003422/*--------------------------------------------------------------------*/