blob: e85d15f08f810a0ca7b6ef61ad54f714476d3eb7 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- Helgrind: checking for data races in threaded programs. ---*/
njn25cac76cb2002-09-23 11:21:57 +00004/*--- hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00005/*--------------------------------------------------------------------*/
6
7/*
njnc9539842002-10-02 13:26:35 +00008 This file is part of Helgrind, a Valgrind skin for detecting
9 data races in threaded programs.
njn25e49d8e72002-09-23 09:36:25 +000010
njn0e1b5142003-04-15 14:58:06 +000011 Copyright (C) 2002-2003 Nicholas Nethercote
njn25e49d8e72002-09-23 09:36:25 +000012 njn25@cam.ac.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "vg_skin.h"
sewardj7f3ad222002-11-13 22:11:53 +000033#include "helgrind.h"
njn25e49d8e72002-09-23 09:36:25 +000034
njn27f1a382002-11-08 15:48:16 +000035VG_DETERMINE_INTERFACE_VERSION
njn25e49d8e72002-09-23 09:36:25 +000036
37static UInt n_eraser_warnings = 0;
sewardjff2c9232002-11-13 21:44:39 +000038static UInt n_lockorder_warnings = 0;
njn25e49d8e72002-09-23 09:36:25 +000039
40/*------------------------------------------------------------*/
41/*--- Debug guff ---*/
42/*------------------------------------------------------------*/
43
sewardje11d6c82002-12-15 02:00:41 +000044#define DEBUG_LOCK_TABLE 0 /* Print lock table at end */
njn25e49d8e72002-09-23 09:36:25 +000045
46#define DEBUG_MAKE_ACCESSES 0 /* Print make_access() calls */
47#define DEBUG_LOCKS 0 /* Print lock()/unlock() calls and locksets */
48#define DEBUG_NEW_LOCKSETS 0 /* Print new locksets when created */
49#define DEBUG_ACCESSES 0 /* Print reads, writes */
50#define DEBUG_MEM_LOCKSET_CHANGES 0
51 /* Print when an address's lockset
52 changes; only useful with
53 DEBUG_ACCESSES */
sewardj8fac99a2002-11-13 22:31:26 +000054#define SLOW_ASSERTS 0 /* do expensive asserts */
njn25e49d8e72002-09-23 09:36:25 +000055#define DEBUG_VIRGIN_READS 0 /* Dump around address on VIRGIN reads */
56
sewardj8fac99a2002-11-13 22:31:26 +000057#if SLOW_ASSERTS
58#define SK_ASSERT(x) sk_assert(x)
59#else
60#define SK_ASSERT(x)
61#endif
62
njn25e49d8e72002-09-23 09:36:25 +000063/* heavyweight LockSet sanity checking:
64 0 == never
65 1 == after important ops
66 2 == As 1 and also after pthread_mutex_* ops (excessively slow)
67 */
68#define LOCKSET_SANITY 0
69
/* Rotate an unsigned quantity left.  NB: n must be in 1..width-1;
   n == 0 would shift right by the full width, which is undefined
   behaviour in C. */
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x)*8)-(n))))

/* Round a up to the next multiple of N.  N must be a power of 2.
   All macro arguments are parenthesized so expression arguments
   (e.g. ROUNDUP(x, 1<<2)) expand correctly. */
#define ROUNDUP(a, N) (((a) + (N) - 1) & ~((N)-1))

/* Round a down to the next multiple of N.  N must be a power of 2 */
#define ROUNDDN(a, N) ((a) & ~((N)-1))
njn25e49d8e72002-09-23 09:36:25 +000078
79/*------------------------------------------------------------*/
sewardjf6374322002-11-13 22:35:55 +000080/*--- Command line options ---*/
81/*------------------------------------------------------------*/
82
83static enum {
84 EC_None,
85 EC_Some,
86 EC_All
87} clo_execontext = EC_None;
88
sewardje1a39f42002-12-15 01:56:17 +000089static Bool clo_priv_stacks = False;
sewardjf6374322002-11-13 22:35:55 +000090
91/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +000092/*--- Crude profiling machinery. ---*/
93/*------------------------------------------------------------*/
94
95// PPP: work out if I want this
96
97#define PROF_EVENT(x)
98#if 0
99#ifdef VG_PROFILE_MEMORY
100
101#define N_PROF_EVENTS 150
102
103static UInt event_ctr[N_PROF_EVENTS];
104
105void VGE_(done_prof_mem) ( void )
106{
107 Int i;
108 for (i = 0; i < N_PROF_EVENTS; i++) {
109 if ((i % 10) == 0)
110 VG_(printf)("\n");
111 if (event_ctr[i] > 0)
112 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
113 }
114 VG_(printf)("\n");
115}
116
117#define PROF_EVENT(ev) \
njne427a662002-10-02 11:08:25 +0000118 do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
njn25e49d8e72002-09-23 09:36:25 +0000119 event_ctr[ev]++; \
120 } while (False);
121
122#else
123
124//static void init_prof_mem ( void ) { }
125// void VG_(done_prof_mem) ( void ) { }
126
127#define PROF_EVENT(ev) /* */
128
129#endif /* VG_PROFILE_MEMORY */
130
131/* Event index. If just the name of the fn is given, this means the
132 number of calls to the fn. Otherwise it is the specified event.
133
134 [PPP: snip event numbers...]
135*/
136#endif /* 0 */
137
138
139/*------------------------------------------------------------*/
140/*--- Data defns. ---*/
141/*------------------------------------------------------------*/
142
njn3e884182003-04-15 13:03:23 +0000143typedef
144 struct _HG_Chunk {
145 struct _HG_Chunk* next;
146 Addr data; /* ptr to actual block */
147 UInt size; /* size requested */
148 ExeContext* where; /* where it was allocated */
149 ThreadId tid; /* allocating thread */
150 }
151 HG_Chunk;
152
njn25e49d8e72002-09-23 09:36:25 +0000153typedef enum
sewardj7f3ad222002-11-13 22:11:53 +0000154 { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit, Vge_Error }
njn25e49d8e72002-09-23 09:36:25 +0000155 VgeInitStatus;
156
sewardjc808ef52002-11-13 22:43:26 +0000157
njn25e49d8e72002-09-23 09:36:25 +0000158/* Should add up to 32 to fit in one word */
159#define OTHER_BITS 30
160#define STATE_BITS 2
161
162#define ESEC_MAP_WORDS 16384 /* Words per secondary map */
163
164/* This is for indicating that a memory block has been initialised but not
165 * really directly by a particular thread... (eg. text/data initialised
166 * automatically at startup).
167 * Must be different to virgin_word.other */
168#define TID_INDICATING_NONVIRGIN 1
169
sewardjc4a810d2002-11-13 22:25:51 +0000170/* Magic packed TLS used for error suppression; if word state is Excl
171 and tid is this, then it means all access are OK without changing
172 state and without raising any more errors */
173#define TLSP_INDICATING_ALL ((1 << OTHER_BITS) - 1)
sewardj16748af2002-10-22 04:55:54 +0000174
njn25e49d8e72002-09-23 09:36:25 +0000175/* Number of entries must fit in STATE_BITS bits */
176typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;
177
sewardjc808ef52002-11-13 22:43:26 +0000178static inline const Char *pp_state(pth_state st)
179{
180 const Char *ret;
181
182 switch(st) {
183 case Vge_Virgin: ret = "virgin"; break;
184 case Vge_Excl: ret = "exclusive"; break;
185 case Vge_Shar: ret = "shared RO"; break;
186 case Vge_SharMod: ret = "shared RW"; break;
187 default: ret = "???";
188 }
189 return ret;
190}
191
njn25e49d8e72002-09-23 09:36:25 +0000192typedef
193 struct {
sewardj8fac99a2002-11-13 22:31:26 +0000194 /* gcc arranges this bitfield with state in the 2LSB and other
195 in the 30MSB, which is what we want */
njn25e49d8e72002-09-23 09:36:25 +0000196 UInt state:STATE_BITS;
sewardj8fac99a2002-11-13 22:31:26 +0000197 UInt other:OTHER_BITS;
njn25e49d8e72002-09-23 09:36:25 +0000198 } shadow_word;
199
sewardj8fac99a2002-11-13 22:31:26 +0000200#define SW(st, other) ((shadow_word) { st, other })
201
njn25e49d8e72002-09-23 09:36:25 +0000202typedef
203 struct {
204 shadow_word swords[ESEC_MAP_WORDS];
205 }
206 ESecMap;
207
208static ESecMap* primary_map[ 65536 ];
209static ESecMap distinguished_secondary_map;
210
sewardj8fac99a2002-11-13 22:31:26 +0000211static const shadow_word virgin_sword = SW(Vge_Virgin, 0);
212static const shadow_word error_sword = SW(Vge_Excl, TLSP_INDICATING_ALL);
njn25e49d8e72002-09-23 09:36:25 +0000213
214#define VGE_IS_DISTINGUISHED_SM(smap) \
215 ((smap) == &distinguished_secondary_map)
216
217#define ENSURE_MAPPABLE(addr,caller) \
218 do { \
219 if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
220 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
221 /*VG_(printf)("new 2map because of %p\n", addr);*/ \
222 } \
223 } while(0)
224
225
sewardjc808ef52002-11-13 22:43:26 +0000226/* Parallel map which contains execution contexts when words last
227 changed state (if required) */
sewardj499e3de2002-11-13 22:22:25 +0000228
sewardjc808ef52002-11-13 22:43:26 +0000229typedef struct EC_EIP {
230 union u_ec_eip {
231 Addr eip;
232 ExeContext *ec;
sewardj72baa7a2002-12-09 23:32:58 +0000233 } uu_ec_eip;
sewardjc808ef52002-11-13 22:43:26 +0000234 UInt state:STATE_BITS;
235 UInt tls:OTHER_BITS; /* packed TLS */
sewardj499e3de2002-11-13 22:22:25 +0000236} EC_EIP;
237
sewardjc808ef52002-11-13 22:43:26 +0000238#define NULL_EC_EIP ((EC_EIP){ { 0 }, 0, 0})
239
240#define EIP(eip, prev, tls) ((EC_EIP) { (union u_ec_eip)(eip), (prev).state, packTLS(tls) })
241#define EC(ec, prev, tls) ((EC_EIP) { (union u_ec_eip)(ec), (prev).state, packTLS(tls) })
242
/* Pack an ExeContext pointer into the OTHER_BITS-wide payload of a
   word.  ExeContext pointers are assumed to be at least 4-byte
   aligned, so the bottom STATE_BITS (2) bits are zero and can be
   shifted away without loss. */
static inline UInt packEC(ExeContext *ec)
{
   SK_ASSERT(((UInt)ec & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)ec) >> STATE_BITS;
}

/* Inverse of packEC. */
static inline ExeContext *unpackEC(UInt i)
{
   return (ExeContext *)(i << STATE_BITS);
}

/* Lose 2 LSB of eip */
static inline UInt packEIP(Addr eip)
{
   return ((UInt)eip) >> STATE_BITS;
}

/* Inverse of packEIP; the 2 discarded low bits come back as zero. */
static inline Addr unpackEIP(UInt i)
{
   return (Addr)(i << STATE_BITS);
}
sewardj499e3de2002-11-13 22:22:25 +0000264
265typedef struct {
266 EC_EIP execontext[ESEC_MAP_WORDS];
267} ExeContextMap;
268
269static ExeContextMap** execontext_map;
270
/* Record the execution context for the word containing address a.
   The two-level map mirrors the shadow-memory layout: bits 31..16
   select the secondary map, bits 15..2 the word within it.
   Secondary maps are allocated lazily and zero-filled. */
static inline void setExeContext(Addr a, EC_EIP ec)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >> 2) & 0x3fff;

   if (execontext_map[idx] == NULL) {
      execontext_map[idx] = VG_(malloc)(sizeof(ExeContextMap));
      VG_(memset)(execontext_map[idx], 0, sizeof(ExeContextMap));
   }

   execontext_map[idx]->execontext[off] = ec;
}
283
284static inline EC_EIP getExeContext(Addr a)
285{
286 UInt idx = (a >> 16) & 0xffff;
287 UInt off = (a >> 2) & 0x3fff;
sewardjc808ef52002-11-13 22:43:26 +0000288 EC_EIP ec = NULL_EC_EIP;
sewardj499e3de2002-11-13 22:22:25 +0000289
290 if (execontext_map[idx] != NULL)
291 ec = execontext_map[idx]->execontext[off];
292
293 return ec;
294}
295
njn25e49d8e72002-09-23 09:36:25 +0000296/*------------------------------------------------------------*/
sewardjc4a810d2002-11-13 22:25:51 +0000297/*--- Thread lifetime segments ---*/
298/*------------------------------------------------------------*/
299
300/*
301 * This mechanism deals with the common case of a parent thread
302 * creating a structure for a child thread, and then passing ownership
303 * of the structure to that thread. It similarly copes with a child
304 * thread passing information back to another thread waiting to join
305 * on it.
306 *
307 * Each thread's lifetime can be partitioned into segments. Those
308 * segments are arranged to form an interference graph which indicates
309 * whether two thread lifetime segments can possibly be concurrent.
310 * If not, then memory with is exclusively accessed by one TLS can be
311 * passed on to another TLS without an error occuring, and without
312 * moving it from Excl state.
313 *
314 * At present this only considers thread creation and join as
315 * synchronisation events for creating new lifetime segments, but
316 * others may be possible (like mutex operations).
317 */
318
319typedef struct _ThreadLifeSeg ThreadLifeSeg;
320
321struct _ThreadLifeSeg {
322 ThreadId tid;
323 ThreadLifeSeg *prior[2]; /* Previous lifetime segments */
324 UInt refcount; /* Number of memory locations pointing here */
325 UInt mark; /* mark used for graph traversal */
326 ThreadLifeSeg *next; /* list of all TLS */
327};
328
329static ThreadLifeSeg *all_tls;
330static UInt tls_since_gc;
331#define TLS_SINCE_GC 10000
332
333/* current mark used for TLS graph traversal */
334static UInt tlsmark;
335
336static ThreadLifeSeg *thread_seg[VG_N_THREADS];
337
338
/* Garbage-collect unused thread lifetime segments.  Currently a
   stub: it only announces itself. */
static void tls_gc(void)
{
   /* XXX later.  Walk through all TLSs and look for ones with 0
      refcount and remove them from the structure and free them.
      Could probably get rid of ThreadLifeSeg.refcount and simply use
      mark-sweep from the shadow table. */
   VG_(printf)("WRITEME: TLS GC\n");
}
347
/* Start a new lifetime segment for thread tid.  Three cases:
   1. tid has no TLS yet: allocate a fresh one with no priors.
   2. tid's current TLS has refcount 0 (no shadow word points at it):
      reuse it in place.
   3. Otherwise allocate a new TLS whose prior[0] is the old one,
      preserving the happens-before edge.
   New TLSs are pushed onto the all_tls list; an allocation counter
   triggers tls_gc() every TLS_SINCE_GC allocations. */
static void newTLS(ThreadId tid)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls;

   /* Initial NULL */
   if (thread_seg[tid] == NULL) {
      tls = VG_(malloc)(sizeof(*tls));
      tls->tid = tid;
      tls->prior[0] = tls->prior[1] = NULL;
      tls->refcount = 0;
      /* mark != tlsmark so the segment is not treated as visited */
      tls->mark = tlsmark-1;

      tls->next = all_tls;
      all_tls = tls;
      tls_since_gc++;

      thread_seg[tid] = tls;
      return;
   }

   /* Previous TLS was unused, so just recycle */
   if (thread_seg[tid]->refcount == 0) {
      if (debug)
         VG_(printf)("newTLS; recycling TLS %p for tid %u\n",
                     thread_seg[tid], tid);
      return;
   }

   /* Use existing TLS for this tid as a prior for new TLS */
   tls = VG_(malloc)(sizeof(*tls));
   tls->tid = tid;
   tls->prior[0] = thread_seg[tid];
   tls->prior[1] = NULL;
   tls->refcount = 0;
   tls->mark = tlsmark-1;

   tls->next = all_tls;
   all_tls = tls;
   if (++tls_since_gc > TLS_SINCE_GC) {
      tls_gc();
      tls_since_gc = 0;
   }

   if (debug)
      VG_(printf)("newTLS: made new TLS %p for tid %u (prior %p(%u))\n",
                  tls, tid, tls->prior[0], tls->prior[0]->tid);

   thread_seg[tid] = tls;
}
398
/* clear out a TLS for a thread that's died: start a fresh segment
   and sever its happens-before links, since the tid slot may be
   recycled for an unrelated thread. */
static void clearTLS(ThreadId tid)
{
   newTLS(tid);

   thread_seg[tid]->prior[0] = NULL;
   thread_seg[tid]->prior[1] = NULL;
}
407
/* Record that thread prior's current lifetime segment happens-before
   tid's current segment (used at thread create/join).  Each TLS has
   room for at most two priors; the second slot must be free. */
static void addPriorTLS(ThreadId tid, ThreadId prior)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls = thread_seg[tid];

   if (debug)
      VG_(printf)("making TLS %p(%u) prior to TLS %p(%u)\n",
                  thread_seg[prior], prior, tls, tid);

   sk_assert(thread_seg[tid] != NULL);
   sk_assert(thread_seg[prior] != NULL);

   if (tls->prior[0] == NULL)
      tls->prior[0] = thread_seg[prior];
   else {
      /* both slots occupied would be a logic error */
      sk_assert(tls->prior[1] == NULL);
      tls->prior[1] = thread_seg[prior];
   }
}
427
/* Return True if prior is definitely not concurrent with tls, i.e.
   prior is reachable from tls through the prior[] graph.  Uses a
   GCC nested function (isPrior) for the recursive DFS; tlsmark is
   bumped to get a fresh visited-mark for this traversal, so no
   per-node cleanup is needed. */
static Bool tlsIsDisjoint(const ThreadLifeSeg *tls,
                          const ThreadLifeSeg *prior)
{
   Bool isPrior(const ThreadLifeSeg *t) {
      /* NULL edge or already visited this traversal */
      if (t == NULL || t->mark == tlsmark)
         return False;

      if (t == prior)
         return True;

      /* cast away const to stamp the visited mark */
      ((ThreadLifeSeg *)t)->mark = tlsmark;

      return isPrior(t->prior[0]) || isPrior(t->prior[1]);
   }
   tlsmark++;			/* new traversal mark */

   return isPrior(tls);
}
447
/* Pack a ThreadLifeSeg pointer into the OTHER_BITS payload of a
   shadow word.  Relies on the pointer being 4-byte aligned so the
   bottom STATE_BITS bits are zero. */
static inline UInt packTLS(ThreadLifeSeg *tls)
{
   SK_ASSERT(((UInt)tls & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)tls) >> STATE_BITS;
}

/* Inverse of packTLS. */
static inline ThreadLifeSeg *unpackTLS(UInt i)
{
   return (ThreadLifeSeg *)(i << STATE_BITS);
}
458
459/*------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +0000460/*--- Low-level support for memory tracking. ---*/
461/*------------------------------------------------------------*/
462
463/*
464 All reads and writes are recorded in the memory map, which
465 records the state of all memory in the process. The memory map is
466 organised like that for normal Valgrind, except each that everything
467 is done at word-level instead of byte-level, and each word has only
468 one word of shadow (instead of 36 bits).
469
470 As for normal Valgrind there is a distinguished secondary map. But we're
471 working at word-granularity, so it has 16k word entries instead of 64k byte
472 entries. Lookup is done as follows:
473
474 bits 31..16: primary map lookup
475 bits 15.. 2: secondary map lookup
476 bits 1.. 0: ignored
477*/
478
479
480/*------------------------------------------------------------*/
481/*--- Basic bitmap management, reading and writing. ---*/
482/*------------------------------------------------------------*/
483
484/* Allocate and initialise a secondary map, marking all words as virgin. */
485
486/* Just a value that isn't a real pointer */
487#define SEC_MAP_ACCESS (shadow_word*)0x99
488
489
/* Allocate a secondary shadow map and mark every word in it virgin.
   Memory comes from Valgrind's mmap pool, so it is never freed. */
static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt i;
   //PROF_EVENT(10); PPP

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. (SSS: not true for ESecMaps -- they're 16 pages) */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(ESecMap), caller );

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}
508
509
/* Set a word.  The byte given by 'a' could be anywhere in the word --
 * the whole word gets set.  Also maintains the TLS refcounts: if the
 * old word was in Excl state its TLS loses a reference, and if the
 * new word is Excl its TLS gains one (TLSP_INDICATING_ALL is the
 * "any thread" sentinel and owns no TLS). */
static __inline__
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;
   shadow_word *oldsw;

   //PROF_EVENT(23); PPP
   /* lazily materialise the secondary map for this 64K region */
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   oldsw = &sm->swords[(a & 0xFFFC) >> 2];
   /* drop the old word's TLS reference, if it held one */
   if (oldsw->state == Vge_Excl && oldsw->other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(oldsw->other);
      tls->refcount--;
   }

   /* take a reference for the new word, if it needs one */
   if (sword.state == Vge_Excl && sword.other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(sword.other);
      tls->refcount++;
   }

   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}
543
544
/* Return the address of the shadow word for address a, or the
   SEC_MAP_ACCESS sentinel if a falls in an unmaterialised
   (distinguished) secondary map.  Callers must check for the
   sentinel before dereferencing. */
static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm = primary_map[a >> 16];
   UInt sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}
562
563
564// SSS: rename these so they're not so similar to memcheck, unless it's
565// appropriate of course
566
/* Reset a's shadow word to virgin, clearing any recorded execution
   context as well (when context tracking is enabled). */
static __inline__
void init_virgin_sword(Addr a)
{
   if (clo_execontext != EC_None)
      setExeContext(a, NULL_EC_EIP);
   set_sword(a, virgin_sword);
}
574
/* Mark a's shadow word with the error sentinel (Excl +
   TLSP_INDICATING_ALL): all further accesses are allowed without
   raising more errors. */
static __inline__
void init_error_sword(Addr a)
{
   set_sword(a, error_sword);
}
njn25e49d8e72002-09-23 09:36:25 +0000580
/* Mark a as exclusively owned by the current (or most recent)
   thread's lifetime segment. */
static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();
   ThreadLifeSeg *tls;

   sk_assert(tid != VG_INVALID_THREADID);
   tls = thread_seg[tid];

   sword = SW(Vge_Excl, packTLS(tls));
   set_sword(a, sword);
}
594
595
596/* In this case, we treat it for Eraser's sake like virgin (it hasn't
597 * been inited by a particular thread, it's just done automatically upon
598 * startup), but we mark its .state specially so it doesn't look like an
599 * uninited read. */
600static __inline__
601void init_magically_inited_sword(Addr a)
602{
603 shadow_word sword;
604
sewardjb52a1b02002-10-23 21:38:22 +0000605 sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());
sewardj8fac99a2002-11-13 22:31:26 +0000606
607 sword = SW(Vge_Virgin, TID_INDICATING_NONVIRGIN);
608
njn25e49d8e72002-09-23 09:36:25 +0000609 set_sword(a, virgin_sword);
610}
611
sewardjc26cc252002-10-23 21:58:55 +0000612
sewardj274c6012002-10-22 04:54:55 +0000613/*------------------------------------------------------------*/
sewardjc26cc252002-10-23 21:58:55 +0000614/*--- Implementation of lock sets. ---*/
sewardj274c6012002-10-22 04:54:55 +0000615/*------------------------------------------------------------*/
616
sewardj39a4d842002-11-13 22:14:30 +0000617typedef struct _Mutex Mutex; /* forward decl */
sewardj4bffb232002-11-13 21:46:34 +0000618typedef struct _LockSet LockSet;
619
sewardj16748af2002-10-22 04:55:54 +0000620typedef enum MutexState {
621 MxUnknown, /* don't know */
622 MxUnlocked, /* unlocked */
623 MxLocked, /* locked */
624 MxDead /* destroyed */
625} MutexState;
626
sewardj39a4d842002-11-13 22:14:30 +0000627struct _Mutex {
sewardjdac0a442002-11-13 22:08:40 +0000628 Addr mutexp;
sewardj39a4d842002-11-13 22:14:30 +0000629 Mutex *next;
sewardj16748af2002-10-22 04:55:54 +0000630
631 MutexState state; /* mutex state */
632 ThreadId tid; /* owner */
633 ExeContext *location; /* where the last change happened */
sewardj274c6012002-10-22 04:54:55 +0000634
sewardj4bffb232002-11-13 21:46:34 +0000635 const LockSet *lockdep; /* set of locks we depend on */
sewardjc26cc252002-10-23 21:58:55 +0000636 UInt mark; /* mark for graph traversal */
637};
sewardj16748af2002-10-22 04:55:54 +0000638
sewardj39a4d842002-11-13 22:14:30 +0000639static inline Int mutex_cmp(const Mutex *a, const Mutex *b)
sewardj4bffb232002-11-13 21:46:34 +0000640{
sewardjdac0a442002-11-13 22:08:40 +0000641 return a->mutexp - b->mutexp;
sewardj4bffb232002-11-13 21:46:34 +0000642}
njn25e49d8e72002-09-23 09:36:25 +0000643
sewardj274c6012002-10-22 04:54:55 +0000644struct _LockSet {
sewardj4bffb232002-11-13 21:46:34 +0000645 UInt setsize; /* number of members */
646 UInt hash; /* hash code */
647 LockSet *next; /* next in hash chain */
sewardj39a4d842002-11-13 22:14:30 +0000648 const Mutex *mutex[0]; /* locks */
sewardj274c6012002-10-22 04:54:55 +0000649};
sewardj4bffb232002-11-13 21:46:34 +0000650
651static const LockSet *emptyset;
njn25e49d8e72002-09-23 09:36:25 +0000652
653/* Each one is an index into the lockset table. */
sewardj4bffb232002-11-13 21:46:34 +0000654static const LockSet *thread_locks[VG_N_THREADS];
njn25e49d8e72002-09-23 09:36:25 +0000655
sewardjdac0a442002-11-13 22:08:40 +0000656#define LOCKSET_HASH_SZ 1021
njn25e49d8e72002-09-23 09:36:25 +0000657
sewardj4bffb232002-11-13 21:46:34 +0000658static LockSet *lockset_hash[LOCKSET_HASH_SZ];
njn25e49d8e72002-09-23 09:36:25 +0000659
sewardj4bffb232002-11-13 21:46:34 +0000660/* Pack and unpack a LockSet pointer into shadow_word.other */
sewardj8fac99a2002-11-13 22:31:26 +0000661static inline UInt packLockSet(const LockSet *p)
njn25e49d8e72002-09-23 09:36:25 +0000662{
sewardj4bffb232002-11-13 21:46:34 +0000663 UInt id;
664
sewardj8fac99a2002-11-13 22:31:26 +0000665 SK_ASSERT(((UInt)p & ((1 << STATE_BITS)-1)) == 0);
sewardj4bffb232002-11-13 21:46:34 +0000666 id = ((UInt)p) >> STATE_BITS;
667
668 return id;
njn25e49d8e72002-09-23 09:36:25 +0000669}
670
sewardj8fac99a2002-11-13 22:31:26 +0000671static inline const LockSet *unpackLockSet(UInt id)
njn25e49d8e72002-09-23 09:36:25 +0000672{
sewardj4bffb232002-11-13 21:46:34 +0000673 return (LockSet *)(id << STATE_BITS);
njn25e49d8e72002-09-23 09:36:25 +0000674}
675
/* Print a lockset as "{ addr name ... }".  %(y is Valgrind's printf
   extension which symbolises an address where possible. */
static
void pp_LockSet(const LockSet* p)
{
   int i;

   VG_(printf)("{ ");
   for(i = 0; i < p->setsize; i++) {
      const Mutex *mx = p->mutex[i];

      VG_(printf)("%p%(y ", mx->mutexp, mx->mutexp);
   }
   VG_(printf)("}\n");
}
689
690
/* Print a labelled lockset: "<s>: { ... }". */
static void print_LockSet(const Char *s, const LockSet *ls)
{
   VG_(printf)("%s: ", s);
   pp_LockSet(ls);
}
696
/* Compute the hash of a LockSet, optionally as if `with' were added
   and/or `without' were removed, WITHOUT building the modified set.
   Relies on the set being sorted by mutex_cmp: `with' is merged into
   the scan at its sorted position, `without' is skipped.  The result
   is already reduced mod LOCKSET_HASH_SZ. */
static inline UInt hash_LockSet_w_wo(const LockSet *ls,
                                     const Mutex *with,
                                     const Mutex *without)
{
   UInt i;
   /* seed with the effective set size */
   UInt hash = ls->setsize + (with != NULL) - (without != NULL);

   sk_assert(with == NULL || with != without);

   /* loop continues past the end while `with' is still unmerged */
   for(i = 0; with != NULL || i < ls->setsize; i++) {
      const Mutex *mx = i >= ls->setsize ? NULL : ls->mutex[i];

      if (without && mutex_cmp(without, mx) == 0)
         continue;

      /* `with' sorts before the current element: hash it now and
         revisit the current element on the next iteration */
      if (with && (mx == NULL || mutex_cmp(with, mx) < 0)) {
         mx = with;
         with = NULL;
         i--;
      }

      hash = ROTL(hash, 17);
      hash ^= (UInt)mx->mutexp;
   }

   return hash % LOCKSET_HASH_SZ;
}
725
sewardj39a4d842002-11-13 22:14:30 +0000726static inline UInt hash_LockSet_with(const LockSet *ls, const Mutex *with)
sewardj4bffb232002-11-13 21:46:34 +0000727{
728 UInt hash = hash_LockSet_w_wo(ls, with, NULL);
729
730 if (0)
731 VG_(printf)("hash_with %p+%p -> %d\n", ls, with->mutexp, hash);
732
733 return hash;
734}
735
sewardj39a4d842002-11-13 22:14:30 +0000736static inline UInt hash_LockSet_without(const LockSet *ls, const Mutex *without)
sewardj4bffb232002-11-13 21:46:34 +0000737{
738 UInt hash = hash_LockSet_w_wo(ls, NULL, without);
739
740 if (0)
741 VG_(printf)("hash_with %p-%p -> %d\n", ls, without->mutexp, hash);
742
743 return hash;
744}
745
746static inline UInt hash_LockSet(const LockSet *ls)
747{
748 UInt hash = hash_LockSet_w_wo(ls, NULL, NULL);
749
750 if (0)
751 VG_(printf)("hash %p -> %d\n", ls, hash);
752
753 return hash;
754}
755
756static
757Bool structural_eq_LockSet(const LockSet* a, const LockSet* b)
njn25e49d8e72002-09-23 09:36:25 +0000758{
759 Int i;
njn25e49d8e72002-09-23 09:36:25 +0000760
sewardj4bffb232002-11-13 21:46:34 +0000761 if (a == b)
762 return True;
763 if (a->setsize != b->setsize)
764 return False;
njn25e49d8e72002-09-23 09:36:25 +0000765
sewardj4bffb232002-11-13 21:46:34 +0000766 for(i = 0; i < a->setsize; i++) {
767 if (mutex_cmp(a->mutex[i], b->mutex[i]) != 0)
njn25e49d8e72002-09-23 09:36:25 +0000768 return False;
njn25e49d8e72002-09-23 09:36:25 +0000769 }
770
sewardj4bffb232002-11-13 21:46:34 +0000771 return True;
njn25e49d8e72002-09-23 09:36:25 +0000772}
773
774
/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion. Returns True if they match.
 * Both sets are sorted; b should equal a with missing_mutex spliced
 * in at its sorted position, so |b| must be |a|+1 and the scan runs
 * in three phases around the splice point.
 */
static Bool
weird_LockSet_equals(const LockSet* a, const LockSet* b,
                     const Mutex *missing_mutex)
{
   static const Bool debug = False;
   Int ia, ib;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet(" b", b);
      VG_(printf)( " missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
   }

   /* fast path: sizes must differ by exactly one */
   if ((a->setsize + 1) != b->setsize) {
      if (debug)
         VG_(printf)(" fastpath length mismatch -> 0\n");
      return False;
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing mutex itself
      3 the section after missing_mutex to the end of a
    */

   ia = 0;
   ib = 0;

   /* 1: up to missing_mutex */
   for(; ia < a->setsize && mutex_cmp(a->mutex[ia], missing_mutex) < 0; ia++, ib++) {
      if (debug) {
         print_LockSet(" 1:a", a);
         print_LockSet(" 1:b", b);
      }
      if (ib == b->setsize || mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   /* 2: missing_mutex itself */
   if (debug) {
      VG_(printf)( " 2:missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet(" 2: b", b);
   }

   /* phase 1 stopped at (or past) the splice point */
   sk_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);

   if (ib == b->setsize || mutex_cmp(missing_mutex, b->mutex[ib]) != 0)
      return False;

   ib++;

   /* 3: after missing_mutex to end */

   for(; ia < a->setsize && ib < b->setsize; ia++, ib++) {
      if (debug) {
         print_LockSet(" 3:a", a);
         print_LockSet(" 3:b", b);
      }
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   /* match only if both scans were fully consumed */
   if (debug)
      VG_(printf)(" ia=%d ib=%d --> %d\n", ia, ib, ia == a->setsize && ib == b->setsize);

   return ia == a->setsize && ib == b->setsize;
}
850
851
852
853static const LockSet *lookup_LockSet(const LockSet *set)
854{
855 UInt bucket = set->hash;
856 LockSet *ret;
857
858 for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
859 if (set == ret || structural_eq_LockSet(set, ret))
860 return ret;
861
862 return NULL;
863}
864
sewardj39a4d842002-11-13 22:14:30 +0000865static const LockSet *lookup_LockSet_with(const LockSet *set, Mutex *mutex)
sewardj4bffb232002-11-13 21:46:34 +0000866{
867 UInt bucket = hash_LockSet_with(set, mutex);
868 const LockSet *ret;
869
870 for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
871 if (weird_LockSet_equals(set, ret, mutex))
872 return ret;
873
874 return NULL;
875}
876
sewardj39a4d842002-11-13 22:14:30 +0000877static const LockSet *lookup_LockSet_without(const LockSet *set, Mutex *mutex)
sewardj4bffb232002-11-13 21:46:34 +0000878{
879 UInt bucket = hash_LockSet_without(set, mutex);
880 const LockSet *ret;
881
882 for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
883 if (weird_LockSet_equals(ret, set, mutex))
884 return ret;
885
886 return NULL;
887}
888
889static void insert_LockSet(LockSet *set)
890{
891 UInt hash = hash_LockSet(set);
892
893 set->hash = hash;
894
895 sk_assert(lookup_LockSet(set) == NULL);
896
897 set->next = lockset_hash[hash];
898 lockset_hash[hash] = set;
899}
900
901static inline
902LockSet *alloc_LockSet(UInt setsize)
903{
sewardj39a4d842002-11-13 22:14:30 +0000904 LockSet *ret = VG_(malloc)(sizeof(*ret) + sizeof(Mutex *) * setsize);
sewardj4bffb232002-11-13 21:46:34 +0000905 ret->setsize = setsize;
906 return ret;
907}
908
/* Release a LockSet that was never inserted into (or has been removed
   from) the hash table. */
static inline
void free_LockSet(LockSet *p)
{
   /* assert: not present in hash */
   VG_(free)(p);
}
915
/* Debug dump: print every LockSet in the hash table, one bucket per
   group, then summary counts of sets and occupied buckets. */
static
void pp_all_LockSets ( void )
{
   Int i;
   Int sets, buckets;

   sets = buckets = 0;
   for (i = 0; i < LOCKSET_HASH_SZ; i++) {
      const LockSet *ls = lockset_hash[i];
      Bool first = True;

      for(; ls != NULL; ls = ls->next) {
	 if (first) {
	    /* first set in a non-empty bucket: print the bucket index */
	    buckets++;
	    VG_(printf)("[%4d] = ", i);
	 } else
	    VG_(printf)("         ");

	 sets++;
	 first = False;
	 pp_LockSet(ls);
      }
   }

   VG_(printf)("%d distinct LockSets in %d buckets\n", sets, buckets);
}
942
943static inline Bool isempty(const LockSet *ls)
944{
945 return ls == NULL || ls->setsize == 0;
946}
947
sewardj39a4d842002-11-13 22:14:30 +0000948static Bool ismember(const LockSet *ls, const Mutex *mx)
sewardj4bffb232002-11-13 21:46:34 +0000949{
950 Int i;
951
952 /* XXX use binary search */
953 for(i = 0; i < ls->setsize; i++)
954 if (mutex_cmp(mx, ls->mutex[i]) == 0)
955 return True;
956
957 return False;
958}
959
960/* Check invariants:
961 - all locksets are unique
962 - each set is an array in strictly increasing order of mutex addr
963*/
964static
965void sanity_check_locksets ( const Char* caller )
966{
967 Int i;
968 const Char *badness;
969 LockSet *ls;
970
971 for(i = 0; i < LOCKSET_HASH_SZ; i++) {
972
973 for(ls = lockset_hash[i]; ls != NULL; ls = ls->next) {
sewardj39a4d842002-11-13 22:14:30 +0000974 const Mutex *prev;
sewardj4bffb232002-11-13 21:46:34 +0000975 Int j;
976
977 if (hash_LockSet(ls) != ls->hash) {
978 badness = "mismatched hash";
979 goto bad;
980 }
981 if (ls->hash != i) {
982 badness = "wrong bucket";
983 goto bad;
984 }
985 if (lookup_LockSet(ls) != ls) {
986 badness = "non-unique set";
987 goto bad;
988 }
989
990 prev = ls->mutex[0];
991 for(j = 1; j < ls->setsize; j++) {
992 if (mutex_cmp(prev, ls->mutex[j]) >= 0) {
993 badness = "mutexes out of order";
994 goto bad;
995 }
996 }
997 }
998 }
999 return;
1000
1001 bad:
1002 VG_(printf)("sanity_check_locksets: "
1003 "i = %d, ls=%p badness = %s, caller = %s\n",
1004 i, ls, badness, caller);
1005 pp_all_LockSets();
1006 VG_(skin_panic)("sanity_check_locksets");
1007}
1008
/* Build a new LockSet equal to ls with mx inserted at its sorted
   position.  mx must not already be a member (checked assertion).
   The result is freshly allocated and NOT entered into the hash
   table -- that is the caller's responsibility. */
static
LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("add-IN mutex %p%(y\n", mx->mutexp, mx->mutexp);
      print_LockSet("add-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("add-IN");

   sk_assert(!ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize+1);

   /* single pass: copy ls, slotting mx in just before the first
      member that sorts after it; mx is set to NULL once placed so it
      is inserted exactly once */
   for(i = j = 0; i < ls->setsize; i++) {
      if (debug)
	 VG_(printf)("i=%d j=%d ls->mutex[i]=%p mx=%p\n",
		     i, j, ls->mutex[i]->mutexp, mx ? mx->mutexp : 0);
      if (mx && mutex_cmp(mx, ls->mutex[i]) < 0) {
	 ret->mutex[j++] = mx;
	 mx = NULL;
      }
      ret->mutex[j++] = ls->mutex[i];
   }

   /* not added in loop - must be after (mx sorts after all members) */
   if (mx)
      ret->mutex[j++] = mx;

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("add-OUT", ret);
      sanity_check_locksets("add-OUT");
   }
   return ret;
}
1051
1052/* Builds ls with mx removed. mx should actually be in ls!
1053 (a checked assertion). Resulting set should not already
1054 exist in the table (unchecked).
1055*/
1056static
sewardj39a4d842002-11-13 22:14:30 +00001057LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
sewardj4bffb232002-11-13 21:46:34 +00001058{
1059 static const Bool debug = False;
1060 LockSet *ret = NULL;
1061 Int i, j;
1062
1063 if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
1064 print_LockSet("remove-IN", ls);
1065 }
1066
1067 if (debug || LOCKSET_SANITY)
1068 sanity_check_locksets("remove-IN");
1069
1070 sk_assert(ismember(ls, mx));
1071
1072 ret = alloc_LockSet(ls->setsize-1);
1073
1074 for(i = j = 0; i < ls->setsize; i++) {
1075 if (mutex_cmp(ls->mutex[i], mx) == 0)
1076 continue;
1077 ret->mutex[j++] = ls->mutex[i];
1078 }
1079
1080 sk_assert(j == ret->setsize);
1081
1082 if (debug || LOCKSET_SANITY) {
1083 print_LockSet("remove-OUT", ret);
1084 sanity_check_locksets("remove-OUT");
1085 }
1086 return ret;
njn25e49d8e72002-09-23 09:36:25 +00001087}
1088
1089
/* Builds the intersection, and then unbuilds it if it's already in the table.
   Slow path for intersect(): both a and b are non-empty and distinct.
   Returns the canonical (hash-table resident) set for a ∩ b.
 */
static const LockSet *_intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("intersect-IN");

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("intersect a", a);
      print_LockSet("intersect b", b);
   }

   /* count the size of the new set: two-pointer walk over the two
      sorted arrays, counting only the common members */
   size = 0;
   ia = ib = 0;
   for(size = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
	 size++;
	 ia++;
	 ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
	 ia++;
      } else {
	 sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
	 ib++;
      }
   }

   /* Build the intersection of the two sets: same walk again, this
      time storing the common members (order is preserved, so the
      result is sorted too) */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
	 sk_assert(iret < ret->setsize);
	 ret->mutex[iret++] = a->mutex[ia];
	 ia++;
	 ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
	 ia++;
      } else {
	 sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
	 ib++;
      }
   }

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      /* already canonicalised: discard our copy */
      free_LockSet(ret);
   } else {
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("intersect-OUT", found);
      sanity_check_locksets("intersect-OUT");
   }

   return found;
}
1160
sewardj4bffb232002-11-13 21:46:34 +00001161/* inline the fastpath */
1162static inline const LockSet *intersect(const LockSet *a, const LockSet *b)
sewardjc26cc252002-10-23 21:58:55 +00001163{
sewardj4bffb232002-11-13 21:46:34 +00001164 static const Bool debug = False;
sewardjc26cc252002-10-23 21:58:55 +00001165
1166 /* Fast case -- when the two are the same */
sewardj4bffb232002-11-13 21:46:34 +00001167 if (a == b) {
1168 if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
1169 print_LockSet("intersect-same fastpath", a);
sewardjc26cc252002-10-23 21:58:55 +00001170 }
sewardj4bffb232002-11-13 21:46:34 +00001171 return a;
sewardjc26cc252002-10-23 21:58:55 +00001172 }
1173
sewardj4bffb232002-11-13 21:46:34 +00001174 if (isempty(a) || isempty(b)) {
1175 if (debug)
1176 VG_(printf)("intersect empty fastpath\n");
1177 return emptyset;
1178 }
1179
1180 return _intersect(a, b);
1181}
1182
1183
/* Return the canonical (hash-table resident) set for a ∪ b.  Fast
   paths: identical operands, or either operand empty.  Otherwise a
   two-pass sorted merge, canonicalised through the hash table. */
static const LockSet *ls_union(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("union-IN");

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
	 print_LockSet("union-same fastpath", a);
      }
      return a;
   }

   if (isempty(a)) {
      if (debug)
	 print_LockSet("union a=empty b", b);
      return b;
   }
   if (isempty(b)) {
      if (debug)
	 print_LockSet("union b=empty a", a);
      return a;
   }

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* count the size of the new set: merge walk; cmp is forced to
      1/-1 once either array is exhausted so the remainder of the
      other array is counted */
   for(size = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;

      if ((ia < a->setsize) && (ib < b->setsize))
	 cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
	 cmp = 1;
      else
	 cmp = -1;

      if (cmp == 0) {
	 size++;
	 ia++;
	 ib++;
      } else if (cmp < 0) {
	 size++;
	 ia++;
      } else {
	 sk_assert(cmp > 0);
	 size++;
	 ib++;
      }
   }

   /* Build the union of the two sets (was mislabelled "intersection"
      -- this is the merge pass, keeping members from both sides) */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;
      sk_assert(iret < ret->setsize);

      if ((ia < a->setsize) && (ib < b->setsize))
	 cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
	 cmp = 1;
      else
	 cmp = -1;

      if (cmp == 0) {
	 ret->mutex[iret++] = a->mutex[ia];
	 ia++;
	 ib++;
      } else if (cmp < 0) {
	 ret->mutex[iret++] = a->mutex[ia];
	 ia++;
      } else {
	 sk_assert(cmp > 0);
	 ret->mutex[iret++] = b->mutex[ib];
	 ib++;
      }
   }

   sk_assert(iret == ret->setsize);

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      if (debug)
	 print_LockSet("union found existing set", found);
      free_LockSet(ret);
   } else {
      if (debug)
	 print_LockSet("union inserting new set", ret);
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("union-OUT", found);
      sanity_check_locksets("union-OUT");
   }

   return found;
}
1297
1298/*------------------------------------------------------------*/
sewardjdac0a442002-11-13 22:08:40 +00001299/*--- Implementation of mutex structure. ---*/
1300/*------------------------------------------------------------*/
sewardjc26cc252002-10-23 21:58:55 +00001301
1302static UInt graph_mark; /* current mark we're using for graph traversal */
1303
sewardj39a4d842002-11-13 22:14:30 +00001304static void record_mutex_error(ThreadId tid, Mutex *mutex,
sewardjc26cc252002-10-23 21:58:55 +00001305 Char *str, ExeContext *ec);
sewardj39a4d842002-11-13 22:14:30 +00001306static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
sewardj4bffb232002-11-13 21:46:34 +00001307 const LockSet *lockset_holding,
1308 const LockSet *lockset_prev);
sewardjc26cc252002-10-23 21:58:55 +00001309
sewardj39a4d842002-11-13 22:14:30 +00001310static void set_mutex_state(Mutex *mutex, MutexState state,
sewardjdac0a442002-11-13 22:08:40 +00001311 ThreadId tid, ThreadState *tst);
1312
1313#define M_MUTEX_HASHSZ 1021
1314
sewardj39a4d842002-11-13 22:14:30 +00001315static Mutex *mutex_hash[M_MUTEX_HASHSZ];
sewardjdac0a442002-11-13 22:08:40 +00001316static UInt total_mutexes;
1317
1318static const Char *pp_MutexState(MutexState st)
1319{
1320 switch(st) {
1321 case MxLocked: return "Locked";
1322 case MxUnlocked: return "Unlocked";
1323 case MxDead: return "Dead";
1324 case MxUnknown: return "Unknown";
1325 }
1326 return "???";
1327}
1328
/* Debug dump: print every tracked Mutex, grouped by hash bucket, with
   its state and client address, then summary counts. */
static void pp_all_mutexes()
{
   Int i;
   Int locks, buckets;

   locks = buckets = 0;
   for(i = 0; i < M_MUTEX_HASHSZ; i++) {
      Mutex *mx;
      Bool first = True;

      for(mx = mutex_hash[i]; mx != NULL; mx = mx->next) {
	 if (first) {
	    /* first entry of a non-empty bucket: print bucket index */
	    buckets++;
	    VG_(printf)("[%4d] = ", i);
	 } else
	    VG_(printf)("         ");
	 locks++;
	 first = False;
	 VG_(printf)("%p [%8s] -> %p%(y\n",
		     mx, pp_MutexState(mx->state), mx->mutexp, mx->mutexp);
      }
   }

   VG_(printf)("%d locks in %d buckets (%d allocated)\n",
	       locks, buckets, total_mutexes);
}
sewardjc26cc252002-10-23 21:58:55 +00001355
sewardj39a4d842002-11-13 22:14:30 +00001356/* find or create a Mutex for a program's mutex use */
1357static Mutex *get_mutex(Addr mutexp)
sewardjc26cc252002-10-23 21:58:55 +00001358{
1359 UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
sewardj39a4d842002-11-13 22:14:30 +00001360 Mutex *mp;
sewardjc26cc252002-10-23 21:58:55 +00001361
1362 for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
1363 if (mp->mutexp == mutexp)
1364 return mp;
1365
sewardjdac0a442002-11-13 22:08:40 +00001366 total_mutexes++;
1367
sewardjc26cc252002-10-23 21:58:55 +00001368 mp = VG_(malloc)(sizeof(*mp));
1369 mp->mutexp = mutexp;
1370 mp->next = mutex_hash[bucket];
1371 mutex_hash[bucket] = mp;
1372
1373 mp->state = MxUnknown;
1374 mp->tid = VG_INVALID_THREADID;
1375 mp->location = NULL;
1376
sewardj4bffb232002-11-13 21:46:34 +00001377 mp->lockdep = emptyset;
sewardjc26cc252002-10-23 21:58:55 +00001378 mp->mark = graph_mark - 1;
1379
1380 return mp;
1381}
1382
sewardjdac0a442002-11-13 22:08:40 +00001383/* Find all mutexes in a range of memory, and call the callback.
1384 Remove the mutex from the hash if the callback returns True (mutex
1385 structure itself is not freed, because it may be pointed to by a
1386 LockSet. */
sewardj39a4d842002-11-13 22:14:30 +00001387static void find_mutex_range(Addr start, Addr end, Bool (*action)(Mutex *))
sewardjc26cc252002-10-23 21:58:55 +00001388{
sewardjdac0a442002-11-13 22:08:40 +00001389 UInt first = start % M_MUTEX_HASHSZ;
1390 UInt last = (end+1) % M_MUTEX_HASHSZ;
1391 UInt i;
1392
1393 /* Single pass over the hash table, looking for likely hashes */
1394 for(i = first; i != last; ) {
sewardj39a4d842002-11-13 22:14:30 +00001395 Mutex *mx;
1396 Mutex **prev = &mutex_hash[i];
sewardjdac0a442002-11-13 22:08:40 +00001397
1398 for(mx = mutex_hash[i]; mx != NULL; prev = &mx->next, mx = mx->next) {
1399 if (mx->mutexp >= start && mx->mutexp < end && (*action)(mx))
1400 *prev = mx->next;
1401 }
1402
1403 if (++i == M_MUTEX_HASHSZ)
1404 i = 0;
sewardjc26cc252002-10-23 21:58:55 +00001405 }
sewardjc26cc252002-10-23 21:58:55 +00001406}
1407
/* Marks for the lock-graph DFS below.  Bumping graph_mark by 2 before
   each traversal invalidates all old marks at once. */
#define MARK_LOOP (graph_mark+0)
#define MARK_DONE (graph_mark+1)

/* Depth-first search of the lock-order graph (mutex -> its lockdep
   set), starting from `start` through `lockset`.  Returns True iff a
   cycle is reachable, i.e. a lock-order violation.  Uses a GNU C
   nested function; marks are written through a const-cast since the
   traversal only mutates the bookkeeping field. */
static Bool check_cycle(const Mutex *start, const LockSet* lockset)
{
   Bool check_cycle_inner(const Mutex *mutex, const LockSet *ls)
   {
      static const Bool debug = False;
      Int i;

      if (mutex->mark == MARK_LOOP)
	 return True;		/* found cycle: node is on current DFS path */
      if (mutex->mark == MARK_DONE)
	 return False;		/* been here before, its OK */

      ((Mutex*)mutex)->mark = MARK_LOOP;

      if (debug)
	 VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
		     graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
      for(i = 0; i < ls->setsize; i++) {
	 const Mutex *mx = ls->mutex[i];

	 if (debug)
	    VG_(printf)("   %y ls=%p (ls->mutex=%p%(y)\n",
			mutex->mutexp, ls,
			mx->mutexp, mx->mutexp);
	 if (check_cycle_inner(mx, mx->lockdep))
	    return True;
      }
      ((Mutex*)mutex)->mark = MARK_DONE;

      return False;
   }

   graph_mark += 2;		/* clear all marks */

   return check_cycle_inner(start, lockset);
}
1447
/* test to see if a mutex state change would be problematic; this
   makes no changes to the mutex state.  This should be called before
   the locking thread has actually blocked.

   Note: the mutex's `state` field is untouched, but on a clean
   MxLocked transition the lock-dependency set (mutex->lockdep) IS
   updated here, via ls_union with the thread's current lockset. */
static void test_mutex_state(Mutex *mutex, MutexState state,
			     ThreadId tid, ThreadState *tst)
{
   static const Bool debug = False;

   if (mutex->state == MxDead) {
      Char *str;

      switch(state) {
      case MxLocked: str = "lock dead mutex"; break;
      case MxUnlocked: str = "unlock dead mutex"; break;
      default: str = "operate on dead mutex"; break;
      }

      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex, str, mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      /* existing lockdep graph must already be acyclic */
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
	 print_LockSet("thread holding", thread_locks[tid]);

      /* would taking this mutex, while holding thread_locks[tid],
	 close a cycle in the lock-order graph? */
      if (check_cycle(mutex, thread_locks[tid]))
	 record_lockgraph_error(tid, mutex, thread_locks[tid], mutex->lockdep);
      else {
	 mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

	 if (debug) {
	    VG_(printf)("giving mutex %p%(y lockdep = %p ",
			mutex->mutexp, mutex->mutexp, mutex->lockdep);
	    print_LockSet("lockdep", mutex->lockdep);
	 }
      }
      break;

   case MxUnlocked:
      if (debug)
	 print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked) {
	 record_mutex_error(tid, mutex,
			    "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
	 record_mutex_error(tid, mutex,
			    "unlock someone else's mutex", mutex->location);
      }
      break;

   case MxDead:
      /* destruction errors are reported by set_mutex_state() */
      break;

   default:
      break;
   }
}
1511
/* Update a mutex state.  Expects most error testing and reporting to
   have happened in test_mutex_state().  The assumption is that no
   client code is run by thread tid between test and set, either
   because it is blocked or test and set are called together
   atomically.

   Setting state to MxDead is the exception, since that can happen as
   a result of any thread freeing memory; in this case set_mutex_state
   does all the error reporting as well.
*/
static void set_mutex_state(Mutex *mutex, MutexState state,
			    ThreadId tid, ThreadState *tst)
{
   static const Bool debug = False;

   if (debug)
      VG_(printf)("\ntid %d changing mutex (%p)->%p%(y state %s -> %s\n",
		  tid, mutex, mutex->mutexp, mutex->mutexp,
		  pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      return;
   }

   switch(state) {
   case MxLocked:
      /* double-lock should have been caught by the core already */
      if (mutex->state == MxLocked) {
	 if (mutex->tid != tid)
	    record_mutex_error(tid, mutex, "take lock held by someone else",
			       mutex->location);
	 else
	    record_mutex_error(tid, mutex, "take lock we already hold",
			       mutex->location);

	 VG_(skin_panic)("core should have checked this\n");
	 break;
      }

      sk_assert(!check_cycle(mutex, mutex->lockdep));

      mutex->tid = tid;
      break;

   case MxUnlocked:
      if (debug)
	 print_LockSet("thread holding", thread_locks[tid]);

      /* bogus unlocks were reported by test_mutex_state(); leave the
	 owner untouched */
      if (mutex->state != MxLocked || mutex->tid != tid)
	 break;

      mutex->tid = VG_INVALID_THREADID;
      break;

   case MxDead:
      if (mutex->state == MxLocked) {
	 /* forcably remove offending lock from thread's lockset */
	 sk_assert(ismember(thread_locks[mutex->tid], mutex));
	 thread_locks[mutex->tid] = remove_LockSet(thread_locks[mutex->tid], mutex);
	 mutex->tid = VG_INVALID_THREADID;

	 record_mutex_error(tid, mutex,
			    "free locked mutex", mutex->location);
      }
      break;

   default:
      break;
   }

   /* record where and into which state the mutex last changed */
   mutex->location = VG_(get_ExeContext)(tst);
   mutex->state = state;
}
njn25e49d8e72002-09-23 09:36:25 +00001585
1586/*------------------------------------------------------------*/
1587/*--- Setting and checking permissions. ---*/
1588/*------------------------------------------------------------*/
1589
/* Set the shadow (Eraser) state of every 4-byte word overlapping
   [a, a+len) to the given initial status, after unlinking any dead
   Mutex records that lived in the range (memory being recycled). */
static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr end;

   /* only clean up dead mutexes */
   Bool cleanmx(Mutex *mx) {
      return mx->state == MxDead;
   }


#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range state: large range %d",
                   len);

   VGP_PUSHCC(VgpSARP);

   /* Remove mutexes in recycled memory range from hash */
   find_mutex_range(a, a+len, cleanmx);

   /* Memory block may not be aligned or a whole word multiple. In neat cases,
    * we have to init len/4 words (len is in bytes). In nasty cases, it's
    * len/4+1 words. This works out which it is by aligning the block and
    * seeing if the end byte is in the same word as it is for the unaligned
    * block; if not, it's the awkward case. */
   end = ROUNDUP(a + len, 4);
   a   = ROUNDDN(a, 4);

   /* Do it ... */
   switch (status) {
   case Vge_VirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_virgin_sword(a);
      }
      break;

   case Vge_NonVirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_nonvirgin_sword(a);
      }
      break;

   case Vge_SegmentInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_magically_inited_sword(a);
      }
      break;

   case Vge_Error:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_error_sword(a);
      }
      break;

   default:
      VG_(printf)("init_status = %u\n", status);
      VG_(skin_panic)("Unexpected Vge_InitStatus");
   }

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with
      __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSARP);
}
1670
1671
/* Mark an mmap'd segment readable: shadow words get the
   "magically initialised" state. */
static void make_segment_readable ( Addr a, UInt len )
{
   //PROF_EVENT(??); PPP
   set_address_range_state ( a, len, Vge_SegmentInit );
}

/* Reset [a, a+len) to the virgin state (freshly writable). */
static void make_writable ( Addr a, UInt len )
{
   //PROF_EVENT(36); PPP
   set_address_range_state( a, len, Vge_VirginInit );
}

/* Mark [a, a+len) readable.  NOTE(review): this uses Vge_VirginInit,
   making it identical to make_writable() rather than using a
   non-virgin state -- appears deliberate, but confirm against the
   change history. */
static void make_readable ( Addr a, UInt len )
{
   //PROF_EVENT(37); PPP
   set_address_range_state( a, len, Vge_VirginInit );
}
1689
1690
njn25e49d8e72002-09-23 09:36:25 +00001691/* Block-copy states (needed for implementing realloc()). */
1692static void copy_address_range_state(Addr src, Addr dst, UInt len)
1693{
1694 UInt i;
1695
1696 //PROF_EVENT(40); PPP
1697 for (i = 0; i < len; i += 4) {
1698 shadow_word sword = *(get_sword_addr ( src+i ));
1699 //PROF_EVENT(41); PPP
1700 set_sword ( dst+i, sword );
1701 }
1702}
1703
1704// SSS: put these somewhere better
sewardj0f811692002-10-22 04:59:26 +00001705static void eraser_mem_read (Addr a, UInt data_size, ThreadState *tst);
1706static void eraser_mem_write(Addr a, UInt data_size, ThreadState *tst);
sewardja5b3aec2002-10-22 05:09:36 +00001707
1708#define REGPARM(x) __attribute__((regparm (x)))
1709
1710static void eraser_mem_help_read_1(Addr a) REGPARM(1);
1711static void eraser_mem_help_read_2(Addr a) REGPARM(1);
1712static void eraser_mem_help_read_4(Addr a) REGPARM(1);
1713static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);
1714
1715static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
1716static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
1717static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
1718static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);
njn25e49d8e72002-09-23 09:36:25 +00001719
sewardj7a5ebcf2002-11-13 22:42:13 +00001720static void bus_lock(void);
1721static void bus_unlock(void);
1722
/* Core callback: the core is about to read [base, base+size) on
   behalf of thread tst; feed it to the race detector as a read. */
static
void eraser_pre_mem_read(CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   eraser_mem_read(base, size, tst);
}

/* Core callback: about to read a NUL-terminated string at base; the
   checked length is strlen(base) (terminator not included). */
static
void eraser_pre_mem_read_asciiz(CorePart part, ThreadState* tst,
                                Char* s, UInt base )
{
   eraser_mem_read(base, VG_(strlen)((Char*)base), tst);
}

/* Core callback: about to write [base, base+size) on behalf of
   thread tst; feed it to the race detector as a write. */
static
void eraser_pre_mem_write(CorePart part, ThreadState* tst,
                          Char* s, UInt base, UInt size )
{
   eraser_mem_write(base, size, tst);
}
1743
1744
1745
/* Core callback: a segment mapped at program startup. */
static
void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   make_segment_readable(a, len);
}
1752
1753
1754static
1755void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
1756{
1757 if (is_inited) {
1758 make_readable(a, len);
1759 } else {
1760 make_writable(a, len);
1761 }
1762}
1763
1764static
1765void eraser_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +00001766 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +00001767{
1768 if (rr) make_readable(a, len);
1769 else if (ww) make_writable(a, len);
1770 /* else do nothing */
1771}
1772
/* Core callback: new thread-private stack memory; initialised to the
   non-virgin shadow state. */
static
void eraser_new_mem_stack_private(Addr a, UInt len)
{
   set_address_range_state(a, len, Vge_NonVirginInit);
}
1778
/* Core callback: new (shared-visible) stack memory; reset to the
   virgin shadow state. */
static
void eraser_new_mem_stack(Addr a, UInt len)
{
   set_address_range_state(a, len, Vge_VirginInit);
}
njn25e49d8e72002-09-23 09:36:25 +00001784
1785/*--------------------------------------------------------------*/
1786/*--- Initialise the memory audit system on program startup. ---*/
1787/*--------------------------------------------------------------*/
1788
1789static
1790void init_shadow_memory(void)
1791{
1792 Int i;
1793
1794 for (i = 0; i < ESEC_MAP_WORDS; i++)
1795 distinguished_secondary_map.swords[i] = virgin_sword;
1796
1797 /* These entries gradually get overwritten as the used address
1798 space expands. */
1799 for (i = 0; i < 65536; i++)
1800 primary_map[i] = &distinguished_secondary_map;
1801}
1802
1803
njn3e884182003-04-15 13:03:23 +00001804/*------------------------------------------------------------*/
1805/*--- malloc() et al replacements ---*/
1806/*------------------------------------------------------------*/
1807
njnb4aee052003-04-15 14:09:58 +00001808static VgHashTable hg_malloc_list = NULL;
njn3e884182003-04-15 13:03:23 +00001809
1810#define N_FREED_CHUNKS 2
1811static Int freechunkptr = 0;
1812static HG_Chunk *freechunks[N_FREED_CHUNKS];
1813
1814/* Use a small redzone (paranoia) */
1815UInt VG_(vg_malloc_redzone_szB) = 4;
1816
1817
1818/* Allocate a user-chunk of size bytes. Also allocate its shadow
1819 block, make the shadow block point at the user block. Put the
1820 shadow chunk on the appropriate list, and set all memory
1821 protections correctly. */
1822
1823static void add_HG_Chunk ( ThreadState* tst, Addr p, UInt size )
1824{
1825 HG_Chunk* hc;
1826
1827 hc = VG_(malloc)(sizeof(HG_Chunk));
1828 hc->data = p;
1829 hc->size = size;
1830 hc->where = VG_(get_ExeContext)(tst);
1831 hc->tid = VG_(get_tid_from_ThreadState)(tst);
1832
1833 VG_(HT_add_node)( hg_malloc_list, (VgHashNode*)hc );
1834}
1835
/* Allocate memory and note change in memory available: get a client
   block from the core allocator, shadow it with an HG_Chunk, and set
   its initial Eraser state. */
static __inline__
void* alloc_and_new_mem ( ThreadState* tst, UInt size, UInt alignment,
                          Bool is_zeroed )
{
   Addr p;

   /* NOTE(review): the result of VG_(cli_malloc) is recorded without
      a NULL check -- confirm the core aborts (rather than returning
      NULL) on exhaustion. */
   p = (Addr)VG_(cli_malloc)(alignment, size);
   add_HG_Chunk ( tst, p, size );
   eraser_new_mem_heap( p, size, is_zeroed );

   return (void*)p;
}
1849
/* Tool replacement for client malloc(): default alignment, contents
   not zero-initialised. */
void* SK_(malloc) ( ThreadState* tst, Int n )
{
   return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
}
1854
/* Tool replacement for C++ operator new: same as malloc here. */
void* SK_(__builtin_new) ( ThreadState* tst, Int n )
{
   return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
}
1859
/* Tool replacement for C++ operator new[]: same as malloc here. */
void* SK_(__builtin_vec_new) ( ThreadState* tst, Int n )
{
   return alloc_and_new_mem ( tst, n, VG_(clo_alignment), /*is_zeroed*/False );
}
1864
/* Tool replacement for memalign(): caller-specified alignment. */
void* SK_(memalign) ( ThreadState* tst, Int align, Int n )
{
   return alloc_and_new_mem ( tst, n, align, /*is_zeroed*/False );
}
1869
1870void* SK_(calloc) ( ThreadState* tst, Int nmemb, Int size1 )
1871{
1872 void* p;
1873 Int size, i;
1874
1875 size = nmemb * size1;
1876
1877 p = alloc_and_new_mem ( tst, size, VG_(clo_alignment), /*is_zeroed*/True );
1878 for (i = 0; i < size; i++) /* calloc() is zeroed */
1879 ((UChar*)p)[i] = 0;
1880 return p;
1881}
1882
/* Retire the heap block shadowed by 'hc': unlink it from
   hg_malloc_list, stamp it with the freeing context, park it in the
   small freed-blocks quarantine (evicting and truly freeing the
   oldest entry), and mark any mutexes living inside it dead.
   'prev_chunks_next_ptr' points at the hash-chain link slot that
   currently holds hc, so no second lookup is needed. */
static
void die_and_free_mem ( ThreadState* tst, HG_Chunk* hc,
                        HG_Chunk** prev_chunks_next_ptr )
{
   ThreadId tid = VG_(get_tid_from_ThreadState)(tst);
   Addr start = hc->data;
   Addr end = start + hc->size;

   /* GCC nested function: needs lexical access to tid/tst.  Invoked
      by find_mutex_range for each mutex in [start,end); returns False
      -- presumably "keep iterating", confirm against
      find_mutex_range. */
   Bool deadmx(Mutex *mx) {
      if (mx->state != MxDead)
         set_mutex_state(mx, MxDead, tid, tst);

      return False;
   }

   /* Remove hc from the malloclist using prev_chunks_next_ptr to
      avoid repeating the hash table lookup.  Can't remove until at least
      after free and free_mismatch errors are done because they use
      describe_addr() which looks for it in malloclist. */
   *prev_chunks_next_ptr = hc->next;

   /* Record where freed */
   hc->where = VG_(get_ExeContext) ( tst );

   /* maintain a small window so that the error reporting machinery
      knows about this memory */
   if (freechunks[freechunkptr] != NULL) {
      /* Evict the oldest quarantined block: release both the client
         memory and its HG_Chunk shadow record. */
      HG_Chunk* sc1 = freechunks[freechunkptr];
      VG_(cli_free) ( (void*)(sc1->data) );
      VG_(free) ( sc1 );
   }

   freechunks[freechunkptr] = hc;

   /* Advance the round-robin quarantine cursor. */
   if (++freechunkptr == N_FREED_CHUNKS)
      freechunkptr = 0;

   /* mark all mutexes in range dead */
   find_mutex_range(start, end, deadmx);
}
1924
1925
/* Common implementation for free()/delete/delete[]: look the pointer
   up in hg_malloc_list and retire the block.  NB: a pointer we never
   tracked is silently ignored -- no bad-free diagnostic is issued. */
static __inline__
void handle_free ( ThreadState* tst, void* p )
{
   HG_Chunk* hc;
   HG_Chunk** prev_chunks_next_ptr;

   /* Also retrieves the chain slot pointing at hc, for O(1) unlink in
      die_and_free_mem. */
   hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
                                      (VgHashNode***)&prev_chunks_next_ptr );
   if (hc == NULL) {
      return;
   }
   die_and_free_mem ( tst, hc, prev_chunks_next_ptr );
}
1939
/* Tool replacement for client free(). */
void SK_(free) ( ThreadState* tst, void* p )
{
   handle_free(tst, p);
}
1944
/* Tool replacement for C++ operator delete. */
void SK_(__builtin_delete) ( ThreadState* tst, void* p )
{
   handle_free(tst, p);
}
1949
/* Tool replacement for C++ operator delete[]. */
void SK_(__builtin_vec_delete) ( ThreadState* tst, void* p )
{
   handle_free(tst, p);
}
1954
/* Tool replacement for realloc().  Shrinking is done in place by just
   trimming the recorded size; growing allocates a new block, carries
   the old bytes and their shadow state across, then retires the old
   block.  Returns NULL if 'p' is not a tracked block -- presumably
   the core screens out realloc(NULL,...) before we get here; confirm
   against the core's malloc interception. */
void* SK_(realloc) ( ThreadState* tst, void* p, Int new_size )
{
   HG_Chunk  *hc;
   HG_Chunk **prev_chunks_next_ptr;
   UInt       i;

   /* First try and find the block. */
   hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
                                       (VgHashNode***)&prev_chunks_next_ptr );

   if (hc == NULL) {
      return NULL;
   }

   if (hc->size == new_size) {
      /* size unchanged */
      return p;

   } else if (hc->size > new_size) {
      /* new size is smaller.  NB: only the bookkeeping size shrinks;
         the trimmed tail keeps its old shadow state. */
      hc->size = new_size;
      return p;

   } else {
      /* new size is bigger */
      Addr p_new;

      /* Get new memory */
      p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);

      /* First half kept and copied, second half new */
      copy_address_range_state( (Addr)p, p_new, hc->size );
      eraser_new_mem_heap ( p_new+hc->size, new_size-hc->size,
                            /*inited*/False );

      /* Copy from old to new */
      for (i = 0; i < hc->size; i++)
         ((UChar*)p_new)[i] = ((UChar*)p)[i];

      /* Free old memory */
      die_and_free_mem ( tst, hc, prev_chunks_next_ptr );

      /* this has to be after die_and_free_mem, otherwise the
         former succeeds in shorting out the new block, not the
         old, in the case when both are on the same list. */
      add_HG_Chunk ( tst, p_new, new_size );

      return (void*)p_new;
   }
}
2005
njn25e49d8e72002-09-23 09:36:25 +00002006/*--------------------------------------------------------------*/
2007/*--- Machinery to support sanity checking ---*/
2008/*--------------------------------------------------------------*/
2009
2010/* Check that nobody has spuriously claimed that the first or last 16
2011 pages (64 KB) of address space have become accessible. Failure of
2012 the following do not per se indicate an internal consistency
2013 problem, but they are so likely to that we really want to know
2014 about it if so. */
2015
2016Bool SK_(cheap_sanity_check) ( void )
2017{
sewardjd5815ec2003-04-06 12:23:27 +00002018 if (VGE_IS_DISTINGUISHED_SM(primary_map[0])
2019 /* kludge: kernel drops a page up at top of address range for
2020 magic "optimized syscalls", so we can no longer check the
2021 highest page */
2022 /* && VGE_IS_DISTINGUISHED_SM(primary_map[65535]) */
2023 )
njn25e49d8e72002-09-23 09:36:25 +00002024 return True;
2025 else
2026 return False;
2027}
2028
2029
2030Bool SK_(expensive_sanity_check)(void)
2031{
2032 Int i;
2033
2034 /* Make sure nobody changed the distinguished secondary. */
2035 for (i = 0; i < ESEC_MAP_WORDS; i++)
2036 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
2037 distinguished_secondary_map.swords[i].state != virgin_sword.state)
2038 return False;
2039
2040 return True;
2041}
2042
2043
2044/*--------------------------------------------------------------*/
2045/*--- Instrumentation ---*/
2046/*--------------------------------------------------------------*/
2047
/* Instrumentation statistics: loads/stores classified as
   stack-relative (left uninstrumented) vs. everything else. */
static UInt stk_ld, nonstk_ld, stk_st, nonstk_st;
2049
/* Create and return an instrumented version of cb_in.  Free cb_in
   before returning.  Inserts C calls to the eraser_mem_help_* helpers
   before each memory access, except accesses judged stack-relative
   (when --priv-stacks is on), and brackets LOCK-prefixed instructions
   with bus_lock/bus_unlock calls. */
UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
{
   UCodeBlock* cb;
   Int i;
   UInstr* u_in;
   Int t_size = INVALID_TEMPREG;
   Int ntemps;
   Bool *stackref = NULL;
   Bool locked = False;           /* currently inside a LOCK prefix */

   cb = VG_(setup_UCodeBlock)(cb_in);

   /* stackref[] is used for super-simple value tracking to keep note
      of which tempregs currently hold a value which is derived from
      ESP or EBP, and is therefore likely stack-relative if used as
      the address for LOAD or STORE. */
   ntemps = VG_(get_num_temps)(cb);
   stackref = VG_(malloc)(sizeof(*stackref) * ntemps);
   VG_(memset)(stackref, 0, sizeof(*stackref) * ntemps);

   for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
      u_in = VG_(get_instr)(cb_in, i);

      switch (u_in->opcode) {

      case NOP: case CALLM_S: case CALLM_E:
         break;

      case LOCK:
         /* Model the x86 LOCK prefix as taking a global bus lock. */
         locked = True;
         uInstr0(cb, CCALL, 0);
         uCCall(cb, (Addr)bus_lock, 0, 0, False);
         break;

      case JMP: case INCEIP:
         /* End of the locked instruction: release the bus lock. */
         if (locked) {
            uInstr0(cb, CCALL, 0);
            uCCall(cb, (Addr)bus_unlock, 0, 0, False);
         }
         locked = False;
         VG_(copy_UInstr)(cb, u_in);
         break;

      case GET:
         /* A tempreg loaded from ESP/EBP is marked stack-derived. */
         sk_assert(u_in->tag1 == ArchReg);
         sk_assert(u_in->tag2 == TempReg);
         sk_assert(u_in->val2 < ntemps);

         stackref[u_in->val2] = (u_in->size == 4 &&
                                 (u_in->val1 == R_ESP || u_in->val1 == R_EBP));
         VG_(copy_UInstr)(cb, u_in);
         break;

      case MOV:
         /* Copies propagate stack-derivedness. */
         if (u_in->size == 4 && u_in->tag1 == TempReg) {
            sk_assert(u_in->tag2 == TempReg);
            stackref[u_in->val2] = stackref[u_in->val1];
         }
         VG_(copy_UInstr)(cb, u_in);
         break;

      case LEA1:
      case ADD: case SUB:
         /* Address arithmetic keeps (ORs in) stack-derivedness. */
         if (u_in->size == 4 && u_in->tag1 == TempReg) {
            sk_assert(u_in->tag2 == TempReg);
            stackref[u_in->val2] |= stackref[u_in->val1];
         }
         VG_(copy_UInstr)(cb, u_in);
         break;

      case LOAD: {
         void (*help)(Addr);
         sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
         sk_assert(u_in->tag1 == TempReg);

         /* Instrument unless the address looks stack-relative and
            --priv-stacks says stack accesses can be skipped. */
         if (!clo_priv_stacks || !stackref[u_in->val1]) {
            nonstk_ld++;

            switch(u_in->size) {
            case 1: help = eraser_mem_help_read_1; break;
            case 2: help = eraser_mem_help_read_2; break;
            case 4: help = eraser_mem_help_read_4; break;
            default:
               VG_(skin_panic)("bad size");
            }

            /* Call helper with the address tempreg, before the LOAD. */
            uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
            uCCall(cb, (Addr)help, 1, 1, False);
         } else
            stk_ld++;

         VG_(copy_UInstr)(cb, u_in);
         t_size = INVALID_TEMPREG;
         break;
      }

      case FPU_R: {
         /* FPU reads have variable size: pass it as a literal arg. */
         sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
                   8 == u_in->size || 10 == u_in->size);

         t_size = newTemp(cb);
         uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
         uLiteral(cb, (UInt)u_in->size);

         uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
         uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);

         VG_(copy_UInstr)(cb, u_in);
         t_size = INVALID_TEMPREG;
         break;
      }

      case STORE: {
         void (*help)(Addr, UInt);
         sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
         sk_assert(u_in->tag2 == TempReg);

         /* For STORE, val2 is the address temp and val1 the value. */
         if (!clo_priv_stacks || !stackref[u_in->val2]) {
            nonstk_st++;

            switch(u_in->size) {
            case 1: help = eraser_mem_help_write_1; break;
            case 2: help = eraser_mem_help_write_2; break;
            case 4: help = eraser_mem_help_write_4; break;
            default:
               VG_(skin_panic)("bad size");
            }

            uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
            uCCall(cb, (Addr)help, 2, 2, False);
         } else
            stk_st++;

         VG_(copy_UInstr)(cb, u_in);
         t_size = INVALID_TEMPREG;
         break;
      }

      case FPU_W: {
         /* FPU writes: same literal-size scheme as FPU_R. */
         sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
                   8 == u_in->size || 10 == u_in->size);

         t_size = newTemp(cb);
         uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size);
         uLiteral(cb, (UInt)u_in->size);
         uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
         uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);

         VG_(copy_UInstr)(cb, u_in);
         t_size = INVALID_TEMPREG;
         break;
      }

      case MMX1: case MMX2: case MMX3:
      case MMX2_MemRd: case MMX2_MemWr:
      case MMX2_RegRd: case MMX2_RegWr:
         VG_(skin_panic)(
            "I don't know how to instrument MMXish stuff (yet)");
         break;

      default:
         /* conservative tromping: any other instruction writing a
            tempreg invalidates its stack-derived flag. */
         if (0 && u_in->tag1 == TempReg) /* can val1 ever be dest? */
            stackref[u_in->val1] = False;
         if (u_in->tag2 == TempReg)
            stackref[u_in->val2] = False;
         if (u_in->tag3 == TempReg)
            stackref[u_in->val3] = False;
         VG_(copy_UInstr)(cb, u_in);
         break;
      }
   }

   VG_(free)(stackref);
   VG_(free_UCodeBlock)(cb_in);
   return cb;
}
2229
2230
2231/*--------------------------------------------------------------------*/
2232/*--- Error and suppression handling ---*/
2233/*--------------------------------------------------------------------*/
2234
/* Suppression kinds understood by this tool. */
typedef
   enum {
      /* Possible data race */
      EraserSupp
   }
   EraserSuppKind;

/* What kind of error it is. */
typedef
   enum {
      EraserErr,    /* data-race */
      MutexErr,     /* mutex operations */
      LockGraphErr, /* mutex order error */
                    /* NB: trailing comma is a GCC extension (not C89) */
   }
   EraserErrorKind;

/* The classification of a faulting address. */
typedef
   enum { Undescribed, /* as-yet unclassified */
          Stack,
          Unknown,     /* classification yielded nothing useful */
          Mallocd,
          Freed,
          Segment
   }
   AddrKind;
/* Records info about a faulting address. */
typedef
   struct {
      /* ALL */
      AddrKind akind;
      /* Freed, Mallocd */
      Int blksize;
      /* Freed, Mallocd: offset of the access within/relative to the block */
      Int rwoffset;
      /* Freed, Mallocd: where the block was last alloc'd/freed, and by whom */
      ExeContext* lastchange;
      ThreadId lasttid;
      /* Stack */
      ThreadId stack_tid;
      /* Segment */
      const Char* filename;
      const Char* section;
      /* True if is just-below %esp -- could be a gcc bug. */
      Bool maybe_gcc;
   }
   AddrInfo;

/* What kind of memory access is involved in the error? */
typedef
   enum { ReadAxs, WriteAxs, ExecAxs }
   AxsKind;

/* Extra context for memory errors */
typedef
   struct {
      AxsKind axskind;
      Int size;
      AddrInfo addrinfo;
      Bool isWrite;
      /* Shadow word state the address held before the error. */
      shadow_word prevstate;
      /* MutexErr, LockGraphErr */
      Mutex *mutex;
      EC_EIP lasttouched;
      ThreadId lasttid;
      /* LockGraphErr */
      const LockSet *held_lockset;
      const LockSet *prev_lockset;
   }
   HelgrindError;
2305
2306static __inline__
2307void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002308{
sewardj16748af2002-10-22 04:55:54 +00002309 ai->akind = Unknown;
2310 ai->blksize = 0;
2311 ai->rwoffset = 0;
2312 ai->lastchange = NULL;
2313 ai->lasttid = VG_INVALID_THREADID;
2314 ai->filename = NULL;
2315 ai->section = "???";
2316 ai->stack_tid = VG_INVALID_THREADID;
2317 ai->maybe_gcc = False;
njn25e49d8e72002-09-23 09:36:25 +00002318}
2319
sewardj16748af2002-10-22 04:55:54 +00002320static __inline__
2321void clear_HelgrindError ( HelgrindError* err_extra )
2322{
2323 err_extra->axskind = ReadAxs;
2324 err_extra->size = 0;
2325 err_extra->mutex = NULL;
sewardj499e3de2002-11-13 22:22:25 +00002326 err_extra->lasttouched= NULL_EC_EIP;
sewardj16748af2002-10-22 04:55:54 +00002327 err_extra->lasttid = VG_INVALID_THREADID;
sewardjff2c9232002-11-13 21:44:39 +00002328 err_extra->prev_lockset = 0;
2329 err_extra->held_lockset = 0;
sewardj8fac99a2002-11-13 22:31:26 +00002330 err_extra->prevstate = SW(Vge_Virgin, 0);
sewardj16748af2002-10-22 04:55:54 +00002331 clear_AddrInfo ( &err_extra->addrinfo );
2332 err_extra->isWrite = False;
2333}
2334
2335
2336
/* Describe an address as best you can, for error messages,
   putting the result in ai.  Checks, in order: mapped segments,
   currently malloc'd blocks, the freed-block quarantine; otherwise
   marks the address Unknown. */

static void describe_addr ( Addr a, AddrInfo* ai )
{
   HG_Chunk* hc;
   Int i;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'. */

   /* Closure for searching thread stacks.
      NOTE(review): currently unused -- no stack search is performed
      below; dead code or a search was removed. */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(VgHashNode *node)
   {
      HG_Chunk* hc2 = (HG_Chunk*)node;
      return (hc2->data <= a && a < hc2->data + hc2->size);
   }

   /* Search for it in segments */
   {
      const SegInfo *seg;

      for(seg = VG_(next_seginfo)(NULL);
          seg != NULL;
          seg = VG_(next_seginfo)(seg)) {
         Addr base = VG_(seg_start)(seg);
         UInt size = VG_(seg_size)(seg);
         const UChar *filename = VG_(seg_filename)(seg);

         if (a >= base && a < base+size) {
            ai->akind = Segment;
            ai->blksize = size;
            ai->rwoffset = a - base;
            ai->filename = filename;

            switch(VG_(seg_sect_kind)(a)) {
            case Vg_SectText:  ai->section = "text"; break;
            case Vg_SectData:  ai->section = "data"; break;
            case Vg_SectBSS:   ai->section = "BSS"; break;
            case Vg_SectGOT:   ai->section = "GOT"; break;
            case Vg_SectPLT:   ai->section = "PLT"; break;
            case Vg_SectUnknown:
            default:
               ai->section = "???"; break;
            }

            return;
         }
      }
   }

   /* Search for a currently malloc'd block which might bracket it. */
   hc = (HG_Chunk*)VG_(HT_first_match)(hg_malloc_list, addr_is_in_block);
   if (NULL != hc) {
      ai->akind      = Mallocd;
      ai->blksize    = hc->size;
      ai->rwoffset   = (Int)a - (Int)(hc->data);
      ai->lastchange = hc->where;
      ai->lasttid    = hc->tid;
      return;
   }

   /* Look in recently freed memory */
   for(i = 0; i < N_FREED_CHUNKS; i++) {
      hc = freechunks[i];
      if (hc == NULL)
         continue;

      if (a >= hc->data && a < hc->data + hc->size) {
         ai->akind      = Freed;
         ai->blksize    = hc->size;
         ai->rwoffset   = a - hc->data;
         ai->lastchange = hc->where;
         ai->lasttid    = hc->tid;
         return;
      }
   }

   /* Clueless ... */
   ai->akind = Unknown;
   return;
}
2423
2424
njn7e614812003-04-21 22:04:03 +00002425/* Updates the copy with address info if necessary. */
2426UInt SK_(update_extra)(Error* err)
sewardj16748af2002-10-22 04:55:54 +00002427{
njn7e614812003-04-21 22:04:03 +00002428 HelgrindError* extra;
sewardj16748af2002-10-22 04:55:54 +00002429
njn7e614812003-04-21 22:04:03 +00002430 extra = (HelgrindError*)VG_(get_error_extra)(err);
2431 if (extra != NULL && Undescribed == extra->addrinfo.akind) {
2432 describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
2433 }
2434 return sizeof(HelgrindError);
sewardj16748af2002-10-22 04:55:54 +00002435}
2436
/* Report a possible data race on address 'a', then poison its shadow
   word so the same location is not reported repeatedly. */
static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write,
                                  shadow_word prevstate )
{
   shadow_word *sw;
   HelgrindError err_extra;

   n_eraser_warnings++;

   clear_HelgrindError(&err_extra);
   err_extra.isWrite = is_write;
   err_extra.addrinfo.akind = Undescribed;  /* described lazily in update_extra */
   err_extra.prevstate = prevstate;
   if (clo_execontext)
      err_extra.lasttouched = getExeContext(a);
   VG_(maybe_record_error)( tst, EraserErr, a,
                            (is_write ? "writing" : "reading"),
                            &err_extra);

   /* If the word was exclusively owned, drop the owner's
      life-segment refcount before overwriting its state. */
   sw = get_sword_addr(a);
   if (sw->state == Vge_Excl && sw->other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(sw->other);
      tls->refcount--;
   }

   /* Mark the word as already-reported. */
   set_sword(a, error_sword);
}
2463
/* Report a bogus mutex operation ('str' describes the attempted
   action); 'ec' is where the mutex was last touched. */
static void record_mutex_error(ThreadId tid, Mutex *mutex,
                               Char *str, ExeContext *ec)
{
   HelgrindError err_extra;

   clear_HelgrindError(&err_extra);
   err_extra.addrinfo.akind = Undescribed;  /* described lazily in update_extra */
   err_extra.mutex = mutex;
   err_extra.lasttouched = EC(ec, virgin_sword, thread_seg[tid]);
   err_extra.lasttid = tid;

   VG_(maybe_record_error)(VG_(get_ThreadState)(tid), MutexErr,
                           (Addr)mutex->mutexp, str, &err_extra);
}
njn25e49d8e72002-09-23 09:36:25 +00002478
/* Report a lock-order (potential deadlock) error: 'mutex' is being
   acquired while 'lockset_holding' is held, contradicting the order
   previously seen ('lockset_prev'). */
static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
                                   const LockSet *lockset_holding,
                                   const LockSet *lockset_prev)
{
   HelgrindError err_extra;

   n_lockorder_warnings++;

   clear_HelgrindError(&err_extra);
   err_extra.addrinfo.akind = Undescribed;  /* described lazily in update_extra */
   err_extra.mutex = mutex;

   err_extra.lasttouched = EC(mutex->location, virgin_sword, 0);
   err_extra.held_lockset = lockset_holding;
   err_extra.prev_lockset = lockset_prev;

   VG_(maybe_record_error)(VG_(get_ThreadState)(tid), LockGraphErr,
                           mutex->mutexp, "", &err_extra);
}
2498
njn810086f2002-11-14 12:42:47 +00002499Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
njn25e49d8e72002-09-23 09:36:25 +00002500{
njn810086f2002-11-14 12:42:47 +00002501 Char *e1s, *e2s;
sewardj16748af2002-10-22 04:55:54 +00002502
njn810086f2002-11-14 12:42:47 +00002503 sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
2504
2505 switch (VG_(get_error_kind)(e1)) {
sewardj16748af2002-10-22 04:55:54 +00002506 case EraserErr:
njn810086f2002-11-14 12:42:47 +00002507 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002508
2509 case MutexErr:
njn810086f2002-11-14 12:42:47 +00002510 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002511 }
2512
njn810086f2002-11-14 12:42:47 +00002513 e1s = VG_(get_error_string)(e1);
2514 e2s = VG_(get_error_string)(e2);
2515 if (e1s != e2s) return False;
2516 if (0 != VG_(strcmp)(e1s, e2s)) return False;
njn25e49d8e72002-09-23 09:36:25 +00002517 return True;
2518}
2519
/* Pretty-print the classification of address 'a' (from describe_addr)
   as part of an error report. */
static void pp_AddrInfo ( Addr a, AddrInfo* ai )
{
   switch (ai->akind) {
   case Stack:
      VG_(message)(Vg_UserMsg,
                   " Address %p is on thread %d's stack",
                   a, ai->stack_tid);
      break;
   case Unknown:
      if (ai->maybe_gcc) {
         VG_(message)(Vg_UserMsg,
            " Address %p is just below %%esp. Possibly a bug in GCC/G++",
            a);
         VG_(message)(Vg_UserMsg,
            " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
      } else {
         VG_(message)(Vg_UserMsg,
                      " Address %p is not stack'd, malloc'd or free'd", a);
      }
      break;
   case Segment:
      VG_(message)(Vg_UserMsg,
                   " Address %p is in %s section of %s",
                   a, ai->section, ai->filename);
      break;
   case Mallocd:
   case Freed: {
      /* Express the access position relative to the block. */
      UInt delta;
      UChar* relative;
      if (ai->rwoffset < 0) {
         delta    = (UInt)(- ai->rwoffset);
         relative = "before";
      } else if (ai->rwoffset >= ai->blksize) {
         delta    = ai->rwoffset - ai->blksize;
         relative = "after";
      } else {
         delta    = ai->rwoffset;
         relative = "inside";
      }
      VG_(message)(Vg_UserMsg,
         " Address %p is %d bytes %s a block of size %d %s by thread %d",
         a, delta, relative,
         ai->blksize,
         ai->akind == Mallocd ? "alloc'd" : "freed",
         ai->lasttid);

      VG_(pp_ExeContext)(ai->lastchange);
      break;
   }
   default:
      /* Undescribed should have been resolved by update_extra. */
      VG_(skin_panic)("pp_AddrInfo");
   }
}
2573
/* Render a LockSet as "<prefix>addr symname, addr symname, ...".
   Returns a VG_(malloc)'d string the caller must VG_(free).
   Assumes 120 bytes suffices per mutex entry -- TODO confirm against
   the maximum %p%(y expansion. */
static Char *lockset_str(const Char *prefix, const LockSet *lockset)
{
   Char *buf, *cp;
   Int i;

   buf = VG_(malloc)((prefix == NULL ? 0 : VG_(strlen)(prefix)) +
                     lockset->setsize * 120 +
                     1);

   cp = buf;
   if (prefix)
      cp += VG_(sprintf)(cp, "%s", prefix);

   for(i = 0; i < lockset->setsize; i++)
      cp += VG_(sprintf)(cp, "%p%(y, ", lockset->mutex[i]->mutexp,
                         lockset->mutex[i]->mutexp);

   /* Chop the trailing ", " off a non-empty list. */
   if (lockset->setsize)
      cp[-2] = '\0';
   else
      *cp = '\0';

   return buf;
}
njn25e49d8e72002-09-23 09:36:25 +00002598
/* Core callback: pretty-print one error of any of our kinds.
   NB: 'msg' either aliases the on-stack 'buf' or owns heap memory
   from lockset_str(); the final 'msg != buf' test frees only the
   heap case. */
void SK_(pp_SkinError) ( Error* err )
{
   HelgrindError *extra = (HelgrindError *)VG_(get_error_extra)(err);
   Char buf[100];
   Char *msg = buf;
   const LockSet *ls;

   *msg = '\0';

   switch(VG_(get_error_kind)(err)) {
   case EraserErr: {
      Addr err_addr = VG_(get_error_address)(err);

      VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
                   VG_(get_error_string)(err), err_addr, err_addr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      pp_AddrInfo(err_addr, &extra->addrinfo);

      /* Render the word's previous shadow state into msg. */
      switch(extra->prevstate.state) {
      case Vge_Virgin:
         /* shouldn't be possible to go directly from virgin -> error */
         VG_(sprintf)(buf, "virgin!?");
         break;

      case Vge_Excl: {
         ThreadLifeSeg *tls = unpackTLS(extra->prevstate.other);

         sk_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
         VG_(sprintf)(buf, "exclusively owned by thread %u", tls->tid);
         break;
      }

      case Vge_Shar:
      case Vge_SharMod:
         ls = unpackLockSet(extra->prevstate.other);

         if (isempty(ls)) {
            VG_(sprintf)(buf, "shared %s, no locks",
                         extra->prevstate.state == Vge_Shar ? "RO" : "RW");
            break;
         }

         /* msg switches from buf to a heap string here. */
         msg = lockset_str(extra->prevstate.state == Vge_Shar ?
                           "shared RO, locked by:" :
                           "shared RW, locked by:", ls);

         break;
      }

      if (*msg)
         VG_(message)(Vg_UserMsg, " Previous state: %s", msg);

      /* Show where the word last changed state, in whichever form
         --execontext recorded it (single EIP vs full ExeContext). */
      if (clo_execontext == EC_Some
          && extra->lasttouched.uu_ec_eip.eip != 0) {
         Char file[100];
         UInt line;
         Addr eip = extra->lasttouched.uu_ec_eip.eip;

         VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s by thread %u",
                      err_addr,
                      pp_state(extra->lasttouched.state),
                      unpackTLS(extra->lasttouched.tls)->tid);

         if (VG_(get_filename_linenum)(eip, file, sizeof(file), &line)) {
            VG_(message)(Vg_UserMsg, " at %p: %y (%s:%u)",
                         eip, eip, file, line);
         } else if (VG_(get_objname)(eip, file, sizeof(file))) {
            VG_(message)(Vg_UserMsg, " at %p: %y (in %s)",
                         eip, eip, file);
         } else {
            VG_(message)(Vg_UserMsg, " at %p: %y", eip, eip);
         }
      } else if (clo_execontext == EC_All
                 && extra->lasttouched.uu_ec_eip.ec != NULL) {
         VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s in tid %u",
                      err_addr,
                      pp_state(extra->lasttouched.state),
                      unpackTLS(extra->lasttouched.tls)->tid);
         VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
      }
      break;
   }

   case MutexErr:
      VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s",
                   VG_(get_error_address)(err),
                   VG_(get_error_address)(err),
                   VG_(get_error_string)(err));
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      if (extra->lasttouched.uu_ec_eip.ec != NULL) {
         VG_(message)(Vg_UserMsg, " last touched by thread %d", extra->lasttid);
         VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
      }
      pp_AddrInfo(VG_(get_error_address)(err), &extra->addrinfo);
      break;

   case LockGraphErr: {
      const LockSet *heldset = extra->held_lockset;
      Addr err_addr = VG_(get_error_address)(err);
      Int i;

      msg = lockset_str(NULL, heldset);

      VG_(message)(Vg_UserMsg, "Mutex %p%(y locked in inconsistent order",
                   err_addr, err_addr);
      VG_(pp_ExeContext)( VG_(get_error_where)(err) );
      VG_(message)(Vg_UserMsg, " while holding locks %s", msg);

      for(i = 0; i < heldset->setsize; i++) {
         const Mutex *lsmx = heldset->mutex[i];

         /* needs to be a recursive search+display */
         if (0 && !ismember(lsmx->lockdep, extra->mutex))
            continue;

         VG_(message)(Vg_UserMsg, " %p%(y last locked at",
                      lsmx->mutexp, lsmx->mutexp);
         VG_(pp_ExeContext)(lsmx->location);
         /* Release the previous heap string before building the next. */
         VG_(free)(msg);
         msg = lockset_str(NULL, lsmx->lockdep);
         VG_(message)(Vg_UserMsg, " while depending on locks %s", msg);
      }

      break;
   }
   }

   if (msg != buf)
      VG_(free)(msg);
}
2729
2730
njn810086f2002-11-14 12:42:47 +00002731Bool SK_(recognised_suppression) ( Char* name, Supp *su )
njn25e49d8e72002-09-23 09:36:25 +00002732{
2733 if (0 == VG_(strcmp)(name, "Eraser")) {
njn810086f2002-11-14 12:42:47 +00002734 VG_(set_supp_kind)(su, EraserSupp);
njn25e49d8e72002-09-23 09:36:25 +00002735 return True;
2736 } else {
2737 return False;
2738 }
2739}
2740
2741
njn810086f2002-11-14 12:42:47 +00002742Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +00002743{
2744 /* do nothing -- no extra suppression info present. Return True to
2745 indicate nothing bad happened. */
2746 return True;
2747}
2748
2749
njn810086f2002-11-14 12:42:47 +00002750Bool SK_(error_matches_suppression)(Error* err, Supp* su)
njn25e49d8e72002-09-23 09:36:25 +00002751{
njn810086f2002-11-14 12:42:47 +00002752 sk_assert(VG_(get_supp_kind) (su) == EraserSupp);
2753 sk_assert(VG_(get_error_kind)(err) == EraserErr);
njn25e49d8e72002-09-23 09:36:25 +00002754 return True;
2755}
2756
njn43c799e2003-04-08 00:08:52 +00002757extern Char* SK_(get_error_name) ( Error* err )
2758{
2759 if (EraserErr == VG_(get_error_kind)(err)) {
2760 return "Eraser";
2761 } else {
2762 return NULL; /* Other errors types can't be suppressed */
2763 }
2764}
2765
/* Emit skin-specific lines when printing a suggested suppression.
   Eraser suppressions have no extra payload, so nothing is printed. */
extern void SK_(print_extra_suppression_info) ( Error* err )
{
   /* Do nothing */
}
njn25e49d8e72002-09-23 09:36:25 +00002770
sewardjdca84112002-11-13 22:29:34 +00002771static void eraser_pre_mutex_lock(ThreadId tid, void* void_mutex)
2772{
2773 Mutex *mutex = get_mutex((Addr)void_mutex);
2774
2775 test_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));
2776}
2777
njn25e49d8e72002-09-23 09:36:25 +00002778static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
2779{
sewardj4bffb232002-11-13 21:46:34 +00002780 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002781 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002782 const LockSet* ls;
2783
sewardj16748af2002-10-22 04:55:54 +00002784 set_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));
2785
njn25e49d8e72002-09-23 09:36:25 +00002786# if DEBUG_LOCKS
sewardjdac0a442002-11-13 22:08:40 +00002787 VG_(printf)("lock (%u, %p)\n", tid, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002788# endif
2789
njn25e49d8e72002-09-23 09:36:25 +00002790 /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
2791# if LOCKSET_SANITY > 1
2792 sanity_check_locksets("eraser_post_mutex_lock-IN");
2793# endif
2794
sewardj4bffb232002-11-13 21:46:34 +00002795 ls = lookup_LockSet_with(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002796
sewardj4bffb232002-11-13 21:46:34 +00002797 if (ls == NULL) {
2798 LockSet *newset = add_LockSet(thread_locks[tid], mutex);
2799 insert_LockSet(newset);
2800 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002801 }
sewardj4bffb232002-11-13 21:46:34 +00002802 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002803
sewardj4bffb232002-11-13 21:46:34 +00002804 if (debug || DEBUG_LOCKS)
2805 VG_(printf)("tid %u now has lockset %p\n", tid, ls);
njn25e49d8e72002-09-23 09:36:25 +00002806
sewardj4bffb232002-11-13 21:46:34 +00002807 if (debug || LOCKSET_SANITY > 1)
2808 sanity_check_locksets("eraser_post_mutex_lock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002809}
2810
2811
2812static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
2813{
sewardjc26cc252002-10-23 21:58:55 +00002814 static const Bool debug = False;
njn25e49d8e72002-09-23 09:36:25 +00002815 Int i = 0;
sewardj39a4d842002-11-13 22:14:30 +00002816 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002817 const LockSet *ls;
2818
sewardjdca84112002-11-13 22:29:34 +00002819 test_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));
sewardj16748af2002-10-22 04:55:54 +00002820 set_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));
2821
sewardjdac0a442002-11-13 22:08:40 +00002822 if (!ismember(thread_locks[tid], mutex))
2823 return;
2824
sewardjc26cc252002-10-23 21:58:55 +00002825 if (debug || DEBUG_LOCKS)
2826 VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002827
sewardjc26cc252002-10-23 21:58:55 +00002828 if (debug || LOCKSET_SANITY > 1)
2829 sanity_check_locksets("eraser_post_mutex_unlock-IN");
njn25e49d8e72002-09-23 09:36:25 +00002830
sewardj4bffb232002-11-13 21:46:34 +00002831 ls = lookup_LockSet_without(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002832
sewardj4bffb232002-11-13 21:46:34 +00002833 if (ls == NULL) {
2834 LockSet *newset = remove_LockSet(thread_locks[tid], mutex);
2835 insert_LockSet(newset);
2836 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002837 }
2838
2839 /* Update the thread's lock vector */
sewardjc26cc252002-10-23 21:58:55 +00002840 if (debug || DEBUG_LOCKS)
sewardj4bffb232002-11-13 21:46:34 +00002841 VG_(printf)("tid %u reverts from %p to lockset %p\n",
sewardjc26cc252002-10-23 21:58:55 +00002842 tid, thread_locks[tid], i);
njn25e49d8e72002-09-23 09:36:25 +00002843
sewardj4bffb232002-11-13 21:46:34 +00002844 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002845
sewardjc26cc252002-10-23 21:58:55 +00002846 if (debug || LOCKSET_SANITY > 1)
2847 sanity_check_locksets("eraser_post_mutex_unlock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002848}
2849
2850
2851/* ---------------------------------------------------------------------
2852 Checking memory reads and writes
2853 ------------------------------------------------------------------ */
2854
2855/* Behaviour on reads and writes:
2856 *
2857 * VIR EXCL SHAR SH_MOD
2858 * ----------------------------------------------------------------
2859 * rd/wr, 1st thread | - EXCL - -
2860 * rd, new thread | - SHAR - -
2861 * wr, new thread | - SH_MOD - -
2862 * rd | error! - SHAR SH_MOD
2863 * wr | EXCL - SH_MOD SH_MOD
2864 * ----------------------------------------------------------------
2865 */
2866
sewardj8fac99a2002-11-13 22:31:26 +00002867static inline
njn25e49d8e72002-09-23 09:36:25 +00002868void dump_around_a(Addr a)
2869{
2870 UInt i;
2871 shadow_word* sword;
2872 VG_(printf)("NEARBY:\n");
2873 for (i = a - 12; i <= a + 12; i += 4) {
2874 sword = get_sword_addr(i);
2875 VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
2876 }
2877}
njn25e49d8e72002-09-23 09:36:25 +00002878
2879#if DEBUG_ACCESSES
2880 #define DEBUG_STATE(args...) \
2881 VG_(printf)("(%u) ", size), \
2882 VG_(printf)(args)
2883#else
2884 #define DEBUG_STATE(args...)
2885#endif
2886
sewardj18cd4a52002-11-13 22:37:41 +00002887static void eraser_mem_read_word(Addr a, ThreadId tid, ThreadState *tst)
2888{
sewardj72baa7a2002-12-09 23:32:58 +00002889 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00002890 shadow_word prevstate;
2891 ThreadLifeSeg *tls;
2892 const LockSet *ls;
2893 Bool statechange = False;
2894
2895 static const void *const states[4] = {
2896 [Vge_Virgin] &&st_virgin,
2897 [Vge_Excl] &&st_excl,
2898 [Vge_Shar] &&st_shar,
2899 [Vge_SharMod] &&st_sharmod,
2900 };
2901
2902 tls = thread_seg[tid];
2903 sk_assert(tls != NULL && tls->tid == tid);
2904
2905 sword = get_sword_addr(a);
2906 if (sword == SEC_MAP_ACCESS) {
2907 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
2908 return;
2909 }
2910
2911 prevstate = *sword;
2912
2913 goto *states[sword->state];
2914
2915 /* This looks like reading of unitialised memory, may be legit. Eg.
2916 * calloc() zeroes its values, so untouched memory may actually be
2917 * initialised. Leave that stuff to Valgrind. */
2918 st_virgin:
2919 if (TID_INDICATING_NONVIRGIN == sword->other) {
2920 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
2921 if (DEBUG_VIRGIN_READS)
2922 dump_around_a(a);
2923 } else {
2924 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
2925 }
2926 statechange = True;
2927 *sword = SW(Vge_Excl, packTLS(tls)); /* remember exclusive owner */
2928 tls->refcount++;
2929 goto done;
2930
2931 st_excl: {
2932 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
2933
2934 if (tls == sw_tls) {
2935 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
2936 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
2937 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
2938 } else if (tlsIsDisjoint(tls, sw_tls)) {
2939 DEBUG_STATE("Read EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
2940 statechange = True;
2941 sword->other = packTLS(tls);
2942 sw_tls->refcount--;
2943 tls->refcount++;
2944 } else {
2945 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sw_tls->tid, a, tid);
2946 sw_tls->refcount--;
2947 statechange = True;
2948 *sword = SW(Vge_Shar, packLockSet(thread_locks[tid]));
2949
2950 if (DEBUG_MEM_LOCKSET_CHANGES)
2951 print_LockSet("excl read locks", unpackLockSet(sword->other));
2952 }
2953 goto done;
2954 }
2955
2956 st_shar:
2957 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
2958 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
2959 thread_locks[tid]));
2960 statechange = sword->other != prevstate.other;
2961 goto done;
2962
2963 st_sharmod:
2964 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
2965 ls = intersect(unpackLockSet(sword->other),
2966 thread_locks[tid]);
2967 sword->other = packLockSet(ls);
2968
2969 statechange = sword->other != prevstate.other;
2970
2971 if (isempty(ls)) {
2972 record_eraser_error(tst, a, False /* !is_write */, prevstate);
2973 }
2974 goto done;
2975
2976 done:
2977 if (clo_execontext != EC_None && statechange) {
2978 EC_EIP eceip;
2979
2980 if (clo_execontext == EC_Some)
sewardjc808ef52002-11-13 22:43:26 +00002981 eceip = EIP(VG_(get_EIP)(tst), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002982 else
sewardjc808ef52002-11-13 22:43:26 +00002983 eceip = EC(VG_(get_ExeContext)(tst), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002984 setExeContext(a, eceip);
2985 }
2986}
njn25e49d8e72002-09-23 09:36:25 +00002987
sewardj0f811692002-10-22 04:59:26 +00002988static void eraser_mem_read(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00002989{
sewardj0f811692002-10-22 04:59:26 +00002990 ThreadId tid;
sewardj8fac99a2002-11-13 22:31:26 +00002991 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00002992
sewardj8fac99a2002-11-13 22:31:26 +00002993 end = ROUNDUP(a+size, 4);
2994 a = ROUNDDN(a, 4);
2995
sewardj499e3de2002-11-13 22:22:25 +00002996 if (tst == NULL)
2997 tid = VG_(get_current_tid)();
2998 else
2999 tid = VG_(get_tid_from_ThreadState)(tst);
sewardj0f811692002-10-22 04:59:26 +00003000
sewardj18cd4a52002-11-13 22:37:41 +00003001
3002 for ( ; a < end; a += 4)
3003 eraser_mem_read_word(a, tid, tst);
3004}
3005
/* Run the Eraser state machine for a write of the aligned word at `a`
   by thread `tid` (the "wr" rows of the table above).  Same computed-
   goto dispatch as eraser_mem_read_word; any path reaching
   SHARED_MODIFIED with an empty candidate lockset reports a race. */
static void eraser_mem_write_word(Addr a, ThreadId tid, ThreadState *tst)
{
   ThreadLifeSeg *tls;
   shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
   shadow_word prevstate;        /* state before this access, for reporting */
   Bool statechange = False;     /* did the shadow word change? */
   /* Label table for the computed goto, indexed by Vge_* state. */
   static const void *const states[4] = {
      [Vge_Virgin] &&st_virgin,
      [Vge_Excl] &&st_excl,
      [Vge_Shar] &&st_shar,
      [Vge_SharMod] &&st_sharmod,
   };

   tls = thread_seg[tid];
   sk_assert(tls != NULL && tls->tid == tid);

   sword = get_sword_addr(a);
   if (sword == SEC_MAP_ACCESS) {
      VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
      return;
   }

   prevstate = *sword;

   goto *states[sword->state];

 st_virgin:
   /* First access: this thread becomes the exclusive owner. */
   if (TID_INDICATING_NONVIRGIN == sword->other)
      DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
   else
      DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
   statechange = True;
   *sword = SW(Vge_Excl, packTLS(tls));/* remember exclusive owner */
   tls->refcount++;
   goto done;

 st_excl: {
      ThreadLifeSeg *sw_tls = unpackTLS(sword->other);

      if (tls == sw_tls) {
         /* Same life-segment still owns the word exclusively. */
         DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
         goto done;
      } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
         /* Word already flagged erroneous; stay in EXCL/ERR. */
         DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
         goto done;
      } else if (tlsIsDisjoint(tls, sw_tls)) {
         /* Ordered (happens-before) hand-over: transfer ownership. */
         DEBUG_STATE("Write EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
         sword->other = packTLS(tls);
         sw_tls->refcount--;
         tls->refcount++;
         goto done;
      } else {
         /* Concurrent write by a second thread: straight to SHAR_MOD,
            seeded with this thread's current held locks. */
         DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sw_tls->tid, a, tid);
         statechange = True;
         sw_tls->refcount--;
         *sword = SW(Vge_SharMod, packLockSet(thread_locks[tid]));
         if(DEBUG_MEM_LOCKSET_CHANGES)
            print_LockSet("excl write locks", unpackLockSet(sword->other));
         goto SHARED_MODIFIED;
      }
   }

 st_shar:
   /* First write to shared data: promote to SHAR_MOD and refine the
      candidate lockset. */
   DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
   sword->state = Vge_SharMod;
   sword->other = packLockSet(intersect(unpackLockSet(sword->other),
                                        thread_locks[tid]));
   statechange = True;
   goto SHARED_MODIFIED;

 st_sharmod:
   /* Further write to shared-modified data: refine lockset only. */
   DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
   sword->other = packLockSet(intersect(unpackLockSet(sword->other),
                                        thread_locks[tid]));
   statechange = sword->other != prevstate.other;

 SHARED_MODIFIED:
   /* No lock consistently protected this word -> possible race. */
   if (isempty(unpackLockSet(sword->other))) {
      record_eraser_error(tst, a, True /* is_write */, prevstate);
   }
   goto done;

 done:
   /* Optionally remember where/when this word last changed state,
      per the --show-last-access setting. */
   if (clo_execontext != EC_None && statechange) {
      EC_EIP eceip;

      if (clo_execontext == EC_Some)
         eceip = EIP(VG_(get_EIP)(tst), prevstate, tls);
      else
         eceip = EC(VG_(get_ExeContext)(tst), prevstate, tls);
      setExeContext(a, eceip);
   }
}
3099
sewardj0f811692002-10-22 04:59:26 +00003100static void eraser_mem_write(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00003101{
sewardj0f811692002-10-22 04:59:26 +00003102 ThreadId tid;
sewardj8fac99a2002-11-13 22:31:26 +00003103 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003104
sewardj8fac99a2002-11-13 22:31:26 +00003105 end = ROUNDUP(a+size, 4);
3106 a = ROUNDDN(a, 4);
3107
sewardj499e3de2002-11-13 22:22:25 +00003108 if (tst == NULL)
3109 tid = VG_(get_current_tid)();
3110 else
3111 tid = VG_(get_tid_from_ThreadState)(tst);
3112
sewardj18cd4a52002-11-13 22:37:41 +00003113 for ( ; a < end; a += 4)
3114 eraser_mem_write_word(a, tid, tst);
njn25e49d8e72002-09-23 09:36:25 +00003115}
3116
3117#undef DEBUG_STATE
3118
sewardja5b3aec2002-10-22 05:09:36 +00003119static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00003120{
sewardja5b3aec2002-10-22 05:09:36 +00003121 eraser_mem_read(a, 1, NULL);
sewardj7ab2aca2002-10-20 19:40:32 +00003122}
3123
sewardja5b3aec2002-10-22 05:09:36 +00003124static void eraser_mem_help_read_2(Addr a)
3125{
3126 eraser_mem_read(a, 2, NULL);
3127}
3128
/* JIT helper: record a 4-byte read at a by the current thread. */
static void eraser_mem_help_read_4(Addr a)
{
   eraser_mem_read(a, 4, NULL);
}
3133
/* JIT helper: record an arbitrary-size read at a by the current thread. */
static void eraser_mem_help_read_N(Addr a, UInt size)
{
   eraser_mem_read(a, size, NULL);
}
3138
3139static void eraser_mem_help_write_1(Addr a, UInt val)
3140{
3141 if (*(UChar *)a != val)
3142 eraser_mem_write(a, 1, NULL);
3143}
3144static void eraser_mem_help_write_2(Addr a, UInt val)
3145{
3146 if (*(UShort *)a != val)
3147 eraser_mem_write(a, 2, NULL);
3148}
3149static void eraser_mem_help_write_4(Addr a, UInt val)
3150{
3151 if (*(UInt *)a != val)
3152 eraser_mem_write(a, 4, NULL);
3153}
/* JIT helper: record an arbitrary-size write at a by the current
   thread.  Unlike the fixed-size write helpers, no old-value check is
   done here. */
static void eraser_mem_help_write_N(Addr a, UInt size)
{
   eraser_mem_write(a, size, NULL);
}
njn25e49d8e72002-09-23 09:36:25 +00003158
sewardjc4a810d2002-11-13 22:25:51 +00003159static void hg_thread_create(ThreadId parent, ThreadId child)
3160{
3161 if (0)
3162 VG_(printf)("CREATE: %u creating %u\n", parent, child);
3163
3164 newTLS(child);
3165 addPriorTLS(child, parent);
3166
3167 newTLS(parent);
3168}
3169
/* Thread-join hook: start a fresh life-segment for the joiner, chained
   after the joinee's final segment (joiner sees everything the joinee
   did), then clear the joinee's segment state. */
static void hg_thread_join(ThreadId joiner, ThreadId joinee)
{
   if (0)
      VG_(printf)("JOIN: %u joining on %u\n", joiner, joinee);

   newTLS(joiner);
   addPriorTLS(joiner, joinee);

   clearTLS(joinee);
}
3180
sewardj7a5ebcf2002-11-13 22:42:13 +00003181static Int __BUS_HARDWARE_LOCK__;
3182
3183static void bus_lock(void)
3184{
3185 ThreadId tid = VG_(get_current_tid)();
3186 eraser_pre_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3187 eraser_post_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3188}
3189
3190static void bus_unlock(void)
3191{
3192 ThreadId tid = VG_(get_current_tid)();
3193 eraser_post_mutex_unlock(tid, &__BUS_HARDWARE_LOCK__);
3194}
3195
njn25e49d8e72002-09-23 09:36:25 +00003196/*--------------------------------------------------------------------*/
sewardj7f3ad222002-11-13 22:11:53 +00003197/*--- Client requests ---*/
3198/*--------------------------------------------------------------------*/
3199
3200Bool SK_(handle_client_request)(ThreadState *tst, UInt *args, UInt *ret)
3201{
3202 if (!VG_IS_SKIN_USERREQ('H','G',args[0]))
3203 return False;
3204
3205 switch(args[0]) {
3206 case VG_USERREQ__HG_CLEAN_MEMORY:
3207 set_address_range_state(args[1], args[2], Vge_VirginInit);
3208 *ret = 0; /* meaningless */
3209 break;
3210
3211 case VG_USERREQ__HG_KNOWN_RACE:
3212 set_address_range_state(args[1], args[2], Vge_Error);
3213 *ret = 0; /* meaningless */
3214 break;
3215
3216 default:
3217 return False;
3218 }
3219
3220 return True;
3221}
3222
3223
3224/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003225/*--- Setup ---*/
3226/*--------------------------------------------------------------------*/
3227
njn810086f2002-11-14 12:42:47 +00003228void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00003229{
3230 Int i;
sewardj4bffb232002-11-13 21:46:34 +00003231 LockSet *empty;
njn25e49d8e72002-09-23 09:36:25 +00003232
njn810086f2002-11-14 12:42:47 +00003233 VG_(details_name) ("Helgrind");
3234 VG_(details_version) (NULL);
3235 VG_(details_description) ("a data race detector");
3236 VG_(details_copyright_author)(
njn0e1b5142003-04-15 14:58:06 +00003237 "Copyright (C) 2002-2003, and GNU GPL'd, by Nicholas Nethercote.");
njn810086f2002-11-14 12:42:47 +00003238 VG_(details_bug_reports_to) ("njn25@cam.ac.uk");
sewardj78210aa2002-12-01 02:55:46 +00003239 VG_(details_avg_translation_sizeB) ( 115 );
njn25e49d8e72002-09-23 09:36:25 +00003240
njn810086f2002-11-14 12:42:47 +00003241 VG_(needs_core_errors)();
3242 VG_(needs_skin_errors)();
3243 VG_(needs_data_syms)();
njn810086f2002-11-14 12:42:47 +00003244 VG_(needs_client_requests)();
3245 VG_(needs_command_line_options)();
njn25e49d8e72002-09-23 09:36:25 +00003246
njn810086f2002-11-14 12:42:47 +00003247 VG_(track_new_mem_startup) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003248
njn810086f2002-11-14 12:42:47 +00003249 /* stack ones not decided until VG_(post_clo_init)() */
njn25e49d8e72002-09-23 09:36:25 +00003250
njn810086f2002-11-14 12:42:47 +00003251 VG_(track_new_mem_brk) (& make_writable);
3252 VG_(track_new_mem_mmap) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003253
njn810086f2002-11-14 12:42:47 +00003254 VG_(track_change_mem_mprotect) (& eraser_set_perms);
njn25e49d8e72002-09-23 09:36:25 +00003255
njn810086f2002-11-14 12:42:47 +00003256 VG_(track_ban_mem_stack) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003257
njn810086f2002-11-14 12:42:47 +00003258 VG_(track_die_mem_stack) (NULL);
njn810086f2002-11-14 12:42:47 +00003259 VG_(track_die_mem_stack_signal) (NULL);
3260 VG_(track_die_mem_brk) (NULL);
3261 VG_(track_die_mem_munmap) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003262
njn810086f2002-11-14 12:42:47 +00003263 VG_(track_pre_mem_read) (& eraser_pre_mem_read);
3264 VG_(track_pre_mem_read_asciiz) (& eraser_pre_mem_read_asciiz);
3265 VG_(track_pre_mem_write) (& eraser_pre_mem_write);
3266 VG_(track_post_mem_write) (NULL);
3267
3268 VG_(track_post_thread_create) (& hg_thread_create);
3269 VG_(track_post_thread_join) (& hg_thread_join);
3270
3271 VG_(track_post_mutex_lock) (& eraser_pre_mutex_lock);
3272 VG_(track_post_mutex_lock) (& eraser_post_mutex_lock);
3273 VG_(track_post_mutex_unlock) (& eraser_post_mutex_unlock);
sewardjc4a810d2002-11-13 22:25:51 +00003274
sewardja5b3aec2002-10-22 05:09:36 +00003275 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
3276 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
3277 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
3278 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
3279
3280 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
3281 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
3282 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
3283 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00003284
sewardj7a5ebcf2002-11-13 22:42:13 +00003285 VG_(register_noncompact_helper)((Addr) & bus_lock);
3286 VG_(register_noncompact_helper)((Addr) & bus_unlock);
3287
sewardj4bffb232002-11-13 21:46:34 +00003288 for(i = 0; i < LOCKSET_HASH_SZ; i++)
3289 lockset_hash[i] = NULL;
3290
3291 empty = alloc_LockSet(0);
3292 insert_LockSet(empty);
3293 emptyset = empty;
3294
sewardjc4a810d2002-11-13 22:25:51 +00003295 /* Init lock table and thread segments */
3296 for (i = 0; i < VG_N_THREADS; i++) {
sewardjdac0a442002-11-13 22:08:40 +00003297 thread_locks[i] = empty;
njn25e49d8e72002-09-23 09:36:25 +00003298
sewardjc4a810d2002-11-13 22:25:51 +00003299 newTLS(i);
3300 }
3301
njn25e49d8e72002-09-23 09:36:25 +00003302 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00003303 hg_malloc_list = VG_(HT_construct)();
njn25e49d8e72002-09-23 09:36:25 +00003304}
3305
sewardjf6374322002-11-13 22:35:55 +00003306static Bool match_Bool(Char *arg, Char *argstr, Bool *ret)
3307{
3308 Int len = VG_(strlen)(argstr);
3309
3310 if (VG_(strncmp)(arg, argstr, len) == 0) {
3311 if (VG_(strcmp)(arg+len, "yes") == 0) {
3312 *ret = True;
3313 return True;
3314 } else if (VG_(strcmp)(arg+len, "no") == 0) {
3315 *ret = False;
3316 return True;
3317 } else
3318 VG_(bad_option)(arg);
3319 }
3320 return False;
3321}
3322
sewardj406270b2002-11-13 22:18:09 +00003323static Bool match_str(Char *arg, Char *argstr, Char **ret)
3324{
3325 Int len = VG_(strlen)(argstr);
3326
3327 if (VG_(strncmp)(arg, argstr, len) == 0) {
3328 *ret = VG_(strdup)(arg+len);
3329 return True;
3330 }
3331
3332 return False;
3333}
sewardj406270b2002-11-13 22:18:09 +00003334
3335Bool SK_(process_cmd_line_option)(Char* arg)
3336{
sewardj499e3de2002-11-13 22:22:25 +00003337 Char *str;
3338
3339 if (match_str(arg, "--show-last-access=", &str)) {
3340 Bool ok = True;
3341 if (VG_(strcmp)(str, "no") == 0)
3342 clo_execontext = EC_None;
3343 else if (VG_(strcmp)(str, "some") == 0)
3344 clo_execontext = EC_Some;
3345 else if (VG_(strcmp)(str, "all") == 0)
3346 clo_execontext = EC_All;
3347 else {
3348 ok = False;
3349 VG_(bad_option)(arg);
3350 }
3351
3352 VG_(free)(str);
3353 if (ok)
3354 return True;
3355 }
3356
sewardjf6374322002-11-13 22:35:55 +00003357 if (match_Bool(arg, "--private-stacks=", &clo_priv_stacks))
3358 return True;
3359
njn3e884182003-04-15 13:03:23 +00003360 return VG_(replacement_malloc_process_cmd_line_option)(arg);
sewardj406270b2002-11-13 22:18:09 +00003361}
3362
njn3e884182003-04-15 13:03:23 +00003363void SK_(print_usage)(void)
sewardj406270b2002-11-13 22:18:09 +00003364{
njn3e884182003-04-15 13:03:23 +00003365 VG_(printf)(
sewardje11d6c82002-12-15 02:00:41 +00003366" --private-stacks=yes|no assume thread stacks are used privately [no]\n"
3367" --show-last-access=no|some|all\n"
3368" show location of last word access on error [no]\n"
njn3e884182003-04-15 13:03:23 +00003369 );
3370 VG_(replacement_malloc_print_usage)();
sewardj406270b2002-11-13 22:18:09 +00003371}
3372
njn3e884182003-04-15 13:03:23 +00003373void SK_(print_debug_usage)(void)
3374{
3375 VG_(replacement_malloc_print_debug_usage)();
3376}
njn25e49d8e72002-09-23 09:36:25 +00003377
3378void SK_(post_clo_init)(void)
3379{
njn810086f2002-11-14 12:42:47 +00003380 void (*stack_tracker)(Addr a, UInt len);
3381
sewardj499e3de2002-11-13 22:22:25 +00003382 if (clo_execontext) {
3383 execontext_map = VG_(malloc)(sizeof(ExeContextMap *) * 65536);
3384 VG_(memset)(execontext_map, 0, sizeof(ExeContextMap *) * 65536);
3385 }
sewardjf6374322002-11-13 22:35:55 +00003386
njn810086f2002-11-14 12:42:47 +00003387 if (clo_priv_stacks)
3388 stack_tracker = & eraser_new_mem_stack_private;
3389 else
3390 stack_tracker = & eraser_new_mem_stack;
sewardjf6374322002-11-13 22:35:55 +00003391
njn810086f2002-11-14 12:42:47 +00003392 VG_(track_new_mem_stack) (stack_tracker);
njn810086f2002-11-14 12:42:47 +00003393 VG_(track_new_mem_stack_signal) (stack_tracker);
njn25e49d8e72002-09-23 09:36:25 +00003394}
3395
3396
njn7d9f94d2003-04-22 21:41:40 +00003397void SK_(fini)(Int exitcode)
njn25e49d8e72002-09-23 09:36:25 +00003398{
sewardjdac0a442002-11-13 22:08:40 +00003399 if (DEBUG_LOCK_TABLE) {
sewardj4bffb232002-11-13 21:46:34 +00003400 pp_all_LockSets();
sewardjdac0a442002-11-13 22:08:40 +00003401 pp_all_mutexes();
3402 }
sewardj4bffb232002-11-13 21:46:34 +00003403
3404 if (LOCKSET_SANITY)
3405 sanity_check_locksets("SK_(fini)");
3406
sewardjff2c9232002-11-13 21:44:39 +00003407 VG_(message)(Vg_UserMsg, "%u possible data races found; %u lock order problems",
3408 n_eraser_warnings, n_lockorder_warnings);
sewardjf6374322002-11-13 22:35:55 +00003409
3410 if (0)
3411 VG_(printf)("stk_ld:%u+stk_st:%u = %u nonstk_ld:%u+nonstk_st:%u = %u %u%%\n",
3412 stk_ld, stk_st, stk_ld + stk_st,
3413 nonstk_ld, nonstk_st, nonstk_ld + nonstk_st,
3414 ((stk_ld+stk_st)*100) / (stk_ld + stk_st + nonstk_ld + nonstk_st));
njn25e49d8e72002-09-23 09:36:25 +00003415}
3416
3417/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00003418/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00003419/*--------------------------------------------------------------------*/