/*--------------------------------------------------------------------*/
/*--- Helgrind: checking for data races in threaded programs.      ---*/
/*---                                                    hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind skin for detecting
   data races in threaded programs.

   Copyright (C) 2002-2003 Nicholas Nethercote
      njn25@cam.ac.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "vg_skin.h"
#include "helgrind.h"

VG_DETERMINE_INTERFACE_VERSION

static UInt n_eraser_warnings = 0;
static UInt n_lockorder_warnings = 0;

/*------------------------------------------------------------*/
/*--- Debug guff                                           ---*/
/*------------------------------------------------------------*/

#define DEBUG_LOCK_TABLE    0   /* Print lock table at end */

#define DEBUG_MAKE_ACCESSES 0   /* Print make_access() calls */
#define DEBUG_LOCKS         0   /* Print lock()/unlock() calls and locksets */
#define DEBUG_NEW_LOCKSETS  0   /* Print new locksets when created */
#define DEBUG_ACCESSES      0   /* Print reads, writes */
#define DEBUG_MEM_LOCKSET_CHANGES 0
                                /* Print when an address's lockset
                                   changes; only useful with
                                   DEBUG_ACCESSES */
#define SLOW_ASSERTS        0   /* do expensive asserts */
#define DEBUG_VIRGIN_READS  0   /* Dump around address on VIRGIN reads */

#if SLOW_ASSERTS
#define SK_ASSERT(x)   sk_assert(x)
#else
#define SK_ASSERT(x)
#endif

/* heavyweight LockSet sanity checking:
   0 == never
   1 == after important ops
   2 == as 1, and also after pthread_mutex_* ops (excessively slow)
 */
#define LOCKSET_SANITY 0

/* Rotate an unsigned quantity left */
#define ROTL(x, n)     (((x) << (n)) | ((x) >> ((sizeof(x)*8)-(n))))

/* Round a up to the next multiple of N.  N must be a power of 2 */
#define ROUNDUP(a, N)  ((a + N - 1) & ~(N-1))

/* Round a down to the next multiple of N.  N must be a power of 2 */
#define ROUNDDN(a, N)  ((a) & ~(N-1))

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

static enum {
   EC_None,
   EC_Some,
   EC_All
} clo_execontext = EC_None;

static Bool clo_priv_stacks = False;

/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

// PPP: work out if I want this

#define PROF_EVENT(x)
#if 0
#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

void VGE_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

//static void init_prof_mem ( void ) { }
//       void VG_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif /* VG_PROFILE_MEMORY */

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   [PPP: snip event numbers...]
*/
#endif /* 0 */


/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct _HG_Chunk {
      struct _HG_Chunk* next;
      Addr         data;            /* ptr to actual block      */
      Int          size;            /* size requested           */
      ExeContext*  where;           /* where it was allocated   */
      ThreadId     tid;             /* allocating thread        */
   }
   HG_Chunk;

typedef enum
   { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit, Vge_Error }
   VgeInitStatus;


/* Should add up to 32 to fit in one word */
#define OTHER_BITS      30
#define STATE_BITS      2

#define ESEC_MAP_WORDS  16384   /* Words per secondary map */

/* This is for indicating that a memory block has been initialised but not
 * really directly by a particular thread... (eg. text/data initialised
 * automatically at startup).
 * Must be different to virgin_word.other */
#define TID_INDICATING_NONVIRGIN    1

/* Magic packed TLS used for error suppression; if word state is Excl
   and tid is this, then it means all accesses are OK without changing
   state and without raising any more errors  */
#define TLSP_INDICATING_ALL    ((1 << OTHER_BITS) - 1)

/* Number of entries must fit in STATE_BITS bits */
typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;

static inline const Char *pp_state(pth_state st)
{
   const Char *ret;

   switch(st) {
   case Vge_Virgin:  ret = "virgin"; break;
   case Vge_Excl:    ret = "exclusive"; break;
   case Vge_Shar:    ret = "shared RO"; break;
   case Vge_SharMod: ret = "shared RW"; break;
   default:          ret = "???";
   }
   return ret;
}

typedef
   struct {
      /* gcc arranges this bitfield with state in the 2 LSBs and other
         in the 30 MSBs, which is what we want */
      UInt state:STATE_BITS;
      UInt other:OTHER_BITS;
   } shadow_word;

#define SW(st, other)   ((shadow_word) { st, other })
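
/* Illustrative note (not from the original source): a shadow_word packs a
   2-bit state into the low bits and a 30-bit payload into the high bits.
   For a word exclusively owned by a thread lifetime segment, the payload
   is the segment pointer shifted right by STATE_BITS, which is lossless
   because the pointer is word-aligned.  A minimal round-trip sketch,
   assuming the packTLS()/unpackTLS() helpers and thread_seg[] defined
   later in this file: */
#if 0
{
   ThreadId tid = 1;                               /* hypothetical thread id,
                                                      for illustration only */
   ThreadLifeSeg *tls = thread_seg[tid];           /* some aligned pointer  */
   shadow_word sw = SW(Vge_Excl, packTLS(tls));    /* pack state + owner    */
   sk_assert(sw.state == Vge_Excl);
   sk_assert(unpackTLS(sw.other) == tls);          /* unpacking recovers it */
}
#endif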

typedef
   struct {
      shadow_word swords[ESEC_MAP_WORDS];
   }
   ESecMap;

static ESecMap* primary_map[ 65536 ];
static ESecMap  distinguished_secondary_map;

static const shadow_word virgin_sword = SW(Vge_Virgin, 0);
static const shadow_word error_sword  = SW(Vge_Excl, TLSP_INDICATING_ALL);

#define VGE_IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                  \
   do {                                                               \
      if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {       \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);     \
         /*VG_(printf)("new 2map because of %p\n", addr);*/           \
      }                                                               \
   } while(0)


/* Parallel map which contains execution contexts when words last
   changed state (if required) */

typedef struct EC_EIP {
   union u_ec_eip {
      Addr        eip;
      ExeContext *ec;
   } uu_ec_eip;
   UInt state:STATE_BITS;
   UInt tls:OTHER_BITS;           /* packed TLS */
} EC_EIP;

#define NULL_EC_EIP ((EC_EIP){ { 0 }, 0, 0})

#define EIP(eip, prev, tls) ((EC_EIP) { (union u_ec_eip)(eip), (prev).state, packTLS(tls) })
#define EC(ec, prev, tls)   ((EC_EIP) { (union u_ec_eip)(ec),  (prev).state, packTLS(tls) })

static inline UInt packEC(ExeContext *ec)
{
   SK_ASSERT(((UInt)ec & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)ec) >> STATE_BITS;
}

static inline ExeContext *unpackEC(UInt i)
{
   return (ExeContext *)(i << STATE_BITS);
}

/* Lose 2 LSB of eip */
static inline UInt packEIP(Addr eip)
{
   return ((UInt)eip) >> STATE_BITS;
}

static inline Addr unpackEIP(UInt i)
{
   return (Addr)(i << STATE_BITS);
}

typedef struct {
   EC_EIP execontext[ESEC_MAP_WORDS];
} ExeContextMap;

static ExeContextMap** execontext_map;

static inline void setExeContext(Addr a, EC_EIP ec)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >>  2) & 0x3fff;

   if (execontext_map[idx] == NULL) {
      execontext_map[idx] = VG_(malloc)(sizeof(ExeContextMap));
      VG_(memset)(execontext_map[idx], 0, sizeof(ExeContextMap));
   }

   execontext_map[idx]->execontext[off] = ec;
}

static inline EC_EIP getExeContext(Addr a)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >>  2) & 0x3fff;
   EC_EIP ec = NULL_EC_EIP;

   if (execontext_map[idx] != NULL)
      ec = execontext_map[idx]->execontext[off];

   return ec;
}

/*------------------------------------------------------------*/
/*--- Thread lifetime segments                             ---*/
/*------------------------------------------------------------*/

/*
 * This mechanism deals with the common case of a parent thread
 * creating a structure for a child thread, and then passing ownership
 * of the structure to that thread.  It similarly copes with a child
 * thread passing information back to another thread waiting to join
 * on it.
 *
 * Each thread's lifetime can be partitioned into segments.  Those
 * segments are arranged to form an interference graph which indicates
 * whether two thread lifetime segments can possibly be concurrent.
 * If not, then memory which is exclusively accessed by one TLS can be
 * passed on to another TLS without an error occurring, and without
 * moving it from Excl state.
 *
 * At present this only considers thread creation and join as
 * synchronisation events for creating new lifetime segments, but
 * others may be possible (like mutex operations).
 */
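
/* Illustrative note (not from the original source): in terms of the
   helpers defined below, the intended pattern at thread creation is
   roughly the following.  Both threads get fresh lifetime segments and
   the child's new segment records the parent's old one as a prior, so
   memory left in Excl state by the parent's earlier segment can be
   touched by the child without a race report.  A minimal sketch,
   assuming hypothetical ThreadIds 'parent' and 'child': */
#if 0
   newTLS(child);                 /* child starts its first segment        */
   addPriorTLS(child, parent);    /* child's segment cannot be concurrent
                                     with the parent's previous segment    */
   newTLS(parent);                /* parent also enters a new segment      */
   /* later, tlsIsDisjoint(thread_seg[child], <segment that last wrote the
      word>) returning True lets the word stay in Excl state. */
#endif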

typedef struct _ThreadLifeSeg ThreadLifeSeg;

struct _ThreadLifeSeg {
   ThreadId      tid;
   ThreadLifeSeg *prior[2];     /* Previous lifetime segments               */
   UInt          refcount;      /* Number of memory locations pointing here */
   UInt          mark;          /* mark used for graph traversal            */
   ThreadLifeSeg *next;         /* list of all TLS                          */
};

static ThreadLifeSeg *all_tls;
static UInt tls_since_gc;
#define TLS_SINCE_GC 10000

/* current mark used for TLS graph traversal */
static UInt tlsmark;

static ThreadLifeSeg *thread_seg[VG_N_THREADS];


static void tls_gc(void)
{
   /* XXX later.  Walk through all TLSs and look for ones with 0
      refcount and remove them from the structure and free them.
      Could probably get rid of ThreadLifeSeg.refcount and simply use
      mark-sweep from the shadow table. */
   VG_(printf)("WRITEME: TLS GC\n");
}

static void newTLS(ThreadId tid)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls;

   /* Initial NULL */
   if (thread_seg[tid] == NULL) {
      tls = VG_(malloc)(sizeof(*tls));
      tls->tid = tid;
      tls->prior[0] = tls->prior[1] = NULL;
      tls->refcount = 0;
      tls->mark = tlsmark-1;

      tls->next = all_tls;
      all_tls = tls;
      tls_since_gc++;

      thread_seg[tid] = tls;
      return;
   }

   /* Previous TLS was unused, so just recycle */
   if (thread_seg[tid]->refcount == 0) {
      if (debug)
         VG_(printf)("newTLS; recycling TLS %p for tid %u\n",
                     thread_seg[tid], tid);
      return;
   }

   /* Use existing TLS for this tid as a prior for new TLS */
   tls = VG_(malloc)(sizeof(*tls));
   tls->tid = tid;
   tls->prior[0] = thread_seg[tid];
   tls->prior[1] = NULL;
   tls->refcount = 0;
   tls->mark = tlsmark-1;

   tls->next = all_tls;
   all_tls = tls;
   if (++tls_since_gc > TLS_SINCE_GC) {
      tls_gc();
      tls_since_gc = 0;
   }

   if (debug)
      VG_(printf)("newTLS: made new TLS %p for tid %u (prior %p(%u))\n",
                  tls, tid, tls->prior[0], tls->prior[0]->tid);

   thread_seg[tid] = tls;
}

/* clear out a TLS for a thread that's died */
static void clearTLS(ThreadId tid)
{
   newTLS(tid);

   thread_seg[tid]->prior[0] = NULL;
   thread_seg[tid]->prior[1] = NULL;
}

static void addPriorTLS(ThreadId tid, ThreadId prior)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls = thread_seg[tid];

   if (debug)
      VG_(printf)("making TLS %p(%u) prior to TLS %p(%u)\n",
                  thread_seg[prior], prior, tls, tid);

   sk_assert(thread_seg[tid] != NULL);
   sk_assert(thread_seg[prior] != NULL);

   if (tls->prior[0] == NULL)
      tls->prior[0] = thread_seg[prior];
   else {
      sk_assert(tls->prior[1] == NULL);
      tls->prior[1] = thread_seg[prior];
   }
}

/* Return True if prior is definitely not concurrent with tls */
static Bool tlsIsDisjoint(const ThreadLifeSeg *tls,
                          const ThreadLifeSeg *prior)
{
   Bool isPrior(const ThreadLifeSeg *t) {
      if (t == NULL || t->mark == tlsmark)
         return False;

      if (t == prior)
         return True;

      ((ThreadLifeSeg *)t)->mark = tlsmark;

      return isPrior(t->prior[0]) || isPrior(t->prior[1]);
   }
   tlsmark++;                   /* new traversal mark */

   return isPrior(tls);
}

static inline UInt packTLS(ThreadLifeSeg *tls)
{
   SK_ASSERT(((UInt)tls & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)tls) >> STATE_BITS;
}

static inline ThreadLifeSeg *unpackTLS(UInt i)
{
   return (ThreadLifeSeg *)(i << STATE_BITS);
}

/*------------------------------------------------------------*/
/*--- Low-level support for memory tracking.               ---*/
/*------------------------------------------------------------*/

/*
   All reads and writes are recorded in the memory map, which
   records the state of all memory in the process.  The memory map is
   organised like that for normal Valgrind, except that everything
   is done at word-level instead of byte-level, and each word has only
   one word of shadow (instead of 36 bits).

   As for normal Valgrind there is a distinguished secondary map.  But we're
   working at word-granularity, so it has 16k word entries instead of 64k byte
   entries.  Lookup is done as follows:

     bits 31..16:   primary map lookup
     bits 15.. 2:   secondary map lookup
     bits  1.. 0:   ignored
*/
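
/* Illustrative note (not from the original source): for a concrete address
   the lookup decomposes as follows.  Take a == 0xBFFFF123: bits 31..16
   (0xBFFF) index primary_map, bits 15..2 (0x3C48) index the secondary
   map's swords[] array, and bits 1..0 are dropped, so all four bytes of
   the enclosing word share one shadow_word.  This is the same arithmetic
   used by get_sword_addr() below:
      sm    = primary_map[a >> 16];
      sword = &sm->swords[(a & 0xFFFC) >> 2];   */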


/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map, marking all words as virgin. */

/* Just a value that isn't a real pointer */
#define SEC_MAP_ACCESS (shadow_word*)0x99


static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt  i;
   //PROF_EVENT(10); PPP

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious.  (SSS: not true for ESecMaps -- they're 16 pages) */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(ESecMap), caller );

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}


/* Set a word.  The byte given by 'a' could be anywhere in the word -- the
 * whole word gets set. */
static __inline__
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;
   shadow_word *oldsw;

   //PROF_EVENT(23); PPP
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   oldsw = &sm->swords[(a & 0xFFFC) >> 2];
   if (oldsw->state == Vge_Excl && oldsw->other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(oldsw->other);
      tls->refcount--;
   }

   if (sword.state == Vge_Excl && sword.other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(sword.other);
      tls->refcount++;
   }

   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}


static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}


// SSS: rename these so they're not so similar to memcheck, unless it's
// appropriate of course

static __inline__
void init_virgin_sword(Addr a)
{
   if (clo_execontext != EC_None)
      setExeContext(a, NULL_EC_EIP);
   set_sword(a, virgin_sword);
}

static __inline__
void init_error_sword(Addr a)
{
   set_sword(a, error_sword);
}

static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();
   ThreadLifeSeg *tls;

   sk_assert(tid != VG_INVALID_THREADID);
   tls = thread_seg[tid];

   sword = SW(Vge_Excl, packTLS(tls));
   set_sword(a, sword);
}


/* In this case, we treat it for Eraser's sake like virgin (it hasn't
 * been inited by a particular thread, it's just done automatically upon
 * startup), but we mark its .state specially so it doesn't look like an
 * uninited read. */
static __inline__
void init_magically_inited_sword(Addr a)
{
   shadow_word sword;

   sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());

   sword = SW(Vge_Virgin, TID_INDICATING_NONVIRGIN);

   set_sword(a, virgin_sword);
}


/*------------------------------------------------------------*/
/*--- Implementation of lock sets.                         ---*/
/*------------------------------------------------------------*/

typedef struct _Mutex Mutex;            /* forward decl */
typedef struct _LockSet LockSet;

typedef enum MutexState {
   MxUnknown,                   /* don't know */
   MxUnlocked,                  /* unlocked   */
   MxLocked,                    /* locked     */
   MxDead                       /* destroyed  */
} MutexState;

struct _Mutex {
   Addr               mutexp;
   Mutex              *next;

   MutexState         state;    /* mutex state                      */
   ThreadId           tid;      /* owner                            */
   ExeContext         *location; /* where the last change happened  */

   const LockSet      *lockdep; /* set of locks we depend on        */
   UInt               mark;     /* mark for graph traversal         */
};

static inline Int mutex_cmp(const Mutex *a, const Mutex *b)
{
   return a->mutexp - b->mutexp;
}

struct _LockSet {
   Int                setsize;  /* number of members                */
   UInt               hash;     /* hash code                        */
   LockSet            *next;    /* next in hash chain               */
   const Mutex        *mutex[0]; /* locks                           */
};
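
/* Illustrative note (not from the original source): a LockSet is an
   immutable array of Mutex pointers kept sorted by mutex address
   (mutex_cmp), and each distinct set is stored exactly once in
   lockset_hash below.  Because sets are hash-consed this way, two
   LockSet pointers are equal iff the sets they denote are equal, which
   is what allows a packed 30-bit LockSet pointer to live in
   shadow_word.other. */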

static const LockSet *emptyset;

/* Each one is an index into the lockset table. */
static const LockSet *thread_locks[VG_N_THREADS];

#define LOCKSET_HASH_SZ 1021

static LockSet *lockset_hash[LOCKSET_HASH_SZ];

/* Pack and unpack a LockSet pointer into shadow_word.other */
static inline UInt packLockSet(const LockSet *p)
{
   UInt id;

   SK_ASSERT(((UInt)p & ((1 << STATE_BITS)-1)) == 0);
   id = ((UInt)p) >> STATE_BITS;

   return id;
}

static inline const LockSet *unpackLockSet(UInt id)
{
   return (LockSet *)(id << STATE_BITS);
}

static
void pp_LockSet(const LockSet* p)
{
   Int i;
   VG_(printf)("{ ");
   for(i = 0; i < p->setsize; i++) {
      const Mutex *mx = p->mutex[i];

      VG_(printf)("%p%(y ", mx->mutexp, mx->mutexp);
   }
   VG_(printf)("}\n");
}


static void print_LockSet(const Char *s, const LockSet *ls)
{
   VG_(printf)("%s: ", s);
   pp_LockSet(ls);
}

/* Compute the hash of a LockSet */
static inline UInt hash_LockSet_w_wo(const LockSet *ls,
                                     const Mutex *with,
                                     const Mutex *without)
{
   Int  i;
   UInt hash = ls->setsize + (with != NULL) - (without != NULL);

   sk_assert(with == NULL || with != without);

   for(i = 0; with != NULL || i < ls->setsize; i++) {
      const Mutex *mx = i >= ls->setsize ? NULL : ls->mutex[i];

      if (without && mutex_cmp(without, mx) == 0)
         continue;

      if (with && (mx == NULL || mutex_cmp(with, mx) < 0)) {
         mx = with;
         with = NULL;
         i--;
      }

      hash = ROTL(hash, 17);
      hash ^= (UInt)mx->mutexp;
   }

   return hash % LOCKSET_HASH_SZ;
}

static inline UInt hash_LockSet_with(const LockSet *ls, const Mutex *with)
{
   UInt hash = hash_LockSet_w_wo(ls, with, NULL);

   if (0)
      VG_(printf)("hash_with %p+%p -> %d\n", ls, with->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet_without(const LockSet *ls, const Mutex *without)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, without);

   if (0)
      VG_(printf)("hash_with %p-%p -> %d\n", ls, without->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet(const LockSet *ls)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, NULL);

   if (0)
      VG_(printf)("hash %p -> %d\n", ls, hash);

   return hash;
}

static
Bool structural_eq_LockSet(const LockSet* a, const LockSet* b)
{
   Int i;

   if (a == b)
      return True;
   if (a->setsize != b->setsize)
      return False;

   for(i = 0; i < a->setsize; i++) {
      if (mutex_cmp(a->mutex[i], b->mutex[i]) != 0)
         return False;
   }

   return True;
}


/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion.  Returns True if they match.
 */
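/* Illustrative note (not from the original source): for example, with
   locksets a = {m1, m3} and b = {m1, m2, m3} (both sorted by mutex
   address) and missing_mutex = m2, weird_LockSet_equals(a, b, m2)
   returns True, because inserting m2 into a would yield exactly b --
   yet no temporary set is ever built.  With missing_mutex = m4 it
   would return False.  (m1..m4 are hypothetical mutexes.) */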
static Bool
weird_LockSet_equals(const LockSet* a, const LockSet* b,
                     const Mutex *missing_mutex)
{
   static const Bool debug = False;
   Int ia, ib;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet("                     b", b);
      VG_(printf)(  "               missing: %p%(y\n",
                    missing_mutex->mutexp, missing_mutex->mutexp);
   }

   if ((a->setsize + 1) != b->setsize) {
      if (debug)
         VG_(printf)("   fastpath length mismatch -> 0\n");
      return False;
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing mutex itself
      3 the section after missing_mutex to the end of a
    */

   ia = 0;
   ib = 0;

   /* 1: up to missing_mutex */
   for(; ia < a->setsize && mutex_cmp(a->mutex[ia], missing_mutex) < 0; ia++, ib++) {
      if (debug) {
         print_LockSet("     1:a", a);
         print_LockSet("     1:b", b);
      }
      if (ib == b->setsize || mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   /* 2: missing_mutex itself */
   if (debug) {
      VG_(printf)(  "     2:missing: %p%(y\n",
                    missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet("     2:      b", b);
   }

   sk_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);

   if (ib == b->setsize || mutex_cmp(missing_mutex, b->mutex[ib]) != 0)
      return False;

   ib++;

   /* 3: after missing_mutex to end */

   for(; ia < a->setsize && ib < b->setsize; ia++, ib++) {
      if (debug) {
         print_LockSet("     3:a", a);
         print_LockSet("     3:b", b);
      }
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   if (debug)
      VG_(printf)("  ia=%d ib=%d --> %d\n", ia, ib, ia == a->setsize && ib == b->setsize);

   return ia == a->setsize && ib == b->setsize;
}



static const LockSet *lookup_LockSet(const LockSet *set)
{
   UInt bucket = set->hash;
   LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (set == ret || structural_eq_LockSet(set, ret))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_with(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_with(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(set, ret, mutex))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_without(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_without(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(ret, set, mutex))
         return ret;

   return NULL;
}

static void insert_LockSet(LockSet *set)
{
   UInt hash = hash_LockSet(set);

   set->hash = hash;

   sk_assert(lookup_LockSet(set) == NULL);

   set->next = lockset_hash[hash];
   lockset_hash[hash] = set;
}

static inline
LockSet *alloc_LockSet(UInt setsize)
{
   LockSet *ret = VG_(malloc)(sizeof(*ret) + sizeof(Mutex *) * setsize);
   ret->setsize = setsize;
   return ret;
}

static inline
void free_LockSet(LockSet *p)
{
   /* assert: not present in hash */
   VG_(free)(p);
}

static
void pp_all_LockSets ( void )
{
   Int i;
   Int sets, buckets;

   sets = buckets = 0;
   for (i = 0; i < LOCKSET_HASH_SZ; i++) {
      const LockSet *ls = lockset_hash[i];
      Bool first = True;

      for(; ls != NULL; ls = ls->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)("         ");

         sets++;
         first = False;
         pp_LockSet(ls);
      }
   }

   VG_(printf)("%d distinct LockSets in %d buckets\n", sets, buckets);
}

static inline Bool isempty(const LockSet *ls)
{
   return ls == NULL || ls->setsize == 0;
}

static Bool ismember(const LockSet *ls, const Mutex *mx)
{
   Int i;

   /* XXX use binary search */
   for(i = 0; i < ls->setsize; i++)
      if (mutex_cmp(mx, ls->mutex[i]) == 0)
         return True;

   return False;
}

/* Check invariants:
   - all locksets are unique
   - each set is an array in strictly increasing order of mutex addr
*/
static
void sanity_check_locksets ( const Char* caller )
{
   Int              i;
   const Char       *badness;
   LockSet          *ls;

   for(i = 0; i < LOCKSET_HASH_SZ; i++) {

      for(ls = lockset_hash[i]; ls != NULL; ls = ls->next) {
         const Mutex *prev;
         Int j;

         if (hash_LockSet(ls) != ls->hash) {
            badness = "mismatched hash";
            goto bad;
         }
         if (ls->hash != (UInt)i) {
            badness = "wrong bucket";
            goto bad;
         }
         if (lookup_LockSet(ls) != ls) {
            badness = "non-unique set";
            goto bad;
         }

         prev = ls->mutex[0];
         for(j = 1; j < ls->setsize; j++) {
            if (mutex_cmp(prev, ls->mutex[j]) >= 0) {
               badness = "mutexes out of order";
               goto bad;
            }
         }
      }
   }
   return;

  bad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, ls=%p badness = %s, caller = %s\n",
               i, ls, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}

static
LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("add-IN mutex %p%(y\n", mx->mutexp, mx->mutexp);
      print_LockSet("add-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("add-IN");

   sk_assert(!ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize+1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (debug)
         VG_(printf)("i=%d j=%d ls->mutex[i]=%p mx=%p\n",
                     i, j, ls->mutex[i]->mutexp, mx ? mx->mutexp : 0);
      if (mx && mutex_cmp(mx, ls->mutex[i]) < 0) {
         ret->mutex[j++] = mx;
         mx = NULL;
      }
      ret->mutex[j++] = ls->mutex[i];
   }

   /* not added in loop - must be after */
   if (mx)
      ret->mutex[j++] = mx;

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("add-OUT", ret);
      sanity_check_locksets("add-OUT");
   }
   return ret;
}

/* Builds ls with mx removed.  mx should actually be in ls!
   (a checked assertion).  Resulting set should not already
   exist in the table (unchecked).
*/
static
LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("remove-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("remove-IN");

   sk_assert(ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize-1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (mutex_cmp(ls->mutex[i], mx) == 0)
         continue;
      ret->mutex[j++] = ls->mutex[i];
   }

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("remove-OUT", ret);
      sanity_check_locksets("remove-OUT");
   }
   return ret;
}


/* Builds the intersection, and then unbuilds it if it's already in the table.
 */
static const LockSet *_intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("intersect-IN");

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("intersect a", a);
      print_LockSet("intersect b", b);
   }

   /* count the size of the new set */
   size = 0;
   ia = ib = 0;
   for(size = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         size++;
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   /* Build the intersection of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         sk_assert(iret < ret->setsize);
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      free_LockSet(ret);
   } else {
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("intersect-OUT", found);
      sanity_check_locksets("intersect-OUT");
   }

   return found;
}

/* inline the fastpath */
static inline const LockSet *intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("intersect-same fastpath", a);
      }
      return a;
   }

   if (isempty(a) || isempty(b)) {
      if (debug)
         VG_(printf)("intersect empty fastpath\n");
      return emptyset;
   }

   return _intersect(a, b);
}


static const LockSet *ls_union(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("union-IN");

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("union-same fastpath", a);
      }
      return a;
   }

   if (isempty(a)) {
      if (debug)
         print_LockSet("union a=empty b", b);
      return b;
   }
   if (isempty(b)) {
      if (debug)
         print_LockSet("union b=empty a", a);
      return a;
   }

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* count the size of the new set */
   for(size = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         size++;
         ia++;
         ib++;
      } else if (cmp < 0) {
         size++;
         ia++;
      } else {
         sk_assert(cmp > 0);
         size++;
         ib++;
      }
   }

   /* Build the union of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;
      sk_assert(iret < ret->setsize);

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (cmp < 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
      } else {
         sk_assert(cmp > 0);
         ret->mutex[iret++] = b->mutex[ib];
         ib++;
      }
   }

   sk_assert(iret == ret->setsize);

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      if (debug)
         print_LockSet("union found existing set", found);
      free_LockSet(ret);
   } else {
      if (debug)
         print_LockSet("union inserting new set", ret);
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("union-OUT", found);
      sanity_check_locksets("union-OUT");
   }

   return found;
}

/*------------------------------------------------------------*/
/*--- Implementation of mutex structure.                   ---*/
/*------------------------------------------------------------*/

static UInt graph_mark;         /* current mark we're using for graph traversal */

static void record_mutex_error(ThreadId tid, Mutex *mutex,
                               Char *str, ExeContext *ec);
static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
                                   const LockSet *lockset_holding,
                                   const LockSet *lockset_prev);

static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid);

#define M_MUTEX_HASHSZ 1021

static Mutex *mutex_hash[M_MUTEX_HASHSZ];
static UInt total_mutexes;

static const Char *pp_MutexState(MutexState st)
{
   switch(st) {
   case MxLocked:   return "Locked";
   case MxUnlocked: return "Unlocked";
   case MxDead:     return "Dead";
   case MxUnknown:  return "Unknown";
   }
   return "???";
}

static void pp_all_mutexes()
{
   Int i;
   Int locks, buckets;

   locks = buckets = 0;
   for(i = 0; i < M_MUTEX_HASHSZ; i++) {
      Mutex *mx;
      Bool first = True;

      for(mx = mutex_hash[i]; mx != NULL; mx = mx->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)("         ");
         locks++;
         first = False;
         VG_(printf)("%p [%8s] -> %p%(y\n",
                     mx, pp_MutexState(mx->state), mx->mutexp, mx->mutexp);
      }
   }

   VG_(printf)("%d locks in %d buckets (%d allocated)\n",
               locks, buckets, total_mutexes);
}

/* find or create a Mutex for a program's mutex use */
static Mutex *get_mutex(Addr mutexp)
{
   UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
   Mutex *mp;

   for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
      if (mp->mutexp == mutexp)
         return mp;

   total_mutexes++;

   mp = VG_(malloc)(sizeof(*mp));
   mp->mutexp = mutexp;
   mp->next = mutex_hash[bucket];
   mutex_hash[bucket] = mp;

   mp->state = MxUnknown;
   mp->tid = VG_INVALID_THREADID;
   mp->location = NULL;

   mp->lockdep = emptyset;
   mp->mark = graph_mark - 1;

   return mp;
}

/* Find all mutexes in a range of memory, and call the callback.
   Remove the mutex from the hash if the callback returns True (the mutex
   structure itself is not freed, because it may be pointed to by a
   LockSet). */
static void find_mutex_range(Addr start, Addr end, Bool (*action)(Mutex *))
{
   UInt first = start % M_MUTEX_HASHSZ;
   UInt last = (end+1) % M_MUTEX_HASHSZ;
   UInt i;

   /* Single pass over the hash table, looking for likely hashes */
   for(i = first; i != last; ) {
      Mutex *mx;
      Mutex **prev = &mutex_hash[i];

      for(mx = mutex_hash[i]; mx != NULL; prev = &mx->next, mx = mx->next) {
         if (mx->mutexp >= start && mx->mutexp < end && (*action)(mx))
            *prev = mx->next;
      }

      if (++i == M_MUTEX_HASHSZ)
         i = 0;
   }
}

#define MARK_LOOP  (graph_mark+0)
#define MARK_DONE  (graph_mark+1)

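/* Illustrative note (not from the original source): check_cycle() asks
   whether 'start' is reachable from any mutex in 'lockset' by following
   lockdep edges, i.e. whether recording "lockset was held while taking
   start" would close a cycle in the lock-order graph.  Bumping graph_mark
   by 2 invalidates every old MARK_LOOP/MARK_DONE mark at once, so no
   per-node clearing pass is needed.  For example, if m2 was ever taken
   while holding m1 (so m1 is in m2->lockdep), then later taking m1 while
   holding {m2} is reported as a potential deadlock. */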
static Bool check_cycle(const Mutex *start, const LockSet* lockset)
{
   Bool check_cycle_inner(const Mutex *mutex, const LockSet *ls)
   {
      static const Bool debug = False;
      Int i;

      if (mutex->mark == MARK_LOOP)
         return True;           /* found cycle */
      if (mutex->mark == MARK_DONE)
         return False;          /* been here before, it's OK */

      ((Mutex*)mutex)->mark = MARK_LOOP;

      if (debug)
         VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
                     graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
      for(i = 0; i < ls->setsize; i++) {
         const Mutex *mx = ls->mutex[i];

         if (debug)
            VG_(printf)("   %y ls=%p (ls->mutex=%p%(y)\n",
                        mutex->mutexp, ls,
                        mx->mutexp, mx->mutexp);
         if (check_cycle_inner(mx, mx->lockdep))
            return True;
      }
      ((Mutex*)mutex)->mark = MARK_DONE;

      return False;
   }

   graph_mark += 2;             /* clear all marks */

   return check_cycle_inner(start, lockset);
}

/* Test to see if a mutex state change would be problematic; this
   makes no changes to the mutex state.  This should be called before
   the locking thread has actually blocked. */
static void test_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
   static const Bool debug = False;

   if (mutex->state == MxDead) {
      Char *str;

      switch(state) {
      case MxLocked:   str = "lock dead mutex"; break;
      case MxUnlocked: str = "unlock dead mutex"; break;
      default:         str = "operate on dead mutex"; break;
      }

      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex, str, mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (check_cycle(mutex, thread_locks[tid]))
         record_lockgraph_error(tid, mutex, thread_locks[tid], mutex->lockdep);
      else {
         mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

         if (debug) {
            VG_(printf)("giving mutex %p%(y lockdep = %p ",
                        mutex->mutexp, mutex->mutexp, mutex->lockdep);
            print_LockSet("lockdep", mutex->lockdep);
         }
      }
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked) {
         record_mutex_error(tid, mutex,
                            "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
         record_mutex_error(tid, mutex,
                            "unlock someone else's mutex", mutex->location);
      }
      break;

   case MxDead:
      break;

   default:
      break;
   }
}
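
/* Illustrative note (not from the original source): the intended calling
   pattern is test-then-set.  A pre-lock hook runs test_mutex_state()
   before the thread blocks, the thread then sleeps in the real
   pthread_mutex_lock(), and a post-lock hook runs set_mutex_state()
   once the lock is actually held.  A minimal sketch, with hypothetical
   variables (mutexp = client mutex address, tid = locking thread): */
#if 0
   /* in the pre-lock callback */
   test_mutex_state(get_mutex(mutexp), MxLocked, tid);
   /* ... thread blocks until it owns the lock ... */
   /* in the post-lock callback */
   set_mutex_state(get_mutex(mutexp), MxLocked, tid);
#endif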

/* Update a mutex state.  Expects most error testing and reporting to
   have happened in test_mutex_state().  The assumption is that no
   client code is run by thread tid between test and set, either
   because it is blocked or test and set are called together
   atomically.

   Setting state to MxDead is the exception, since that can happen as
   a result of any thread freeing memory; in this case set_mutex_state
   does all the error reporting as well.
*/
static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
   static const Bool debug = False;

   if (debug)
      VG_(printf)("\ntid %d changing mutex (%p)->%p%(y state %s -> %s\n",
                  tid, mutex, mutex->mutexp, mutex->mutexp,
                  pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      return;
   }

   switch(state) {
   case MxLocked:
      if (mutex->state == MxLocked) {
         if (mutex->tid != tid)
            record_mutex_error(tid, mutex, "take lock held by someone else",
                               mutex->location);
         else
            record_mutex_error(tid, mutex, "take lock we already hold",
                               mutex->location);

         VG_(skin_panic)("core should have checked this\n");
         break;
      }

      sk_assert(!check_cycle(mutex, mutex->lockdep));

      mutex->tid = tid;
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked || mutex->tid != tid)
         break;

      mutex->tid = VG_INVALID_THREADID;
      break;

   case MxDead:
      if (mutex->state == MxLocked) {
         /* forcibly remove offending lock from thread's lockset */
         sk_assert(ismember(thread_locks[mutex->tid], mutex));
         thread_locks[mutex->tid] = remove_LockSet(thread_locks[mutex->tid], mutex);
         mutex->tid = VG_INVALID_THREADID;

         record_mutex_error(tid, mutex,
                            "free locked mutex", mutex->location);
      }
      break;

   default:
      break;
   }

   mutex->location = VG_(get_ExeContext)(tid);
   mutex->state = state;
}

/*------------------------------------------------------------*/
/*--- Setting and checking permissions.                    ---*/
/*------------------------------------------------------------*/

static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr end;

   /* only clean up dead mutexes */
   Bool cleanmx(Mutex *mx) {
      return mx->state == MxDead;
   }


#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range state: large range %d",
                   len);

   VGP_PUSHCC(VgpSARP);

   /* Remove mutexes in recycled memory range from hash */
   find_mutex_range(a, a+len, cleanmx);

   /* Memory block may not be aligned or a whole word multiple.  In neat cases,
    * we have to init len/4 words (len is in bytes).  In nasty cases, it's
    * len/4+1 words.  This works out which it is by aligning the block and
    * seeing if the end byte is in the same word as it is for the unaligned
    * block; if not, it's the awkward case. */
sewardj8fac99a2002-11-13 22:31:26 +00001621 end = ROUNDUP(a + len, 4);
1622 a = ROUNDDN(a, 4);
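   /* Worked example of the rounding above (illustrative values only): with
      a = 0x1003 and len = 5, ROUNDDN gives a = 0x1000 and ROUNDUP gives
      end = 0x1008, so the loops below initialise the two words at 0x1000
      and 0x1004, covering every byte of the original range. */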
njn25e49d8e72002-09-23 09:36:25 +00001623
1624 /* Do it ... */
1625 switch (status) {
1626 case Vge_VirginInit:
1627 for ( ; a < end; a += 4) {
1628 //PROF_EVENT(31); PPP
1629 init_virgin_sword(a);
1630 }
1631 break;
1632
1633 case Vge_NonVirginInit:
1634 for ( ; a < end; a += 4) {
1635 //PROF_EVENT(31); PPP
1636 init_nonvirgin_sword(a);
1637 }
1638 break;
1639
1640 case Vge_SegmentInit:
1641 for ( ; a < end; a += 4) {
1642 //PROF_EVENT(31); PPP
1643 init_magically_inited_sword(a);
1644 }
1645 break;
sewardj7f3ad222002-11-13 22:11:53 +00001646
1647 case Vge_Error:
1648 for ( ; a < end; a += 4) {
1649 //PROF_EVENT(31); PPP
1650 init_error_sword(a);
1651 }
1652 break;
njn25e49d8e72002-09-23 09:36:25 +00001653
1654 default:
1655 VG_(printf)("init_status = %u\n", status);
njne427a662002-10-02 11:08:25 +00001656 VG_(skin_panic)("Unexpected Vge_InitStatus");
njn25e49d8e72002-09-23 09:36:25 +00001657 }
1658
1659 /* Check that zero page and highest page have not been written to
1660 -- this could happen with buggy syscall wrappers. Today
1661 (2001-04-26) had precisely such a problem with
1662 __NR_setitimer. */
njne427a662002-10-02 11:08:25 +00001663 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +00001664 VGP_POPCC(VgpSARP);
1665}
1666
1667
1668static void make_segment_readable ( Addr a, UInt len )
1669{
1670 //PROF_EVENT(??); PPP
1671 set_address_range_state ( a, len, Vge_SegmentInit );
1672}
1673
1674static void make_writable ( Addr a, UInt len )
1675{
1676 //PROF_EVENT(36); PPP
1677 set_address_range_state( a, len, Vge_VirginInit );
1678}
1679
1680static void make_readable ( Addr a, UInt len )
1681{
1682 //PROF_EVENT(37); PPP
sewardj499e3de2002-11-13 22:22:25 +00001683 set_address_range_state( a, len, Vge_VirginInit );
njn25e49d8e72002-09-23 09:36:25 +00001684}
1685
1686
njn25e49d8e72002-09-23 09:36:25 +00001687/* Block-copy states (needed for implementing realloc()). */
1688static void copy_address_range_state(Addr src, Addr dst, UInt len)
1689{
1690 UInt i;
1691
1692 //PROF_EVENT(40); PPP
1693 for (i = 0; i < len; i += 4) {
1694 shadow_word sword = *(get_sword_addr ( src+i ));
1695 //PROF_EVENT(41); PPP
1696 set_sword ( dst+i, sword );
1697 }
1698}
1699
1700// SSS: put these somewhere better
njn72718642003-07-24 08:45:32 +00001701static void eraser_mem_read (Addr a, UInt data_size, ThreadId tid);
1702static void eraser_mem_write(Addr a, UInt data_size, ThreadId tid);
sewardja5b3aec2002-10-22 05:09:36 +00001703
1704#define REGPARM(x) __attribute__((regparm (x)))
1705
1706static void eraser_mem_help_read_1(Addr a) REGPARM(1);
1707static void eraser_mem_help_read_2(Addr a) REGPARM(1);
1708static void eraser_mem_help_read_4(Addr a) REGPARM(1);
1709static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);
1710
1711static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
1712static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
1713static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
1714static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);
njn25e49d8e72002-09-23 09:36:25 +00001715
sewardj7a5ebcf2002-11-13 22:42:13 +00001716static void bus_lock(void);
1717static void bus_unlock(void);
1718
njn25e49d8e72002-09-23 09:36:25 +00001719static
njn72718642003-07-24 08:45:32 +00001720void eraser_pre_mem_read(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001721 Char* s, UInt base, UInt size )
1722{
njn72718642003-07-24 08:45:32 +00001723   if (tid > 50) { VG_(printf)("tid = %d, s = `%s`, part = %d\n", tid, s, part); VG_(skin_panic)("a");}
1724 eraser_mem_read(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001725}
1726
1727static
njn72718642003-07-24 08:45:32 +00001728void eraser_pre_mem_read_asciiz(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001729 Char* s, UInt base )
1730{
njn72718642003-07-24 08:45:32 +00001731 eraser_mem_read(base, VG_(strlen)((Char*)base), tid);
njn25e49d8e72002-09-23 09:36:25 +00001732}
1733
1734static
njn72718642003-07-24 08:45:32 +00001735void eraser_pre_mem_write(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001736 Char* s, UInt base, UInt size )
1737{
njn72718642003-07-24 08:45:32 +00001738 eraser_mem_write(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001739}
1740
1741
1742
1743static
1744void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
1745{
njn1f3a9092002-10-04 09:22:30 +00001746 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +00001747 make_segment_readable(a, len);
1748}
1749
1750
1751static
1752void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
1753{
1754 if (is_inited) {
1755 make_readable(a, len);
1756 } else {
1757 make_writable(a, len);
1758 }
1759}
1760
1761static
1762void eraser_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +00001763 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +00001764{
1765 if (rr) make_readable(a, len);
1766 else if (ww) make_writable(a, len);
1767 /* else do nothing */
1768}
1769
sewardjf6374322002-11-13 22:35:55 +00001770static
1771void eraser_new_mem_stack_private(Addr a, UInt len)
1772{
1773 set_address_range_state(a, len, Vge_NonVirginInit);
1774}
1775
1776static
1777void eraser_new_mem_stack(Addr a, UInt len)
1778{
1779 set_address_range_state(a, len, Vge_VirginInit);
1780}
njn25e49d8e72002-09-23 09:36:25 +00001781
1782/*--------------------------------------------------------------*/
1783/*--- Initialise the memory audit system on program startup. ---*/
1784/*--------------------------------------------------------------*/
1785
1786static
1787void init_shadow_memory(void)
1788{
1789 Int i;
1790
1791 for (i = 0; i < ESEC_MAP_WORDS; i++)
1792 distinguished_secondary_map.swords[i] = virgin_sword;
1793
1794 /* These entries gradually get overwritten as the used address
1795 space expands. */
1796 for (i = 0; i < 65536; i++)
1797 primary_map[i] = &distinguished_secondary_map;
1798}
1799
1800
njn3e884182003-04-15 13:03:23 +00001801/*------------------------------------------------------------*/
1802/*--- malloc() et al replacements ---*/
1803/*------------------------------------------------------------*/
1804
njnb4aee052003-04-15 14:09:58 +00001805static VgHashTable hg_malloc_list = NULL;
njn3e884182003-04-15 13:03:23 +00001806
1807#define N_FREED_CHUNKS 2
1808static Int freechunkptr = 0;
1809static HG_Chunk *freechunks[N_FREED_CHUNKS];
1810
1811/* Use a small redzone (paranoia) */
1812UInt VG_(vg_malloc_redzone_szB) = 4;
1813
1814
1815/* Allocate a user-chunk of size bytes. Also allocate its shadow
1816 block, make the shadow block point at the user block. Put the
1817 shadow chunk on the appropriate list, and set all memory
1818 protections correctly. */
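/* Note on the bookkeeping (descriptive; inferred from the lookups further
   down): hg_malloc_list is a hash table keyed on the client address p, so
   handle_free(), SK_(realloc)() and describe_addr() can later map an address
   back to its HG_Chunk via VG_(HT_get_node)(hg_malloc_list, (UInt)p, ...). */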
1819
njn72718642003-07-24 08:45:32 +00001820static void add_HG_Chunk ( ThreadId tid, Addr p, UInt size )
njn3e884182003-04-15 13:03:23 +00001821{
1822 HG_Chunk* hc;
1823
1824 hc = VG_(malloc)(sizeof(HG_Chunk));
1825 hc->data = p;
1826 hc->size = size;
njn72718642003-07-24 08:45:32 +00001827 hc->where = VG_(get_ExeContext)(tid);
1828 hc->tid = tid;
njn3e884182003-04-15 13:03:23 +00001829
1830 VG_(HT_add_node)( hg_malloc_list, (VgHashNode*)hc );
1831}
1832
1833/* Allocate memory and note change in memory available */
1834static __inline__
njn72718642003-07-24 08:45:32 +00001835void* alloc_and_new_mem ( UInt size, UInt alignment, Bool is_zeroed )
njn3e884182003-04-15 13:03:23 +00001836{
1837 Addr p;
1838
1839 p = (Addr)VG_(cli_malloc)(alignment, size);
njn72718642003-07-24 08:45:32 +00001840 add_HG_Chunk ( VG_(get_current_or_recent_tid)(), p, size );
njn3e884182003-04-15 13:03:23 +00001841 eraser_new_mem_heap( p, size, is_zeroed );
1842
1843 return (void*)p;
1844}
1845
njn72718642003-07-24 08:45:32 +00001846void* SK_(malloc) ( Int n )
njn3e884182003-04-15 13:03:23 +00001847{
njn72718642003-07-24 08:45:32 +00001848 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001849}
1850
njn72718642003-07-24 08:45:32 +00001851void* SK_(__builtin_new) ( Int n )
njn3e884182003-04-15 13:03:23 +00001852{
njn72718642003-07-24 08:45:32 +00001853 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001854}
1855
njn72718642003-07-24 08:45:32 +00001856void* SK_(__builtin_vec_new) ( Int n )
njn3e884182003-04-15 13:03:23 +00001857{
njn72718642003-07-24 08:45:32 +00001858 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001859}
1860
njn72718642003-07-24 08:45:32 +00001861void* SK_(memalign) ( Int align, Int n )
njn3e884182003-04-15 13:03:23 +00001862{
njn72718642003-07-24 08:45:32 +00001863 return alloc_and_new_mem ( n, align, /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001864}
1865
njn72718642003-07-24 08:45:32 +00001866void* SK_(calloc) ( Int nmemb, Int size1 )
njn3e884182003-04-15 13:03:23 +00001867{
1868 void* p;
1869 Int size, i;
1870
1871 size = nmemb * size1;
1872
njn72718642003-07-24 08:45:32 +00001873 p = alloc_and_new_mem ( size, VG_(clo_alignment), /*is_zeroed*/True );
njn3e884182003-04-15 13:03:23 +00001874 for (i = 0; i < size; i++) /* calloc() is zeroed */
1875 ((UChar*)p)[i] = 0;
1876 return p;
1877}
1878
1879static
njn72718642003-07-24 08:45:32 +00001880void die_and_free_mem ( ThreadId tid, HG_Chunk* hc,
njn3e884182003-04-15 13:03:23 +00001881 HG_Chunk** prev_chunks_next_ptr )
1882{
njn72718642003-07-24 08:45:32 +00001883 Addr start = hc->data;
1884 Addr end = start + hc->size;
njn3e884182003-04-15 13:03:23 +00001885
1886 Bool deadmx(Mutex *mx) {
1887 if (mx->state != MxDead)
njn72718642003-07-24 08:45:32 +00001888 set_mutex_state(mx, MxDead, tid);
njn3e884182003-04-15 13:03:23 +00001889
1890 return False;
1891 }
1892
1893 /* Remove hc from the malloclist using prev_chunks_next_ptr to
1894 avoid repeating the hash table lookup. Can't remove until at least
1895 after free and free_mismatch errors are done because they use
1896 describe_addr() which looks for it in malloclist. */
1897 *prev_chunks_next_ptr = hc->next;
1898
1899 /* Record where freed */
njn72718642003-07-24 08:45:32 +00001900 hc->where = VG_(get_ExeContext) ( tid );
njn3e884182003-04-15 13:03:23 +00001901
1902 /* maintain a small window so that the error reporting machinery
1903 knows about this memory */
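   /* freechunks[] acts as a small ring buffer of the last N_FREED_CHUNKS
      freed blocks: the slot's previous occupant is really released just
      below before hc takes its place, so describe_addr() can still
      attribute accesses to recently freed memory. */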
1904 if (freechunks[freechunkptr] != NULL) {
1905 /* free HG_Chunk */
1906 HG_Chunk* sc1 = freechunks[freechunkptr];
1907 VG_(cli_free) ( (void*)(sc1->data) );
1908 VG_(free) ( sc1 );
1909 }
1910
1911 freechunks[freechunkptr] = hc;
1912
1913 if (++freechunkptr == N_FREED_CHUNKS)
1914 freechunkptr = 0;
1915
1916 /* mark all mutexes in range dead */
1917 find_mutex_range(start, end, deadmx);
1918}
1919
1920
1921static __inline__
njn72718642003-07-24 08:45:32 +00001922void handle_free ( void* p )
njn3e884182003-04-15 13:03:23 +00001923{
1924 HG_Chunk* hc;
1925 HG_Chunk** prev_chunks_next_ptr;
1926
1927 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1928 (VgHashNode***)&prev_chunks_next_ptr );
1929 if (hc == NULL) {
1930 return;
1931 }
njn72718642003-07-24 08:45:32 +00001932 die_and_free_mem ( VG_(get_current_or_recent_tid)(),
1933 hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001934}
1935
njn72718642003-07-24 08:45:32 +00001936void SK_(free) ( void* p )
njn3e884182003-04-15 13:03:23 +00001937{
njn72718642003-07-24 08:45:32 +00001938 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001939}
1940
njn72718642003-07-24 08:45:32 +00001941void SK_(__builtin_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001942{
njn72718642003-07-24 08:45:32 +00001943 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001944}
1945
njn72718642003-07-24 08:45:32 +00001946void SK_(__builtin_vec_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001947{
njn72718642003-07-24 08:45:32 +00001948 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001949}
1950
njn72718642003-07-24 08:45:32 +00001951void* SK_(realloc) ( void* p, Int new_size )
njn3e884182003-04-15 13:03:23 +00001952{
1953 HG_Chunk *hc;
1954 HG_Chunk **prev_chunks_next_ptr;
sewardj05bcdcb2003-05-18 10:05:38 +00001955 Int i;
njn72718642003-07-24 08:45:32 +00001956 ThreadId tid = VG_(get_current_or_recent_tid)();
njn3e884182003-04-15 13:03:23 +00001957
1958 /* First try and find the block. */
1959 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1960 (VgHashNode***)&prev_chunks_next_ptr );
1961
1962 if (hc == NULL) {
1963 return NULL;
1964 }
1965
1966 if (hc->size == new_size) {
1967 /* size unchanged */
1968 return p;
1969
1970 } else if (hc->size > new_size) {
1971 /* new size is smaller */
1972 hc->size = new_size;
1973 return p;
1974
1975 } else {
1976 /* new size is bigger */
1977 Addr p_new;
1978
1979 /* Get new memory */
1980 p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
1981
1982 /* First half kept and copied, second half new */
1983 copy_address_range_state( (Addr)p, p_new, hc->size );
1984 eraser_new_mem_heap ( p_new+hc->size, new_size-hc->size,
1985 /*inited*/False );
1986
1987 /* Copy from old to new */
1988 for (i = 0; i < hc->size; i++)
1989 ((UChar*)p_new)[i] = ((UChar*)p)[i];
1990
1991 /* Free old memory */
njn72718642003-07-24 08:45:32 +00001992 die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001993
1994 /* this has to be after die_and_free_mem, otherwise the
1995 former succeeds in shorting out the new block, not the
1996 old, in the case when both are on the same list. */
njn72718642003-07-24 08:45:32 +00001997 add_HG_Chunk ( tid, p_new, new_size );
njn3e884182003-04-15 13:03:23 +00001998
1999 return (void*)p_new;
2000 }
2001}
2002
njn25e49d8e72002-09-23 09:36:25 +00002003/*--------------------------------------------------------------*/
2004/*--- Machinery to support sanity checking ---*/
2005/*--------------------------------------------------------------*/
2006
2007/* Check that nobody has spuriously claimed that the first or last 16
2008 pages (64 KB) of address space have become accessible. Failure of
 2009   the following does not per se indicate an internal consistency
 2010   problem, but it is so likely to that we really want to know
2011 about it if so. */
2012
2013Bool SK_(cheap_sanity_check) ( void )
2014{
sewardjd5815ec2003-04-06 12:23:27 +00002015 if (VGE_IS_DISTINGUISHED_SM(primary_map[0])
2016 /* kludge: kernel drops a page up at top of address range for
2017 magic "optimized syscalls", so we can no longer check the
2018 highest page */
2019 /* && VGE_IS_DISTINGUISHED_SM(primary_map[65535]) */
2020 )
njn25e49d8e72002-09-23 09:36:25 +00002021 return True;
2022 else
2023 return False;
2024}
2025
2026
2027Bool SK_(expensive_sanity_check)(void)
2028{
2029 Int i;
2030
2031 /* Make sure nobody changed the distinguished secondary. */
2032 for (i = 0; i < ESEC_MAP_WORDS; i++)
2033 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
2034 distinguished_secondary_map.swords[i].state != virgin_sword.state)
2035 return False;
2036
2037 return True;
2038}
2039
2040
2041/*--------------------------------------------------------------*/
2042/*--- Instrumentation ---*/
2043/*--------------------------------------------------------------*/
2044
sewardjf6374322002-11-13 22:35:55 +00002045static UInt stk_ld, nonstk_ld, stk_st, nonstk_st;
2046
njn25e49d8e72002-09-23 09:36:25 +00002047/* Create and return an instrumented version of cb_in. Free cb_in
2048 before returning. */
2049UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
2050{
2051 UCodeBlock* cb;
2052 Int i;
2053 UInstr* u_in;
2054 Int t_size = INVALID_TEMPREG;
sewardjf6374322002-11-13 22:35:55 +00002055 Int ntemps;
2056 Bool *stackref = NULL;
sewardj7a5ebcf2002-11-13 22:42:13 +00002057 Bool locked = False; /* lock prefix */
njn25e49d8e72002-09-23 09:36:25 +00002058
njn810086f2002-11-14 12:42:47 +00002059 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002060
sewardjf6374322002-11-13 22:35:55 +00002061 /* stackref[] is used for super-simple value tracking to keep note
2062 of which tempregs currently hold a value which is derived from
2063 ESP or EBP, and is therefore likely stack-relative if used as
2064 the address for LOAD or STORE. */
njn810086f2002-11-14 12:42:47 +00002065 ntemps = VG_(get_num_temps)(cb);
sewardjf6374322002-11-13 22:35:55 +00002066 stackref = VG_(malloc)(sizeof(*stackref) * ntemps);
2067 VG_(memset)(stackref, 0, sizeof(*stackref) * ntemps);
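   /* Sketch of the tracking that follows (schematic, not real ucode): a GET
      of %ESP or %EBP into temp t sets stackref[t]; MOV/LEA1/ADD/SUB propagate
      the flag to their destination temp; a LOAD/STORE whose address temp is
      still flagged counts as stk_ld/stk_st and, when clo_priv_stacks is set,
      skips the eraser_mem_help_* call. */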
2068
njn810086f2002-11-14 12:42:47 +00002069 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
2070 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +00002071
njn25e49d8e72002-09-23 09:36:25 +00002072 switch (u_in->opcode) {
2073
2074 case NOP: case CALLM_S: case CALLM_E:
2075 break;
sewardjf6374322002-11-13 22:35:55 +00002076
sewardj7a5ebcf2002-11-13 22:42:13 +00002077 case LOCK:
2078 locked = True;
2079 uInstr0(cb, CCALL, 0);
2080 uCCall(cb, (Addr)bus_lock, 0, 0, False);
2081 break;
2082
2083 case JMP: case INCEIP:
2084 if (locked) {
2085 uInstr0(cb, CCALL, 0);
2086 uCCall(cb, (Addr)bus_unlock, 0, 0, False);
2087 }
2088 locked = False;
2089 VG_(copy_UInstr)(cb, u_in);
2090 break;
2091
sewardjf6374322002-11-13 22:35:55 +00002092 case GET:
2093 sk_assert(u_in->tag1 == ArchReg);
2094 sk_assert(u_in->tag2 == TempReg);
2095 sk_assert(u_in->val2 < ntemps);
2096
2097 stackref[u_in->val2] = (u_in->size == 4 &&
2098 (u_in->val1 == R_ESP || u_in->val1 == R_EBP));
2099 VG_(copy_UInstr)(cb, u_in);
2100 break;
2101
2102 case MOV:
2103 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2104 sk_assert(u_in->tag2 == TempReg);
2105 stackref[u_in->val2] = stackref[u_in->val1];
2106 }
2107 VG_(copy_UInstr)(cb, u_in);
2108 break;
2109
2110 case LEA1:
2111 case ADD: case SUB:
2112 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2113 sk_assert(u_in->tag2 == TempReg);
2114 stackref[u_in->val2] |= stackref[u_in->val1];
2115 }
2116 VG_(copy_UInstr)(cb, u_in);
2117 break;
njn25e49d8e72002-09-23 09:36:25 +00002118
sewardja5b3aec2002-10-22 05:09:36 +00002119 case LOAD: {
2120 void (*help)(Addr);
2121 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002122 sk_assert(u_in->tag1 == TempReg);
2123
2124 if (!clo_priv_stacks || !stackref[u_in->val1]) {
2125 nonstk_ld++;
2126
2127 switch(u_in->size) {
2128 case 1: help = eraser_mem_help_read_1; break;
2129 case 2: help = eraser_mem_help_read_2; break;
2130 case 4: help = eraser_mem_help_read_4; break;
2131 default:
2132 VG_(skin_panic)("bad size");
2133 }
sewardja5b3aec2002-10-22 05:09:36 +00002134
sewardjf6374322002-11-13 22:35:55 +00002135 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
2136 uCCall(cb, (Addr)help, 1, 1, False);
2137 } else
2138 stk_ld++;
njn25e49d8e72002-09-23 09:36:25 +00002139
sewardja5b3aec2002-10-22 05:09:36 +00002140 VG_(copy_UInstr)(cb, u_in);
2141 t_size = INVALID_TEMPREG;
2142 break;
2143 }
2144
2145 case FPU_R: {
njne427a662002-10-02 11:08:25 +00002146 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00002147 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002148
2149 t_size = newTemp(cb);
2150 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2151 uLiteral(cb, (UInt)u_in->size);
njn25e49d8e72002-09-23 09:36:25 +00002152
sewardja5b3aec2002-10-22 05:09:36 +00002153 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2154 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
njn25e49d8e72002-09-23 09:36:25 +00002155
sewardja5b3aec2002-10-22 05:09:36 +00002156 VG_(copy_UInstr)(cb, u_in);
2157 t_size = INVALID_TEMPREG;
2158 break;
2159 }
2160
2161 case STORE: {
2162 void (*help)(Addr, UInt);
2163 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002164 sk_assert(u_in->tag2 == TempReg);
sewardja5b3aec2002-10-22 05:09:36 +00002165
sewardjf6374322002-11-13 22:35:55 +00002166 if (!clo_priv_stacks || !stackref[u_in->val2]) {
2167 nonstk_st++;
2168
2169 switch(u_in->size) {
2170 case 1: help = eraser_mem_help_write_1; break;
2171 case 2: help = eraser_mem_help_write_2; break;
2172 case 4: help = eraser_mem_help_write_4; break;
2173 default:
2174 VG_(skin_panic)("bad size");
2175 }
2176
2177 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
2178 uCCall(cb, (Addr)help, 2, 2, False);
2179 } else
2180 stk_st++;
sewardja5b3aec2002-10-22 05:09:36 +00002181
2182 VG_(copy_UInstr)(cb, u_in);
2183 t_size = INVALID_TEMPREG;
2184 break;
2185 }
2186
2187 case FPU_W: {
njne427a662002-10-02 11:08:25 +00002188 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00002189 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002190
2191 t_size = newTemp(cb);
2192 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2193 uLiteral(cb, (UInt)u_in->size);
2194 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2195 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
2196
2197 VG_(copy_UInstr)(cb, u_in);
2198 t_size = INVALID_TEMPREG;
2199 break;
2200 }
njn25e49d8e72002-09-23 09:36:25 +00002201
sewardj3d7c9c82003-03-26 21:08:13 +00002202 case MMX1: case MMX2: case MMX3:
2203 case MMX2_MemRd: case MMX2_MemWr:
sewardj4fbe6e92003-06-15 21:54:34 +00002204 case MMX2_ERegRd: case MMX2_ERegWr:
sewardj3d7c9c82003-03-26 21:08:13 +00002205 VG_(skin_panic)(
2206 "I don't know how to instrument MMXish stuff (yet)");
2207 break;
2208
njn25e49d8e72002-09-23 09:36:25 +00002209 default:
sewardjf6374322002-11-13 22:35:55 +00002210 /* conservative tromping */
2211 if (0 && u_in->tag1 == TempReg) /* can val1 ever be dest? */
2212 stackref[u_in->val1] = False;
2213 if (u_in->tag2 == TempReg)
2214 stackref[u_in->val2] = False;
2215 if (u_in->tag3 == TempReg)
2216 stackref[u_in->val3] = False;
njn4ba5a792002-09-30 10:23:54 +00002217 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00002218 break;
2219 }
2220 }
2221
sewardjf6374322002-11-13 22:35:55 +00002222 VG_(free)(stackref);
njn4ba5a792002-09-30 10:23:54 +00002223 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002224 return cb;
2225}
2226
2227
2228/*--------------------------------------------------------------------*/
2229/*--- Error and suppression handling ---*/
2230/*--------------------------------------------------------------------*/
2231
2232typedef
2233 enum {
2234 /* Possible data race */
2235 EraserSupp
2236 }
2237 EraserSuppKind;
2238
2239/* What kind of error it is. */
2240typedef
2241 enum {
sewardj16748af2002-10-22 04:55:54 +00002242 EraserErr, /* data-race */
2243 MutexErr, /* mutex operations */
sewardjff2c9232002-11-13 21:44:39 +00002244 LockGraphErr, /* mutex order error */
njn25e49d8e72002-09-23 09:36:25 +00002245 }
2246 EraserErrorKind;
2247
sewardj16748af2002-10-22 04:55:54 +00002248/* The classification of a faulting address. */
2249typedef
2250 enum { Undescribed, /* as-yet unclassified */
2251 Stack,
2252 Unknown, /* classification yielded nothing useful */
sewardjdac0a442002-11-13 22:08:40 +00002253 Mallocd,
2254 Freed,
sewardj16748af2002-10-22 04:55:54 +00002255 Segment
2256 }
2257 AddrKind;
2258/* Records info about a faulting address. */
2259typedef
2260 struct {
2261 /* ALL */
2262 AddrKind akind;
2263 /* Freed, Mallocd */
2264 Int blksize;
2265 /* Freed, Mallocd */
2266 Int rwoffset;
2267 /* Freed, Mallocd */
2268 ExeContext* lastchange;
2269 ThreadId lasttid;
2270 /* Stack */
2271 ThreadId stack_tid;
2272 /* Segment */
2273 const Char* filename;
2274 const Char* section;
 2275      /* True if it is just below %esp -- could be a gcc bug. */
2276 Bool maybe_gcc;
2277 }
2278 AddrInfo;
njn25e49d8e72002-09-23 09:36:25 +00002279
sewardj16748af2002-10-22 04:55:54 +00002280/* What kind of memory access is involved in the error? */
2281typedef
2282 enum { ReadAxs, WriteAxs, ExecAxs }
2283 AxsKind;
2284
2285/* Extra context for memory errors */
2286typedef
2287 struct {
2288 AxsKind axskind;
2289 Int size;
2290 AddrInfo addrinfo;
2291 Bool isWrite;
2292 shadow_word prevstate;
sewardjff2c9232002-11-13 21:44:39 +00002293 /* MutexErr, LockGraphErr */
sewardj39a4d842002-11-13 22:14:30 +00002294 Mutex *mutex;
sewardj499e3de2002-11-13 22:22:25 +00002295 EC_EIP lasttouched;
sewardj16748af2002-10-22 04:55:54 +00002296 ThreadId lasttid;
sewardjff2c9232002-11-13 21:44:39 +00002297 /* LockGraphErr */
sewardj4bffb232002-11-13 21:46:34 +00002298 const LockSet *held_lockset;
2299 const LockSet *prev_lockset;
sewardj16748af2002-10-22 04:55:54 +00002300 }
2301 HelgrindError;
2302
2303static __inline__
2304void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002305{
sewardj16748af2002-10-22 04:55:54 +00002306 ai->akind = Unknown;
2307 ai->blksize = 0;
2308 ai->rwoffset = 0;
2309 ai->lastchange = NULL;
2310 ai->lasttid = VG_INVALID_THREADID;
2311 ai->filename = NULL;
2312 ai->section = "???";
2313 ai->stack_tid = VG_INVALID_THREADID;
2314 ai->maybe_gcc = False;
njn25e49d8e72002-09-23 09:36:25 +00002315}
2316
sewardj16748af2002-10-22 04:55:54 +00002317static __inline__
2318void clear_HelgrindError ( HelgrindError* err_extra )
2319{
2320 err_extra->axskind = ReadAxs;
2321 err_extra->size = 0;
2322 err_extra->mutex = NULL;
sewardj499e3de2002-11-13 22:22:25 +00002323   err_extra->lasttouched = NULL_EC_EIP;
sewardj16748af2002-10-22 04:55:54 +00002324 err_extra->lasttid = VG_INVALID_THREADID;
sewardjff2c9232002-11-13 21:44:39 +00002325 err_extra->prev_lockset = 0;
2326 err_extra->held_lockset = 0;
sewardj8fac99a2002-11-13 22:31:26 +00002327 err_extra->prevstate = SW(Vge_Virgin, 0);
sewardj16748af2002-10-22 04:55:54 +00002328 clear_AddrInfo ( &err_extra->addrinfo );
2329 err_extra->isWrite = False;
2330}
2331
2332
2333
2334/* Describe an address as best you can, for error messages,
2335 putting the result in ai. */
2336
2337static void describe_addr ( Addr a, AddrInfo* ai )
2338{
njn3e884182003-04-15 13:03:23 +00002339 HG_Chunk* hc;
sewardjdac0a442002-11-13 22:08:40 +00002340 Int i;
sewardj16748af2002-10-22 04:55:54 +00002341
2342 /* Nested functions, yeah. Need the lexical scoping of 'a'. */
2343
2344 /* Closure for searching thread stacks */
2345 Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
2346 {
2347 return (stack_min <= a && a <= stack_max);
2348 }
2349 /* Closure for searching malloc'd and free'd lists */
njn3e884182003-04-15 13:03:23 +00002350 Bool addr_is_in_block(VgHashNode *node)
sewardj16748af2002-10-22 04:55:54 +00002351 {
njn3e884182003-04-15 13:03:23 +00002352 HG_Chunk* hc2 = (HG_Chunk*)node;
2353 return (hc2->data <= a && a < hc2->data + hc2->size);
sewardj16748af2002-10-22 04:55:54 +00002354 }
2355
2356 /* Search for it in segments */
2357 {
2358 const SegInfo *seg;
2359
2360 for(seg = VG_(next_seginfo)(NULL);
2361 seg != NULL;
2362 seg = VG_(next_seginfo)(seg)) {
2363 Addr base = VG_(seg_start)(seg);
2364 UInt size = VG_(seg_size)(seg);
2365 const UChar *filename = VG_(seg_filename)(seg);
2366
2367 if (a >= base && a < base+size) {
2368 ai->akind = Segment;
2369 ai->blksize = size;
2370 ai->rwoffset = a - base;
2371 ai->filename = filename;
2372
2373 switch(VG_(seg_sect_kind)(a)) {
2374 case Vg_SectText: ai->section = "text"; break;
2375 case Vg_SectData: ai->section = "data"; break;
2376 case Vg_SectBSS: ai->section = "BSS"; break;
2377 case Vg_SectGOT: ai->section = "GOT"; break;
2378 case Vg_SectPLT: ai->section = "PLT"; break;
2379 case Vg_SectUnknown:
2380 default:
2381 ai->section = "???"; break;
2382 }
2383
2384 return;
2385 }
2386 }
2387 }
2388
2389 /* Search for a currently malloc'd block which might bracket it. */
njn3e884182003-04-15 13:03:23 +00002390 hc = (HG_Chunk*)VG_(HT_first_match)(hg_malloc_list, addr_is_in_block);
2391 if (NULL != hc) {
sewardj16748af2002-10-22 04:55:54 +00002392 ai->akind = Mallocd;
njn3e884182003-04-15 13:03:23 +00002393 ai->blksize = hc->size;
2394 ai->rwoffset = (Int)a - (Int)(hc->data);
2395 ai->lastchange = hc->where;
2396 ai->lasttid = hc->tid;
sewardj16748af2002-10-22 04:55:54 +00002397 return;
2398 }
sewardjdac0a442002-11-13 22:08:40 +00002399
2400 /* Look in recently freed memory */
2401 for(i = 0; i < N_FREED_CHUNKS; i++) {
njn3e884182003-04-15 13:03:23 +00002402 hc = freechunks[i];
2403 if (hc == NULL)
sewardjdac0a442002-11-13 22:08:40 +00002404 continue;
2405
njn3e884182003-04-15 13:03:23 +00002406 if (a >= hc->data && a < hc->data + hc->size) {
sewardjdac0a442002-11-13 22:08:40 +00002407 ai->akind = Freed;
njn3e884182003-04-15 13:03:23 +00002408 ai->blksize = hc->size;
2409 ai->rwoffset = a - hc->data;
2410 ai->lastchange = hc->where;
2411 ai->lasttid = hc->tid;
sewardjdac0a442002-11-13 22:08:40 +00002412 return;
2413 }
2414 }
2415
sewardj16748af2002-10-22 04:55:54 +00002416 /* Clueless ... */
2417 ai->akind = Unknown;
2418 return;
2419}
2420
2421
njn7e614812003-04-21 22:04:03 +00002422/* Updates the copy with address info if necessary. */
2423UInt SK_(update_extra)(Error* err)
sewardj16748af2002-10-22 04:55:54 +00002424{
njn7e614812003-04-21 22:04:03 +00002425 HelgrindError* extra;
sewardj16748af2002-10-22 04:55:54 +00002426
njn7e614812003-04-21 22:04:03 +00002427 extra = (HelgrindError*)VG_(get_error_extra)(err);
2428 if (extra != NULL && Undescribed == extra->addrinfo.akind) {
2429 describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
2430 }
2431 return sizeof(HelgrindError);
sewardj16748af2002-10-22 04:55:54 +00002432}
2433
njn72718642003-07-24 08:45:32 +00002434static void record_eraser_error ( ThreadId tid, Addr a, Bool is_write,
sewardj0f811692002-10-22 04:59:26 +00002435 shadow_word prevstate )
sewardj16748af2002-10-22 04:55:54 +00002436{
sewardjc4a810d2002-11-13 22:25:51 +00002437 shadow_word *sw;
sewardj16748af2002-10-22 04:55:54 +00002438 HelgrindError err_extra;
2439
sewardjff2c9232002-11-13 21:44:39 +00002440 n_eraser_warnings++;
2441
sewardj16748af2002-10-22 04:55:54 +00002442 clear_HelgrindError(&err_extra);
2443 err_extra.isWrite = is_write;
2444 err_extra.addrinfo.akind = Undescribed;
2445 err_extra.prevstate = prevstate;
sewardj499e3de2002-11-13 22:22:25 +00002446 if (clo_execontext)
2447 err_extra.lasttouched = getExeContext(a);
njn72718642003-07-24 08:45:32 +00002448 VG_(maybe_record_error)( tid, EraserErr, a,
sewardj16748af2002-10-22 04:55:54 +00002449 (is_write ? "writing" : "reading"),
2450 &err_extra);
2451
sewardjc4a810d2002-11-13 22:25:51 +00002452 sw = get_sword_addr(a);
2453 if (sw->state == Vge_Excl && sw->other != TLSP_INDICATING_ALL) {
2454 ThreadLifeSeg *tls = unpackTLS(sw->other);
2455 tls->refcount--;
2456 }
2457
sewardj7f3ad222002-11-13 22:11:53 +00002458 set_sword(a, error_sword);
sewardj16748af2002-10-22 04:55:54 +00002459}
2460
sewardj39a4d842002-11-13 22:14:30 +00002461static void record_mutex_error(ThreadId tid, Mutex *mutex,
sewardj16748af2002-10-22 04:55:54 +00002462 Char *str, ExeContext *ec)
2463{
2464 HelgrindError err_extra;
2465
2466 clear_HelgrindError(&err_extra);
2467 err_extra.addrinfo.akind = Undescribed;
2468 err_extra.mutex = mutex;
sewardjc808ef52002-11-13 22:43:26 +00002469 err_extra.lasttouched = EC(ec, virgin_sword, thread_seg[tid]);
sewardj16748af2002-10-22 04:55:54 +00002470 err_extra.lasttid = tid;
2471
njn72718642003-07-24 08:45:32 +00002472 VG_(maybe_record_error)(tid, MutexErr,
sewardj16748af2002-10-22 04:55:54 +00002473 (Addr)mutex->mutexp, str, &err_extra);
2474}
njn25e49d8e72002-09-23 09:36:25 +00002475
sewardj39a4d842002-11-13 22:14:30 +00002476static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
sewardj4bffb232002-11-13 21:46:34 +00002477 const LockSet *lockset_holding,
2478 const LockSet *lockset_prev)
sewardjff2c9232002-11-13 21:44:39 +00002479{
2480 HelgrindError err_extra;
2481
2482 n_lockorder_warnings++;
2483
2484 clear_HelgrindError(&err_extra);
2485 err_extra.addrinfo.akind = Undescribed;
2486 err_extra.mutex = mutex;
2487
sewardjc808ef52002-11-13 22:43:26 +00002488 err_extra.lasttouched = EC(mutex->location, virgin_sword, 0);
sewardjff2c9232002-11-13 21:44:39 +00002489 err_extra.held_lockset = lockset_holding;
2490 err_extra.prev_lockset = lockset_prev;
2491
njn72718642003-07-24 08:45:32 +00002492 VG_(maybe_record_error)(tid, LockGraphErr, mutex->mutexp, "", &err_extra);
sewardjff2c9232002-11-13 21:44:39 +00002493}
2494
njn810086f2002-11-14 12:42:47 +00002495Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
njn25e49d8e72002-09-23 09:36:25 +00002496{
njn810086f2002-11-14 12:42:47 +00002497 Char *e1s, *e2s;
sewardj16748af2002-10-22 04:55:54 +00002498
njn810086f2002-11-14 12:42:47 +00002499 sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
2500
2501 switch (VG_(get_error_kind)(e1)) {
sewardj16748af2002-10-22 04:55:54 +00002502 case EraserErr:
njn810086f2002-11-14 12:42:47 +00002503 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002504
2505 case MutexErr:
njn810086f2002-11-14 12:42:47 +00002506 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002507 }
2508
njn810086f2002-11-14 12:42:47 +00002509 e1s = VG_(get_error_string)(e1);
2510 e2s = VG_(get_error_string)(e2);
2511 if (e1s != e2s) return False;
2512 if (0 != VG_(strcmp)(e1s, e2s)) return False;
njn25e49d8e72002-09-23 09:36:25 +00002513 return True;
2514}
2515
sewardj16748af2002-10-22 04:55:54 +00002516static void pp_AddrInfo ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002517{
sewardj16748af2002-10-22 04:55:54 +00002518 switch (ai->akind) {
2519 case Stack:
2520 VG_(message)(Vg_UserMsg,
2521 " Address %p is on thread %d's stack",
2522 a, ai->stack_tid);
2523 break;
2524 case Unknown:
2525 if (ai->maybe_gcc) {
2526 VG_(message)(Vg_UserMsg,
2527 " Address %p is just below %%esp. Possibly a bug in GCC/G++",
2528 a);
2529 VG_(message)(Vg_UserMsg,
2530 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
2531 } else {
2532 VG_(message)(Vg_UserMsg,
2533 " Address %p is not stack'd, malloc'd or free'd", a);
2534 }
2535 break;
2536 case Segment:
2537 VG_(message)(Vg_UserMsg,
2538 " Address %p is in %s section of %s",
2539 a, ai->section, ai->filename);
2540 break;
sewardjdac0a442002-11-13 22:08:40 +00002541 case Mallocd:
2542 case Freed: {
sewardj16748af2002-10-22 04:55:54 +00002543 UInt delta;
2544 UChar* relative;
2545 if (ai->rwoffset < 0) {
2546 delta = (UInt)(- ai->rwoffset);
2547 relative = "before";
2548 } else if (ai->rwoffset >= ai->blksize) {
2549 delta = ai->rwoffset - ai->blksize;
2550 relative = "after";
2551 } else {
2552 delta = ai->rwoffset;
2553 relative = "inside";
2554 }
2555 VG_(message)(Vg_UserMsg,
sewardj499e3de2002-11-13 22:22:25 +00002556 " Address %p is %d bytes %s a block of size %d %s by thread %d",
sewardj16748af2002-10-22 04:55:54 +00002557 a, delta, relative,
2558 ai->blksize,
sewardjdac0a442002-11-13 22:08:40 +00002559 ai->akind == Mallocd ? "alloc'd" : "freed",
sewardj16748af2002-10-22 04:55:54 +00002560 ai->lasttid);
sewardj5481f8f2002-10-20 19:43:47 +00002561
sewardj16748af2002-10-22 04:55:54 +00002562 VG_(pp_ExeContext)(ai->lastchange);
2563 break;
2564 }
2565 default:
2566 VG_(skin_panic)("pp_AddrInfo");
2567 }
njn25e49d8e72002-09-23 09:36:25 +00002568}
2569
sewardj4bffb232002-11-13 21:46:34 +00002570static Char *lockset_str(const Char *prefix, const LockSet *lockset)
sewardjff2c9232002-11-13 21:44:39 +00002571{
sewardjff2c9232002-11-13 21:44:39 +00002572 Char *buf, *cp;
sewardj4bffb232002-11-13 21:46:34 +00002573 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002574
sewardj4bffb232002-11-13 21:46:34 +00002575 buf = VG_(malloc)((prefix == NULL ? 0 : VG_(strlen)(prefix)) +
2576 lockset->setsize * 120 +
2577 1);
sewardjff2c9232002-11-13 21:44:39 +00002578
2579 cp = buf;
2580 if (prefix)
2581 cp += VG_(sprintf)(cp, "%s", prefix);
2582
sewardj4bffb232002-11-13 21:46:34 +00002583 for(i = 0; i < lockset->setsize; i++)
2584 cp += VG_(sprintf)(cp, "%p%(y, ", lockset->mutex[i]->mutexp,
2585 lockset->mutex[i]->mutexp);
sewardjff2c9232002-11-13 21:44:39 +00002586
sewardj4bffb232002-11-13 21:46:34 +00002587 if (lockset->setsize)
sewardjff2c9232002-11-13 21:44:39 +00002588 cp[-2] = '\0';
2589 else
2590 *cp = '\0';
2591
2592 return buf;
2593}
njn25e49d8e72002-09-23 09:36:25 +00002594
njn43c799e2003-04-08 00:08:52 +00002595void SK_(pp_SkinError) ( Error* err )
njn25e49d8e72002-09-23 09:36:25 +00002596{
njn810086f2002-11-14 12:42:47 +00002597 HelgrindError *extra = (HelgrindError *)VG_(get_error_extra)(err);
sewardj16748af2002-10-22 04:55:54 +00002598 Char buf[100];
2599 Char *msg = buf;
sewardj4bffb232002-11-13 21:46:34 +00002600 const LockSet *ls;
sewardj16748af2002-10-22 04:55:54 +00002601
2602 *msg = '\0';
2603
njn810086f2002-11-14 12:42:47 +00002604 switch(VG_(get_error_kind)(err)) {
2605 case EraserErr: {
2606 Addr err_addr = VG_(get_error_address)(err);
2607
sewardj16748af2002-10-22 04:55:54 +00002608 VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
njn810086f2002-11-14 12:42:47 +00002609 VG_(get_error_string)(err), err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002610 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn810086f2002-11-14 12:42:47 +00002611 pp_AddrInfo(err_addr, &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002612
2613 switch(extra->prevstate.state) {
2614 case Vge_Virgin:
2615 /* shouldn't be possible to go directly from virgin -> error */
2616 VG_(sprintf)(buf, "virgin!?");
2617 break;
2618
sewardjc4a810d2002-11-13 22:25:51 +00002619 case Vge_Excl: {
2620 ThreadLifeSeg *tls = unpackTLS(extra->prevstate.other);
2621
2622 sk_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
2623 VG_(sprintf)(buf, "exclusively owned by thread %u", tls->tid);
sewardj16748af2002-10-22 04:55:54 +00002624 break;
sewardjc4a810d2002-11-13 22:25:51 +00002625 }
sewardj16748af2002-10-22 04:55:54 +00002626
2627 case Vge_Shar:
sewardjff2c9232002-11-13 21:44:39 +00002628 case Vge_SharMod:
sewardj8fac99a2002-11-13 22:31:26 +00002629 ls = unpackLockSet(extra->prevstate.other);
sewardj4bffb232002-11-13 21:46:34 +00002630
2631 if (isempty(ls)) {
sewardj16748af2002-10-22 04:55:54 +00002632 VG_(sprintf)(buf, "shared %s, no locks",
2633 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
2634 break;
2635 }
2636
sewardjff2c9232002-11-13 21:44:39 +00002637 msg = lockset_str(extra->prevstate.state == Vge_Shar ?
2638 "shared RO, locked by:" :
sewardj4bffb232002-11-13 21:46:34 +00002639 "shared RW, locked by:", ls);
sewardj16748af2002-10-22 04:55:54 +00002640
sewardj16748af2002-10-22 04:55:54 +00002641 break;
2642 }
sewardj16748af2002-10-22 04:55:54 +00002643
sewardj499e3de2002-11-13 22:22:25 +00002644 if (*msg)
sewardj16748af2002-10-22 04:55:54 +00002645 VG_(message)(Vg_UserMsg, " Previous state: %s", msg);
sewardj499e3de2002-11-13 22:22:25 +00002646
sewardj72baa7a2002-12-09 23:32:58 +00002647 if (clo_execontext == EC_Some
2648 && extra->lasttouched.uu_ec_eip.eip != 0) {
sewardj499e3de2002-11-13 22:22:25 +00002649 Char file[100];
2650 UInt line;
sewardj72baa7a2002-12-09 23:32:58 +00002651 Addr eip = extra->lasttouched.uu_ec_eip.eip;
sewardj499e3de2002-11-13 22:22:25 +00002652
sewardjc808ef52002-11-13 22:43:26 +00002653 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s by thread %u",
njn810086f2002-11-14 12:42:47 +00002654 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002655 pp_state(extra->lasttouched.state),
2656 unpackTLS(extra->lasttouched.tls)->tid);
sewardj499e3de2002-11-13 22:22:25 +00002657
2658 if (VG_(get_filename_linenum)(eip, file, sizeof(file), &line)) {
2659 VG_(message)(Vg_UserMsg, " at %p: %y (%s:%u)",
2660 eip, eip, file, line);
2661 } else if (VG_(get_objname)(eip, file, sizeof(file))) {
2662 VG_(message)(Vg_UserMsg, " at %p: %y (in %s)",
2663 eip, eip, file);
2664 } else {
2665 VG_(message)(Vg_UserMsg, " at %p: %y", eip, eip);
2666 }
sewardj72baa7a2002-12-09 23:32:58 +00002667 } else if (clo_execontext == EC_All
2668 && extra->lasttouched.uu_ec_eip.ec != NULL) {
sewardjc808ef52002-11-13 22:43:26 +00002669 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s in tid %u",
njn810086f2002-11-14 12:42:47 +00002670 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002671 pp_state(extra->lasttouched.state),
2672 unpackTLS(extra->lasttouched.tls)->tid);
sewardj72baa7a2002-12-09 23:32:58 +00002673 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj499e3de2002-11-13 22:22:25 +00002674 }
sewardj16748af2002-10-22 04:55:54 +00002675 break;
njn810086f2002-11-14 12:42:47 +00002676 }
sewardj16748af2002-10-22 04:55:54 +00002677
2678 case MutexErr:
sewardj499e3de2002-11-13 22:22:25 +00002679 VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s",
njn810086f2002-11-14 12:42:47 +00002680 VG_(get_error_address)(err),
2681 VG_(get_error_address)(err),
2682 VG_(get_error_string)(err));
njn43c799e2003-04-08 00:08:52 +00002683 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardj72baa7a2002-12-09 23:32:58 +00002684 if (extra->lasttouched.uu_ec_eip.ec != NULL) {
sewardj499e3de2002-11-13 22:22:25 +00002685 VG_(message)(Vg_UserMsg, " last touched by thread %d", extra->lasttid);
sewardj72baa7a2002-12-09 23:32:58 +00002686 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj16748af2002-10-22 04:55:54 +00002687 }
njn810086f2002-11-14 12:42:47 +00002688 pp_AddrInfo(VG_(get_error_address)(err), &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002689 break;
sewardjff2c9232002-11-13 21:44:39 +00002690
2691 case LockGraphErr: {
sewardj4bffb232002-11-13 21:46:34 +00002692 const LockSet *heldset = extra->held_lockset;
njn810086f2002-11-14 12:42:47 +00002693 Addr err_addr = VG_(get_error_address)(err);
sewardj4bffb232002-11-13 21:46:34 +00002694 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002695
2696 msg = lockset_str(NULL, heldset);
2697
2698 VG_(message)(Vg_UserMsg, "Mutex %p%(y locked in inconsistent order",
njn810086f2002-11-14 12:42:47 +00002699 err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002700 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardjff2c9232002-11-13 21:44:39 +00002701 VG_(message)(Vg_UserMsg, " while holding locks %s", msg);
2702
sewardj4bffb232002-11-13 21:46:34 +00002703 for(i = 0; i < heldset->setsize; i++) {
sewardj39a4d842002-11-13 22:14:30 +00002704 const Mutex *lsmx = heldset->mutex[i];
sewardjff2c9232002-11-13 21:44:39 +00002705
sewardj542494b2002-11-13 22:46:13 +00002706 /* needs to be a recursive search+display */
2707 if (0 && !ismember(lsmx->lockdep, extra->mutex))
sewardjff2c9232002-11-13 21:44:39 +00002708 continue;
2709
2710 VG_(message)(Vg_UserMsg, " %p%(y last locked at",
2711 lsmx->mutexp, lsmx->mutexp);
2712 VG_(pp_ExeContext)(lsmx->location);
2713 VG_(free)(msg);
sewardj4bffb232002-11-13 21:46:34 +00002714 msg = lockset_str(NULL, lsmx->lockdep);
sewardjff2c9232002-11-13 21:44:39 +00002715 VG_(message)(Vg_UserMsg, " while depending on locks %s", msg);
2716 }
2717
2718 break;
sewardj16748af2002-10-22 04:55:54 +00002719 }
sewardjff2c9232002-11-13 21:44:39 +00002720 }
2721
2722 if (msg != buf)
2723 VG_(free)(msg);
njn25e49d8e72002-09-23 09:36:25 +00002724}
2725
2726
njn810086f2002-11-14 12:42:47 +00002727Bool SK_(recognised_suppression) ( Char* name, Supp *su )
njn25e49d8e72002-09-23 09:36:25 +00002728{
2729 if (0 == VG_(strcmp)(name, "Eraser")) {
njn810086f2002-11-14 12:42:47 +00002730 VG_(set_supp_kind)(su, EraserSupp);
njn25e49d8e72002-09-23 09:36:25 +00002731 return True;
2732 } else {
2733 return False;
2734 }
2735}
2736
2737
njn810086f2002-11-14 12:42:47 +00002738Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +00002739{
2740 /* do nothing -- no extra suppression info present. Return True to
2741 indicate nothing bad happened. */
2742 return True;
2743}
2744
2745
njn810086f2002-11-14 12:42:47 +00002746Bool SK_(error_matches_suppression)(Error* err, Supp* su)
njn25e49d8e72002-09-23 09:36:25 +00002747{
njn810086f2002-11-14 12:42:47 +00002748 sk_assert(VG_(get_supp_kind) (su) == EraserSupp);
2749 sk_assert(VG_(get_error_kind)(err) == EraserErr);
njn25e49d8e72002-09-23 09:36:25 +00002750 return True;
2751}
2752
njn43c799e2003-04-08 00:08:52 +00002753extern Char* SK_(get_error_name) ( Error* err )
2754{
2755 if (EraserErr == VG_(get_error_kind)(err)) {
2756 return "Eraser";
2757 } else {
2758 return NULL; /* Other errors types can't be suppressed */
2759 }
2760}
2761
2762extern void SK_(print_extra_suppression_info) ( Error* err )
2763{
2764 /* Do nothing */
2765}
njn25e49d8e72002-09-23 09:36:25 +00002766
sewardjdca84112002-11-13 22:29:34 +00002767static void eraser_pre_mutex_lock(ThreadId tid, void* void_mutex)
2768{
2769 Mutex *mutex = get_mutex((Addr)void_mutex);
2770
njn72718642003-07-24 08:45:32 +00002771 test_mutex_state(mutex, MxLocked, tid);
sewardjdca84112002-11-13 22:29:34 +00002772}
2773
njn25e49d8e72002-09-23 09:36:25 +00002774static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
2775{
sewardj4bffb232002-11-13 21:46:34 +00002776 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002777 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002778 const LockSet* ls;
2779
njn72718642003-07-24 08:45:32 +00002780 set_mutex_state(mutex, MxLocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002781
njn25e49d8e72002-09-23 09:36:25 +00002782# if DEBUG_LOCKS
sewardjdac0a442002-11-13 22:08:40 +00002783 VG_(printf)("lock (%u, %p)\n", tid, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002784# endif
2785
njn25e49d8e72002-09-23 09:36:25 +00002786 /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
2787# if LOCKSET_SANITY > 1
2788 sanity_check_locksets("eraser_post_mutex_lock-IN");
2789# endif
2790
sewardj4bffb232002-11-13 21:46:34 +00002791 ls = lookup_LockSet_with(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002792
sewardj4bffb232002-11-13 21:46:34 +00002793 if (ls == NULL) {
2794 LockSet *newset = add_LockSet(thread_locks[tid], mutex);
2795 insert_LockSet(newset);
2796 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002797 }
sewardj4bffb232002-11-13 21:46:34 +00002798 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002799
sewardj4bffb232002-11-13 21:46:34 +00002800 if (debug || DEBUG_LOCKS)
2801 VG_(printf)("tid %u now has lockset %p\n", tid, ls);
njn25e49d8e72002-09-23 09:36:25 +00002802
sewardj4bffb232002-11-13 21:46:34 +00002803 if (debug || LOCKSET_SANITY > 1)
2804 sanity_check_locksets("eraser_post_mutex_lock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002805}
2806
2807
2808static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
2809{
sewardjc26cc252002-10-23 21:58:55 +00002810 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002812 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002813 const LockSet *ls;
2814
njn72718642003-07-24 08:45:32 +00002815 test_mutex_state(mutex, MxUnlocked, tid);
2816 set_mutex_state(mutex, MxUnlocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002817
sewardjdac0a442002-11-13 22:08:40 +00002818 if (!ismember(thread_locks[tid], mutex))
2819 return;
2820
sewardjc26cc252002-10-23 21:58:55 +00002821 if (debug || DEBUG_LOCKS)
2822 VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002823
sewardjc26cc252002-10-23 21:58:55 +00002824 if (debug || LOCKSET_SANITY > 1)
2825 sanity_check_locksets("eraser_post_mutex_unlock-IN");
njn25e49d8e72002-09-23 09:36:25 +00002826
sewardj4bffb232002-11-13 21:46:34 +00002827 ls = lookup_LockSet_without(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002828
sewardj4bffb232002-11-13 21:46:34 +00002829 if (ls == NULL) {
2830 LockSet *newset = remove_LockSet(thread_locks[tid], mutex);
2831 insert_LockSet(newset);
2832 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002833 }
2834
2835 /* Update the thread's lock vector */
sewardjc26cc252002-10-23 21:58:55 +00002836 if (debug || DEBUG_LOCKS)
sewardj4bffb232002-11-13 21:46:34 +00002837 VG_(printf)("tid %u reverts from %p to lockset %p\n",
sewardjc26cc252002-10-23 21:58:55 +00002838                  tid, thread_locks[tid], ls);
njn25e49d8e72002-09-23 09:36:25 +00002839
sewardj4bffb232002-11-13 21:46:34 +00002840 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002841
sewardjc26cc252002-10-23 21:58:55 +00002842 if (debug || LOCKSET_SANITY > 1)
2843 sanity_check_locksets("eraser_post_mutex_unlock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002844}
2845
2846
2847/* ---------------------------------------------------------------------
2848 Checking memory reads and writes
2849 ------------------------------------------------------------------ */
2850
2851/* Behaviour on reads and writes:
2852 *
2853 * VIR EXCL SHAR SH_MOD
2854 * ----------------------------------------------------------------
2855 * rd/wr, 1st thread | - EXCL - -
2856 * rd, new thread | - SHAR - -
2857 * wr, new thread | - SH_MOD - -
2858 * rd | error! - SHAR SH_MOD
2859 * wr | EXCL - SH_MOD SH_MOD
2860 * ----------------------------------------------------------------
2861 */
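/* A schematic walk through the table (illustrative only): thread T1 writes a
   virgin word, so it becomes EXCL(T1).  Thread T2 then reads it, moving it to
   SHAR with T2's current lockset.  If T2 later writes it and the lockset
   recorded for the word is empty, it goes to SH_MOD and
   eraser_mem_write_word() reports a possible data race. */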
2862
sewardj8fac99a2002-11-13 22:31:26 +00002863static inline
njn25e49d8e72002-09-23 09:36:25 +00002864void dump_around_a(Addr a)
2865{
2866 UInt i;
2867 shadow_word* sword;
2868 VG_(printf)("NEARBY:\n");
2869 for (i = a - 12; i <= a + 12; i += 4) {
2870 sword = get_sword_addr(i);
2871 VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
2872 }
2873}
njn25e49d8e72002-09-23 09:36:25 +00002874
2875#if DEBUG_ACCESSES
2876 #define DEBUG_STATE(args...) \
2877 VG_(printf)("(%u) ", size), \
2878 VG_(printf)(args)
2879#else
2880 #define DEBUG_STATE(args...)
2881#endif
2882
njn72718642003-07-24 08:45:32 +00002883static void eraser_mem_read_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00002884{
sewardj72baa7a2002-12-09 23:32:58 +00002885 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00002886 shadow_word prevstate;
2887 ThreadLifeSeg *tls;
2888 const LockSet *ls;
2889 Bool statechange = False;
2890
2891 static const void *const states[4] = {
2892 [Vge_Virgin] &&st_virgin,
2893 [Vge_Excl] &&st_excl,
2894 [Vge_Shar] &&st_shar,
2895 [Vge_SharMod] &&st_sharmod,
2896 };
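   /* states[] maps the word's current state to a label address; the
      "goto *states[sword->state]" below is a gcc computed-goto dispatch,
      one branch per Eraser state. */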
2897
2898 tls = thread_seg[tid];
2899 sk_assert(tls != NULL && tls->tid == tid);
2900
2901 sword = get_sword_addr(a);
2902 if (sword == SEC_MAP_ACCESS) {
2903 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
2904 return;
2905 }
2906
2907 prevstate = *sword;
2908
2909 goto *states[sword->state];
2910
 2911   /* This looks like reading of uninitialised memory, may be legit.  Eg.
2912 * calloc() zeroes its values, so untouched memory may actually be
2913 * initialised. Leave that stuff to Valgrind. */
2914 st_virgin:
2915 if (TID_INDICATING_NONVIRGIN == sword->other) {
2916 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
2917 if (DEBUG_VIRGIN_READS)
2918 dump_around_a(a);
2919 } else {
2920 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
2921 }
2922 statechange = True;
2923 *sword = SW(Vge_Excl, packTLS(tls)); /* remember exclusive owner */
2924 tls->refcount++;
2925 goto done;
2926
2927 st_excl: {
2928 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
2929
2930 if (tls == sw_tls) {
2931 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
2932 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
2933 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
2934 } else if (tlsIsDisjoint(tls, sw_tls)) {
2935 DEBUG_STATE("Read EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
2936 statechange = True;
2937 sword->other = packTLS(tls);
2938 sw_tls->refcount--;
2939 tls->refcount++;
2940 } else {
2941 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sw_tls->tid, a, tid);
2942 sw_tls->refcount--;
2943 statechange = True;
2944 *sword = SW(Vge_Shar, packLockSet(thread_locks[tid]));
2945
2946 if (DEBUG_MEM_LOCKSET_CHANGES)
2947 print_LockSet("excl read locks", unpackLockSet(sword->other));
2948 }
2949 goto done;
2950 }
2951
2952 st_shar:
2953 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
2954 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
2955 thread_locks[tid]));
2956 statechange = sword->other != prevstate.other;
2957 goto done;
2958
2959 st_sharmod:
2960 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
2961 ls = intersect(unpackLockSet(sword->other),
2962 thread_locks[tid]);
2963 sword->other = packLockSet(ls);
2964
2965 statechange = sword->other != prevstate.other;
2966
2967 if (isempty(ls)) {
njn72718642003-07-24 08:45:32 +00002968 record_eraser_error(tid, a, False /* !is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00002969 }
2970 goto done;
2971
2972 done:
2973 if (clo_execontext != EC_None && statechange) {
2974 EC_EIP eceip;
2975
2976 if (clo_execontext == EC_Some)
njn72718642003-07-24 08:45:32 +00002977 eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002978 else
njn72718642003-07-24 08:45:32 +00002979 eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002980 setExeContext(a, eceip);
2981 }
2982}
njn25e49d8e72002-09-23 09:36:25 +00002983
njn72718642003-07-24 08:45:32 +00002984static void eraser_mem_read(Addr a, UInt size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00002985{
njn72718642003-07-24 08:45:32 +00002986 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00002987
sewardj8fac99a2002-11-13 22:31:26 +00002988 end = ROUNDUP(a+size, 4);
2989 a = ROUNDDN(a, 4);
2990
sewardj18cd4a52002-11-13 22:37:41 +00002991 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00002992 eraser_mem_read_word(a, tid);
sewardj18cd4a52002-11-13 22:37:41 +00002993}
2994
njn72718642003-07-24 08:45:32 +00002995static void eraser_mem_write_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00002996{
2997 ThreadLifeSeg *tls;
sewardj72baa7a2002-12-09 23:32:58 +00002998 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00002999 shadow_word prevstate;
3000 Bool statechange = False;
3001 static const void *const states[4] = {
3002 [Vge_Virgin] &&st_virgin,
3003 [Vge_Excl] &&st_excl,
3004 [Vge_Shar] &&st_shar,
3005 [Vge_SharMod] &&st_sharmod,
3006 };
3007
sewardjc4a810d2002-11-13 22:25:51 +00003008 tls = thread_seg[tid];
3009 sk_assert(tls != NULL && tls->tid == tid);
3010
sewardj18cd4a52002-11-13 22:37:41 +00003011 sword = get_sword_addr(a);
3012 if (sword == SEC_MAP_ACCESS) {
3013 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
 3014      VG_(printf)("write distinguished 2ndary map! 0x%x\n", a);
3015 }
njn25e49d8e72002-09-23 09:36:25 +00003016
sewardj18cd4a52002-11-13 22:37:41 +00003017 prevstate = *sword;
njn25e49d8e72002-09-23 09:36:25 +00003018
sewardj18cd4a52002-11-13 22:37:41 +00003019 goto *states[sword->state];
sewardj16748af2002-10-22 04:55:54 +00003020
sewardj18cd4a52002-11-13 22:37:41 +00003021 st_virgin:
3022 if (TID_INDICATING_NONVIRGIN == sword->other)
3023 DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
3024 else
3025 DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
3026 statechange = True;
3027 *sword = SW(Vge_Excl, packTLS(tls));/* remember exclusive owner */
3028 tls->refcount++;
3029 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003030
sewardj18cd4a52002-11-13 22:37:41 +00003031 st_excl: {
3032 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
3033
3034 if (tls == sw_tls) {
3035 DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
3036 goto done;
3037 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
3038 DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
3039 goto done;
3040 } else if (tlsIsDisjoint(tls, sw_tls)) {
3041 DEBUG_STATE("Write EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
3042 sword->other = packTLS(tls);
3043 sw_tls->refcount--;
sewardjc4a810d2002-11-13 22:25:51 +00003044 tls->refcount++;
sewardj8fac99a2002-11-13 22:31:26 +00003045 goto done;
sewardj18cd4a52002-11-13 22:37:41 +00003046 } else {
3047 DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sw_tls->tid, a, tid);
3048 statechange = True;
3049 sw_tls->refcount--;
3050 *sword = SW(Vge_SharMod, packLockSet(thread_locks[tid]));
 3051	 if (DEBUG_MEM_LOCKSET_CHANGES)
3052 print_LockSet("excl write locks", unpackLockSet(sword->other));
3053 goto SHARED_MODIFIED;
sewardjc4a810d2002-11-13 22:25:51 +00003054 }
sewardj18cd4a52002-11-13 22:37:41 +00003055 }
njn25e49d8e72002-09-23 09:36:25 +00003056
sewardj18cd4a52002-11-13 22:37:41 +00003057 st_shar:
3058 DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
3059 sword->state = Vge_SharMod;
3060 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3061 thread_locks[tid]));
3062 statechange = True;
3063 goto SHARED_MODIFIED;
njn25e49d8e72002-09-23 09:36:25 +00003064
sewardj18cd4a52002-11-13 22:37:41 +00003065 st_sharmod:
3066 DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
3067 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3068 thread_locks[tid]));
3069 statechange = sword->other != prevstate.other;
njn25e49d8e72002-09-23 09:36:25 +00003070
sewardj18cd4a52002-11-13 22:37:41 +00003071 SHARED_MODIFIED:
3072 if (isempty(unpackLockSet(sword->other))) {
njn72718642003-07-24 08:45:32 +00003073 record_eraser_error(tid, a, True /* is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00003074 }
3075 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003076
sewardj18cd4a52002-11-13 22:37:41 +00003077 done:
3078 if (clo_execontext != EC_None && statechange) {
3079 EC_EIP eceip;
sewardj499e3de2002-11-13 22:22:25 +00003080
sewardj18cd4a52002-11-13 22:37:41 +00003081 if (clo_execontext == EC_Some)
njn72718642003-07-24 08:45:32 +00003082 eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003083 else
njn72718642003-07-24 08:45:32 +00003084 eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003085 setExeContext(a, eceip);
njn25e49d8e72002-09-23 09:36:25 +00003086 }
3087}
3088
njn72718642003-07-24 08:45:32 +00003089static void eraser_mem_write(Addr a, UInt size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00003090{
sewardj8fac99a2002-11-13 22:31:26 +00003091 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003092
sewardj8fac99a2002-11-13 22:31:26 +00003093 end = ROUNDUP(a+size, 4);
3094 a = ROUNDDN(a, 4);
3095
sewardj18cd4a52002-11-13 22:37:41 +00003096 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00003097 eraser_mem_write_word(a, tid);
njn25e49d8e72002-09-23 09:36:25 +00003098}
3099
3100#undef DEBUG_STATE
3101
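/* Helpers called from the instrumented code for individual loads and
   stores.  The sized write helpers are passed the value being stored and
   skip the state-machine update altogether when the store would not change
   memory (a store of an unchanged value is treated as a no-op). */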
sewardja5b3aec2002-10-22 05:09:36 +00003102static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00003103{
njn72718642003-07-24 08:45:32 +00003104 eraser_mem_read(a, 1, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003105}
3106
sewardja5b3aec2002-10-22 05:09:36 +00003107static void eraser_mem_help_read_2(Addr a)
3108{
njn72718642003-07-24 08:45:32 +00003109 eraser_mem_read(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003110}
3111
3112static void eraser_mem_help_read_4(Addr a)
3113{
njn72718642003-07-24 08:45:32 +00003114 eraser_mem_read(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003115}
3116
3117static void eraser_mem_help_read_N(Addr a, UInt size)
3118{
njn72718642003-07-24 08:45:32 +00003119 eraser_mem_read(a, size, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003120}
3121
3122static void eraser_mem_help_write_1(Addr a, UInt val)
3123{
3124 if (*(UChar *)a != val)
njn72718642003-07-24 08:45:32 +00003125 eraser_mem_write(a, 1, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003126}
3127static void eraser_mem_help_write_2(Addr a, UInt val)
3128{
3129 if (*(UShort *)a != val)
njn72718642003-07-24 08:45:32 +00003130 eraser_mem_write(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003131}
3132static void eraser_mem_help_write_4(Addr a, UInt val)
3133{
3134 if (*(UInt *)a != val)
njn72718642003-07-24 08:45:32 +00003135 eraser_mem_write(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003136}
3137static void eraser_mem_help_write_N(Addr a, UInt size)
sewardj7ab2aca2002-10-20 19:40:32 +00003138{
njn72718642003-07-24 08:45:32 +00003139 eraser_mem_write(a, size, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003140}
njn25e49d8e72002-09-23 09:36:25 +00003141
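/* Thread lifetime-segment (TLS) bookkeeping.  On create, the child gets a
   fresh segment with the parent's current segment as a predecessor, and
   the parent starts a new segment too; on join, the joiner starts a new
   segment with the joinee's final segment as a predecessor, and the
   joinee's segment is cleared.  Accesses ordered by create/join thus end
   up in segments that tlsIsDisjoint() can recognise as non-concurrent. */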
sewardjc4a810d2002-11-13 22:25:51 +00003142static void hg_thread_create(ThreadId parent, ThreadId child)
3143{
3144 if (0)
3145 VG_(printf)("CREATE: %u creating %u\n", parent, child);
3146
3147 newTLS(child);
3148 addPriorTLS(child, parent);
3149
3150 newTLS(parent);
3151}
3152
3153static void hg_thread_join(ThreadId joiner, ThreadId joinee)
3154{
3155 if (0)
3156 VG_(printf)("JOIN: %u joining on %u\n", joiner, joinee);
3157
3158 newTLS(joiner);
3159 addPriorTLS(joiner, joinee);
3160
3161 clearTLS(joinee);
3162}
3163
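/* Dummy lock modelling the hardware bus lock.  The instrumenter can call
   bus_lock()/bus_unlock() (registered as helpers below) around bus-locked
   operations, treating them as acquiring and releasing this pseudo-mutex
   so that they take part in the ordinary lockset computation. */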
sewardj7a5ebcf2002-11-13 22:42:13 +00003164static Int __BUS_HARDWARE_LOCK__;
3165
3166static void bus_lock(void)
3167{
3168 ThreadId tid = VG_(get_current_tid)();
3169 eraser_pre_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3170 eraser_post_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3171}
3172
3173static void bus_unlock(void)
3174{
3175 ThreadId tid = VG_(get_current_tid)();
3176 eraser_post_mutex_unlock(tid, &__BUS_HARDWARE_LOCK__);
3177}
3178
njn25e49d8e72002-09-23 09:36:25 +00003179/*--------------------------------------------------------------------*/
sewardj7f3ad222002-11-13 22:11:53 +00003180/*--- Client requests ---*/
3181/*--------------------------------------------------------------------*/
3182
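/* Client requests, issued from client code via the macros in helgrind.h:
   HG_CLEAN_MEMORY resets an address range to the virgin state, discarding
   its access history, while HG_KNOWN_RACE marks a range as holding a known
   race so that it need not be reported again. */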
njn72718642003-07-24 08:45:32 +00003183Bool SK_(handle_client_request)(ThreadId tid, UInt *args, UInt *ret)
sewardj7f3ad222002-11-13 22:11:53 +00003184{
3185 if (!VG_IS_SKIN_USERREQ('H','G',args[0]))
3186 return False;
3187
3188 switch(args[0]) {
3189 case VG_USERREQ__HG_CLEAN_MEMORY:
3190 set_address_range_state(args[1], args[2], Vge_VirginInit);
3191 *ret = 0; /* meaningless */
3192 break;
3193
3194 case VG_USERREQ__HG_KNOWN_RACE:
3195 set_address_range_state(args[1], args[2], Vge_Error);
3196 *ret = 0; /* meaningless */
3197 break;
3198
3199 default:
3200 return False;
3201 }
3202
3203 return True;
3204}
3205
3206
3207/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003208/*--- Setup ---*/
3209/*--------------------------------------------------------------------*/
3210
njn810086f2002-11-14 12:42:47 +00003211void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00003212{
3213 Int i;
sewardj4bffb232002-11-13 21:46:34 +00003214 LockSet *empty;
njn25e49d8e72002-09-23 09:36:25 +00003215
njn810086f2002-11-14 12:42:47 +00003216 VG_(details_name) ("Helgrind");
3217 VG_(details_version) (NULL);
3218 VG_(details_description) ("a data race detector");
3219 VG_(details_copyright_author)(
njn0e1b5142003-04-15 14:58:06 +00003220 "Copyright (C) 2002-2003, and GNU GPL'd, by Nicholas Nethercote.");
njndc38f332003-04-29 15:52:33 +00003221 VG_(details_bug_reports_to) ("jeremy@goop.org");
sewardj78210aa2002-12-01 02:55:46 +00003222 VG_(details_avg_translation_sizeB) ( 115 );
njn25e49d8e72002-09-23 09:36:25 +00003223
njn810086f2002-11-14 12:42:47 +00003224 VG_(needs_core_errors)();
3225 VG_(needs_skin_errors)();
3226 VG_(needs_data_syms)();
njn810086f2002-11-14 12:42:47 +00003227 VG_(needs_client_requests)();
3228 VG_(needs_command_line_options)();
njn25e49d8e72002-09-23 09:36:25 +00003229
njn810086f2002-11-14 12:42:47 +00003230 VG_(track_new_mem_startup) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003231
njn810086f2002-11-14 12:42:47 +00003232 /* stack ones not decided until VG_(post_clo_init)() */
njn25e49d8e72002-09-23 09:36:25 +00003233
njn810086f2002-11-14 12:42:47 +00003234 VG_(track_new_mem_brk) (& make_writable);
3235 VG_(track_new_mem_mmap) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003236
njn810086f2002-11-14 12:42:47 +00003237 VG_(track_change_mem_mprotect) (& eraser_set_perms);
njn25e49d8e72002-09-23 09:36:25 +00003238
njn810086f2002-11-14 12:42:47 +00003239 VG_(track_ban_mem_stack) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003240
njn810086f2002-11-14 12:42:47 +00003241 VG_(track_die_mem_stack) (NULL);
njn810086f2002-11-14 12:42:47 +00003242 VG_(track_die_mem_stack_signal) (NULL);
3243 VG_(track_die_mem_brk) (NULL);
3244 VG_(track_die_mem_munmap) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003245
njn810086f2002-11-14 12:42:47 +00003246 VG_(track_pre_mem_read) (& eraser_pre_mem_read);
3247 VG_(track_pre_mem_read_asciiz) (& eraser_pre_mem_read_asciiz);
3248 VG_(track_pre_mem_write) (& eraser_pre_mem_write);
3249 VG_(track_post_mem_write) (NULL);
3250
3251 VG_(track_post_thread_create) (& hg_thread_create);
3252 VG_(track_post_thread_join) (& hg_thread_join);
3253
 3254   VG_(track_pre_mutex_lock)       (& eraser_pre_mutex_lock);
3255 VG_(track_post_mutex_lock) (& eraser_post_mutex_lock);
3256 VG_(track_post_mutex_unlock) (& eraser_post_mutex_unlock);
sewardjc4a810d2002-11-13 22:25:51 +00003257
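   /* The hot fixed-size (1/2/4-byte) access helpers are registered as
      compact helpers, which the core can call more cheaply; the generic
      N-byte helpers and the bus-lock helpers are registered non-compact. */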
sewardja5b3aec2002-10-22 05:09:36 +00003258 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
3259 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
3260 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
3261 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
3262
3263 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
3264 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
3265 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
3266 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00003267
sewardj7a5ebcf2002-11-13 22:42:13 +00003268 VG_(register_noncompact_helper)((Addr) & bus_lock);
3269 VG_(register_noncompact_helper)((Addr) & bus_unlock);
3270
sewardj4bffb232002-11-13 21:46:34 +00003271 for(i = 0; i < LOCKSET_HASH_SZ; i++)
3272 lockset_hash[i] = NULL;
3273
3274 empty = alloc_LockSet(0);
3275 insert_LockSet(empty);
3276 emptyset = empty;
3277
sewardjc4a810d2002-11-13 22:25:51 +00003278 /* Init lock table and thread segments */
3279 for (i = 0; i < VG_N_THREADS; i++) {
sewardjdac0a442002-11-13 22:08:40 +00003280 thread_locks[i] = empty;
njn25e49d8e72002-09-23 09:36:25 +00003281
sewardjc4a810d2002-11-13 22:25:51 +00003282 newTLS(i);
3283 }
3284
njn25e49d8e72002-09-23 09:36:25 +00003285 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00003286 hg_malloc_list = VG_(HT_construct)();
njn25e49d8e72002-09-23 09:36:25 +00003287}
3288
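/* Option-parsing helpers: match_Bool() handles "--flag=yes|no" arguments
   and match_str() handles "--flag=<string>", returning the parsed value
   through 'ret' when the option prefix matches. */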
sewardjf6374322002-11-13 22:35:55 +00003289static Bool match_Bool(Char *arg, Char *argstr, Bool *ret)
3290{
3291 Int len = VG_(strlen)(argstr);
3292
3293 if (VG_(strncmp)(arg, argstr, len) == 0) {
3294 if (VG_(strcmp)(arg+len, "yes") == 0) {
3295 *ret = True;
3296 return True;
3297 } else if (VG_(strcmp)(arg+len, "no") == 0) {
3298 *ret = False;
3299 return True;
3300 } else
3301 VG_(bad_option)(arg);
3302 }
3303 return False;
3304}
3305
sewardj406270b2002-11-13 22:18:09 +00003306static Bool match_str(Char *arg, Char *argstr, Char **ret)
3307{
3308 Int len = VG_(strlen)(argstr);
3309
3310 if (VG_(strncmp)(arg, argstr, len) == 0) {
3311 *ret = VG_(strdup)(arg+len);
3312 return True;
3313 }
3314
3315 return False;
3316}
sewardj406270b2002-11-13 22:18:09 +00003317
3318Bool SK_(process_cmd_line_option)(Char* arg)
3319{
sewardj499e3de2002-11-13 22:22:25 +00003320 Char *str;
3321
3322 if (match_str(arg, "--show-last-access=", &str)) {
3323 Bool ok = True;
3324 if (VG_(strcmp)(str, "no") == 0)
3325 clo_execontext = EC_None;
3326 else if (VG_(strcmp)(str, "some") == 0)
3327 clo_execontext = EC_Some;
3328 else if (VG_(strcmp)(str, "all") == 0)
3329 clo_execontext = EC_All;
3330 else {
3331 ok = False;
3332 VG_(bad_option)(arg);
3333 }
3334
3335 VG_(free)(str);
3336 if (ok)
3337 return True;
3338 }
3339
sewardjf6374322002-11-13 22:35:55 +00003340 if (match_Bool(arg, "--private-stacks=", &clo_priv_stacks))
3341 return True;
3342
njn3e884182003-04-15 13:03:23 +00003343 return VG_(replacement_malloc_process_cmd_line_option)(arg);
sewardj406270b2002-11-13 22:18:09 +00003344}
3345
njn3e884182003-04-15 13:03:23 +00003346void SK_(print_usage)(void)
sewardj406270b2002-11-13 22:18:09 +00003347{
njn3e884182003-04-15 13:03:23 +00003348 VG_(printf)(
sewardje11d6c82002-12-15 02:00:41 +00003349" --private-stacks=yes|no assume thread stacks are used privately [no]\n"
3350" --show-last-access=no|some|all\n"
3351" show location of last word access on error [no]\n"
njn3e884182003-04-15 13:03:23 +00003352 );
3353 VG_(replacement_malloc_print_usage)();
sewardj406270b2002-11-13 22:18:09 +00003354}
3355
njn3e884182003-04-15 13:03:23 +00003356void SK_(print_debug_usage)(void)
3357{
3358 VG_(replacement_malloc_print_debug_usage)();
3359}
njn25e49d8e72002-09-23 09:36:25 +00003360
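/* The stack trackers cannot be chosen until the command line has been
   read: with --private-stacks=yes each thread's stack is assumed to be
   used only by that thread, so new stack memory goes through
   eraser_new_mem_stack_private instead of the default tracker. */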
3361void SK_(post_clo_init)(void)
3362{
njn810086f2002-11-14 12:42:47 +00003363 void (*stack_tracker)(Addr a, UInt len);
3364
sewardj499e3de2002-11-13 22:22:25 +00003365 if (clo_execontext) {
3366 execontext_map = VG_(malloc)(sizeof(ExeContextMap *) * 65536);
3367 VG_(memset)(execontext_map, 0, sizeof(ExeContextMap *) * 65536);
3368 }
sewardjf6374322002-11-13 22:35:55 +00003369
njn810086f2002-11-14 12:42:47 +00003370 if (clo_priv_stacks)
3371 stack_tracker = & eraser_new_mem_stack_private;
3372 else
3373 stack_tracker = & eraser_new_mem_stack;
sewardjf6374322002-11-13 22:35:55 +00003374
njn810086f2002-11-14 12:42:47 +00003375 VG_(track_new_mem_stack) (stack_tracker);
njn810086f2002-11-14 12:42:47 +00003376 VG_(track_new_mem_stack_signal) (stack_tracker);
njn25e49d8e72002-09-23 09:36:25 +00003377}
3378
3379
njn7d9f94d2003-04-22 21:41:40 +00003380void SK_(fini)(Int exitcode)
njn25e49d8e72002-09-23 09:36:25 +00003381{
sewardjdac0a442002-11-13 22:08:40 +00003382 if (DEBUG_LOCK_TABLE) {
sewardj4bffb232002-11-13 21:46:34 +00003383 pp_all_LockSets();
sewardjdac0a442002-11-13 22:08:40 +00003384 pp_all_mutexes();
3385 }
sewardj4bffb232002-11-13 21:46:34 +00003386
3387 if (LOCKSET_SANITY)
3388 sanity_check_locksets("SK_(fini)");
3389
sewardjff2c9232002-11-13 21:44:39 +00003390 VG_(message)(Vg_UserMsg, "%u possible data races found; %u lock order problems",
3391 n_eraser_warnings, n_lockorder_warnings);
sewardjf6374322002-11-13 22:35:55 +00003392
3393 if (0)
3394 VG_(printf)("stk_ld:%u+stk_st:%u = %u nonstk_ld:%u+nonstk_st:%u = %u %u%%\n",
3395 stk_ld, stk_st, stk_ld + stk_st,
3396 nonstk_ld, nonstk_st, nonstk_ld + nonstk_st,
3397 ((stk_ld+stk_st)*100) / (stk_ld + stk_st + nonstk_ld + nonstk_st));
njn25e49d8e72002-09-23 09:36:25 +00003398}
3399
3400/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00003401/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00003402/*--------------------------------------------------------------------*/