
/*--------------------------------------------------------------------*/
/*--- Helgrind: checking for data races in threaded programs.      ---*/
/*---                                                    hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting
   data races in threaded programs.

   Copyright (C) 2002-2004 Nicholas Nethercote
      njn25@cam.ac.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "vg_skin.h"
#include "helgrind.h"

static UInt n_eraser_warnings = 0;
static UInt n_lockorder_warnings = 0;

/*------------------------------------------------------------*/
/*--- Debug guff                                           ---*/
/*------------------------------------------------------------*/

#define DEBUG_LOCK_TABLE    0   /* Print lock table at end */

#define DEBUG_MAKE_ACCESSES 0   /* Print make_access() calls */
#define DEBUG_LOCKS         0   /* Print lock()/unlock() calls and locksets */
#define DEBUG_NEW_LOCKSETS  0   /* Print new locksets when created */
#define DEBUG_ACCESSES      0   /* Print reads, writes */
#define DEBUG_MEM_LOCKSET_CHANGES 0
                                /* Print when an address's lockset
                                   changes; only useful with
                                   DEBUG_ACCESSES */
#define SLOW_ASSERTS        0   /* do expensive asserts */
#define DEBUG_VIRGIN_READS  0   /* Dump around address on VIRGIN reads */

#if SLOW_ASSERTS
#define SK_ASSERT(x) sk_assert(x)
#else
#define SK_ASSERT(x)
#endif

/* heavyweight LockSet sanity checking:
   0 == never
   1 == after important ops
   2 == As 1 and also after pthread_mutex_* ops (excessively slow)
 */
#define LOCKSET_SANITY 0

/* Rotate an unsigned quantity left */
#define ROTL(x, n)     (((x) << (n)) | ((x) >> ((sizeof(x)*8)-(n))))

/* Round a up to the next multiple of N.  N must be a power of 2 */
#define ROUNDUP(a, N)  ((a + N - 1) & ~(N-1))

/* Round a down to the next multiple of N.  N must be a power of 2 */
#define ROUNDDN(a, N)  ((a) & ~(N-1))
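
/* Worked examples (illustrative only, assuming 32-bit UInts as used
   throughout this file):
      ROUNDUP(0x1001, 4) == 0x1004      ROUNDUP(0x1000, 4) == 0x1000
      ROUNDDN(0x1003, 4) == 0x1000      ROTL(0x80000001u, 1) == 0x3   */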

/*------------------------------------------------------------*/
/*--- Command line options                                 ---*/
/*------------------------------------------------------------*/

static enum {
   EC_None,
   EC_Some,
   EC_All
} clo_execontext = EC_None;

static Bool clo_priv_stacks = False;

/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

// PPP: work out if I want this

#define PROF_EVENT(x)
#if 0
#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

void VGE_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                   \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);    \
        event_ctr[ev]++;                                 \
   } while (False);

#else

//static void init_prof_mem ( void ) { }
// void VG_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif /* VG_PROFILE_MEMORY */

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   [PPP: snip event numbers...]
*/
#endif /* 0 */


/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef
   struct _HG_Chunk {
      struct _HG_Chunk* next;
      Addr         data;     /* ptr to actual block */
      Int          size;     /* size requested */
      ExeContext*  where;    /* where it was allocated */
      ThreadId     tid;      /* allocating thread */
   }
   HG_Chunk;

typedef enum
   { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit, Vge_Error }
   VgeInitStatus;


/* Should add up to 32 to fit in one word */
#define OTHER_BITS 30
#define STATE_BITS 2

#define ESEC_MAP_WORDS 16384   /* Words per secondary map */

/* This is for indicating that a memory block has been initialised but not
 * really directly by a particular thread... (eg. text/data initialised
 * automatically at startup).
 * Must be different to virgin_sword.other */
#define TID_INDICATING_NONVIRGIN    1

/* Magic packed TLS used for error suppression; if word state is Excl
   and tid is this, then it means all accesses are OK without changing
   state and without raising any more errors */
#define TLSP_INDICATING_ALL    ((1 << OTHER_BITS) - 1)

/* Number of entries must fit in STATE_BITS bits */
typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;

static inline const Char *pp_state(pth_state st)
{
   const Char *ret;

   switch(st) {
   case Vge_Virgin:  ret = "virgin"; break;
   case Vge_Excl:    ret = "exclusive"; break;
   case Vge_Shar:    ret = "shared RO"; break;
   case Vge_SharMod: ret = "shared RW"; break;
   default:          ret = "???";
   }
   return ret;
}

typedef
   struct {
      /* gcc arranges this bitfield with state in the 2LSB and other
         in the 30MSB, which is what we want */
      UInt state:STATE_BITS;
      UInt other:OTHER_BITS;
   } shadow_word;

#define SW(st, other) ((shadow_word) { st, other })
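
/* For instance (illustrative only): given the gcc layout noted in the
   struct above, a shadow_word is one 32-bit word with .state in bits
   1..0 and .other in bits 31..2, so virgin_sword below (SW(Vge_Virgin, 0))
   is the all-zero word, while error_sword packs Vge_Excl together with
   the magic TLSP_INDICATING_ALL value. */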

typedef
   struct {
      shadow_word swords[ESEC_MAP_WORDS];
   }
   ESecMap;

static ESecMap* primary_map[ 65536 ];
static ESecMap  distinguished_secondary_map;

static const shadow_word virgin_sword = SW(Vge_Virgin, 0);
static const shadow_word error_sword  = SW(Vge_Excl, TLSP_INDICATING_ALL);

#define VGE_IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                  \
   do {                                                               \
      if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {       \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);     \
         /*VG_(printf)("new 2map because of %p\n", addr);*/           \
      }                                                               \
   } while(0)


/* Parallel map which contains execution contexts when words last
   changed state (if required) */

typedef struct EC_EIP {
   union u_ec_eip {
      Addr        eip;
      ExeContext *ec;
   } uu_ec_eip;
   UInt state:STATE_BITS;
   UInt tls:OTHER_BITS;      /* packed TLS */
} EC_EIP;

#define NULL_EC_EIP  ((EC_EIP){ { 0 }, 0, 0})

#define EIP(eip, prev, tls)  ((EC_EIP) { (union u_ec_eip)(eip), (prev).state, packTLS(tls) })
#define EC(ec, prev, tls)    ((EC_EIP) { (union u_ec_eip)(ec), (prev).state, packTLS(tls) })

static inline UInt packEC(ExeContext *ec)
{
   SK_ASSERT(((UInt)ec & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)ec) >> STATE_BITS;
}

static inline ExeContext *unpackEC(UInt i)
{
   return (ExeContext *)(i << STATE_BITS);
}

/* Lose 2 LSB of eip */
static inline UInt packEIP(Addr eip)
{
   return ((UInt)eip) >> STATE_BITS;
}

static inline Addr unpackEIP(UInt i)
{
   return (Addr)(i << STATE_BITS);
}
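
/* For instance (illustrative only): ExeContext pointers are word-aligned
   (that is what the SK_ASSERT in packEC checks), so packEC/unpackEC
   round-trip exactly, whereas unpackEIP(packEIP(0x80486a7)) == 0x80486a4
   -- the bottom two bits of a code address really are lost, as the
   comment above says. */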

typedef struct {
   EC_EIP execontext[ESEC_MAP_WORDS];
} ExeContextMap;

static ExeContextMap** execontext_map;

static inline void setExeContext(Addr a, EC_EIP ec)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >> 2) & 0x3fff;

   if (execontext_map[idx] == NULL) {
      execontext_map[idx] = VG_(malloc)(sizeof(ExeContextMap));
      VG_(memset)(execontext_map[idx], 0, sizeof(ExeContextMap));
   }

   execontext_map[idx]->execontext[off] = ec;
}

static inline EC_EIP getExeContext(Addr a)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >> 2) & 0x3fff;
   EC_EIP ec = NULL_EC_EIP;

   if (execontext_map[idx] != NULL)
      ec = execontext_map[idx]->execontext[off];

   return ec;
}

/*------------------------------------------------------------*/
/*--- Thread lifetime segments                             ---*/
/*------------------------------------------------------------*/

/*
 * This mechanism deals with the common case of a parent thread
 * creating a structure for a child thread, and then passing ownership
 * of the structure to that thread.  It similarly copes with a child
 * thread passing information back to another thread waiting to join
 * on it.
 *
 * Each thread's lifetime can be partitioned into segments.  Those
 * segments are arranged to form an interference graph which indicates
 * whether two thread lifetime segments can possibly be concurrent.
 * If not, then memory which is exclusively accessed by one TLS can be
 * passed on to another TLS without an error occurring, and without
 * moving it from Excl state.
 *
 * At present this only considers thread creation and join as
 * synchronisation events for creating new lifetime segments, but
 * others may be possible (like mutex operations).
 */
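
/* A sketch of the intended use (illustrative only): parent thread P
   fills in a block (the block becomes Excl to P's current segment),
   then creates child thread C and never touches the block again.  C's
   initial segment has P's segment as a prior, so P's segment cannot be
   concurrent with C's; the block can therefore stay Excl when C starts
   using it, rather than being demoted to a shared state. */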

typedef struct _ThreadLifeSeg ThreadLifeSeg;

struct _ThreadLifeSeg {
   ThreadId      tid;
   ThreadLifeSeg *prior[2];   /* Previous lifetime segments */
   UInt          refcount;    /* Number of memory locations pointing here */
   UInt          mark;        /* mark used for graph traversal */
   ThreadLifeSeg *next;       /* list of all TLS */
};

static ThreadLifeSeg *all_tls;
static UInt tls_since_gc;
#define TLS_SINCE_GC 10000

/* current mark used for TLS graph traversal */
static UInt tlsmark;

static ThreadLifeSeg *thread_seg[VG_N_THREADS];


static void tls_gc(void)
{
   /* XXX later.  Walk through all TLSs and look for ones with 0
      refcount and remove them from the structure and free them.
      Could probably get rid of ThreadLifeSeg.refcount and simply use
      mark-sweep from the shadow table. */
   VG_(printf)("WRITEME: TLS GC\n");
}

static void newTLS(ThreadId tid)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls;

   /* Initial NULL */
   if (thread_seg[tid] == NULL) {
      tls = VG_(malloc)(sizeof(*tls));
      tls->tid = tid;
      tls->prior[0] = tls->prior[1] = NULL;
      tls->refcount = 0;
      tls->mark = tlsmark-1;

      tls->next = all_tls;
      all_tls = tls;
      tls_since_gc++;

      thread_seg[tid] = tls;
      return;
   }

   /* Previous TLS was unused, so just recycle */
   if (thread_seg[tid]->refcount == 0) {
      if (debug)
         VG_(printf)("newTLS; recycling TLS %p for tid %u\n",
                     thread_seg[tid], tid);
      return;
   }

   /* Use existing TLS for this tid as a prior for new TLS */
   tls = VG_(malloc)(sizeof(*tls));
   tls->tid = tid;
   tls->prior[0] = thread_seg[tid];
   tls->prior[1] = NULL;
   tls->refcount = 0;
   tls->mark = tlsmark-1;

   tls->next = all_tls;
   all_tls = tls;
   if (++tls_since_gc > TLS_SINCE_GC) {
      tls_gc();
      tls_since_gc = 0;
   }

   if (debug)
      VG_(printf)("newTLS: made new TLS %p for tid %u (prior %p(%u))\n",
                  tls, tid, tls->prior[0], tls->prior[0]->tid);

   thread_seg[tid] = tls;
}

/* clear out a TLS for a thread that's died */
static void clearTLS(ThreadId tid)
{
   newTLS(tid);

   thread_seg[tid]->prior[0] = NULL;
   thread_seg[tid]->prior[1] = NULL;
}

static void addPriorTLS(ThreadId tid, ThreadId prior)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls = thread_seg[tid];

   if (debug)
      VG_(printf)("making TLS %p(%u) prior to TLS %p(%u)\n",
                  thread_seg[prior], prior, tls, tid);

   sk_assert(thread_seg[tid] != NULL);
   sk_assert(thread_seg[prior] != NULL);

   if (tls->prior[0] == NULL)
      tls->prior[0] = thread_seg[prior];
   else {
      sk_assert(tls->prior[1] == NULL);
      tls->prior[1] = thread_seg[prior];
   }
}

/* Return True if prior is definitely not concurrent with tls */
static Bool tlsIsDisjoint(const ThreadLifeSeg *tls,
                          const ThreadLifeSeg *prior)
{
   Bool isPrior(const ThreadLifeSeg *t) {
      if (t == NULL || t->mark == tlsmark)
         return False;

      if (t == prior)
         return True;

      ((ThreadLifeSeg *)t)->mark = tlsmark;

      return isPrior(t->prior[0]) || isPrior(t->prior[1]);
   }
   tlsmark++;      /* new traversal mark */

   return isPrior(tls);
}

static inline UInt packTLS(ThreadLifeSeg *tls)
{
   SK_ASSERT(((UInt)tls & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)tls) >> STATE_BITS;
}

static inline ThreadLifeSeg *unpackTLS(UInt i)
{
   return (ThreadLifeSeg *)(i << STATE_BITS);
}

/*------------------------------------------------------------*/
/*--- Low-level support for memory tracking.               ---*/
/*------------------------------------------------------------*/

/*
   All reads and writes are recorded in the memory map, which
   records the state of all memory in the process.  The memory map is
   organised like that for normal Valgrind, except that everything
   is done at word-level instead of byte-level, and each word has only
   one word of shadow (instead of 36 bits).

   As for normal Valgrind there is a distinguished secondary map.  But we're
   working at word-granularity, so it has 16k word entries instead of 64k byte
   entries.  Lookup is done as follows:

   bits 31..16: primary map lookup
   bits 15.. 2: secondary map lookup
   bits  1.. 0: ignored
*/
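
/* For instance (illustrative only): for a == 0x40123457 the lookup uses
   primary_map[0x4012] and secondary index ((a & 0xFFFC) >> 2) == 0xD15;
   the bottom two bits of the address play no part. */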


/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map, marking all words as virgin. */

/* Just a value that isn't a real pointer */
#define SEC_MAP_ACCESS (shadow_word*)0x99


static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt i;
   //PROF_EVENT(10); PPP

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. (SSS: not true for ESecMaps -- they're 16 pages) */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = (ESecMap *)VG_(shadow_alloc)(sizeof(ESecMap));

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}


/* Set a word.  The byte given by 'a' could be anywhere in the word -- the
 * whole word gets set. */
static /* __inline__ */
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;
   shadow_word *oldsw;

   //PROF_EVENT(23); PPP
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   oldsw = &sm->swords[(a & 0xFFFC) >> 2];
   if (oldsw->state == Vge_Excl && oldsw->other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(oldsw->other);
      tls->refcount--;
   }

   if (sword.state == Vge_Excl && sword.other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(sword.other);
      tls->refcount++;
   }

   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}


static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt     sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}


// SSS: rename these so they're not so similar to memcheck, unless it's
// appropriate of course

static __inline__
void init_virgin_sword(Addr a)
{
   if (clo_execontext != EC_None)
      setExeContext(a, NULL_EC_EIP);
   set_sword(a, virgin_sword);
}

static __inline__
void init_error_sword(Addr a)
{
   set_sword(a, error_sword);
}


static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();
   ThreadLifeSeg *tls;

   sk_assert(tid != VG_INVALID_THREADID);
   tls = thread_seg[tid];

   sword = SW(Vge_Excl, packTLS(tls));
   set_sword(a, sword);
}


/* In this case, we treat it for Eraser's sake like virgin (it hasn't
 * been inited by a particular thread, it's just done automatically upon
 * startup), but we mark its .state specially so it doesn't look like an
 * uninited read. */
static __inline__
void init_magically_inited_sword(Addr a)
{
   shadow_word sword;

   sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());

   sword = SW(Vge_Virgin, TID_INDICATING_NONVIRGIN);

   set_sword(a, sword);
}


/*------------------------------------------------------------*/
/*--- Implementation of lock sets.                         ---*/
/*------------------------------------------------------------*/

typedef struct _Mutex Mutex;         /* forward decl */
typedef struct _LockSet LockSet;

typedef enum MutexState {
   MxUnknown,                        /* don't know */
   MxUnlocked,                       /* unlocked */
   MxLocked,                         /* locked */
   MxDead                            /* destroyed */
} MutexState;

struct _Mutex {
   Addr           mutexp;
   Mutex         *next;

   MutexState     state;             /* mutex state */
   ThreadId       tid;               /* owner */
   ExeContext    *location;          /* where the last change happened */

   const LockSet *lockdep;           /* set of locks we depend on */
   UInt           mark;              /* mark for graph traversal */
};

static inline Int mutex_cmp(const Mutex *a, const Mutex *b)
{
   return a->mutexp - b->mutexp;
}

struct _LockSet {
   Int          setsize;             /* number of members */
   UInt         hash;                /* hash code */
   LockSet     *next;                /* next in hash chain */
   const Mutex *mutex[0];            /* locks */
};

static const LockSet *emptyset;

/* The lockset currently held by each thread. */
static const LockSet *thread_locks[VG_N_THREADS];

#define LOCKSET_HASH_SZ 1021

static LockSet *lockset_hash[LOCKSET_HASH_SZ];

/* Pack and unpack a LockSet pointer into shadow_word.other */
static inline UInt packLockSet(const LockSet *p)
{
   UInt id;

   SK_ASSERT(((UInt)p & ((1 << STATE_BITS)-1)) == 0);
   id = ((UInt)p) >> STATE_BITS;

   return id;
}

static inline const LockSet *unpackLockSet(UInt id)
{
   return (LockSet *)(id << STATE_BITS);
}

static
void pp_LockSet(const LockSet* p)
{
   Int i;
   VG_(printf)("{ ");
   for(i = 0; i < p->setsize; i++) {
      const Mutex *mx = p->mutex[i];

      VG_(printf)("%p%(y ", mx->mutexp, mx->mutexp);
   }
   VG_(printf)("}\n");
}


static void print_LockSet(const Char *s, const LockSet *ls)
{
   VG_(printf)("%s: ", s);
   pp_LockSet(ls);
}

/* Compute the hash of a LockSet */
static UInt hash_LockSet_w_wo(const LockSet *ls,
                              const Mutex *with,
                              const Mutex *without)
{
   Int i;
   UInt hash = ls->setsize + (with != NULL) - (without != NULL);

   sk_assert(with == NULL || with != without);

   for(i = 0; with != NULL || i < ls->setsize; i++) {
      const Mutex *mx = i >= ls->setsize ? NULL : ls->mutex[i];

      if (without && mutex_cmp(without, mx) == 0)
         continue;

      if (with && (mx == NULL || mutex_cmp(with, mx) < 0)) {
         mx = with;
         with = NULL;
         i--;
      }

      hash = ROTL(hash, 17);
      hash ^= (UInt)mx->mutexp;
   }

   return hash % LOCKSET_HASH_SZ;
}

static inline UInt hash_LockSet_with(const LockSet *ls, const Mutex *with)
{
   UInt hash = hash_LockSet_w_wo(ls, with, NULL);

   if (0)
      VG_(printf)("hash_with %p+%p -> %d\n", ls, with->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet_without(const LockSet *ls, const Mutex *without)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, without);

   if (0)
      VG_(printf)("hash_with %p-%p -> %d\n", ls, without->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet(const LockSet *ls)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, NULL);

   if (0)
      VG_(printf)("hash %p -> %d\n", ls, hash);

   return hash;
}

static
Bool structural_eq_LockSet(const LockSet* a, const LockSet* b)
{
   Int i;

   if (a == b)
      return True;
   if (a->setsize != b->setsize)
      return False;

   for(i = 0; i < a->setsize; i++) {
      if (mutex_cmp(a->mutex[i], b->mutex[i]) != 0)
         return False;
   }

   return True;
}


/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion.  Returns True if they match.
 */
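/* Worked example (illustrative only): with a = {M1, M3}, missing_mutex
 * = M2 and b = {M1, M2, M3} (all kept sorted by mutex address) the
 * answer is True; with b = {M1, M2, M4} it is False.  No intermediate
 * set is ever built. */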
static Bool
weird_LockSet_equals(const LockSet* a, const LockSet* b,
                     const Mutex *missing_mutex)
{
   static const Bool debug = False;
   Int ia, ib;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet(" b", b);
      VG_(printf)( " missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
   }

   if ((a->setsize + 1) != b->setsize) {
      if (debug)
         VG_(printf)(" fastpath length mismatch -> 0\n");
      return False;
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing mutex itself
      3 the section after missing_mutex to the end of a
    */

   ia = 0;
   ib = 0;

   /* 1: up to missing_mutex */
   for(; ia < a->setsize && mutex_cmp(a->mutex[ia], missing_mutex) < 0; ia++, ib++) {
      if (debug) {
         print_LockSet(" 1:a", a);
         print_LockSet(" 1:b", b);
      }
      if (ib == b->setsize || mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   /* 2: missing_mutex itself */
   if (debug) {
      VG_(printf)( " 2:missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet(" 2: b", b);
   }

   sk_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);

   if (ib == b->setsize || mutex_cmp(missing_mutex, b->mutex[ib]) != 0)
      return False;

   ib++;

   /* 3: after missing_mutex to end */

   for(; ia < a->setsize && ib < b->setsize; ia++, ib++) {
      if (debug) {
         print_LockSet(" 3:a", a);
         print_LockSet(" 3:b", b);
      }
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   if (debug)
      VG_(printf)(" ia=%d ib=%d --> %d\n", ia, ib, ia == a->setsize && ib == b->setsize);

   return ia == a->setsize && ib == b->setsize;
}



static const LockSet *lookup_LockSet(const LockSet *set)
{
   UInt bucket = set->hash;
   LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (set == ret || structural_eq_LockSet(set, ret))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_with(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_with(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(set, ret, mutex))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_without(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_without(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(ret, set, mutex))
         return ret;

   return NULL;
}

static void insert_LockSet(LockSet *set)
{
   UInt hash = hash_LockSet(set);

   set->hash = hash;

   sk_assert(lookup_LockSet(set) == NULL);

   set->next = lockset_hash[hash];
   lockset_hash[hash] = set;
}

static inline
LockSet *alloc_LockSet(UInt setsize)
{
   LockSet *ret = VG_(malloc)(sizeof(*ret) + sizeof(Mutex *) * setsize);
   ret->setsize = setsize;
   return ret;
}

static inline
void free_LockSet(LockSet *p)
{
   /* assert: not present in hash */
   VG_(free)(p);
}

static
void pp_all_LockSets ( void )
{
   Int i;
   Int sets, buckets;

   sets = buckets = 0;
   for (i = 0; i < LOCKSET_HASH_SZ; i++) {
      const LockSet *ls = lockset_hash[i];
      Bool first = True;

      for(; ls != NULL; ls = ls->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)(" ");

         sets++;
         first = False;
         pp_LockSet(ls);
      }
   }

   VG_(printf)("%d distinct LockSets in %d buckets\n", sets, buckets);
}

static inline Bool isempty(const LockSet *ls)
{
   return ls == NULL || ls->setsize == 0;
}

static Bool ismember(const LockSet *ls, const Mutex *mx)
{
   Int i;

   /* XXX use binary search */
   for(i = 0; i < ls->setsize; i++)
      if (mutex_cmp(mx, ls->mutex[i]) == 0)
         return True;

   return False;
}

/* Check invariants:
   - all locksets are unique
   - each set is an array in strictly increasing order of mutex addr
*/
static
void sanity_check_locksets ( const Char* caller )
{
   Int         i;
   const Char *badness;
   LockSet    *ls;

   for(i = 0; i < LOCKSET_HASH_SZ; i++) {

      for(ls = lockset_hash[i]; ls != NULL; ls = ls->next) {
         const Mutex *prev;
         Int j;

         if (hash_LockSet(ls) != ls->hash) {
            badness = "mismatched hash";
            goto bad;
         }
         if (ls->hash != (UInt)i) {
            badness = "wrong bucket";
            goto bad;
         }
         if (lookup_LockSet(ls) != ls) {
            badness = "non-unique set";
            goto bad;
         }

         prev = ls->mutex[0];
         for(j = 1; j < ls->setsize; j++) {
            if (mutex_cmp(prev, ls->mutex[j]) >= 0) {
               badness = "mutexes out of order";
               goto bad;
            }
            prev = ls->mutex[j];
         }
      }
   }
   return;

 bad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, ls=%p badness = %s, caller = %s\n",
               i, ls, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}

static
LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("add-IN mutex %p%(y\n", mx->mutexp, mx->mutexp);
      print_LockSet("add-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("add-IN");

   sk_assert(!ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize+1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (debug)
         VG_(printf)("i=%d j=%d ls->mutex[i]=%p mx=%p\n",
                     i, j, ls->mutex[i]->mutexp, mx ? mx->mutexp : 0);
      if (mx && mutex_cmp(mx, ls->mutex[i]) < 0) {
         ret->mutex[j++] = mx;
         mx = NULL;
      }
      ret->mutex[j++] = ls->mutex[i];
   }

   /* not added in loop - must be after */
   if (mx)
      ret->mutex[j++] = mx;

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("add-OUT", ret);
      sanity_check_locksets("add-OUT");
   }
   return ret;
}

/* Builds ls with mx removed.  mx should actually be in ls!
   (a checked assertion).  Resulting set should not already
   exist in the table (unchecked).
*/
static
LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("remove-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("remove-IN");

   sk_assert(ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize-1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (mutex_cmp(ls->mutex[i], mx) == 0)
         continue;
      ret->mutex[j++] = ls->mutex[i];
   }

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("remove-OUT", ret);
      sanity_check_locksets("remove-OUT");
   }
   return ret;
}


/* Builds the intersection, and then unbuilds it if it's already in the table.
 */
static const LockSet *_intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("intersect-IN");

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("intersect a", a);
      print_LockSet("intersect b", b);
   }

   /* count the size of the new set */
   size = 0;
   ia = ib = 0;
   for(size = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         size++;
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   /* Build the intersection of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         sk_assert(iret < ret->setsize);
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      free_LockSet(ret);
   } else {
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("intersect-OUT", found);
      sanity_check_locksets("intersect-OUT");
   }

   return found;
}

/* inline the fastpath */
static inline const LockSet *intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("intersect-same fastpath", a);
      }
      return a;
   }

   if (isempty(a) || isempty(b)) {
      if (debug)
         VG_(printf)("intersect empty fastpath\n");
      return emptyset;
   }

   return _intersect(a, b);
}


static const LockSet *ls_union(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("union-IN");

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("union-same fastpath", a);
      }
      return a;
   }

   if (isempty(a)) {
      if (debug)
         print_LockSet("union a=empty b", b);
      return b;
   }
   if (isempty(b)) {
      if (debug)
         print_LockSet("union b=empty a", a);
      return a;
   }

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* count the size of the new set */
   for(size = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         size++;
         ia++;
         ib++;
      } else if (cmp < 0) {
         size++;
         ia++;
      } else {
         sk_assert(cmp > 0);
         size++;
         ib++;
      }
   }

   /* Build the union of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;
      sk_assert(iret < ret->setsize);

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (cmp < 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
      } else {
         sk_assert(cmp > 0);
         ret->mutex[iret++] = b->mutex[ib];
         ib++;
      }
   }

   sk_assert(iret == ret->setsize);

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      if (debug)
         print_LockSet("union found existing set", found);
      free_LockSet(ret);
   } else {
      if (debug)
         print_LockSet("union inserting new set", ret);
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("union-OUT", found);
      sanity_check_locksets("union-OUT");
   }

   return found;
}

/*------------------------------------------------------------*/
/*--- Implementation of mutex structure.                   ---*/
/*------------------------------------------------------------*/

static UInt graph_mark;        /* current mark we're using for graph traversal */

static void record_mutex_error(ThreadId tid, Mutex *mutex,
                               Char *str, ExeContext *ec);
static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
                                   const LockSet *lockset_holding,
                                   const LockSet *lockset_prev);

static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid);

#define M_MUTEX_HASHSZ 1021

static Mutex *mutex_hash[M_MUTEX_HASHSZ];
static UInt total_mutexes;

static const Char *pp_MutexState(MutexState st)
{
   switch(st) {
   case MxLocked:   return "Locked";
   case MxUnlocked: return "Unlocked";
   case MxDead:     return "Dead";
   case MxUnknown:  return "Unknown";
   }
   return "???";
}

static void pp_all_mutexes(void)
{
   Int i;
   Int locks, buckets;

   locks = buckets = 0;
   for(i = 0; i < M_MUTEX_HASHSZ; i++) {
      Mutex *mx;
      Bool first = True;

      for(mx = mutex_hash[i]; mx != NULL; mx = mx->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)(" ");
         locks++;
         first = False;
         VG_(printf)("%p [%8s] -> %p%(y\n",
                     mx, pp_MutexState(mx->state), mx->mutexp, mx->mutexp);
      }
   }

   VG_(printf)("%d locks in %d buckets (%d allocated)\n",
               locks, buckets, total_mutexes);
}

/* find or create a Mutex for a program's mutex use */
static Mutex *get_mutex(Addr mutexp)
{
   UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
   Mutex *mp;

   for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
      if (mp->mutexp == mutexp)
         return mp;

   total_mutexes++;

   mp = VG_(malloc)(sizeof(*mp));
   mp->mutexp = mutexp;
   mp->next = mutex_hash[bucket];
   mutex_hash[bucket] = mp;

   mp->state = MxUnknown;
   mp->tid = VG_INVALID_THREADID;
   mp->location = NULL;

   mp->lockdep = emptyset;
   mp->mark = graph_mark - 1;

   return mp;
}

/* Find all mutexes in a range of memory, and call the callback.
   Remove the mutex from the hash if the callback returns True (the
   mutex structure itself is not freed, because it may be pointed to
   by a LockSet). */
static void find_mutex_range(Addr start, Addr end, Bool (*action)(Mutex *))
{
   UInt first = start % M_MUTEX_HASHSZ;
   UInt last = (end+1) % M_MUTEX_HASHSZ;
   UInt i;

   /* Single pass over the hash table, looking for likely hashes */
   for(i = first; i != last; ) {
      Mutex *mx;
      Mutex **prev = &mutex_hash[i];

      for(mx = mutex_hash[i]; mx != NULL; prev = &mx->next, mx = mx->next) {
         if (mx->mutexp >= start && mx->mutexp < end && (*action)(mx))
            *prev = mx->next;
      }

      if (++i == M_MUTEX_HASHSZ)
         i = 0;
   }
}

#define MARK_LOOP  (graph_mark+0)
#define MARK_DONE  (graph_mark+1)

static Bool check_cycle(const Mutex *start, const LockSet* lockset)
{
   Bool check_cycle_inner(const Mutex *mutex, const LockSet *ls)
   {
      static const Bool debug = False;
      Int i;

      if (mutex->mark == MARK_LOOP)
         return True;            /* found cycle */
      if (mutex->mark == MARK_DONE)
         return False;           /* been here before, it's OK */

      ((Mutex*)mutex)->mark = MARK_LOOP;

      if (debug)
         VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
                     graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
      for(i = 0; i < ls->setsize; i++) {
         const Mutex *mx = ls->mutex[i];

         if (debug)
            VG_(printf)(" %y ls=%p (ls->mutex=%p%(y)\n",
                        mutex->mutexp, ls,
                        mx->mutexp, mx->mutexp);
         if (check_cycle_inner(mx, mx->lockdep))
            return True;
      }
      ((Mutex*)mutex)->mark = MARK_DONE;

      return False;
   }

   graph_mark += 2;        /* clear all marks */

   return check_cycle_inner(start, lockset);
}
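
/* Worked example (illustrative only): if thread T1 locks A and then B,
   B's lockdep becomes {A}.  If T2 later tries to lock A while holding B,
   check_cycle(A, {B}) follows B's lockdep edge back to A, finds A marked
   MARK_LOOP and returns True -- i.e. a lock-order (potential deadlock)
   violation, reported via record_lockgraph_error below. */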

/* test to see if a mutex state change would be problematic; this
   makes no changes to the mutex state.  This should be called before
   the locking thread has actually blocked. */
static void test_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
   static const Bool debug = False;

   if (mutex->state == MxDead) {
      Char *str;

      switch(state) {
      case MxLocked:   str = "lock dead mutex"; break;
      case MxUnlocked: str = "unlock dead mutex"; break;
      default:         str = "operate on dead mutex"; break;
      }

      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex, str, mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (check_cycle(mutex, thread_locks[tid]))
         record_lockgraph_error(tid, mutex, thread_locks[tid], mutex->lockdep);
      else {
         mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

         if (debug) {
            VG_(printf)("giving mutex %p%(y lockdep = %p ",
                        mutex->mutexp, mutex->mutexp, mutex->lockdep);
            print_LockSet("lockdep", mutex->lockdep);
         }
      }
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked) {
         record_mutex_error(tid, mutex,
                            "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
         record_mutex_error(tid, mutex,
                            "unlock someone else's mutex", mutex->location);
      }
      break;

   case MxDead:
      break;

   default:
      break;
   }
}

/* Update a mutex state.  Expects most error testing and reporting to
   have happened in test_mutex_state().  The assumption is that no
   client code is run by thread tid between test and set, either
   because it is blocked or test and set are called together
   atomically.

   Setting state to MxDead is the exception, since that can happen as
   a result of any thread freeing memory; in this case set_mutex_state
   does all the error reporting as well.
*/
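/* Illustrative call sequence (an assumption about how the pair is meant
   to be used, based on the comments above): for a lock request the tool
   would call test_mutex_state(mx, MxLocked, tid) before tid blocks, and
   set_mutex_state(mx, MxLocked, tid) once the lock has actually been
   acquired, with no client code run by tid in between. */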
njn72718642003-07-24 08:45:32 +00001517static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
sewardjdca84112002-11-13 22:29:34 +00001518{
1519 static const Bool debug = False;
1520
1521 if (debug)
1522 VG_(printf)("\ntid %d changing mutex (%p)->%p%(y state %s -> %s\n",
1523 tid, mutex, mutex->mutexp, mutex->mutexp,
1524 pp_MutexState(mutex->state), pp_MutexState(state));
1525
1526 if (mutex->state == MxDead) {
1527 /* can't do anything legal to a destroyed mutex */
1528 return;
1529 }
1530
1531 switch(state) {
1532 case MxLocked:
sewardj4bffb232002-11-13 21:46:34 +00001533 if (mutex->state == MxLocked) {
1534 if (mutex->tid != tid)
1535 record_mutex_error(tid, mutex, "take lock held by someone else",
1536 mutex->location);
1537 else
1538 record_mutex_error(tid, mutex, "take lock we already hold",
1539 mutex->location);
1540
1541 VG_(skin_panic)("core should have checked this\n");
1542 break;
1543 }
sewardjc26cc252002-10-23 21:58:55 +00001544
1545 sk_assert(!check_cycle(mutex, mutex->lockdep));
1546
sewardjc26cc252002-10-23 21:58:55 +00001547 mutex->tid = tid;
1548 break;
1549
1550 case MxUnlocked:
1551 if (debug)
sewardj4bffb232002-11-13 21:46:34 +00001552 print_LockSet("thread holding", thread_locks[tid]);
sewardjc26cc252002-10-23 21:58:55 +00001553
sewardjdca84112002-11-13 22:29:34 +00001554 if (mutex->state != MxLocked || mutex->tid != tid)
1555 break;
1556
sewardjc26cc252002-10-23 21:58:55 +00001557 mutex->tid = VG_INVALID_THREADID;
1558 break;
1559
sewardjdac0a442002-11-13 22:08:40 +00001560 case MxDead:
1561 if (mutex->state == MxLocked) {
1562 /* forcably remove offending lock from thread's lockset */
1563 sk_assert(ismember(thread_locks[mutex->tid], mutex));
1564 thread_locks[mutex->tid] = remove_LockSet(thread_locks[mutex->tid], mutex);
1565 mutex->tid = VG_INVALID_THREADID;
1566
1567 record_mutex_error(tid, mutex,
1568 "free locked mutex", mutex->location);
1569 }
1570 break;
1571
sewardjc26cc252002-10-23 21:58:55 +00001572 default:
1573 break;
1574 }
1575
njn72718642003-07-24 08:45:32 +00001576 mutex->location = VG_(get_ExeContext)(tid);
sewardjc26cc252002-10-23 21:58:55 +00001577 mutex->state = state;
1578}
njn25e49d8e72002-09-23 09:36:25 +00001579
1580/*------------------------------------------------------------*/
1581/*--- Setting and checking permissions. ---*/
1582/*------------------------------------------------------------*/
1583
1584static
1585void set_address_range_state ( Addr a, UInt len /* in bytes */,
1586 VgeInitStatus status )
1587{
sewardj1806d7f2002-10-22 05:05:49 +00001588 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00001589
sewardjdac0a442002-11-13 22:08:40 +00001590 /* only clean up dead mutexes */
sewardj39a4d842002-11-13 22:14:30 +00001591 Bool cleanmx(Mutex *mx) {
sewardjdac0a442002-11-13 22:08:40 +00001592 return mx->state == MxDead;
1593 }
1594
1595
njn25e49d8e72002-09-23 09:36:25 +00001596# if DEBUG_MAKE_ACCESSES
1597 VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
1598# endif
1599 //PROF_EVENT(30); PPP
1600
1601 if (len == 0)
1602 return;
1603
1604 if (len > 100 * 1000 * 1000)
1605 VG_(message)(Vg_UserMsg,
1606 "Warning: set address range state: large range %d",
1607 len);
1608
1609 VGP_PUSHCC(VgpSARP);
1610
sewardjdac0a442002-11-13 22:08:40 +00001611 /* Remove mutexes in recycled memory range from hash */
1612 find_mutex_range(a, a+len, cleanmx);
1613
njn25e49d8e72002-09-23 09:36:25 +00001614   /* The block may not be word-aligned or a whole number of words, so
 1615    * round the start down and the end up to 4-byte boundaries and
 1616    * initialise every shadow word in between.  Since len is in bytes,
 1617    * an unaligned block can need one word more than the neat len/4
 1618    * case. */
sewardj8fac99a2002-11-13 22:31:26 +00001619 end = ROUNDUP(a + len, 4);
1620 a = ROUNDDN(a, 4);
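   /* Illustrative example (not in the original source): with a = 0x1001 and
      len = 6 the block covers bytes 0x1001..0x1006; ROUNDDN gives a = 0x1000
      and ROUNDUP gives end = 0x1008, so the loops below visit exactly the
      two shadow words at 0x1000 and 0x1004. */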
njn25e49d8e72002-09-23 09:36:25 +00001621
1622 /* Do it ... */
1623 switch (status) {
1624 case Vge_VirginInit:
1625 for ( ; a < end; a += 4) {
1626 //PROF_EVENT(31); PPP
1627 init_virgin_sword(a);
1628 }
1629 break;
1630
1631 case Vge_NonVirginInit:
1632 for ( ; a < end; a += 4) {
1633 //PROF_EVENT(31); PPP
1634 init_nonvirgin_sword(a);
1635 }
1636 break;
1637
1638 case Vge_SegmentInit:
1639 for ( ; a < end; a += 4) {
1640 //PROF_EVENT(31); PPP
1641 init_magically_inited_sword(a);
1642 }
1643 break;
sewardj7f3ad222002-11-13 22:11:53 +00001644
1645 case Vge_Error:
1646 for ( ; a < end; a += 4) {
1647 //PROF_EVENT(31); PPP
1648 init_error_sword(a);
1649 }
1650 break;
njn25e49d8e72002-09-23 09:36:25 +00001651
1652 default:
1653 VG_(printf)("init_status = %u\n", status);
njne427a662002-10-02 11:08:25 +00001654 VG_(skin_panic)("Unexpected Vge_InitStatus");
njn25e49d8e72002-09-23 09:36:25 +00001655 }
1656
1657 /* Check that zero page and highest page have not been written to
1658 -- this could happen with buggy syscall wrappers. Today
 1659       (2001-04-26) we had precisely such a problem with
1660 __NR_setitimer. */
njne427a662002-10-02 11:08:25 +00001661 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +00001662 VGP_POPCC(VgpSARP);
1663}
1664
1665
1666static void make_segment_readable ( Addr a, UInt len )
1667{
1668 //PROF_EVENT(??); PPP
1669 set_address_range_state ( a, len, Vge_SegmentInit );
1670}
1671
1672static void make_writable ( Addr a, UInt len )
1673{
1674 //PROF_EVENT(36); PPP
1675 set_address_range_state( a, len, Vge_VirginInit );
1676}
1677
1678static void make_readable ( Addr a, UInt len )
1679{
1680 //PROF_EVENT(37); PPP
sewardj499e3de2002-11-13 22:22:25 +00001681 set_address_range_state( a, len, Vge_VirginInit );
njn25e49d8e72002-09-23 09:36:25 +00001682}
1683
1684
njn25e49d8e72002-09-23 09:36:25 +00001685/* Block-copy states (needed for implementing realloc()). */
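/* Note: len is in bytes and the loop below steps through 4-byte shadow
   words, mirroring set_address_range_state above; SK_(realloc) relies on
   this to carry the old block's race-detection state over to the start of
   the new block. */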
1686static void copy_address_range_state(Addr src, Addr dst, UInt len)
1687{
1688 UInt i;
1689
1690 //PROF_EVENT(40); PPP
1691 for (i = 0; i < len; i += 4) {
1692 shadow_word sword = *(get_sword_addr ( src+i ));
1693 //PROF_EVENT(41); PPP
1694 set_sword ( dst+i, sword );
1695 }
1696}
1697
1698// SSS: put these somewhere better
njn72718642003-07-24 08:45:32 +00001699static void eraser_mem_read (Addr a, UInt data_size, ThreadId tid);
1700static void eraser_mem_write(Addr a, UInt data_size, ThreadId tid);
sewardja5b3aec2002-10-22 05:09:36 +00001701
1702#define REGPARM(x) __attribute__((regparm (x)))
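/* regparm(n) makes gcc pass the first n integer arguments in registers
   (EAX, EDX, ECX on x86) rather than on the stack, keeping these hot
   helpers -- called directly from the instrumented code below -- cheap. */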
1703
1704static void eraser_mem_help_read_1(Addr a) REGPARM(1);
1705static void eraser_mem_help_read_2(Addr a) REGPARM(1);
1706static void eraser_mem_help_read_4(Addr a) REGPARM(1);
1707static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);
1708
1709static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
1710static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
1711static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
1712static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);
njn25e49d8e72002-09-23 09:36:25 +00001713
sewardj7a5ebcf2002-11-13 22:42:13 +00001714static void bus_lock(void);
1715static void bus_unlock(void);
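/* bus_lock()/bus_unlock() are defined further down (outside this excerpt).
   Judging from the LOCK and JMP/INCEIP cases in SK_(instrument) below, the
   instrumenter brackets LOCK-prefixed instruction groups with these calls,
   so atomic read-modify-write sequences are modelled as being protected by
   a single implicit "bus" lock. */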
1716
njn25e49d8e72002-09-23 09:36:25 +00001717static
njn72718642003-07-24 08:45:32 +00001718void eraser_pre_mem_read(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001719 Char* s, UInt base, UInt size )
1720{
njn72718642003-07-24 08:45:32 +00001721   if (tid > 50) { VG_(printf)("tid = %d, s = `%s`, part = %d\n", tid, s, part); VG_(skin_panic)("a");}
1722 eraser_mem_read(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001723}
1724
1725static
njn72718642003-07-24 08:45:32 +00001726void eraser_pre_mem_read_asciiz(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001727 Char* s, UInt base )
1728{
njn72718642003-07-24 08:45:32 +00001729 eraser_mem_read(base, VG_(strlen)((Char*)base), tid);
njn25e49d8e72002-09-23 09:36:25 +00001730}
1731
1732static
njn72718642003-07-24 08:45:32 +00001733void eraser_pre_mem_write(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001734 Char* s, UInt base, UInt size )
1735{
njn72718642003-07-24 08:45:32 +00001736 eraser_mem_write(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001737}
1738
1739
1740
1741static
1742void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
1743{
njn1f3a9092002-10-04 09:22:30 +00001744 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +00001745 make_segment_readable(a, len);
1746}
1747
1748
1749static
1750void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
1751{
1752 if (is_inited) {
1753 make_readable(a, len);
1754 } else {
1755 make_writable(a, len);
1756 }
1757}
1758
1759static
1760void eraser_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +00001761 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +00001762{
1763 if (rr) make_readable(a, len);
1764 else if (ww) make_writable(a, len);
1765 /* else do nothing */
1766}
1767
sewardjf6374322002-11-13 22:35:55 +00001768static
1769void eraser_new_mem_stack_private(Addr a, UInt len)
1770{
1771 set_address_range_state(a, len, Vge_NonVirginInit);
1772}
1773
1774static
1775void eraser_new_mem_stack(Addr a, UInt len)
1776{
1777 set_address_range_state(a, len, Vge_VirginInit);
1778}
njn25e49d8e72002-09-23 09:36:25 +00001779
1780/*--------------------------------------------------------------*/
1781/*--- Initialise the memory audit system on program startup. ---*/
1782/*--------------------------------------------------------------*/
1783
1784static
1785void init_shadow_memory(void)
1786{
1787 Int i;
1788
1789 for (i = 0; i < ESEC_MAP_WORDS; i++)
1790 distinguished_secondary_map.swords[i] = virgin_sword;
1791
1792 /* These entries gradually get overwritten as the used address
1793 space expands. */
1794 for (i = 0; i < 65536; i++)
1795 primary_map[i] = &distinguished_secondary_map;
1796}
1797
1798
njn3e884182003-04-15 13:03:23 +00001799/*------------------------------------------------------------*/
1800/*--- malloc() et al replacements ---*/
1801/*------------------------------------------------------------*/
1802
njnb4aee052003-04-15 14:09:58 +00001803static VgHashTable hg_malloc_list = NULL;
njn3e884182003-04-15 13:03:23 +00001804
1805#define N_FREED_CHUNKS 2
1806static Int freechunkptr = 0;
1807static HG_Chunk *freechunks[N_FREED_CHUNKS];
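/* freechunks[] is a small ring buffer of recently freed blocks: each free
   rotates the freed HG_Chunk in and fully releases the block it evicts, so
   describe_addr() can still attribute accesses to memory freed within the
   last N_FREED_CHUNKS frees. */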
1808
1809/* Use a small redzone (paranoia) */
1810UInt VG_(vg_malloc_redzone_szB) = 4;
1811
1812
 1813/* Record a user-chunk of size bytes: allocate an HG_Chunk shadow
 1814   record, make it point at the user block, and put it on the malloc
 1815   list.  The shadow-memory state for the block itself is set up by
 1816   the caller (alloc_and_new_mem). */
1817
njn72718642003-07-24 08:45:32 +00001818static void add_HG_Chunk ( ThreadId tid, Addr p, UInt size )
njn3e884182003-04-15 13:03:23 +00001819{
1820 HG_Chunk* hc;
1821
1822 hc = VG_(malloc)(sizeof(HG_Chunk));
1823 hc->data = p;
1824 hc->size = size;
njn72718642003-07-24 08:45:32 +00001825 hc->where = VG_(get_ExeContext)(tid);
1826 hc->tid = tid;
njn3e884182003-04-15 13:03:23 +00001827
1828 VG_(HT_add_node)( hg_malloc_list, (VgHashNode*)hc );
1829}
1830
1831/* Allocate memory and note change in memory available */
1832static __inline__
njn34ac0272003-09-30 14:20:00 +00001833void* alloc_and_new_mem ( Int size, UInt alignment, Bool is_zeroed )
njn3e884182003-04-15 13:03:23 +00001834{
1835 Addr p;
1836
njn34ac0272003-09-30 14:20:00 +00001837 if (size < 0) return NULL;
1838
njn3e884182003-04-15 13:03:23 +00001839 p = (Addr)VG_(cli_malloc)(alignment, size);
njn34ac0272003-09-30 14:20:00 +00001840 if (is_zeroed) VG_(memset)((void*)p, 0, size);
njn72718642003-07-24 08:45:32 +00001841 add_HG_Chunk ( VG_(get_current_or_recent_tid)(), p, size );
njn3e884182003-04-15 13:03:23 +00001842 eraser_new_mem_heap( p, size, is_zeroed );
1843
1844 return (void*)p;
1845}
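/* All the allocation entry points below (malloc, __builtin_new,
   __builtin_vec_new, memalign, calloc) funnel through alloc_and_new_mem;
   they differ only in the alignment used and in whether the memory starts
   out zeroed. */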
1846
njn72718642003-07-24 08:45:32 +00001847void* SK_(malloc) ( Int n )
njn3e884182003-04-15 13:03:23 +00001848{
njn72718642003-07-24 08:45:32 +00001849 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001850}
1851
njn72718642003-07-24 08:45:32 +00001852void* SK_(__builtin_new) ( Int n )
njn3e884182003-04-15 13:03:23 +00001853{
njn72718642003-07-24 08:45:32 +00001854 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001855}
1856
njn72718642003-07-24 08:45:32 +00001857void* SK_(__builtin_vec_new) ( Int n )
njn3e884182003-04-15 13:03:23 +00001858{
njn72718642003-07-24 08:45:32 +00001859 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001860}
1861
njn72718642003-07-24 08:45:32 +00001862void* SK_(memalign) ( Int align, Int n )
njn3e884182003-04-15 13:03:23 +00001863{
njn72718642003-07-24 08:45:32 +00001864 return alloc_and_new_mem ( n, align, /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001865}
1866
njn34ac0272003-09-30 14:20:00 +00001867void* SK_(calloc) ( Int nmemb, Int size )
njn3e884182003-04-15 13:03:23 +00001868{
njn34ac0272003-09-30 14:20:00 +00001869 return alloc_and_new_mem ( nmemb*size, VG_(clo_alignment),
1870 /*is_zeroed*/True );
njn3e884182003-04-15 13:03:23 +00001871}
1872
1873static
njn72718642003-07-24 08:45:32 +00001874void die_and_free_mem ( ThreadId tid, HG_Chunk* hc,
njn3e884182003-04-15 13:03:23 +00001875 HG_Chunk** prev_chunks_next_ptr )
1876{
njn72718642003-07-24 08:45:32 +00001877 Addr start = hc->data;
1878 Addr end = start + hc->size;
njn3e884182003-04-15 13:03:23 +00001879
1880 Bool deadmx(Mutex *mx) {
1881 if (mx->state != MxDead)
njn72718642003-07-24 08:45:32 +00001882 set_mutex_state(mx, MxDead, tid);
njn3e884182003-04-15 13:03:23 +00001883
1884 return False;
1885 }
1886
1887 /* Remove hc from the malloclist using prev_chunks_next_ptr to
1888 avoid repeating the hash table lookup. Can't remove until at least
1889 after free and free_mismatch errors are done because they use
1890 describe_addr() which looks for it in malloclist. */
1891 *prev_chunks_next_ptr = hc->next;
1892
1893 /* Record where freed */
njn72718642003-07-24 08:45:32 +00001894 hc->where = VG_(get_ExeContext) ( tid );
njn3e884182003-04-15 13:03:23 +00001895
1896 /* maintain a small window so that the error reporting machinery
1897 knows about this memory */
1898 if (freechunks[freechunkptr] != NULL) {
1899 /* free HG_Chunk */
1900 HG_Chunk* sc1 = freechunks[freechunkptr];
1901 VG_(cli_free) ( (void*)(sc1->data) );
1902 VG_(free) ( sc1 );
1903 }
1904
1905 freechunks[freechunkptr] = hc;
1906
1907 if (++freechunkptr == N_FREED_CHUNKS)
1908 freechunkptr = 0;
1909
1910 /* mark all mutexes in range dead */
1911 find_mutex_range(start, end, deadmx);
1912}
1913
1914
1915static __inline__
njn72718642003-07-24 08:45:32 +00001916void handle_free ( void* p )
njn3e884182003-04-15 13:03:23 +00001917{
1918 HG_Chunk* hc;
1919 HG_Chunk** prev_chunks_next_ptr;
1920
1921 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1922 (VgHashNode***)&prev_chunks_next_ptr );
1923 if (hc == NULL) {
1924 return;
1925 }
njn72718642003-07-24 08:45:32 +00001926 die_and_free_mem ( VG_(get_current_or_recent_tid)(),
1927 hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001928}
1929
njn72718642003-07-24 08:45:32 +00001930void SK_(free) ( void* p )
njn3e884182003-04-15 13:03:23 +00001931{
njn72718642003-07-24 08:45:32 +00001932 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001933}
1934
njn72718642003-07-24 08:45:32 +00001935void SK_(__builtin_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001936{
njn72718642003-07-24 08:45:32 +00001937 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001938}
1939
njn72718642003-07-24 08:45:32 +00001940void SK_(__builtin_vec_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001941{
njn72718642003-07-24 08:45:32 +00001942 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001943}
1944
njn72718642003-07-24 08:45:32 +00001945void* SK_(realloc) ( void* p, Int new_size )
njn3e884182003-04-15 13:03:23 +00001946{
1947 HG_Chunk *hc;
1948 HG_Chunk **prev_chunks_next_ptr;
sewardj05bcdcb2003-05-18 10:05:38 +00001949 Int i;
njn72718642003-07-24 08:45:32 +00001950 ThreadId tid = VG_(get_current_or_recent_tid)();
njn3e884182003-04-15 13:03:23 +00001951
1952 /* First try and find the block. */
1953 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1954 (VgHashNode***)&prev_chunks_next_ptr );
1955
1956 if (hc == NULL) {
1957 return NULL;
1958 }
1959
1960 if (hc->size == new_size) {
1961 /* size unchanged */
njn398044f2003-07-24 17:39:59 +00001962 hc->where = VG_(get_ExeContext)(tid);
njn3e884182003-04-15 13:03:23 +00001963 return p;
1964
1965 } else if (hc->size > new_size) {
1966 /* new size is smaller */
1967 hc->size = new_size;
njn398044f2003-07-24 17:39:59 +00001968 hc->where = VG_(get_ExeContext)(tid);
njn3e884182003-04-15 13:03:23 +00001969 return p;
1970
1971 } else {
1972 /* new size is bigger */
1973 Addr p_new;
1974
1975 /* Get new memory */
1976 p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
1977
1978 /* First half kept and copied, second half new */
1979 copy_address_range_state( (Addr)p, p_new, hc->size );
1980 eraser_new_mem_heap ( p_new+hc->size, new_size-hc->size,
1981 /*inited*/False );
1982
1983 /* Copy from old to new */
1984 for (i = 0; i < hc->size; i++)
1985 ((UChar*)p_new)[i] = ((UChar*)p)[i];
1986
1987 /* Free old memory */
njn72718642003-07-24 08:45:32 +00001988 die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001989
1990 /* this has to be after die_and_free_mem, otherwise the
1991 former succeeds in shorting out the new block, not the
1992 old, in the case when both are on the same list. */
njn72718642003-07-24 08:45:32 +00001993 add_HG_Chunk ( tid, p_new, new_size );
njn3e884182003-04-15 13:03:23 +00001994
1995 return (void*)p_new;
1996 }
1997}
1998
njn25e49d8e72002-09-23 09:36:25 +00001999/*--------------------------------------------------------------*/
2000/*--- Machinery to support sanity checking ---*/
2001/*--------------------------------------------------------------*/
2002
njn25e49d8e72002-09-23 09:36:25 +00002003Bool SK_(cheap_sanity_check) ( void )
2004{
jseward9800fd32004-01-04 23:08:04 +00002005 /* nothing useful we can rapidly check */
2006 return True;
njn25e49d8e72002-09-23 09:36:25 +00002007}
2008
njn25e49d8e72002-09-23 09:36:25 +00002009Bool SK_(expensive_sanity_check)(void)
2010{
2011 Int i;
2012
2013 /* Make sure nobody changed the distinguished secondary. */
2014 for (i = 0; i < ESEC_MAP_WORDS; i++)
2015 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
2016 distinguished_secondary_map.swords[i].state != virgin_sword.state)
2017 return False;
2018
2019 return True;
2020}
2021
2022
2023/*--------------------------------------------------------------*/
2024/*--- Instrumentation ---*/
2025/*--------------------------------------------------------------*/
2026
sewardjf6374322002-11-13 22:35:55 +00002027static UInt stk_ld, nonstk_ld, stk_st, nonstk_st;
2028
njn25e49d8e72002-09-23 09:36:25 +00002029/* Create and return an instrumented version of cb_in. Free cb_in
2030 before returning. */
2031UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
2032{
2033 UCodeBlock* cb;
2034 Int i;
2035 UInstr* u_in;
2036 Int t_size = INVALID_TEMPREG;
sewardjf6374322002-11-13 22:35:55 +00002037 Int ntemps;
2038 Bool *stackref = NULL;
sewardj7a5ebcf2002-11-13 22:42:13 +00002039 Bool locked = False; /* lock prefix */
njn25e49d8e72002-09-23 09:36:25 +00002040
njn810086f2002-11-14 12:42:47 +00002041 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002042
sewardjf6374322002-11-13 22:35:55 +00002043 /* stackref[] is used for super-simple value tracking to keep note
2044 of which tempregs currently hold a value which is derived from
2045 ESP or EBP, and is therefore likely stack-relative if used as
2046 the address for LOAD or STORE. */
njn810086f2002-11-14 12:42:47 +00002047 ntemps = VG_(get_num_temps)(cb);
sewardjf6374322002-11-13 22:35:55 +00002048 stackref = VG_(malloc)(sizeof(*stackref) * ntemps);
2049 VG_(memset)(stackref, 0, sizeof(*stackref) * ntemps);
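   /* Sketch of the tracking done below: a GET of %ESP or %EBP into a temp
      sets that temp's stackref[] flag; MOV, LEA1, ADD and SUB propagate it;
      a LOAD or STORE whose address temp is still flagged is assumed to be a
      (private) stack access and, when clo_priv_stacks is set, skips the
      eraser_mem_help_* call.  Other instructions conservatively clear the
      flags of the temps they write. */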
2050
njn810086f2002-11-14 12:42:47 +00002051 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
2052 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +00002053
njn25e49d8e72002-09-23 09:36:25 +00002054 switch (u_in->opcode) {
2055
2056 case NOP: case CALLM_S: case CALLM_E:
2057 break;
sewardjf6374322002-11-13 22:35:55 +00002058
sewardj7a5ebcf2002-11-13 22:42:13 +00002059 case LOCK:
2060 locked = True;
2061 uInstr0(cb, CCALL, 0);
2062 uCCall(cb, (Addr)bus_lock, 0, 0, False);
2063 break;
2064
2065 case JMP: case INCEIP:
2066 if (locked) {
2067 uInstr0(cb, CCALL, 0);
2068 uCCall(cb, (Addr)bus_unlock, 0, 0, False);
2069 }
2070 locked = False;
2071 VG_(copy_UInstr)(cb, u_in);
2072 break;
2073
sewardjf6374322002-11-13 22:35:55 +00002074 case GET:
2075 sk_assert(u_in->tag1 == ArchReg);
2076 sk_assert(u_in->tag2 == TempReg);
2077 sk_assert(u_in->val2 < ntemps);
2078
2079 stackref[u_in->val2] = (u_in->size == 4 &&
2080 (u_in->val1 == R_ESP || u_in->val1 == R_EBP));
2081 VG_(copy_UInstr)(cb, u_in);
2082 break;
2083
2084 case MOV:
2085 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2086 sk_assert(u_in->tag2 == TempReg);
2087 stackref[u_in->val2] = stackref[u_in->val1];
2088 }
2089 VG_(copy_UInstr)(cb, u_in);
2090 break;
2091
2092 case LEA1:
2093 case ADD: case SUB:
2094 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2095 sk_assert(u_in->tag2 == TempReg);
2096 stackref[u_in->val2] |= stackref[u_in->val1];
2097 }
2098 VG_(copy_UInstr)(cb, u_in);
2099 break;
njn25e49d8e72002-09-23 09:36:25 +00002100
sewardja5b3aec2002-10-22 05:09:36 +00002101 case LOAD: {
2102 void (*help)(Addr);
2103 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002104 sk_assert(u_in->tag1 == TempReg);
2105
2106 if (!clo_priv_stacks || !stackref[u_in->val1]) {
2107 nonstk_ld++;
2108
2109 switch(u_in->size) {
2110 case 1: help = eraser_mem_help_read_1; break;
2111 case 2: help = eraser_mem_help_read_2; break;
2112 case 4: help = eraser_mem_help_read_4; break;
2113 default:
2114 VG_(skin_panic)("bad size");
2115 }
jsgfcb1d1c02003-10-14 21:55:10 +00002116
2117 /* XXX all registers should be flushed to baseblock
2118 here */
sewardjf6374322002-11-13 22:35:55 +00002119 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
2120 uCCall(cb, (Addr)help, 1, 1, False);
2121 } else
2122 stk_ld++;
njn25e49d8e72002-09-23 09:36:25 +00002123
sewardja5b3aec2002-10-22 05:09:36 +00002124 VG_(copy_UInstr)(cb, u_in);
2125 t_size = INVALID_TEMPREG;
2126 break;
2127 }
2128
2129 case FPU_R: {
njne427a662002-10-02 11:08:25 +00002130 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00002131 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002132
2133 t_size = newTemp(cb);
2134 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2135 uLiteral(cb, (UInt)u_in->size);
njn25e49d8e72002-09-23 09:36:25 +00002136
jsgfcb1d1c02003-10-14 21:55:10 +00002137 /* XXX all registers should be flushed to baseblock
2138 here */
sewardja5b3aec2002-10-22 05:09:36 +00002139 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2140 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
njn25e49d8e72002-09-23 09:36:25 +00002141
sewardja5b3aec2002-10-22 05:09:36 +00002142 VG_(copy_UInstr)(cb, u_in);
2143 t_size = INVALID_TEMPREG;
2144 break;
2145 }
2146
2147 case STORE: {
2148 void (*help)(Addr, UInt);
2149 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002150 sk_assert(u_in->tag2 == TempReg);
sewardja5b3aec2002-10-22 05:09:36 +00002151
sewardjf6374322002-11-13 22:35:55 +00002152 if (!clo_priv_stacks || !stackref[u_in->val2]) {
2153 nonstk_st++;
2154
2155 switch(u_in->size) {
2156 case 1: help = eraser_mem_help_write_1; break;
2157 case 2: help = eraser_mem_help_write_2; break;
2158 case 4: help = eraser_mem_help_write_4; break;
2159 default:
2160 VG_(skin_panic)("bad size");
2161 }
2162
jsgfcb1d1c02003-10-14 21:55:10 +00002163 /* XXX all registers should be flushed to baseblock
2164 here */
sewardjf6374322002-11-13 22:35:55 +00002165 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
2166 uCCall(cb, (Addr)help, 2, 2, False);
2167 } else
2168 stk_st++;
sewardja5b3aec2002-10-22 05:09:36 +00002169
2170 VG_(copy_UInstr)(cb, u_in);
2171 t_size = INVALID_TEMPREG;
2172 break;
2173 }
2174
2175 case FPU_W: {
njne427a662002-10-02 11:08:25 +00002176 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00002177 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002178
2179 t_size = newTemp(cb);
2180 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2181 uLiteral(cb, (UInt)u_in->size);
jsgfcb1d1c02003-10-14 21:55:10 +00002182 /* XXX all registers should be flushed to baseblock
2183 here */
sewardja5b3aec2002-10-22 05:09:36 +00002184 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2185 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
2186
2187 VG_(copy_UInstr)(cb, u_in);
2188 t_size = INVALID_TEMPREG;
2189 break;
2190 }
njn25e49d8e72002-09-23 09:36:25 +00002191
sewardj3d7c9c82003-03-26 21:08:13 +00002192 case MMX1: case MMX2: case MMX3:
2193 case MMX2_MemRd: case MMX2_MemWr:
sewardj4fbe6e92003-06-15 21:54:34 +00002194 case MMX2_ERegRd: case MMX2_ERegWr:
sewardj3d7c9c82003-03-26 21:08:13 +00002195 VG_(skin_panic)(
2196 "I don't know how to instrument MMXish stuff (yet)");
2197 break;
2198
njn25e49d8e72002-09-23 09:36:25 +00002199 default:
sewardjf6374322002-11-13 22:35:55 +00002200 /* conservative tromping */
2201 if (0 && u_in->tag1 == TempReg) /* can val1 ever be dest? */
2202 stackref[u_in->val1] = False;
2203 if (u_in->tag2 == TempReg)
2204 stackref[u_in->val2] = False;
2205 if (u_in->tag3 == TempReg)
2206 stackref[u_in->val3] = False;
njn4ba5a792002-09-30 10:23:54 +00002207 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00002208 break;
2209 }
2210 }
2211
sewardjf6374322002-11-13 22:35:55 +00002212 VG_(free)(stackref);
njn4ba5a792002-09-30 10:23:54 +00002213 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002214 return cb;
2215}
2216
2217
2218/*--------------------------------------------------------------------*/
2219/*--- Error and suppression handling ---*/
2220/*--------------------------------------------------------------------*/
2221
2222typedef
2223 enum {
2224 /* Possible data race */
2225 EraserSupp
2226 }
2227 EraserSuppKind;
2228
2229/* What kind of error it is. */
2230typedef
2231 enum {
sewardj16748af2002-10-22 04:55:54 +00002232 EraserErr, /* data-race */
2233 MutexErr, /* mutex operations */
sewardjff2c9232002-11-13 21:44:39 +00002234 LockGraphErr, /* mutex order error */
njn25e49d8e72002-09-23 09:36:25 +00002235 }
2236 EraserErrorKind;
2237
sewardj16748af2002-10-22 04:55:54 +00002238/* The classification of a faulting address. */
2239typedef
2240 enum { Undescribed, /* as-yet unclassified */
2241 Stack,
2242 Unknown, /* classification yielded nothing useful */
sewardjdac0a442002-11-13 22:08:40 +00002243 Mallocd,
2244 Freed,
sewardj16748af2002-10-22 04:55:54 +00002245 Segment
2246 }
2247 AddrKind;
2248/* Records info about a faulting address. */
2249typedef
2250 struct {
2251 /* ALL */
2252 AddrKind akind;
2253 /* Freed, Mallocd */
2254 Int blksize;
2255 /* Freed, Mallocd */
2256 Int rwoffset;
2257 /* Freed, Mallocd */
2258 ExeContext* lastchange;
2259 ThreadId lasttid;
2260 /* Stack */
2261 ThreadId stack_tid;
2262 /* Segment */
2263 const Char* filename;
2264 const Char* section;
2265 /* True if is just-below %esp -- could be a gcc bug. */
2266 Bool maybe_gcc;
jsgfcb1d1c02003-10-14 21:55:10 +00002267 /* symbolic address description */
2268 Char *expr;
sewardj16748af2002-10-22 04:55:54 +00002269 }
2270 AddrInfo;
njn25e49d8e72002-09-23 09:36:25 +00002271
sewardj16748af2002-10-22 04:55:54 +00002272/* What kind of memory access is involved in the error? */
2273typedef
2274 enum { ReadAxs, WriteAxs, ExecAxs }
2275 AxsKind;
2276
2277/* Extra context for memory errors */
2278typedef
2279 struct {
2280 AxsKind axskind;
2281 Int size;
2282 AddrInfo addrinfo;
2283 Bool isWrite;
2284 shadow_word prevstate;
sewardjff2c9232002-11-13 21:44:39 +00002285 /* MutexErr, LockGraphErr */
sewardj39a4d842002-11-13 22:14:30 +00002286 Mutex *mutex;
sewardj499e3de2002-11-13 22:22:25 +00002287 EC_EIP lasttouched;
sewardj16748af2002-10-22 04:55:54 +00002288 ThreadId lasttid;
sewardjff2c9232002-11-13 21:44:39 +00002289 /* LockGraphErr */
sewardj4bffb232002-11-13 21:46:34 +00002290 const LockSet *held_lockset;
2291 const LockSet *prev_lockset;
sewardj16748af2002-10-22 04:55:54 +00002292 }
2293 HelgrindError;
2294
2295static __inline__
2296void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002297{
sewardj16748af2002-10-22 04:55:54 +00002298 ai->akind = Unknown;
2299 ai->blksize = 0;
2300 ai->rwoffset = 0;
2301 ai->lastchange = NULL;
2302 ai->lasttid = VG_INVALID_THREADID;
2303 ai->filename = NULL;
2304 ai->section = "???";
2305 ai->stack_tid = VG_INVALID_THREADID;
2306 ai->maybe_gcc = False;
jsgfcb1d1c02003-10-14 21:55:10 +00002307 ai->expr = NULL;
njn25e49d8e72002-09-23 09:36:25 +00002308}
2309
sewardj16748af2002-10-22 04:55:54 +00002310static __inline__
2311void clear_HelgrindError ( HelgrindError* err_extra )
2312{
2313 err_extra->axskind = ReadAxs;
2314 err_extra->size = 0;
2315 err_extra->mutex = NULL;
sewardj499e3de2002-11-13 22:22:25 +00002316 err_extra->lasttouched= NULL_EC_EIP;
sewardj16748af2002-10-22 04:55:54 +00002317 err_extra->lasttid = VG_INVALID_THREADID;
sewardjff2c9232002-11-13 21:44:39 +00002318 err_extra->prev_lockset = 0;
2319 err_extra->held_lockset = 0;
sewardj8fac99a2002-11-13 22:31:26 +00002320 err_extra->prevstate = SW(Vge_Virgin, 0);
sewardj16748af2002-10-22 04:55:54 +00002321 clear_AddrInfo ( &err_extra->addrinfo );
2322 err_extra->isWrite = False;
2323}
2324
2325
2326
2327/* Describe an address as best you can, for error messages,
2328 putting the result in ai. */
2329
2330static void describe_addr ( Addr a, AddrInfo* ai )
2331{
njn3e884182003-04-15 13:03:23 +00002332 HG_Chunk* hc;
sewardjdac0a442002-11-13 22:08:40 +00002333 Int i;
sewardj16748af2002-10-22 04:55:54 +00002334
2335 /* Nested functions, yeah. Need the lexical scoping of 'a'. */
2336
2337 /* Closure for searching thread stacks */
2338 Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
2339 {
2340 return (stack_min <= a && a <= stack_max);
2341 }
2342 /* Closure for searching malloc'd and free'd lists */
njn3e884182003-04-15 13:03:23 +00002343 Bool addr_is_in_block(VgHashNode *node)
sewardj16748af2002-10-22 04:55:54 +00002344 {
njn3e884182003-04-15 13:03:23 +00002345 HG_Chunk* hc2 = (HG_Chunk*)node;
2346 return (hc2->data <= a && a < hc2->data + hc2->size);
sewardj16748af2002-10-22 04:55:54 +00002347 }
2348
2349 /* Search for it in segments */
2350 {
2351 const SegInfo *seg;
2352
2353 for(seg = VG_(next_seginfo)(NULL);
2354 seg != NULL;
2355 seg = VG_(next_seginfo)(seg)) {
2356 Addr base = VG_(seg_start)(seg);
2357 UInt size = VG_(seg_size)(seg);
2358 const UChar *filename = VG_(seg_filename)(seg);
2359
2360 if (a >= base && a < base+size) {
2361 ai->akind = Segment;
2362 ai->blksize = size;
2363 ai->rwoffset = a - base;
2364 ai->filename = filename;
2365
2366 switch(VG_(seg_sect_kind)(a)) {
2367 case Vg_SectText: ai->section = "text"; break;
2368 case Vg_SectData: ai->section = "data"; break;
2369 case Vg_SectBSS: ai->section = "BSS"; break;
2370 case Vg_SectGOT: ai->section = "GOT"; break;
2371 case Vg_SectPLT: ai->section = "PLT"; break;
2372 case Vg_SectUnknown:
2373 default:
2374 ai->section = "???"; break;
2375 }
2376
2377 return;
2378 }
2379 }
2380 }
2381
2382 /* Search for a currently malloc'd block which might bracket it. */
njn3e884182003-04-15 13:03:23 +00002383 hc = (HG_Chunk*)VG_(HT_first_match)(hg_malloc_list, addr_is_in_block);
2384 if (NULL != hc) {
sewardj16748af2002-10-22 04:55:54 +00002385 ai->akind = Mallocd;
njn3e884182003-04-15 13:03:23 +00002386 ai->blksize = hc->size;
2387 ai->rwoffset = (Int)a - (Int)(hc->data);
2388 ai->lastchange = hc->where;
2389 ai->lasttid = hc->tid;
sewardj16748af2002-10-22 04:55:54 +00002390 return;
2391 }
sewardjdac0a442002-11-13 22:08:40 +00002392
2393 /* Look in recently freed memory */
2394 for(i = 0; i < N_FREED_CHUNKS; i++) {
njn3e884182003-04-15 13:03:23 +00002395 hc = freechunks[i];
2396 if (hc == NULL)
sewardjdac0a442002-11-13 22:08:40 +00002397 continue;
2398
njn3e884182003-04-15 13:03:23 +00002399 if (a >= hc->data && a < hc->data + hc->size) {
sewardjdac0a442002-11-13 22:08:40 +00002400 ai->akind = Freed;
njn3e884182003-04-15 13:03:23 +00002401 ai->blksize = hc->size;
2402 ai->rwoffset = a - hc->data;
2403 ai->lastchange = hc->where;
2404 ai->lasttid = hc->tid;
sewardjdac0a442002-11-13 22:08:40 +00002405 return;
2406 }
2407 }
2408
sewardj16748af2002-10-22 04:55:54 +00002409 /* Clueless ... */
2410 ai->akind = Unknown;
2411 return;
2412}
2413
2414
njn7e614812003-04-21 22:04:03 +00002415/* Updates the copy with address info if necessary. */
2416UInt SK_(update_extra)(Error* err)
sewardj16748af2002-10-22 04:55:54 +00002417{
njn7e614812003-04-21 22:04:03 +00002418 HelgrindError* extra;
sewardj16748af2002-10-22 04:55:54 +00002419
njn7e614812003-04-21 22:04:03 +00002420 extra = (HelgrindError*)VG_(get_error_extra)(err);
2421 if (extra != NULL && Undescribed == extra->addrinfo.akind) {
2422 describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
2423 }
2424 return sizeof(HelgrindError);
sewardj16748af2002-10-22 04:55:54 +00002425}
2426
njn72718642003-07-24 08:45:32 +00002427static void record_eraser_error ( ThreadId tid, Addr a, Bool is_write,
sewardj0f811692002-10-22 04:59:26 +00002428 shadow_word prevstate )
sewardj16748af2002-10-22 04:55:54 +00002429{
sewardjc4a810d2002-11-13 22:25:51 +00002430 shadow_word *sw;
sewardj16748af2002-10-22 04:55:54 +00002431 HelgrindError err_extra;
2432
sewardjff2c9232002-11-13 21:44:39 +00002433 n_eraser_warnings++;
2434
sewardj16748af2002-10-22 04:55:54 +00002435 clear_HelgrindError(&err_extra);
2436 err_extra.isWrite = is_write;
2437 err_extra.addrinfo.akind = Undescribed;
2438 err_extra.prevstate = prevstate;
sewardj499e3de2002-11-13 22:22:25 +00002439 if (clo_execontext)
2440 err_extra.lasttouched = getExeContext(a);
jsgfcb1d1c02003-10-14 21:55:10 +00002441 err_extra.addrinfo.expr = VG_(describe_addr)(tid, a);
2442
njn72718642003-07-24 08:45:32 +00002443 VG_(maybe_record_error)( tid, EraserErr, a,
sewardj16748af2002-10-22 04:55:54 +00002444 (is_write ? "writing" : "reading"),
2445 &err_extra);
2446
sewardjc4a810d2002-11-13 22:25:51 +00002447 sw = get_sword_addr(a);
2448 if (sw->state == Vge_Excl && sw->other != TLSP_INDICATING_ALL) {
2449 ThreadLifeSeg *tls = unpackTLS(sw->other);
2450 tls->refcount--;
2451 }
2452
sewardj7f3ad222002-11-13 22:11:53 +00002453 set_sword(a, error_sword);
sewardj16748af2002-10-22 04:55:54 +00002454}
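/* Once a race has been reported the word is dropped into error_sword;
   given the "EXCL/ERR" (TLSP_INDICATING_ALL) cases in eraser_mem_read_word
   and eraser_mem_write_word below, this appears to park the word in a
   state that suppresses duplicate reports on later accesses. */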
2455
sewardj39a4d842002-11-13 22:14:30 +00002456static void record_mutex_error(ThreadId tid, Mutex *mutex,
sewardj16748af2002-10-22 04:55:54 +00002457 Char *str, ExeContext *ec)
2458{
2459 HelgrindError err_extra;
2460
2461 clear_HelgrindError(&err_extra);
2462 err_extra.addrinfo.akind = Undescribed;
2463 err_extra.mutex = mutex;
sewardjc808ef52002-11-13 22:43:26 +00002464 err_extra.lasttouched = EC(ec, virgin_sword, thread_seg[tid]);
sewardj16748af2002-10-22 04:55:54 +00002465 err_extra.lasttid = tid;
2466
njn72718642003-07-24 08:45:32 +00002467 VG_(maybe_record_error)(tid, MutexErr,
sewardj16748af2002-10-22 04:55:54 +00002468 (Addr)mutex->mutexp, str, &err_extra);
2469}
njn25e49d8e72002-09-23 09:36:25 +00002470
sewardj39a4d842002-11-13 22:14:30 +00002471static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
sewardj4bffb232002-11-13 21:46:34 +00002472 const LockSet *lockset_holding,
2473 const LockSet *lockset_prev)
sewardjff2c9232002-11-13 21:44:39 +00002474{
2475 HelgrindError err_extra;
2476
2477 n_lockorder_warnings++;
2478
2479 clear_HelgrindError(&err_extra);
2480 err_extra.addrinfo.akind = Undescribed;
2481 err_extra.mutex = mutex;
2482
sewardjc808ef52002-11-13 22:43:26 +00002483 err_extra.lasttouched = EC(mutex->location, virgin_sword, 0);
sewardjff2c9232002-11-13 21:44:39 +00002484 err_extra.held_lockset = lockset_holding;
2485 err_extra.prev_lockset = lockset_prev;
2486
njn72718642003-07-24 08:45:32 +00002487 VG_(maybe_record_error)(tid, LockGraphErr, mutex->mutexp, "", &err_extra);
sewardjff2c9232002-11-13 21:44:39 +00002488}
2489
njn810086f2002-11-14 12:42:47 +00002490Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
njn25e49d8e72002-09-23 09:36:25 +00002491{
njn810086f2002-11-14 12:42:47 +00002492 Char *e1s, *e2s;
sewardj16748af2002-10-22 04:55:54 +00002493
njn810086f2002-11-14 12:42:47 +00002494 sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
2495
2496 switch (VG_(get_error_kind)(e1)) {
sewardj16748af2002-10-22 04:55:54 +00002497 case EraserErr:
njn810086f2002-11-14 12:42:47 +00002498 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002499
2500 case MutexErr:
njn810086f2002-11-14 12:42:47 +00002501 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002502 }
2503
njn810086f2002-11-14 12:42:47 +00002504 e1s = VG_(get_error_string)(e1);
2505 e2s = VG_(get_error_string)(e2);
2506 if (e1s != e2s) return False;
2507 if (0 != VG_(strcmp)(e1s, e2s)) return False;
njn25e49d8e72002-09-23 09:36:25 +00002508 return True;
2509}
2510
sewardj16748af2002-10-22 04:55:54 +00002511static void pp_AddrInfo ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002512{
jsgfcb1d1c02003-10-14 21:55:10 +00002513 if (ai->expr != NULL)
2514 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002515 " Address %p == %s", a, ai->expr);
jsgfcb1d1c02003-10-14 21:55:10 +00002516
sewardj16748af2002-10-22 04:55:54 +00002517 switch (ai->akind) {
2518 case Stack:
2519 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002520 " Address %p is on thread %d's stack",
sewardj16748af2002-10-22 04:55:54 +00002521 a, ai->stack_tid);
2522 break;
2523 case Unknown:
jsgfcb1d1c02003-10-14 21:55:10 +00002524 if (ai->expr != NULL)
2525 break;
2526
nethercote3b390c72003-11-13 17:53:43 +00002527 /* maybe_gcc is never set to True! This is a hangover from code
2528 in Memcheck */
sewardj16748af2002-10-22 04:55:54 +00002529 if (ai->maybe_gcc) {
2530 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002531 " Address %p is just below %%esp. Possibly a bug in GCC/G++",
sewardj16748af2002-10-22 04:55:54 +00002532 a);
2533 VG_(message)(Vg_UserMsg,
2534 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
2535 } else {
2536 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002537 " Address %p is not stack'd, malloc'd or free'd", a);
sewardj16748af2002-10-22 04:55:54 +00002538 }
2539 break;
2540 case Segment:
2541 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002542 " Address %p is in %s section of %s",
sewardj16748af2002-10-22 04:55:54 +00002543 a, ai->section, ai->filename);
2544 break;
sewardjdac0a442002-11-13 22:08:40 +00002545 case Mallocd:
2546 case Freed: {
sewardj16748af2002-10-22 04:55:54 +00002547 UInt delta;
2548 UChar* relative;
2549 if (ai->rwoffset < 0) {
2550 delta = (UInt)(- ai->rwoffset);
2551 relative = "before";
2552 } else if (ai->rwoffset >= ai->blksize) {
2553 delta = ai->rwoffset - ai->blksize;
2554 relative = "after";
2555 } else {
2556 delta = ai->rwoffset;
2557 relative = "inside";
2558 }
2559 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002560 " Address %p is %d bytes %s a block of size %d %s by thread %d",
sewardj16748af2002-10-22 04:55:54 +00002561 a, delta, relative,
2562 ai->blksize,
sewardjdac0a442002-11-13 22:08:40 +00002563 ai->akind == Mallocd ? "alloc'd" : "freed",
sewardj16748af2002-10-22 04:55:54 +00002564 ai->lasttid);
sewardj5481f8f2002-10-20 19:43:47 +00002565
sewardj16748af2002-10-22 04:55:54 +00002566 VG_(pp_ExeContext)(ai->lastchange);
2567 break;
2568 }
2569 default:
2570 VG_(skin_panic)("pp_AddrInfo");
2571 }
njn25e49d8e72002-09-23 09:36:25 +00002572}
2573
sewardj4bffb232002-11-13 21:46:34 +00002574static Char *lockset_str(const Char *prefix, const LockSet *lockset)
sewardjff2c9232002-11-13 21:44:39 +00002575{
sewardjff2c9232002-11-13 21:44:39 +00002576 Char *buf, *cp;
sewardj4bffb232002-11-13 21:46:34 +00002577 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002578
sewardj4bffb232002-11-13 21:46:34 +00002579 buf = VG_(malloc)((prefix == NULL ? 0 : VG_(strlen)(prefix)) +
2580 lockset->setsize * 120 +
2581 1);
sewardjff2c9232002-11-13 21:44:39 +00002582
2583 cp = buf;
2584 if (prefix)
2585 cp += VG_(sprintf)(cp, "%s", prefix);
2586
sewardj4bffb232002-11-13 21:46:34 +00002587 for(i = 0; i < lockset->setsize; i++)
2588 cp += VG_(sprintf)(cp, "%p%(y, ", lockset->mutex[i]->mutexp,
2589 lockset->mutex[i]->mutexp);
sewardjff2c9232002-11-13 21:44:39 +00002590
sewardj4bffb232002-11-13 21:46:34 +00002591 if (lockset->setsize)
sewardjff2c9232002-11-13 21:44:39 +00002592 cp[-2] = '\0';
2593 else
2594 *cp = '\0';
2595
2596 return buf;
2597}
njn25e49d8e72002-09-23 09:36:25 +00002598
njn43c799e2003-04-08 00:08:52 +00002599void SK_(pp_SkinError) ( Error* err )
njn25e49d8e72002-09-23 09:36:25 +00002600{
njn810086f2002-11-14 12:42:47 +00002601 HelgrindError *extra = (HelgrindError *)VG_(get_error_extra)(err);
sewardj16748af2002-10-22 04:55:54 +00002602 Char buf[100];
2603 Char *msg = buf;
sewardj4bffb232002-11-13 21:46:34 +00002604 const LockSet *ls;
sewardj16748af2002-10-22 04:55:54 +00002605
2606 *msg = '\0';
2607
njn810086f2002-11-14 12:42:47 +00002608 switch(VG_(get_error_kind)(err)) {
2609 case EraserErr: {
2610 Addr err_addr = VG_(get_error_address)(err);
2611
sewardj16748af2002-10-22 04:55:54 +00002612 VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
njn810086f2002-11-14 12:42:47 +00002613 VG_(get_error_string)(err), err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002614 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn810086f2002-11-14 12:42:47 +00002615 pp_AddrInfo(err_addr, &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002616
2617 switch(extra->prevstate.state) {
2618 case Vge_Virgin:
2619 /* shouldn't be possible to go directly from virgin -> error */
2620 VG_(sprintf)(buf, "virgin!?");
2621 break;
2622
sewardjc4a810d2002-11-13 22:25:51 +00002623 case Vge_Excl: {
2624 ThreadLifeSeg *tls = unpackTLS(extra->prevstate.other);
2625
2626 sk_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
2627 VG_(sprintf)(buf, "exclusively owned by thread %u", tls->tid);
sewardj16748af2002-10-22 04:55:54 +00002628 break;
sewardjc4a810d2002-11-13 22:25:51 +00002629 }
sewardj16748af2002-10-22 04:55:54 +00002630
2631 case Vge_Shar:
sewardjff2c9232002-11-13 21:44:39 +00002632 case Vge_SharMod:
sewardj8fac99a2002-11-13 22:31:26 +00002633 ls = unpackLockSet(extra->prevstate.other);
sewardj4bffb232002-11-13 21:46:34 +00002634
2635 if (isempty(ls)) {
sewardj16748af2002-10-22 04:55:54 +00002636 VG_(sprintf)(buf, "shared %s, no locks",
2637 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
2638 break;
2639 }
2640
sewardjff2c9232002-11-13 21:44:39 +00002641 msg = lockset_str(extra->prevstate.state == Vge_Shar ?
2642 "shared RO, locked by:" :
sewardj4bffb232002-11-13 21:46:34 +00002643 "shared RW, locked by:", ls);
sewardj16748af2002-10-22 04:55:54 +00002644
sewardj16748af2002-10-22 04:55:54 +00002645 break;
2646 }
sewardj16748af2002-10-22 04:55:54 +00002647
sewardj499e3de2002-11-13 22:22:25 +00002648 if (*msg)
nethercote3b390c72003-11-13 17:53:43 +00002649 VG_(message)(Vg_UserMsg, " Previous state: %s", msg);
sewardj499e3de2002-11-13 22:22:25 +00002650
sewardj72baa7a2002-12-09 23:32:58 +00002651 if (clo_execontext == EC_Some
2652 && extra->lasttouched.uu_ec_eip.eip != 0) {
sewardj499e3de2002-11-13 22:22:25 +00002653 Char file[100];
2654 UInt line;
sewardj72baa7a2002-12-09 23:32:58 +00002655 Addr eip = extra->lasttouched.uu_ec_eip.eip;
sewardj499e3de2002-11-13 22:22:25 +00002656
nethercote3b390c72003-11-13 17:53:43 +00002657 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s by thread %u",
njn810086f2002-11-14 12:42:47 +00002658 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002659 pp_state(extra->lasttouched.state),
2660 unpackTLS(extra->lasttouched.tls)->tid);
sewardj499e3de2002-11-13 22:22:25 +00002661
2662 if (VG_(get_filename_linenum)(eip, file, sizeof(file), &line)) {
2663 VG_(message)(Vg_UserMsg, " at %p: %y (%s:%u)",
2664 eip, eip, file, line);
2665 } else if (VG_(get_objname)(eip, file, sizeof(file))) {
2666 VG_(message)(Vg_UserMsg, " at %p: %y (in %s)",
2667 eip, eip, file);
2668 } else {
2669 VG_(message)(Vg_UserMsg, " at %p: %y", eip, eip);
2670 }
sewardj72baa7a2002-12-09 23:32:58 +00002671 } else if (clo_execontext == EC_All
2672 && extra->lasttouched.uu_ec_eip.ec != NULL) {
nethercote3b390c72003-11-13 17:53:43 +00002673 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s in tid %u",
njn810086f2002-11-14 12:42:47 +00002674 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002675 pp_state(extra->lasttouched.state),
2676 unpackTLS(extra->lasttouched.tls)->tid);
sewardj72baa7a2002-12-09 23:32:58 +00002677 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj499e3de2002-11-13 22:22:25 +00002678 }
sewardj16748af2002-10-22 04:55:54 +00002679 break;
njn810086f2002-11-14 12:42:47 +00002680 }
sewardj16748af2002-10-22 04:55:54 +00002681
2682 case MutexErr:
sewardj499e3de2002-11-13 22:22:25 +00002683 VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s",
njn810086f2002-11-14 12:42:47 +00002684 VG_(get_error_address)(err),
2685 VG_(get_error_address)(err),
2686 VG_(get_error_string)(err));
njn43c799e2003-04-08 00:08:52 +00002687 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardj72baa7a2002-12-09 23:32:58 +00002688 if (extra->lasttouched.uu_ec_eip.ec != NULL) {
nethercote3b390c72003-11-13 17:53:43 +00002689 VG_(message)(Vg_UserMsg, " last touched by thread %d", extra->lasttid);
sewardj72baa7a2002-12-09 23:32:58 +00002690 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj16748af2002-10-22 04:55:54 +00002691 }
njn810086f2002-11-14 12:42:47 +00002692 pp_AddrInfo(VG_(get_error_address)(err), &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002693 break;
sewardjff2c9232002-11-13 21:44:39 +00002694
2695 case LockGraphErr: {
sewardj4bffb232002-11-13 21:46:34 +00002696 const LockSet *heldset = extra->held_lockset;
njn810086f2002-11-14 12:42:47 +00002697 Addr err_addr = VG_(get_error_address)(err);
sewardj4bffb232002-11-13 21:46:34 +00002698 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002699
2700 msg = lockset_str(NULL, heldset);
2701
2702 VG_(message)(Vg_UserMsg, "Mutex %p%(y locked in inconsistent order",
njn810086f2002-11-14 12:42:47 +00002703 err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002704 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardjff2c9232002-11-13 21:44:39 +00002705 VG_(message)(Vg_UserMsg, " while holding locks %s", msg);
2706
sewardj4bffb232002-11-13 21:46:34 +00002707 for(i = 0; i < heldset->setsize; i++) {
sewardj39a4d842002-11-13 22:14:30 +00002708 const Mutex *lsmx = heldset->mutex[i];
sewardjff2c9232002-11-13 21:44:39 +00002709
sewardj542494b2002-11-13 22:46:13 +00002710 /* needs to be a recursive search+display */
2711 if (0 && !ismember(lsmx->lockdep, extra->mutex))
sewardjff2c9232002-11-13 21:44:39 +00002712 continue;
2713
nethercote3b390c72003-11-13 17:53:43 +00002714 VG_(message)(Vg_UserMsg, " %p%(y last locked at",
sewardjff2c9232002-11-13 21:44:39 +00002715 lsmx->mutexp, lsmx->mutexp);
2716 VG_(pp_ExeContext)(lsmx->location);
2717 VG_(free)(msg);
sewardj4bffb232002-11-13 21:46:34 +00002718 msg = lockset_str(NULL, lsmx->lockdep);
nethercote3b390c72003-11-13 17:53:43 +00002719 VG_(message)(Vg_UserMsg, " while depending on locks %s", msg);
sewardjff2c9232002-11-13 21:44:39 +00002720 }
2721
2722 break;
sewardj16748af2002-10-22 04:55:54 +00002723 }
sewardjff2c9232002-11-13 21:44:39 +00002724 }
2725
2726 if (msg != buf)
2727 VG_(free)(msg);
njn25e49d8e72002-09-23 09:36:25 +00002728}
2729
2730
njn810086f2002-11-14 12:42:47 +00002731Bool SK_(recognised_suppression) ( Char* name, Supp *su )
njn25e49d8e72002-09-23 09:36:25 +00002732{
2733 if (0 == VG_(strcmp)(name, "Eraser")) {
njn810086f2002-11-14 12:42:47 +00002734 VG_(set_supp_kind)(su, EraserSupp);
njn25e49d8e72002-09-23 09:36:25 +00002735 return True;
2736 } else {
2737 return False;
2738 }
2739}
2740
2741
njn810086f2002-11-14 12:42:47 +00002742Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +00002743{
2744 /* do nothing -- no extra suppression info present. Return True to
2745 indicate nothing bad happened. */
2746 return True;
2747}
2748
2749
njn810086f2002-11-14 12:42:47 +00002750Bool SK_(error_matches_suppression)(Error* err, Supp* su)
njn25e49d8e72002-09-23 09:36:25 +00002751{
nethercote64366b42003-12-01 13:11:47 +00002752 sk_assert(VG_(get_supp_kind)(su) == EraserSupp);
2753
2754 return (VG_(get_error_kind)(err) == EraserErr);
njn25e49d8e72002-09-23 09:36:25 +00002755}
2756
njn43c799e2003-04-08 00:08:52 +00002757extern Char* SK_(get_error_name) ( Error* err )
2758{
2759 if (EraserErr == VG_(get_error_kind)(err)) {
2760 return "Eraser";
2761 } else {
2762 return NULL; /* Other errors types can't be suppressed */
2763 }
2764}
2765
2766extern void SK_(print_extra_suppression_info) ( Error* err )
2767{
2768 /* Do nothing */
2769}
njn25e49d8e72002-09-23 09:36:25 +00002770
sewardjdca84112002-11-13 22:29:34 +00002771static void eraser_pre_mutex_lock(ThreadId tid, void* void_mutex)
2772{
2773 Mutex *mutex = get_mutex((Addr)void_mutex);
2774
njn72718642003-07-24 08:45:32 +00002775 test_mutex_state(mutex, MxLocked, tid);
sewardjdca84112002-11-13 22:29:34 +00002776}
2777
njn25e49d8e72002-09-23 09:36:25 +00002778static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
2779{
sewardj4bffb232002-11-13 21:46:34 +00002780 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002781 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002782 const LockSet* ls;
2783
njn72718642003-07-24 08:45:32 +00002784 set_mutex_state(mutex, MxLocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002785
njn25e49d8e72002-09-23 09:36:25 +00002786# if DEBUG_LOCKS
sewardjdac0a442002-11-13 22:08:40 +00002787 VG_(printf)("lock (%u, %p)\n", tid, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002788# endif
2789
njn25e49d8e72002-09-23 09:36:25 +00002790 /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
2791# if LOCKSET_SANITY > 1
2792 sanity_check_locksets("eraser_post_mutex_lock-IN");
2793# endif
2794
sewardj4bffb232002-11-13 21:46:34 +00002795 ls = lookup_LockSet_with(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002796
sewardj4bffb232002-11-13 21:46:34 +00002797 if (ls == NULL) {
2798 LockSet *newset = add_LockSet(thread_locks[tid], mutex);
2799 insert_LockSet(newset);
2800 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002801 }
sewardj4bffb232002-11-13 21:46:34 +00002802 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002803
sewardj4bffb232002-11-13 21:46:34 +00002804 if (debug || DEBUG_LOCKS)
2805 VG_(printf)("tid %u now has lockset %p\n", tid, ls);
njn25e49d8e72002-09-23 09:36:25 +00002806
sewardj4bffb232002-11-13 21:46:34 +00002807 if (debug || LOCKSET_SANITY > 1)
2808 sanity_check_locksets("eraser_post_mutex_lock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002809}
2810
2811
2812static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
2813{
sewardjc26cc252002-10-23 21:58:55 +00002814 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002816 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002817 const LockSet *ls;
2818
njn72718642003-07-24 08:45:32 +00002819 test_mutex_state(mutex, MxUnlocked, tid);
2820 set_mutex_state(mutex, MxUnlocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002821
sewardjdac0a442002-11-13 22:08:40 +00002822 if (!ismember(thread_locks[tid], mutex))
2823 return;
2824
sewardjc26cc252002-10-23 21:58:55 +00002825 if (debug || DEBUG_LOCKS)
2826 VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002827
sewardjc26cc252002-10-23 21:58:55 +00002828 if (debug || LOCKSET_SANITY > 1)
2829 sanity_check_locksets("eraser_post_mutex_unlock-IN");
njn25e49d8e72002-09-23 09:36:25 +00002830
sewardj4bffb232002-11-13 21:46:34 +00002831 ls = lookup_LockSet_without(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002832
sewardj4bffb232002-11-13 21:46:34 +00002833 if (ls == NULL) {
2834 LockSet *newset = remove_LockSet(thread_locks[tid], mutex);
2835 insert_LockSet(newset);
2836 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002837 }
2838
2839 /* Update the thread's lock vector */
sewardjc26cc252002-10-23 21:58:55 +00002840 if (debug || DEBUG_LOCKS)
sewardj4bffb232002-11-13 21:46:34 +00002841 VG_(printf)("tid %u reverts from %p to lockset %p\n",
sewardjc26cc252002-10-23 21:58:55 +00002842		     tid, thread_locks[tid], ls);
njn25e49d8e72002-09-23 09:36:25 +00002843
sewardj4bffb232002-11-13 21:46:34 +00002844 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002845
sewardjc26cc252002-10-23 21:58:55 +00002846 if (debug || LOCKSET_SANITY > 1)
2847 sanity_check_locksets("eraser_post_mutex_unlock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002848}
2849
2850
2851/* ---------------------------------------------------------------------
2852 Checking memory reads and writes
2853 ------------------------------------------------------------------ */
2854
2855/* Behaviour on reads and writes:
2856 *
 2857  *                       VIR        EXCL        SHAR        SH_MOD
 2858  * ----------------------------------------------------------------
 2859  * rd/wr, 1st thread |    -         EXCL         -           -
 2860  * rd, new thread    |    -         SHAR         -           -
 2861  * wr, new thread    |    -         SH_MOD       -           -
 2862  * rd                |  error!       -          SHAR        SH_MOD
 2863  * wr                |   EXCL        -          SH_MOD      SH_MOD
2864 * ----------------------------------------------------------------
2865 */
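/* Illustrative walk through the table: the first thread to touch a word
   takes it VIR -> EXCL; a read from a second thread whose life segment
   overlaps the owner's takes it EXCL -> SHAR, tagged with the reader's
   current lockset (if the segments are disjoint, exclusive ownership just
   transfers); a later write moves it to SH_MOD, intersecting locksets at
   each step, and an empty intersection in SH_MOD is what gets reported as
   a possible data race. */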
2866
sewardj8fac99a2002-11-13 22:31:26 +00002867static inline
njn25e49d8e72002-09-23 09:36:25 +00002868void dump_around_a(Addr a)
2869{
2870 UInt i;
2871 shadow_word* sword;
2872 VG_(printf)("NEARBY:\n");
2873 for (i = a - 12; i <= a + 12; i += 4) {
2874 sword = get_sword_addr(i);
2875 VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
 2876      VG_(printf)("  %x -- other: %u, state: %u\n", i, sword->other, sword->state);
2877}
njn25e49d8e72002-09-23 09:36:25 +00002878
2879#if DEBUG_ACCESSES
2880 #define DEBUG_STATE(args...) \
2881 VG_(printf)("(%u) ", size), \
2882 VG_(printf)(args)
2883#else
2884 #define DEBUG_STATE(args...)
2885#endif
2886
njn72718642003-07-24 08:45:32 +00002887static void eraser_mem_read_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00002888{
sewardj72baa7a2002-12-09 23:32:58 +00002889 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00002890 shadow_word prevstate;
2891 ThreadLifeSeg *tls;
2892 const LockSet *ls;
2893 Bool statechange = False;
2894
2895 static const void *const states[4] = {
2896 [Vge_Virgin] &&st_virgin,
2897 [Vge_Excl] &&st_excl,
2898 [Vge_Shar] &&st_shar,
2899 [Vge_SharMod] &&st_sharmod,
2900 };
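   /* states[] is a GNU C computed-goto dispatch table indexed by the word's
      current state; each st_* label below handles a read when the word
      starts in that state. */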
2901
2902 tls = thread_seg[tid];
2903 sk_assert(tls != NULL && tls->tid == tid);
2904
2905 sword = get_sword_addr(a);
2906 if (sword == SEC_MAP_ACCESS) {
2907 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
2908 return;
2909 }
2910
2911 prevstate = *sword;
2912
2913 goto *states[sword->state];
2914
 2915    /* This looks like a read of uninitialised memory, which may be legit.  E.g.
2916 * calloc() zeroes its values, so untouched memory may actually be
2917 * initialised. Leave that stuff to Valgrind. */
2918 st_virgin:
2919 if (TID_INDICATING_NONVIRGIN == sword->other) {
2920 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
2921 if (DEBUG_VIRGIN_READS)
2922 dump_around_a(a);
2923 } else {
2924 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
2925 }
2926 statechange = True;
2927 *sword = SW(Vge_Excl, packTLS(tls)); /* remember exclusive owner */
2928 tls->refcount++;
2929 goto done;
2930
2931 st_excl: {
2932 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
2933
2934 if (tls == sw_tls) {
2935 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
2936 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
2937 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
2938 } else if (tlsIsDisjoint(tls, sw_tls)) {
2939 DEBUG_STATE("Read EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
2940 statechange = True;
2941 sword->other = packTLS(tls);
2942 sw_tls->refcount--;
2943 tls->refcount++;
2944 } else {
2945 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sw_tls->tid, a, tid);
2946 sw_tls->refcount--;
2947 statechange = True;
2948 *sword = SW(Vge_Shar, packLockSet(thread_locks[tid]));
2949
2950 if (DEBUG_MEM_LOCKSET_CHANGES)
2951 print_LockSet("excl read locks", unpackLockSet(sword->other));
2952 }
2953 goto done;
2954 }
2955
2956 st_shar:
2957 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
2958 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
2959 thread_locks[tid]));
2960 statechange = sword->other != prevstate.other;
2961 goto done;
2962
2963 st_sharmod:
2964 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
2965 ls = intersect(unpackLockSet(sword->other),
2966 thread_locks[tid]);
2967 sword->other = packLockSet(ls);
2968
2969 statechange = sword->other != prevstate.other;
2970
2971 if (isempty(ls)) {
njn72718642003-07-24 08:45:32 +00002972 record_eraser_error(tid, a, False /* !is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00002973 }
2974 goto done;
2975
2976 done:
2977 if (clo_execontext != EC_None && statechange) {
2978 EC_EIP eceip;
2979
2980 if (clo_execontext == EC_Some)
njn72718642003-07-24 08:45:32 +00002981 eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002982 else
njn72718642003-07-24 08:45:32 +00002983 eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00002984 setExeContext(a, eceip);
2985 }
2986}
njn25e49d8e72002-09-23 09:36:25 +00002987
njn72718642003-07-24 08:45:32 +00002988static void eraser_mem_read(Addr a, UInt size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00002989{
njn72718642003-07-24 08:45:32 +00002990 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00002991
sewardj8fac99a2002-11-13 22:31:26 +00002992 end = ROUNDUP(a+size, 4);
2993 a = ROUNDDN(a, 4);
2994
sewardj18cd4a52002-11-13 22:37:41 +00002995 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00002996 eraser_mem_read_word(a, tid);
sewardj18cd4a52002-11-13 22:37:41 +00002997}
2998
njn72718642003-07-24 08:45:32 +00002999static void eraser_mem_write_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00003000{
3001 ThreadLifeSeg *tls;
sewardj72baa7a2002-12-09 23:32:58 +00003002 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00003003 shadow_word prevstate;
3004 Bool statechange = False;
3005 static const void *const states[4] = {
3006 [Vge_Virgin] &&st_virgin,
3007 [Vge_Excl] &&st_excl,
3008 [Vge_Shar] &&st_shar,
3009 [Vge_SharMod] &&st_sharmod,
3010 };
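   /* Same computed-goto dispatch as eraser_mem_read_word above; the st_*
      labels here handle a write for each starting state, falling into
      SHARED_MODIFIED where an empty lockset triggers a race report. */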
3011
sewardjc4a810d2002-11-13 22:25:51 +00003012 tls = thread_seg[tid];
3013 sk_assert(tls != NULL && tls->tid == tid);
3014
sewardj18cd4a52002-11-13 22:37:41 +00003015 sword = get_sword_addr(a);
3016 if (sword == SEC_MAP_ACCESS) {
 3017      VG_(printf)("write distinguished 2ndary map! 0x%x\n", a);
3018 return;
3019 }
njn25e49d8e72002-09-23 09:36:25 +00003020
sewardj18cd4a52002-11-13 22:37:41 +00003021 prevstate = *sword;
njn25e49d8e72002-09-23 09:36:25 +00003022
sewardj18cd4a52002-11-13 22:37:41 +00003023 goto *states[sword->state];
sewardj16748af2002-10-22 04:55:54 +00003024
sewardj18cd4a52002-11-13 22:37:41 +00003025 st_virgin:
3026 if (TID_INDICATING_NONVIRGIN == sword->other)
3027 DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
3028 else
3029 DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
3030 statechange = True;
3031   *sword = SW(Vge_Excl, packTLS(tls));   /* remember exclusive owner */
3032 tls->refcount++;
3033 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003034
sewardj18cd4a52002-11-13 22:37:41 +00003035 st_excl: {
3036 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
3037
3038 if (tls == sw_tls) {
3039 DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
3040 goto done;
3041 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
3042 DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
3043 goto done;
3044 } else if (tlsIsDisjoint(tls, sw_tls)) {
3045 DEBUG_STATE("Write EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
3046 sword->other = packTLS(tls);
3047 sw_tls->refcount--;
sewardjc4a810d2002-11-13 22:25:51 +00003048 tls->refcount++;
sewardj8fac99a2002-11-13 22:31:26 +00003049 goto done;
sewardj18cd4a52002-11-13 22:37:41 +00003050 } else {
3051 DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sw_tls->tid, a, tid);
3052 statechange = True;
3053 sw_tls->refcount--;
3054 *sword = SW(Vge_SharMod, packLockSet(thread_locks[tid]));
3055      if (DEBUG_MEM_LOCKSET_CHANGES)
3056 print_LockSet("excl write locks", unpackLockSet(sword->other));
3057 goto SHARED_MODIFIED;
sewardjc4a810d2002-11-13 22:25:51 +00003058 }
sewardj18cd4a52002-11-13 22:37:41 +00003059 }
njn25e49d8e72002-09-23 09:36:25 +00003060
sewardj18cd4a52002-11-13 22:37:41 +00003061 st_shar:
3062 DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
3063 sword->state = Vge_SharMod;
3064 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3065 thread_locks[tid]));
3066 statechange = True;
3067 goto SHARED_MODIFIED;
njn25e49d8e72002-09-23 09:36:25 +00003068
sewardj18cd4a52002-11-13 22:37:41 +00003069 st_sharmod:
3070 DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
3071 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3072 thread_locks[tid]));
3073 statechange = sword->other != prevstate.other;
njn25e49d8e72002-09-23 09:36:25 +00003074
sewardj18cd4a52002-11-13 22:37:41 +00003075 SHARED_MODIFIED:
3076 if (isempty(unpackLockSet(sword->other))) {
njn72718642003-07-24 08:45:32 +00003077 record_eraser_error(tid, a, True /* is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00003078 }
3079 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003080
sewardj18cd4a52002-11-13 22:37:41 +00003081 done:
3082 if (clo_execontext != EC_None && statechange) {
3083 EC_EIP eceip;
sewardj499e3de2002-11-13 22:22:25 +00003084
sewardj18cd4a52002-11-13 22:37:41 +00003085 if (clo_execontext == EC_Some)
njn72718642003-07-24 08:45:32 +00003086 eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003087 else
njn72718642003-07-24 08:45:32 +00003088 eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003089 setExeContext(a, eceip);
njn25e49d8e72002-09-23 09:36:25 +00003090 }
3091}
3092
njn72718642003-07-24 08:45:32 +00003093static void eraser_mem_write(Addr a, UInt size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00003094{
sewardj8fac99a2002-11-13 22:31:26 +00003095 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003096
sewardj8fac99a2002-11-13 22:31:26 +00003097 end = ROUNDUP(a+size, 4);
3098 a = ROUNDDN(a, 4);
3099
sewardj18cd4a52002-11-13 22:37:41 +00003100 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00003101 eraser_mem_write_word(a, tid);
njn25e49d8e72002-09-23 09:36:25 +00003102}
3103
3104#undef DEBUG_STATE
3105
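/* Helpers called from the generated code for each load and store; they
   are registered with the core in SK_(pre_clo_init) below.  The sized
   write helpers compare the value being stored with what is already in
   memory, so stores which leave memory unchanged are ignored. */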
nethercote31212bc2004-02-29 15:50:04 +00003106REGPARM(1) static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00003107{
njn72718642003-07-24 08:45:32 +00003108 eraser_mem_read(a, 1, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003109}
3110
nethercote31212bc2004-02-29 15:50:04 +00003111REGPARM(1) static void eraser_mem_help_read_2(Addr a)
sewardja5b3aec2002-10-22 05:09:36 +00003112{
njn72718642003-07-24 08:45:32 +00003113 eraser_mem_read(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003114}
3115
nethercote31212bc2004-02-29 15:50:04 +00003116REGPARM(1) static void eraser_mem_help_read_4(Addr a)
sewardja5b3aec2002-10-22 05:09:36 +00003117{
njn72718642003-07-24 08:45:32 +00003118 eraser_mem_read(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003119}
3120
nethercote31212bc2004-02-29 15:50:04 +00003121REGPARM(2) static void eraser_mem_help_read_N(Addr a, UInt size)
sewardja5b3aec2002-10-22 05:09:36 +00003122{
njn72718642003-07-24 08:45:32 +00003123 eraser_mem_read(a, size, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003124}
3125
nethercote31212bc2004-02-29 15:50:04 +00003126REGPARM(2) static void eraser_mem_help_write_1(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003127{
3128 if (*(UChar *)a != val)
njn72718642003-07-24 08:45:32 +00003129 eraser_mem_write(a, 1, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003130}
nethercote31212bc2004-02-29 15:50:04 +00003131REGPARM(2) static void eraser_mem_help_write_2(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003132{
3133 if (*(UShort *)a != val)
njn72718642003-07-24 08:45:32 +00003134 eraser_mem_write(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003135}
nethercote31212bc2004-02-29 15:50:04 +00003136REGPARM(2) static void eraser_mem_help_write_4(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003137{
3138 if (*(UInt *)a != val)
njn72718642003-07-24 08:45:32 +00003139 eraser_mem_write(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003140}
nethercote31212bc2004-02-29 15:50:04 +00003141REGPARM(2) static void eraser_mem_help_write_N(Addr a, UInt size)
sewardj7ab2aca2002-10-20 19:40:32 +00003142{
njn72718642003-07-24 08:45:32 +00003143 eraser_mem_write(a, size, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003144}
njn25e49d8e72002-09-23 09:36:25 +00003145
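/* Thread lifecycle hooks: keep the thread-lifetime segments (the
   ThreadLifeSeg "tls" structures) up to date.  On create, the child
   starts a fresh segment with the parent's as a prior, and the parent
   itself moves to a new segment; on join, the joiner starts a fresh
   segment with the joinee's as a prior, and the joinee's is cleared. */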
sewardjc4a810d2002-11-13 22:25:51 +00003146static void hg_thread_create(ThreadId parent, ThreadId child)
3147{
3148 if (0)
3149 VG_(printf)("CREATE: %u creating %u\n", parent, child);
3150
3151 newTLS(child);
3152 addPriorTLS(child, parent);
3153
3154 newTLS(parent);
3155}
3156
3157static void hg_thread_join(ThreadId joiner, ThreadId joinee)
3158{
3159 if (0)
3160 VG_(printf)("JOIN: %u joining on %u\n", joiner, joinee);
3161
3162 newTLS(joiner);
3163 addPriorTLS(joiner, joinee);
3164
3165 clearTLS(joinee);
3166}
3167
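/* Model bus-locked (atomic) instructions as taking and releasing a
   synthetic mutex, __BUS_HARDWARE_LOCK__.  bus_lock()/bus_unlock() are
   registered as helpers below and are presumably called by the
   instrumenter around such instructions. */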
sewardj7a5ebcf2002-11-13 22:42:13 +00003168static Int __BUS_HARDWARE_LOCK__;
3169
3170static void bus_lock(void)
3171{
3172 ThreadId tid = VG_(get_current_tid)();
3173 eraser_pre_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3174 eraser_post_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3175}
3176
3177static void bus_unlock(void)
3178{
3179 ThreadId tid = VG_(get_current_tid)();
3180 eraser_post_mutex_unlock(tid, &__BUS_HARDWARE_LOCK__);
3181}
3182
njn25e49d8e72002-09-23 09:36:25 +00003183/*--------------------------------------------------------------------*/
sewardj7f3ad222002-11-13 22:11:53 +00003184/*--- Client requests ---*/
3185/*--------------------------------------------------------------------*/
3186
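/* Client requests (see helgrind.h): HG_CLEAN_MEMORY resets an address
   range to the virgin state; HG_KNOWN_RACE puts a range straight into
   the error state so that an already-known race is not reported. */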
njn72718642003-07-24 08:45:32 +00003187Bool SK_(handle_client_request)(ThreadId tid, UInt *args, UInt *ret)
sewardj7f3ad222002-11-13 22:11:53 +00003188{
3189 if (!VG_IS_SKIN_USERREQ('H','G',args[0]))
3190 return False;
3191
3192 switch(args[0]) {
3193 case VG_USERREQ__HG_CLEAN_MEMORY:
3194 set_address_range_state(args[1], args[2], Vge_VirginInit);
3195 *ret = 0; /* meaningless */
3196 break;
3197
3198 case VG_USERREQ__HG_KNOWN_RACE:
3199 set_address_range_state(args[1], args[2], Vge_Error);
3200 *ret = 0; /* meaningless */
3201 break;
3202
3203 default:
3204 return False;
3205 }
3206
3207 return True;
3208}
3209
3210
3211/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003212/*--- Setup ---*/
3213/*--------------------------------------------------------------------*/
3214
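/* Register the tool with the core: details, needs, memory-event
   callbacks and the compact/non-compact instrumentation helpers; then
   build the empty lockset and give every thread slot an empty lockset
   and an initial thread-lifetime segment. */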
njn810086f2002-11-14 12:42:47 +00003215void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00003216{
3217 Int i;
sewardj4bffb232002-11-13 21:46:34 +00003218 LockSet *empty;
njn25e49d8e72002-09-23 09:36:25 +00003219
njn810086f2002-11-14 12:42:47 +00003220 VG_(details_name) ("Helgrind");
3221 VG_(details_version) (NULL);
3222 VG_(details_description) ("a data race detector");
3223 VG_(details_copyright_author)(
nethercotebb1c9912004-01-04 16:43:23 +00003224 "Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.");
nethercote421281e2003-11-20 16:20:55 +00003225 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj78210aa2002-12-01 02:55:46 +00003226 VG_(details_avg_translation_sizeB) ( 115 );
njn25e49d8e72002-09-23 09:36:25 +00003227
njn810086f2002-11-14 12:42:47 +00003228 VG_(needs_core_errors)();
3229 VG_(needs_skin_errors)();
3230 VG_(needs_data_syms)();
njn810086f2002-11-14 12:42:47 +00003231 VG_(needs_client_requests)();
3232 VG_(needs_command_line_options)();
fitzhardinge98abfc72003-12-16 02:05:15 +00003233 VG_(needs_shadow_memory)();
njn25e49d8e72002-09-23 09:36:25 +00003234
fitzhardinge98abfc72003-12-16 02:05:15 +00003235 VG_(init_new_mem_startup) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003236
njn810086f2002-11-14 12:42:47 +00003237 /* stack ones not decided until VG_(post_clo_init)() */
njn25e49d8e72002-09-23 09:36:25 +00003238
fitzhardinge98abfc72003-12-16 02:05:15 +00003239 VG_(init_new_mem_brk) (& make_writable);
3240 VG_(init_new_mem_mmap) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003241
fitzhardinge98abfc72003-12-16 02:05:15 +00003242 VG_(init_change_mem_mprotect) (& eraser_set_perms);
njn25e49d8e72002-09-23 09:36:25 +00003243
fitzhardinge98abfc72003-12-16 02:05:15 +00003244 VG_(init_ban_mem_stack) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003245
fitzhardinge98abfc72003-12-16 02:05:15 +00003246 VG_(init_die_mem_stack) (NULL);
3247 VG_(init_die_mem_stack_signal) (NULL);
3248 VG_(init_die_mem_brk) (NULL);
3249 VG_(init_die_mem_munmap) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003250
fitzhardinge98abfc72003-12-16 02:05:15 +00003251 VG_(init_pre_mem_read) (& eraser_pre_mem_read);
3252 VG_(init_pre_mem_read_asciiz) (& eraser_pre_mem_read_asciiz);
3253 VG_(init_pre_mem_write) (& eraser_pre_mem_write);
3254 VG_(init_post_mem_write) (NULL);
njn810086f2002-11-14 12:42:47 +00003255
fitzhardinge98abfc72003-12-16 02:05:15 +00003256 VG_(init_post_thread_create) (& hg_thread_create);
3257 VG_(init_post_thread_join) (& hg_thread_join);
njn810086f2002-11-14 12:42:47 +00003258
fitzhardinge98abfc72003-12-16 02:05:15 +00003259 VG_(init_pre_mutex_lock) (& eraser_pre_mutex_lock);
3260 VG_(init_post_mutex_lock) (& eraser_post_mutex_lock);
3261 VG_(init_post_mutex_unlock) (& eraser_post_mutex_unlock);
sewardjc4a810d2002-11-13 22:25:51 +00003262
sewardja5b3aec2002-10-22 05:09:36 +00003263 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
3264 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
3265 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
3266 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
3267
3268 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
3269 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
3270 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
3271 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00003272
sewardj7a5ebcf2002-11-13 22:42:13 +00003273 VG_(register_noncompact_helper)((Addr) & bus_lock);
3274 VG_(register_noncompact_helper)((Addr) & bus_unlock);
3275
sewardj4bffb232002-11-13 21:46:34 +00003276   for (i = 0; i < LOCKSET_HASH_SZ; i++)
3277 lockset_hash[i] = NULL;
3278
3279 empty = alloc_LockSet(0);
3280 insert_LockSet(empty);
3281 emptyset = empty;
3282
sewardjc4a810d2002-11-13 22:25:51 +00003283 /* Init lock table and thread segments */
3284 for (i = 0; i < VG_N_THREADS; i++) {
sewardjdac0a442002-11-13 22:08:40 +00003285 thread_locks[i] = empty;
njn25e49d8e72002-09-23 09:36:25 +00003286
sewardjc4a810d2002-11-13 22:25:51 +00003287 newTLS(i);
3288 }
3289
njn25e49d8e72002-09-23 09:36:25 +00003290 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00003291 hg_malloc_list = VG_(HT_construct)();
njn25e49d8e72002-09-23 09:36:25 +00003292}
3293
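/* Small command-line helpers: match_Bool() parses "--opt=yes|no" style
   arguments (e.g. "--private-stacks="); match_str() hands back a copy
   of whatever follows the "--opt=" prefix. */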
sewardjf6374322002-11-13 22:35:55 +00003294static Bool match_Bool(Char *arg, Char *argstr, Bool *ret)
3295{
3296 Int len = VG_(strlen)(argstr);
3297
3298 if (VG_(strncmp)(arg, argstr, len) == 0) {
3299 if (VG_(strcmp)(arg+len, "yes") == 0) {
3300 *ret = True;
3301 return True;
3302 } else if (VG_(strcmp)(arg+len, "no") == 0) {
3303 *ret = False;
3304 return True;
3305 } else
3306 VG_(bad_option)(arg);
3307 }
3308 return False;
3309}
3310
sewardj406270b2002-11-13 22:18:09 +00003311static Bool match_str(Char *arg, Char *argstr, Char **ret)
3312{
3313 Int len = VG_(strlen)(argstr);
3314
3315 if (VG_(strncmp)(arg, argstr, len) == 0) {
3316 *ret = VG_(strdup)(arg+len);
3317 return True;
3318 }
3319
3320 return False;
3321}
sewardj406270b2002-11-13 22:18:09 +00003322
3323Bool SK_(process_cmd_line_option)(Char* arg)
3324{
sewardj499e3de2002-11-13 22:22:25 +00003325 Char *str;
3326
3327 if (match_str(arg, "--show-last-access=", &str)) {
3328 Bool ok = True;
3329 if (VG_(strcmp)(str, "no") == 0)
3330 clo_execontext = EC_None;
3331 else if (VG_(strcmp)(str, "some") == 0)
3332 clo_execontext = EC_Some;
3333 else if (VG_(strcmp)(str, "all") == 0)
3334 clo_execontext = EC_All;
3335 else {
3336 ok = False;
3337 VG_(bad_option)(arg);
3338 }
3339
3340 VG_(free)(str);
3341 if (ok)
3342 return True;
3343 }
3344
sewardjf6374322002-11-13 22:35:55 +00003345 if (match_Bool(arg, "--private-stacks=", &clo_priv_stacks))
3346 return True;
3347
njn3e884182003-04-15 13:03:23 +00003348 return VG_(replacement_malloc_process_cmd_line_option)(arg);
sewardj406270b2002-11-13 22:18:09 +00003349}
3350
njn3e884182003-04-15 13:03:23 +00003351void SK_(print_usage)(void)
sewardj406270b2002-11-13 22:18:09 +00003352{
njn3e884182003-04-15 13:03:23 +00003353 VG_(printf)(
sewardje11d6c82002-12-15 02:00:41 +00003354" --private-stacks=yes|no assume thread stacks are used privately [no]\n"
3355" --show-last-access=no|some|all\n"
3356" show location of last word access on error [no]\n"
njn3e884182003-04-15 13:03:23 +00003357 );
3358 VG_(replacement_malloc_print_usage)();
sewardj406270b2002-11-13 22:18:09 +00003359}
3360
njn3e884182003-04-15 13:03:23 +00003361void SK_(print_debug_usage)(void)
3362{
3363 VG_(replacement_malloc_print_debug_usage)();
3364}
njn25e49d8e72002-09-23 09:36:25 +00003365
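/* The stack trackers are installed here rather than in
   SK_(pre_clo_init) because the choice depends on --private-stacks;
   the 65536-entry table of ExeContextMap pointers used by
   --show-last-access is also allocated and zeroed here if needed. */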
3366void SK_(post_clo_init)(void)
3367{
njn810086f2002-11-14 12:42:47 +00003368 void (*stack_tracker)(Addr a, UInt len);
3369
sewardj499e3de2002-11-13 22:22:25 +00003370 if (clo_execontext) {
3371 execontext_map = VG_(malloc)(sizeof(ExeContextMap *) * 65536);
3372 VG_(memset)(execontext_map, 0, sizeof(ExeContextMap *) * 65536);
3373 }
sewardjf6374322002-11-13 22:35:55 +00003374
njn810086f2002-11-14 12:42:47 +00003375 if (clo_priv_stacks)
3376 stack_tracker = & eraser_new_mem_stack_private;
3377 else
3378 stack_tracker = & eraser_new_mem_stack;
sewardjf6374322002-11-13 22:35:55 +00003379
fitzhardinge98abfc72003-12-16 02:05:15 +00003380 VG_(init_new_mem_stack) (stack_tracker);
3381 VG_(init_new_mem_stack_signal) (stack_tracker);
njn25e49d8e72002-09-23 09:36:25 +00003382}
3383
3384
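/* Final report: optionally dump the lock table and mutexes, sanity
   check the locksets, and print the number of possible data races and
   lock-order problems found. */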
njn7d9f94d2003-04-22 21:41:40 +00003385void SK_(fini)(Int exitcode)
njn25e49d8e72002-09-23 09:36:25 +00003386{
sewardjdac0a442002-11-13 22:08:40 +00003387 if (DEBUG_LOCK_TABLE) {
sewardj4bffb232002-11-13 21:46:34 +00003388 pp_all_LockSets();
sewardjdac0a442002-11-13 22:08:40 +00003389 pp_all_mutexes();
3390 }
sewardj4bffb232002-11-13 21:46:34 +00003391
3392 if (LOCKSET_SANITY)
3393 sanity_check_locksets("SK_(fini)");
3394
sewardjff2c9232002-11-13 21:44:39 +00003395 VG_(message)(Vg_UserMsg, "%u possible data races found; %u lock order problems",
3396 n_eraser_warnings, n_lockorder_warnings);
sewardjf6374322002-11-13 22:35:55 +00003397
3398 if (0)
3399 VG_(printf)("stk_ld:%u+stk_st:%u = %u nonstk_ld:%u+nonstk_st:%u = %u %u%%\n",
3400 stk_ld, stk_st, stk_ld + stk_st,
3401 nonstk_ld, nonstk_st, nonstk_ld + nonstk_st,
3402 ((stk_ld+stk_st)*100) / (stk_ld + stk_st + nonstk_ld + nonstk_st));
njn25e49d8e72002-09-23 09:36:25 +00003403}
3404
fitzhardinge98abfc72003-12-16 02:05:15 +00003405/* Uses a 1:1 mapping */
3406VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 1.0)
3407
njn25e49d8e72002-09-23 09:36:25 +00003408/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00003409/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00003410/*--------------------------------------------------------------------*/