blob: 206152ce6e57269698e5e40c01e7887e1b7f42e5 [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- Helgrind: checking for data races in threaded programs. ---*/
njn25cac76cb2002-09-23 11:21:57 +00004/*--- hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00005/*--------------------------------------------------------------------*/
6
7/*
njnc9539842002-10-02 13:26:35 +00008 This file is part of Helgrind, a Valgrind skin for detecting
9 data races in threaded programs.
njn25e49d8e72002-09-23 09:36:25 +000010
11 Copyright (C) 2000-2002 Nicholas Nethercote
12 njn25@cam.ac.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "vg_skin.h"
33
34
35static UInt n_eraser_warnings = 0;
36
37
38/*------------------------------------------------------------*/
39/*--- Debug guff ---*/
40/*------------------------------------------------------------*/
41
42#define DEBUG_LOCK_TABLE 1 /* Print lock table at end */
43
44#define DEBUG_MAKE_ACCESSES 0 /* Print make_access() calls */
45#define DEBUG_LOCKS 0 /* Print lock()/unlock() calls and locksets */
46#define DEBUG_NEW_LOCKSETS 0 /* Print new locksets when created */
47#define DEBUG_ACCESSES 0 /* Print reads, writes */
48#define DEBUG_MEM_LOCKSET_CHANGES 0
49 /* Print when an address's lockset
50 changes; only useful with
51 DEBUG_ACCESSES */
52
53#define DEBUG_VIRGIN_READS 0 /* Dump around address on VIRGIN reads */
54
55/* heavyweight LockSet sanity checking:
56 0 == never
57 1 == after important ops
58 2 == As 1 and also after pthread_mutex_* ops (excessively slow)
59 */
60#define LOCKSET_SANITY 0
61
62
63/*------------------------------------------------------------*/
64/*--- Crude profiling machinery. ---*/
65/*------------------------------------------------------------*/
66
67// PPP: work out if I want this
68
69#define PROF_EVENT(x)
70#if 0
71#ifdef VG_PROFILE_MEMORY
72
73#define N_PROF_EVENTS 150
74
75static UInt event_ctr[N_PROF_EVENTS];
76
77void VGE_(done_prof_mem) ( void )
78{
79 Int i;
80 for (i = 0; i < N_PROF_EVENTS; i++) {
81 if ((i % 10) == 0)
82 VG_(printf)("\n");
83 if (event_ctr[i] > 0)
84 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
85 }
86 VG_(printf)("\n");
87}
88
89#define PROF_EVENT(ev) \
njne427a662002-10-02 11:08:25 +000090 do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
njn25e49d8e72002-09-23 09:36:25 +000091 event_ctr[ev]++; \
92 } while (False);
93
94#else
95
96//static void init_prof_mem ( void ) { }
97// void VG_(done_prof_mem) ( void ) { }
98
99#define PROF_EVENT(ev) /* */
100
101#endif /* VG_PROFILE_MEMORY */
102
103/* Event index. If just the name of the fn is given, this means the
104 number of calls to the fn. Otherwise it is the specified event.
105
106 [PPP: snip event numbers...]
107*/
108#endif /* 0 */
109
110
111/*------------------------------------------------------------*/
112/*--- Data defns. ---*/
113/*------------------------------------------------------------*/
114
115typedef enum
116 { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit }
117 VgeInitStatus;
118
119/* Should add up to 32 to fit in one word */
120#define OTHER_BITS 30
121#define STATE_BITS 2
122
123#define ESEC_MAP_WORDS 16384 /* Words per secondary map */
124
125/* This is for indicating that a memory block has been initialised but not
126 * really directly by a particular thread... (eg. text/data initialised
127 * automatically at startup).
128 * Must be different to virgin_word.other */
129#define TID_INDICATING_NONVIRGIN 1
130
sewardj16748af2002-10-22 04:55:54 +0000131/* Magic TID used for error suppression; if word state is Excl and tid
132 is this, then it means all access are OK without changing state and
133 without raising any more errors */
134#define TID_INDICATING_ALL ((1 << OTHER_BITS) - 1)
135
njn25e49d8e72002-09-23 09:36:25 +0000136/* Number of entries must fit in STATE_BITS bits */
137typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;
138
139typedef
140 struct {
141 UInt other:OTHER_BITS;
142 UInt state:STATE_BITS;
143 } shadow_word;
144
145typedef
146 struct {
147 shadow_word swords[ESEC_MAP_WORDS];
148 }
149 ESecMap;
150
151static ESecMap* primary_map[ 65536 ];
152static ESecMap distinguished_secondary_map;
153
154static shadow_word virgin_sword = { 0, Vge_Virgin };
155
156#define VGE_IS_DISTINGUISHED_SM(smap) \
157 ((smap) == &distinguished_secondary_map)
158
159#define ENSURE_MAPPABLE(addr,caller) \
160 do { \
161 if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
162 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
163 /*VG_(printf)("new 2map because of %p\n", addr);*/ \
164 } \
165 } while(0)
166
167
168/*------------------------------------------------------------*/
169/*--- Low-level support for memory tracking. ---*/
170/*------------------------------------------------------------*/
171
172/*
173 All reads and writes are recorded in the memory map, which
174 records the state of all memory in the process. The memory map is
175 organised like that for normal Valgrind, except each that everything
176 is done at word-level instead of byte-level, and each word has only
177 one word of shadow (instead of 36 bits).
178
179 As for normal Valgrind there is a distinguished secondary map. But we're
180 working at word-granularity, so it has 16k word entries instead of 64k byte
181 entries. Lookup is done as follows:
182
183 bits 31..16: primary map lookup
184 bits 15.. 2: secondary map lookup
185 bits 1.. 0: ignored
186*/
187
188
189/*------------------------------------------------------------*/
190/*--- Basic bitmap management, reading and writing. ---*/
191/*------------------------------------------------------------*/
192
193/* Allocate and initialise a secondary map, marking all words as virgin. */
194
195/* Just a value that isn't a real pointer */
196#define SEC_MAP_ACCESS (shadow_word*)0x99
197
198
199static
200ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
201{
202 ESecMap* map;
203 UInt i;
204 //PROF_EVENT(10); PPP
205
206 /* It just happens that a SecMap occupies exactly 18 pages --
207 although this isn't important, so the following assert is
208 spurious. (SSS: not true for ESecMaps -- they're 16 pages) */
njne427a662002-10-02 11:08:25 +0000209 sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
njn25e49d8e72002-09-23 09:36:25 +0000210 map = VG_(get_memory_from_mmap)( sizeof(ESecMap), caller );
211
212 for (i = 0; i < ESEC_MAP_WORDS; i++)
213 map->swords[i] = virgin_sword;
214
215 return map;
216}
217
218
219/* Set a word. The byte give by 'a' could be anywhere in the word -- the whole
220 * word gets set. */
221static __inline__
222void set_sword ( Addr a, shadow_word sword )
223{
224 ESecMap* sm;
225
226 //PROF_EVENT(23); PPP
227 ENSURE_MAPPABLE(a, "VGE_(set_sword)");
228
229 /* Use bits 31..16 for primary, 15..2 for secondary lookup */
230 sm = primary_map[a >> 16];
njne427a662002-10-02 11:08:25 +0000231 sk_assert(sm != &distinguished_secondary_map);
njn25e49d8e72002-09-23 09:36:25 +0000232 sm->swords[(a & 0xFFFC) >> 2] = sword;
233
234 if (VGE_IS_DISTINGUISHED_SM(sm)) {
235 VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
236 // XXX: may be legit, but I want to know when it happens --njn
njne427a662002-10-02 11:08:25 +0000237 VG_(skin_panic)("wrote to distinguished 2ndary map!");
njn25e49d8e72002-09-23 09:36:25 +0000238 }
239}
240
241
/* Return the address of the shadow word covering 'a'.
 *
 * If the region is still backed by the shared distinguished secondary
 * map (i.e. never written to), returns the SEC_MAP_ACCESS sentinel
 * (0x99, NOT a real pointer) instead of a usable address -- callers
 * MUST check for that sentinel before dereferencing. */
static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      /* Fake pointer sentinel -- never dereference this. */
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}
259
260
261// SSS: rename these so they're not so similar to memcheck, unless it's
262// appropriate of course
263
/* Reset the whole word containing 'a' to the virgin (never-accessed)
   state. */
static __inline__
void init_virgin_sword(Addr a)
{
   set_sword(a, virgin_sword);
}
269
270
271/* 'a' is guaranteed to be 4-byte aligned here (not that that's important,
272 * really) */
273static
274void make_writable_aligned ( Addr a, UInt size )
275{
276 Addr a_past_end = a + size;
277
278 //PROF_EVENT(??) PPP
njne427a662002-10-02 11:08:25 +0000279 sk_assert(IS_ALIGNED4_ADDR(a));
njn25e49d8e72002-09-23 09:36:25 +0000280
281 for ( ; a < a_past_end; a += 4) {
282 set_sword(a, virgin_sword);
283 }
284}
285
286static __inline__
287void init_nonvirgin_sword(Addr a)
288{
289 shadow_word sword;
sewardjb52a1b02002-10-23 21:38:22 +0000290 ThreadId tid = VG_(get_current_or_recent_tid)();
njn25e49d8e72002-09-23 09:36:25 +0000291
sewardjb52a1b02002-10-23 21:38:22 +0000292 sk_assert(tid != VG_INVALID_THREADID);
293 sword.other = tid;
njn25e49d8e72002-09-23 09:36:25 +0000294 sword.state = Vge_Excl;
295 set_sword(a, sword);
296}
297
298
299/* In this case, we treat it for Eraser's sake like virgin (it hasn't
300 * been inited by a particular thread, it's just done automatically upon
301 * startup), but we mark its .state specially so it doesn't look like an
302 * uninited read. */
303static __inline__
304void init_magically_inited_sword(Addr a)
305{
306 shadow_word sword;
307
sewardjb52a1b02002-10-23 21:38:22 +0000308 sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());
njn25e49d8e72002-09-23 09:36:25 +0000309 sword.other = TID_INDICATING_NONVIRGIN;
310 sword.state = Vge_Virgin;
311 set_sword(a, virgin_sword);
312}
313
sewardj274c6012002-10-22 04:54:55 +0000314/*------------------------------------------------------------*/
315/*--- Implementation of mutex structure. ---*/
316/*------------------------------------------------------------*/
317
318#define M_MUTEX_HASHSZ 1023
319
320typedef struct _LockSet LockSet; /* forward declaration */
321
sewardj16748af2002-10-22 04:55:54 +0000322typedef enum MutexState {
323 MxUnknown, /* don't know */
324 MxUnlocked, /* unlocked */
325 MxLocked, /* locked */
326 MxDead /* destroyed */
327} MutexState;
328
sewardj274c6012002-10-22 04:54:55 +0000329typedef struct hg_mutex {
330 void *mutexp;
sewardj274c6012002-10-22 04:54:55 +0000331 struct hg_mutex *next;
sewardj16748af2002-10-22 04:55:54 +0000332
333 MutexState state; /* mutex state */
334 ThreadId tid; /* owner */
335 ExeContext *location; /* where the last change happened */
sewardj274c6012002-10-22 04:54:55 +0000336} hg_mutex_t;
337
sewardj16748af2002-10-22 04:55:54 +0000338static void record_mutex_error(ThreadId tid, hg_mutex_t *mutex,
339 Char *str, ExeContext *ec);
340
sewardj274c6012002-10-22 04:54:55 +0000341static hg_mutex_t *mutex_hash[M_MUTEX_HASHSZ];
342
343static inline Int mutex_cmp(const hg_mutex_t *a, const hg_mutex_t *b)
344{
345 return (UInt)a->mutexp - (UInt)b->mutexp;
346}
347
/* Find or create the hg_mutex_t shadowing a client mutex at 'mutexp'.
 * Lookup is a linear scan of a hash chain; new entries are linked at
 * the chain head with state MxUnknown (we haven't yet observed any
 * operation on the mutex).  Entries are never freed. */
static hg_mutex_t *get_mutex(void *mutexp)
{
   UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
   hg_mutex_t *mp;

   for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
      if (mp->mutexp == mutexp)
	 return mp;

   /* Not seen before: allocate and link at the head of the chain. */
   mp = VG_(malloc)(sizeof(*mp));
   mp->mutexp = mutexp;
   mp->next = mutex_hash[bucket];
   mutex_hash[bucket] = mp;

   /* State is unknown until the first observed operation. */
   mp->state = MxUnknown;
   mp->tid = VG_INVALID_THREADID;
   mp->location = NULL;

   return mp;
}
njn25e49d8e72002-09-23 09:36:25 +0000369
sewardj16748af2002-10-22 04:55:54 +0000370static const char *pp_MutexState(MutexState st)
371{
372 switch(st) {
373 case MxLocked: return "Locked";
374 case MxUnlocked: return "Unlocked";
375 case MxDead: return "Dead";
376 case MxUnknown: return "Unknown";
377 }
378 return "???";
379}
380
/* catch bad mutex state changes (though the common ones are handled
   by core) -- record an error for each illegal transition, then update
   the shadow state anyway so later reports stay meaningful.
   'mutex->location' always ends up pointing at the most recent change. */
static void set_mutex_state(hg_mutex_t *mutex, MutexState state,
			    ThreadId tid, ThreadState *tst)
{
   if (0)
      VG_(printf)("tid %d changing mutex (%p)->%p state %s -> %s\n",
		  tid, mutex, mutex->mutexp, pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex,
			 "operate on dead mutex", mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      /* Locking while someone else holds it: report, but still take
         ownership below so the shadow tracks the client's behaviour. */
      if (mutex->state == MxLocked && mutex->tid != tid)
	 record_mutex_error(tid, mutex, "take already held lock", mutex->location);
      mutex->tid = tid;
      break;

   case MxUnlocked:
      /* Both checks may fire independently: unlocking an unlocked
         mutex, and unlocking one owned by another thread. */
      if (mutex->state != MxLocked) {
	 record_mutex_error(tid, mutex,
			    "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
	 record_mutex_error(tid, mutex,
			    "unlock someone else's mutex", mutex->location);
      }
      mutex->tid = VG_INVALID_THREADID;
      break;

   default:
      /* MxUnknown / MxDead transitions: nothing extra to check here. */
      break;
   }

   /* Remember where this change happened, for later error reports. */
   mutex->location = VG_(get_ExeContext)(tst);
   mutex->state = state;
}
423
njn25e49d8e72002-09-23 09:36:25 +0000424/*------------------------------------------------------------*/
425/*--- Implementation of lock sets. ---*/
426/*------------------------------------------------------------*/
427
428#define M_LOCKSET_TABLE 1000
429
sewardj274c6012002-10-22 04:54:55 +0000430struct _LockSet {
431 hg_mutex_t *mutex;
432 struct _LockSet* next;
433};
njn25e49d8e72002-09-23 09:36:25 +0000434
435
436/* Each one is an index into the lockset table. */
437static UInt thread_locks[VG_N_THREADS];
438
439/* # lockset table entries used. */
440static Int n_lockset_table = 1;
441
442/* lockset_table[0] is always NULL, representing the empty lockset */
443static LockSet* lockset_table[M_LOCKSET_TABLE];
444
445
446static __inline__
447Bool is_valid_lockset_id ( Int id )
448{
449 return id >= 0 && id < n_lockset_table;
450}
451
452
453static
454Int allocate_LockSet(LockSet* set)
455{
456 if (n_lockset_table >= M_LOCKSET_TABLE)
njne427a662002-10-02 11:08:25 +0000457 VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE");
njn25e49d8e72002-09-23 09:36:25 +0000458 lockset_table[n_lockset_table] = set;
459 n_lockset_table++;
460# if DEBUG_MEM_LOCKSET_CHANGES || DEBUG_NEW_LOCKSETS
461 VG_(printf)("allocate LOCKSET VECTOR %p to %d\n", set, n_lockset_table-1);
462# endif
463 return n_lockset_table-1;
464}
465
466
467static
468void pp_LockSet(LockSet* p)
469{
470 VG_(printf)("{ ");
471 while (p != NULL) {
472 VG_(printf)("%x ", p->mutex);
473 p = p->next;
474 }
475 VG_(printf)("}\n");
476}
477
478
479static __attribute__((unused))
480void pp_all_LockSets ( void )
481{
482 Int i;
483 for (i = 0; i < n_lockset_table; i++) {
484 VG_(printf)("[%d] = ", i);
485 pp_LockSet(lockset_table[i]);
486 }
487}
488
489
490static
491void free_LockSet(LockSet *p)
492{
493 LockSet* q;
494 while (NULL != p) {
495 q = p;
496 p = p->next;
497 VG_(free)(q);
498# if DEBUG_MEM_LOCKSET_CHANGES
499 VG_(printf)("free'd %x\n", q);
500# endif
501 }
502}
503
504
505static
506Bool structural_eq_LockSet(LockSet* a, LockSet* b)
507{
508 while (a && b) {
sewardj274c6012002-10-22 04:54:55 +0000509 if (mutex_cmp(a->mutex, b->mutex) != 0) {
njn25e49d8e72002-09-23 09:36:25 +0000510 return False;
511 }
512 a = a->next;
513 b = b->next;
514 }
515 return (NULL == a && NULL == b);
516}
517
518
519#if LOCKSET_SANITY
520/* Check invariants:
521 - all locksets are unique
522 - each set is a linked list in strictly increasing order of mutex addr
523*/
/* Heavyweight consistency check of the whole lockset table, invoked
 * via the LOCKSET_SANITY knob.  Verifies, in order of increasing cost:
 *   1/2  table size in range, slot 0 is the canonical empty set (NULL)
 *   3/4  used slots are non-NULL, unused slots are NULL
 *   5    each set is strictly increasing by mutex address (mutex_cmp)
 *   6/7  no two sets share a head node or are structurally equal
 * On failure prints the badness code, dumps the table and panics. */
static
void sanity_check_locksets ( Char* caller )
{
   Int              i, j, badness;
   LockSet*         v;
   hg_mutex_t       mx_prev;

   badness = 0;
   i = j = -1;

   //VG_(printf)("sanity %s\n", caller);
   /* Check really simple things first */

   if (n_lockset_table < 1 || n_lockset_table > M_LOCKSET_TABLE)
      { badness = 1; goto baaad; }

   if (lockset_table[0] != NULL)
      { badness = 2; goto baaad; }

   for (i = 1; i < n_lockset_table; i++)
      if (lockset_table[i] == NULL)
	 { badness = 3; goto baaad; }

   for (i = n_lockset_table; i < M_LOCKSET_TABLE; i++)
      if (lockset_table[i] != NULL)
	 { badness = 4; goto baaad; }

   /* Check the sanity of each individual set. */
   for (i = 1; i < n_lockset_table; i++) {
      v = lockset_table[i];
      /* NULL sorts before every real mutex address, so the first
         comparison below cannot spuriously fail. */
      mx_prev.mutexp = NULL;
      while (True) {
	 if (v == NULL) break;
	 if (mutex_cmp(&mx_prev, v->mutex) >= 0)
	    { badness = 5; goto baaad; }
	 mx_prev = *v->mutex;
	 v = v->next;
      }
   }

   /* Ensure the sets are unique, both structurally and in respect of
      the address of their first nodes. */
   for (i = 1; i < n_lockset_table; i++) {
      for (j = i+1; j < n_lockset_table; j++) {
	 if (lockset_table[i] == lockset_table[j])
	    { badness = 6; goto baaad; }
	 if (structural_eq_LockSet(lockset_table[i], lockset_table[j]))
	    { badness = 7; goto baaad; }
      }
   }
   return;

  baaad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, j = %d, badness = %d, caller = %s\n",
               i, j, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}
583#endif /* LOCKSET_SANITY */
584
585
586/* Builds ia with mx removed. mx should actually be in ia!
587 (a checked assertion). Resulting set should not already
588 exist in the table (unchecked).
589*/
/* Build a new lockset equal to lockset 'ia' with mutex 'mx' removed,
 * add it to the table, and return its index.  'mx' should actually be
 * in set 'ia'; if (due to client bugs) it is not, the copy is discarded
 * and 'ia' is returned unchanged so table uniqueness is preserved.
 * NOTE(review): the DEBUG_MEM_LOCKSET_CHANGES branch calls
 * print_LockSet, but only pp_LockSet is defined in this file --
 * presumably a stale name that breaks compilation with that flag on;
 * verify before enabling the flag. */
static
UInt remove ( UInt ia, hg_mutex_t *mx )
{
   Int       found, res;
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;
   LockSet*  a = lockset_table[ia];
   sk_assert(is_valid_lockset_id(ia));

#  if DEBUG_MEM_LOCKSET_CHANGES
   VG_(printf)("Removing from %d mutex %p:\n", ia, mx->mutexp);
#  endif

#  if DEBUG_MEM_LOCKSET_CHANGES
   print_LockSet(a);
#  endif

#  if LOCKSET_SANITY
   sanity_check_locksets("remove-IN");
#  endif

   /* Copy every node except the one matching 'mx', counting matches;
      the tail is re-terminated on every iteration so the list is
      always well-formed. */
   found = 0;
   while (a) {
      if (mutex_cmp(a->mutex, mx) != 0) {
	 new_node = VG_(malloc)(sizeof(LockSet));
#        if DEBUG_MEM_LOCKSET_CHANGES
	 VG_(printf)("malloc'd %x\n", new_node);
#        endif
	 new_node->mutex = a->mutex;
	 *prev_ptr = new_node;
	 prev_ptr = &((*prev_ptr)->next);
	 a = a->next;
      } else {
	 found++;
      }
      *prev_ptr = NULL;
   }
   sk_assert(found == 1 /* sigh .. if the client is buggy */ || found == 0 );

   /* Preserve uniqueness invariants in face of client buggyness */
   if (found == 0) {
      free_LockSet(new_vector);
      return ia;
   }

   /* Add to the table. */
   res = allocate_LockSet(new_vector);

#  if LOCKSET_SANITY
   sanity_check_locksets("remove-OUT");
#  endif

   return res;
}
646
647
648/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
649 * doesn't do the insertion. Returns True if they match.
650 */
/* Tricky: equivalent to (compare(insert(missing_mutex, a), b)), but
 * doesn't do the insertion.  Returns True if they match.  Both lists
 * are walked in lockstep; each element of b must match either the next
 * element of a or missing_mutex.  Relies on the sets being sorted by
 * mutex address (the table invariant), so a single forward pass works. */
static Bool
weird_LockSet_equals(LockSet* a, LockSet* b,
		     hg_mutex_t *missing_mutex)
{
   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */
   while (True) {
      if (b == NULL)
	 break;
      /* deal with missing already being in a */
      if (a && mutex_cmp(a->mutex, missing_mutex) == 0)
	 a = a->next;
      /* match current b element either against a or missing */
      if (mutex_cmp(b->mutex, missing_mutex) == 0) {
	 b = b->next;
	 continue;
      }
      /* wasn't == missing, so have to match from a, or fail */
      if (a && mutex_cmp(b->mutex, a->mutex) == 0) {
	 a = a->next;
	 b = b->next;
	 continue;
      }
      break;
   }
   /* Matched iff we consumed all of b. */
   return (b==NULL ? True : False);
}
678
679
680/* Builds the intersection, and then unbuilds it if it's already in the table.
681 */
/* Return the table index of the intersection of locksets 'ia' and 'ib'.
 * Builds the intersection list (standard sorted-list merge, relying on
 * the strictly-increasing-by-mutex invariant), then searches the table
 * for a structurally equal set: if found, the fresh copy is freed and
 * the existing index returned; otherwise the copy is added. */
static UInt intersect(UInt ia, UInt ib)
{
   Int       i;
   LockSet*  a = lockset_table[ia];
   LockSet*  b = lockset_table[ib];
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;

#  if DEBUG_MEM_LOCKSET_CHANGES
   VG_(printf)("Intersecting %d %d:\n", ia, ib);
#  endif

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-IN");
#  endif

   /* Fast case -- when the two are the same */
   if (ia == ib) {
#     if DEBUG_MEM_LOCKSET_CHANGES
      VG_(printf)("Fast case -- both the same: %u\n", ia);
      print_LockSet(a);
#     endif
      return ia;
   }

#  if DEBUG_MEM_LOCKSET_CHANGES
   print_LockSet(a);
   print_LockSet(b);
#  endif

   /* Merge-walk the two sorted lists, keeping only common mutexes.
      The final 'else' arm is unreachable (cmp is <0, 0 or >0) and
      exists purely as a paranoia trap. */
   while (a && b) {
      if (mutex_cmp(a->mutex, b->mutex) == 0) {
	 new_node = VG_(malloc)(sizeof(LockSet));
#        if DEBUG_MEM_LOCKSET_CHANGES
	 VG_(printf)("malloc'd %x\n", new_node);
#        endif
	 new_node->mutex = a->mutex;
	 *prev_ptr = new_node;
	 prev_ptr = &((*prev_ptr)->next);
	 a = a->next;
	 b = b->next;
      } else if (mutex_cmp(a->mutex, b->mutex) < 0) {
	 a = a->next;
      } else if (mutex_cmp(a->mutex, b->mutex) > 0) {
	 b = b->next;
      } else VG_(skin_panic)("STOP PRESS: Laws of arithmetic broken");

      *prev_ptr = NULL;
   }

   /* Now search for it in the table, adding it if not seen before */
   for (i = 0; i < n_lockset_table; i++) {
      if (structural_eq_LockSet(lockset_table[i], new_vector))
	 break;
   }

   if (i == n_lockset_table) {
      i = allocate_LockSet(new_vector);
   } else {
      free_LockSet(new_vector);
   }

   /* Check we won't overflow the OTHER_BITS bits of sword->other */
   sk_assert(i < (1 << OTHER_BITS));

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-OUT");
#  endif

   return i;
}
755
756
757/*------------------------------------------------------------*/
758/*--- Setting and checking permissions. ---*/
759/*------------------------------------------------------------*/
760
/* Initialise the shadow state for every word overlapping [a, a+len),
 * according to 'status' (virgin / owned-by-current-thread / magically
 * initialised).  The range is expanded outwards to word boundaries, so
 * partial words at either end are initialised whole. */
static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr end;

#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   /* Huge ranges are legal but suspicious -- warn, don't refuse. */
   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range state: large range %d",
                   len);

   VGP_PUSHCC(VgpSARP);

   /* Memory block may not be aligned or a whole word multiple.  In neat cases,
    * we have to init len/4 words (len is in bytes).  In nasty cases, it's
    * len/4+1 words.  This works out which it is by aligning the block and
    * seeing if the end byte is in the same word as it is for the unaligned
    * block; if not, it's the awkward case. */
   end = (a + len + 3) & ~3;	/* round up; must happen before 'a' is rounded */
   a   &= ~3;			/* round down */

   /* Do it ... */
   switch (status) {
   case Vge_VirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_virgin_sword(a);
      }
      break;

   case Vge_NonVirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_nonvirgin_sword(a);
      }
      break;

   case Vge_SegmentInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_magically_inited_sword(a);
      }
      break;

   default:
      VG_(printf)("init_status = %u\n", status);
      VG_(skin_panic)("Unexpected Vge_InitStatus");
   }

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with
      __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSARP);
}
825
826
/* Mark [a, a+len) as magically initialised (startup text/data). */
static void make_segment_readable ( Addr a, UInt len )
{
   //PROF_EVENT(??); PPP
   set_address_range_state ( a, len, Vge_SegmentInit );
}
832
/* Mark [a, a+len) as virgin (allocated but not yet initialised). */
static void make_writable ( Addr a, UInt len )
{
   //PROF_EVENT(36); PPP
   set_address_range_state( a, len, Vge_VirginInit );
}
838
/* Mark [a, a+len) as initialised and exclusively owned by the current
   thread. */
static void make_readable ( Addr a, UInt len )
{
   //PROF_EVENT(37); PPP
   set_address_range_state( a, len, Vge_NonVirginInit );
}
844
845
njn25e49d8e72002-09-23 09:36:25 +0000846/* Block-copy states (needed for implementing realloc()). */
847static void copy_address_range_state(Addr src, Addr dst, UInt len)
848{
849 UInt i;
850
851 //PROF_EVENT(40); PPP
852 for (i = 0; i < len; i += 4) {
853 shadow_word sword = *(get_sword_addr ( src+i ));
854 //PROF_EVENT(41); PPP
855 set_sword ( dst+i, sword );
856 }
857}
858
859// SSS: put these somewhere better
sewardj0f811692002-10-22 04:59:26 +0000860static void eraser_mem_read (Addr a, UInt data_size, ThreadState *tst);
861static void eraser_mem_write(Addr a, UInt data_size, ThreadState *tst);
sewardja5b3aec2002-10-22 05:09:36 +0000862
863#define REGPARM(x) __attribute__((regparm (x)))
864
865static void eraser_mem_help_read_1(Addr a) REGPARM(1);
866static void eraser_mem_help_read_2(Addr a) REGPARM(1);
867static void eraser_mem_help_read_4(Addr a) REGPARM(1);
868static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);
869
870static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
871static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
872static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
873static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);
njn25e49d8e72002-09-23 09:36:25 +0000874
/* Core callback: the core (e.g. a syscall wrapper 's') is about to read
   [base, base+size); feed it to the race detector as a read by 'tst'. */
static
void eraser_pre_mem_read(CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   eraser_mem_read(base, size, tst);
}
881
/* Core callback: the core is about to read a NUL-terminated string at
   'base'.  Reports strlen(base) bytes read -- note this excludes the
   terminating NUL itself. */
static
void eraser_pre_mem_read_asciiz(CorePart part, ThreadState* tst,
                                Char* s, UInt base )
{
   eraser_mem_read(base, VG_(strlen)((Char*)base), tst);
}
888
/* Core callback: the core is about to write [base, base+size); feed it
   to the race detector as a write by 'tst'. */
static
void eraser_pre_mem_write(CorePart part, ThreadState* tst,
                          Char* s, UInt base, UInt size )
{
   eraser_mem_write(base, size, tst);
}
895
896
897
/* Core callback for memory mapped at program startup. */
static
void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   make_segment_readable(a, len);
}
904
905
906static
907void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
908{
909 if (is_inited) {
910 make_readable(a, len);
911 } else {
912 make_writable(a, len);
913 }
914}
915
916static
917void eraser_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +0000918 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +0000919{
920 if (rr) make_readable(a, len);
921 else if (ww) make_writable(a, len);
922 /* else do nothing */
923}
924
925
926/*--------------------------------------------------------------*/
927/*--- Initialise the memory audit system on program startup. ---*/
928/*--------------------------------------------------------------*/
929
930static
931void init_shadow_memory(void)
932{
933 Int i;
934
935 for (i = 0; i < ESEC_MAP_WORDS; i++)
936 distinguished_secondary_map.swords[i] = virgin_sword;
937
938 /* These entries gradually get overwritten as the used address
939 space expands. */
940 for (i = 0; i < 65536; i++)
941 primary_map[i] = &distinguished_secondary_map;
942}
943
944
945/*--------------------------------------------------------------*/
946/*--- Machinery to support sanity checking ---*/
947/*--------------------------------------------------------------*/
948
949/* Check that nobody has spuriously claimed that the first or last 16
950 pages (64 KB) of address space have become accessible. Failure of
951 the following do not per se indicate an internal consistency
952 problem, but they are so likely to that we really want to know
953 about it if so. */
954
955Bool SK_(cheap_sanity_check) ( void )
956{
957 if (VGE_IS_DISTINGUISHED_SM(primary_map[0]) &&
958 VGE_IS_DISTINGUISHED_SM(primary_map[65535]))
959 return True;
960 else
961 return False;
962}
963
964
965Bool SK_(expensive_sanity_check)(void)
966{
967 Int i;
968
969 /* Make sure nobody changed the distinguished secondary. */
970 for (i = 0; i < ESEC_MAP_WORDS; i++)
971 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
972 distinguished_secondary_map.swords[i].state != virgin_sword.state)
973 return False;
974
975 return True;
976}
977
978
979/*--------------------------------------------------------------*/
980/*--- Instrumentation ---*/
981/*--------------------------------------------------------------*/
982
njn25e49d8e72002-09-23 09:36:25 +0000983/* Create and return an instrumented version of cb_in. Free cb_in
984 before returning. */
985UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
986{
987 UCodeBlock* cb;
988 Int i;
989 UInstr* u_in;
990 Int t_size = INVALID_TEMPREG;
991
njn4ba5a792002-09-30 10:23:54 +0000992 cb = VG_(alloc_UCodeBlock)();
njn25e49d8e72002-09-23 09:36:25 +0000993 cb->nextTemp = cb_in->nextTemp;
994
995 for (i = 0; i < cb_in->used; i++) {
996 u_in = &cb_in->instrs[i];
997
njn25e49d8e72002-09-23 09:36:25 +0000998 switch (u_in->opcode) {
999
1000 case NOP: case CALLM_S: case CALLM_E:
1001 break;
1002
sewardja5b3aec2002-10-22 05:09:36 +00001003 case LOAD: {
1004 void (*help)(Addr);
1005 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
1006
1007 switch(u_in->size) {
1008 case 1: help = eraser_mem_help_read_1; break;
1009 case 2: help = eraser_mem_help_read_2; break;
1010 case 4: help = eraser_mem_help_read_4; break;
1011 default:
1012 VG_(skin_panic)("bad size");
1013 }
1014
1015 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
1016 uCCall(cb, (Addr)help, 1, 1, False);
njn25e49d8e72002-09-23 09:36:25 +00001017
sewardja5b3aec2002-10-22 05:09:36 +00001018 VG_(copy_UInstr)(cb, u_in);
1019 t_size = INVALID_TEMPREG;
1020 break;
1021 }
1022
1023 case FPU_R: {
njne427a662002-10-02 11:08:25 +00001024 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00001025 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00001026
1027 t_size = newTemp(cb);
1028 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1029 uLiteral(cb, (UInt)u_in->size);
njn25e49d8e72002-09-23 09:36:25 +00001030
sewardja5b3aec2002-10-22 05:09:36 +00001031 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
1032 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
njn25e49d8e72002-09-23 09:36:25 +00001033
sewardja5b3aec2002-10-22 05:09:36 +00001034 VG_(copy_UInstr)(cb, u_in);
1035 t_size = INVALID_TEMPREG;
1036 break;
1037 }
1038
1039 case STORE: {
1040 void (*help)(Addr, UInt);
1041 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
1042
1043 switch(u_in->size) {
1044 case 1: help = eraser_mem_help_write_1; break;
1045 case 2: help = eraser_mem_help_write_2; break;
1046 case 4: help = eraser_mem_help_write_4; break;
1047 default:
1048 VG_(skin_panic)("bad size");
1049 }
1050
1051 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
1052 uCCall(cb, (Addr)help, 2, 2, False);
1053
1054 VG_(copy_UInstr)(cb, u_in);
1055 t_size = INVALID_TEMPREG;
1056 break;
1057 }
1058
1059 case FPU_W: {
njne427a662002-10-02 11:08:25 +00001060 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00001061 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00001062
1063 t_size = newTemp(cb);
1064 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1065 uLiteral(cb, (UInt)u_in->size);
1066 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
1067 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
1068
1069 VG_(copy_UInstr)(cb, u_in);
1070 t_size = INVALID_TEMPREG;
1071 break;
1072 }
njn25e49d8e72002-09-23 09:36:25 +00001073
1074 default:
njn4ba5a792002-09-30 10:23:54 +00001075 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001076 break;
1077 }
1078 }
1079
njn4ba5a792002-09-30 10:23:54 +00001080 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001081 return cb;
1082}
1083
1084
sewardj16748af2002-10-22 04:55:54 +00001085/*------------------------------------------------------------*/
1086/*--- Shadow chunks info ---*/
1087/*------------------------------------------------------------*/
1088
1089#define SHADOW_EXTRA 2
1090
1091static __inline__
1092void set_sc_where( ShadowChunk* sc, ExeContext* ec )
1093{
1094 sc->skin_extra[0] = (UInt)ec;
1095}
1096
1097static __inline__
1098ExeContext *get_sc_where( ShadowChunk* sc )
1099{
1100 return (ExeContext*)sc->skin_extra[0];
1101}
1102
1103static __inline__
1104void set_sc_tid(ShadowChunk *sc, ThreadId tid)
1105{
1106 sc->skin_extra[1] = (UInt)tid;
1107}
1108
1109static __inline__
1110ThreadId get_sc_tid(ShadowChunk *sc)
1111{
1112 return (ThreadId)sc->skin_extra[1];
1113}
1114
/* Fill in the skin-specific part of a freshly-made shadow chunk:
   record where it was allocated (ExeContext) and by which thread.
   NOTE(review): the original wrote VG_(get_tid_from_ThreadState(tst))
   -- parens inside the VG_() macro -- which only works because the
   macro token-pastes its whole argument; the rest of the file spells
   it VG_(get_tid_from_ThreadState)(tst).  Worth normalising, but left
   byte-identical here. */
void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
{
   set_sc_where( sc, VG_(get_ExeContext) ( tst ) );
   set_sc_tid(sc, VG_(get_tid_from_ThreadState(tst)));
}
1120
njn25e49d8e72002-09-23 09:36:25 +00001121/*--------------------------------------------------------------------*/
1122/*--- Error and suppression handling ---*/
1123/*--------------------------------------------------------------------*/
1124
/* The one suppression kind Helgrind understands ("Eraser" in a
   suppressions file). */
typedef
   enum {
      /* Possible data race */
      EraserSupp
   }
   EraserSuppKind;
1131
/* What kind of error it is. */
typedef
   enum {
      EraserErr,      /* data-race */
      MutexErr        /* mutex operations */
      /* no trailing comma: keeps C89/-pedantic builds clean and
         matches the other enums in this file */
   }
   EraserErrorKind;
1139
sewardj16748af2002-10-22 04:55:54 +00001140/* The classification of a faulting address. */
1141typedef
1142 enum { Undescribed, /* as-yet unclassified */
1143 Stack,
1144 Unknown, /* classification yielded nothing useful */
1145 Mallocd,
1146 Segment
1147 }
1148 AddrKind;
1149/* Records info about a faulting address. */
1150typedef
1151 struct {
1152 /* ALL */
1153 AddrKind akind;
1154 /* Freed, Mallocd */
1155 Int blksize;
1156 /* Freed, Mallocd */
1157 Int rwoffset;
1158 /* Freed, Mallocd */
1159 ExeContext* lastchange;
1160 ThreadId lasttid;
1161 /* Stack */
1162 ThreadId stack_tid;
1163 /* Segment */
1164 const Char* filename;
1165 const Char* section;
1166 /* True if is just-below %esp -- could be a gcc bug. */
1167 Bool maybe_gcc;
1168 }
1169 AddrInfo;
njn25e49d8e72002-09-23 09:36:25 +00001170
sewardj16748af2002-10-22 04:55:54 +00001171/* What kind of memory access is involved in the error? */
1172typedef
1173 enum { ReadAxs, WriteAxs, ExecAxs }
1174 AxsKind;
1175
1176/* Extra context for memory errors */
1177typedef
1178 struct {
1179 AxsKind axskind;
1180 Int size;
1181 AddrInfo addrinfo;
1182 Bool isWrite;
1183 shadow_word prevstate;
1184 /* MutexErr */
1185 hg_mutex_t *mutex;
1186 ExeContext *lasttouched;
1187 ThreadId lasttid;
1188 }
1189 HelgrindError;
1190
1191static __inline__
1192void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00001193{
sewardj16748af2002-10-22 04:55:54 +00001194 ai->akind = Unknown;
1195 ai->blksize = 0;
1196 ai->rwoffset = 0;
1197 ai->lastchange = NULL;
1198 ai->lasttid = VG_INVALID_THREADID;
1199 ai->filename = NULL;
1200 ai->section = "???";
1201 ai->stack_tid = VG_INVALID_THREADID;
1202 ai->maybe_gcc = False;
njn25e49d8e72002-09-23 09:36:25 +00001203}
1204
sewardj16748af2002-10-22 04:55:54 +00001205static __inline__
1206void clear_HelgrindError ( HelgrindError* err_extra )
1207{
1208 err_extra->axskind = ReadAxs;
1209 err_extra->size = 0;
1210 err_extra->mutex = NULL;
1211 err_extra->lasttouched= NULL;
1212 err_extra->lasttid = VG_INVALID_THREADID;
1213 err_extra->prevstate.state = Vge_Virgin;
1214 err_extra->prevstate.other = 0;
1215 clear_AddrInfo ( &err_extra->addrinfo );
1216 err_extra->isWrite = False;
1217}
1218
1219
1220
1221/* Describe an address as best you can, for error messages,
1222 putting the result in ai. */
1223
1224static void describe_addr ( Addr a, AddrInfo* ai )
1225{
1226 ShadowChunk* sc;
1227
1228 /* Nested functions, yeah. Need the lexical scoping of 'a'. */
1229
1230 /* Closure for searching thread stacks */
1231 Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
1232 {
1233 return (stack_min <= a && a <= stack_max);
1234 }
1235 /* Closure for searching malloc'd and free'd lists */
1236 Bool addr_is_in_block(ShadowChunk *sh_ch)
1237 {
1238 return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size );
1239 }
1240
1241 /* Search for it in segments */
1242 {
1243 const SegInfo *seg;
1244
1245 for(seg = VG_(next_seginfo)(NULL);
1246 seg != NULL;
1247 seg = VG_(next_seginfo)(seg)) {
1248 Addr base = VG_(seg_start)(seg);
1249 UInt size = VG_(seg_size)(seg);
1250 const UChar *filename = VG_(seg_filename)(seg);
1251
1252 if (a >= base && a < base+size) {
1253 ai->akind = Segment;
1254 ai->blksize = size;
1255 ai->rwoffset = a - base;
1256 ai->filename = filename;
1257
1258 switch(VG_(seg_sect_kind)(a)) {
1259 case Vg_SectText: ai->section = "text"; break;
1260 case Vg_SectData: ai->section = "data"; break;
1261 case Vg_SectBSS: ai->section = "BSS"; break;
1262 case Vg_SectGOT: ai->section = "GOT"; break;
1263 case Vg_SectPLT: ai->section = "PLT"; break;
1264 case Vg_SectUnknown:
1265 default:
1266 ai->section = "???"; break;
1267 }
1268
1269 return;
1270 }
1271 }
1272 }
1273
1274 /* Search for a currently malloc'd block which might bracket it. */
1275 sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
1276 if (NULL != sc) {
1277 ai->akind = Mallocd;
1278 ai->blksize = sc->size;
1279 ai->rwoffset = (Int)(a) - (Int)(sc->data);
1280 ai->lastchange = get_sc_where(sc);
1281 ai->lasttid = get_sc_tid(sc);
1282 return;
1283 }
1284 /* Clueless ... */
1285 ai->akind = Unknown;
1286 return;
1287}
1288
1289
1290/* Creates a copy of the err_extra, updates the copy with address info if
1291 necessary, sticks the copy into the SkinError. */
1292void SK_(dup_extra_and_update)(SkinError* err)
1293{
1294 HelgrindError* err_extra;
1295
1296 err_extra = VG_(malloc)(sizeof(HelgrindError));
1297 *err_extra = *((HelgrindError*)err->extra);
1298
1299 if (err_extra->addrinfo.akind == Undescribed)
1300 describe_addr ( err->addr, &(err_extra->addrinfo) );
1301
1302 err->extra = err_extra;
1303}
1304
sewardj0f811692002-10-22 04:59:26 +00001305static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write,
1306 shadow_word prevstate )
sewardj16748af2002-10-22 04:55:54 +00001307{
1308 HelgrindError err_extra;
sewardj1806d7f2002-10-22 05:05:49 +00001309 static const shadow_word err_sw = { TID_INDICATING_ALL, Vge_Excl };
sewardj16748af2002-10-22 04:55:54 +00001310
1311 clear_HelgrindError(&err_extra);
1312 err_extra.isWrite = is_write;
1313 err_extra.addrinfo.akind = Undescribed;
1314 err_extra.prevstate = prevstate;
1315
sewardj0f811692002-10-22 04:59:26 +00001316 VG_(maybe_record_error)( tst, EraserErr, a,
sewardj16748af2002-10-22 04:55:54 +00001317 (is_write ? "writing" : "reading"),
1318 &err_extra);
1319
sewardj1806d7f2002-10-22 05:05:49 +00001320 set_sword(a, err_sw);
sewardj16748af2002-10-22 04:55:54 +00001321}
1322
1323static void record_mutex_error(ThreadId tid, hg_mutex_t *mutex,
1324 Char *str, ExeContext *ec)
1325{
1326 HelgrindError err_extra;
1327
1328 clear_HelgrindError(&err_extra);
1329 err_extra.addrinfo.akind = Undescribed;
1330 err_extra.mutex = mutex;
1331 err_extra.lasttouched = ec;
1332 err_extra.lasttid = tid;
1333
1334 VG_(maybe_record_error)(VG_(get_ThreadState)(tid), MutexErr,
1335 (Addr)mutex->mutexp, str, &err_extra);
1336}
njn25e49d8e72002-09-23 09:36:25 +00001337
1338Bool SK_(eq_SkinError) ( VgRes not_used,
1339 SkinError* e1, SkinError* e2 )
1340{
sewardj16748af2002-10-22 04:55:54 +00001341 sk_assert(e1->ekind == e2->ekind);
1342
1343 switch(e1->ekind) {
1344 case EraserErr:
1345 return e1->addr == e2->addr;
1346
1347 case MutexErr:
1348 return e1->addr == e2->addr;
1349 }
1350
njn25e49d8e72002-09-23 09:36:25 +00001351 if (e1->string != e2->string) return False;
1352 if (0 != VG_(strcmp)(e1->string, e2->string)) return False;
1353 return True;
1354}
1355
sewardj16748af2002-10-22 04:55:54 +00001356static void pp_AddrInfo ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00001357{
sewardj16748af2002-10-22 04:55:54 +00001358 switch (ai->akind) {
1359 case Stack:
1360 VG_(message)(Vg_UserMsg,
1361 " Address %p is on thread %d's stack",
1362 a, ai->stack_tid);
1363 break;
1364 case Unknown:
1365 if (ai->maybe_gcc) {
1366 VG_(message)(Vg_UserMsg,
1367 " Address %p is just below %%esp. Possibly a bug in GCC/G++",
1368 a);
1369 VG_(message)(Vg_UserMsg,
1370 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
1371 } else {
1372 VG_(message)(Vg_UserMsg,
1373 " Address %p is not stack'd, malloc'd or free'd", a);
1374 }
1375 break;
1376 case Segment:
1377 VG_(message)(Vg_UserMsg,
1378 " Address %p is in %s section of %s",
1379 a, ai->section, ai->filename);
1380 break;
1381 case Mallocd: {
1382 UInt delta;
1383 UChar* relative;
1384 if (ai->rwoffset < 0) {
1385 delta = (UInt)(- ai->rwoffset);
1386 relative = "before";
1387 } else if (ai->rwoffset >= ai->blksize) {
1388 delta = ai->rwoffset - ai->blksize;
1389 relative = "after";
1390 } else {
1391 delta = ai->rwoffset;
1392 relative = "inside";
1393 }
1394 VG_(message)(Vg_UserMsg,
1395 " Address %p is %d bytes %s a block of size %d alloc'd by thread %d at",
1396 a, delta, relative,
1397 ai->blksize,
1398 ai->lasttid);
sewardj5481f8f2002-10-20 19:43:47 +00001399
sewardj16748af2002-10-22 04:55:54 +00001400 VG_(pp_ExeContext)(ai->lastchange);
1401 break;
1402 }
1403 default:
1404 VG_(skin_panic)("pp_AddrInfo");
1405 }
njn25e49d8e72002-09-23 09:36:25 +00001406}
1407
1408
sewardj16748af2002-10-22 04:55:54 +00001409void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
njn25e49d8e72002-09-23 09:36:25 +00001410{
sewardj16748af2002-10-22 04:55:54 +00001411 HelgrindError *extra = (HelgrindError *)err->extra;
1412 Char buf[100];
1413 Char *msg = buf;
1414
1415 *msg = '\0';
1416
1417 switch(err->ekind) {
1418 case EraserErr:
1419 VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
1420 err->string, err->addr, err->addr );
1421 pp_ExeContext();
1422
1423 switch(extra->prevstate.state) {
1424 case Vge_Virgin:
1425 /* shouldn't be possible to go directly from virgin -> error */
1426 VG_(sprintf)(buf, "virgin!?");
1427 break;
1428
1429 case Vge_Excl:
1430 sk_assert(extra->prevstate.other != TID_INDICATING_ALL);
1431 VG_(sprintf)(buf, "exclusively owned by thread %d", extra->prevstate.other);
1432 break;
1433
1434 case Vge_Shar:
1435 case Vge_SharMod: {
1436 LockSet *ls;
1437 UInt count;
1438 Char *cp;
1439
1440 if (lockset_table[extra->prevstate.other] == NULL) {
1441 VG_(sprintf)(buf, "shared %s, no locks",
1442 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
1443 break;
1444 }
1445
1446 for(count = 0, ls = lockset_table[extra->prevstate.other]; ls != NULL; ls = ls->next)
1447 count++;
1448 msg = VG_(malloc)(25 + (120 * count));
1449
1450 cp = msg;
1451 cp += VG_(sprintf)(cp, "shared %s, locked by: ",
1452 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
1453 for(ls = lockset_table[extra->prevstate.other]; ls != NULL; ls = ls->next)
1454 cp += VG_(sprintf)(cp, "%p%(y, ", ls->mutex->mutexp, ls->mutex->mutexp);
1455 cp[-2] = '\0';
1456 break;
1457 }
1458 }
1459
1460 if (*msg) {
1461 VG_(message)(Vg_UserMsg, " Previous state: %s", msg);
1462 if (msg != buf)
1463 VG_(free)(msg);
1464 }
1465 pp_AddrInfo(err->addr, &extra->addrinfo);
1466 break;
1467
1468 case MutexErr:
1469 VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s at",
1470 err->addr, err->addr, err->string );
1471 pp_ExeContext();
1472 if (extra->lasttouched) {
1473 VG_(message)(Vg_UserMsg, " last touched by thread %d at", extra->lasttid);
1474 VG_(pp_ExeContext)(extra->lasttouched);
1475 }
1476 pp_AddrInfo(err->addr, &extra->addrinfo);
1477 break;
1478 }
njn25e49d8e72002-09-23 09:36:25 +00001479}
1480
1481
1482Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
1483{
1484 if (0 == VG_(strcmp)(name, "Eraser")) {
1485 *skind = EraserSupp;
1486 return True;
1487 } else {
1488 return False;
1489 }
1490}
1491
1492
1493Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf,
1494 Int nBuf, SkinSupp* s )
1495{
1496 /* do nothing -- no extra suppression info present. Return True to
1497 indicate nothing bad happened. */
1498 return True;
1499}
1500
1501
/* Decide whether a suppression matches an error.  An "Eraser"
   suppression has no extra constraints, so it matches any data-race
   error at the suppressed context; mutex errors never reach here
   (asserted). */
Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
{
   sk_assert( su->skind == EraserSupp);
   sk_assert(err->ekind == EraserErr);
   return True;
}
1508
1509
// SSS: copying mutex's pointer... is that ok?  Could they get deallocated?
// (does that make sense, deallocating a mutex?)
/* Called after a thread acquires a mutex: mark the mutex locked, then
   move the thread to the lockset (current set + mutex).  Locksets are
   interned in lockset_table; the loop below either finds an existing
   entry equal to the enlarged set, or builds it at the first free
   slot (index n_lockset_table).  Lists are kept sorted by mutex_cmp
   so set equality can be checked element-wise. */
static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
{
   Int i = 1;                      /* slot 0 is the empty lockset; start at 1 */
   LockSet* new_node;
   LockSet* p;
   LockSet** q;
   hg_mutex_t *mutex = get_mutex(void_mutex);

   set_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));

#  if DEBUG_LOCKS
   VG_(printf)("lock (%u, %x)\n", tid, mutex->mutexp);
#  endif

   sk_assert(tid < VG_N_THREADS &&
             thread_locks[tid] < M_LOCKSET_TABLE);
   /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_lock-IN");
#  endif

   while (True) {
      if (i == M_LOCKSET_TABLE)
         VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE");

      /* the lockset didn't already exist */
      if (i == n_lockset_table) {

         p = lockset_table[thread_locks[tid]];
         q = &lockset_table[i];

         /* copy the thread's lockset, creating a new list */
         while (p != NULL) {
            new_node = VG_(malloc)(sizeof(LockSet));
            new_node->mutex = p->mutex;
            *q = new_node;
            q = &((*q)->next);
            p = p->next;
         }
         (*q) = NULL;

         /* find spot for the new mutex in the new list (kept sorted) */
         p = lockset_table[i];
         q = &lockset_table[i];
         while (NULL != p && mutex_cmp(mutex, p->mutex) > 0) {
            p = p->next;
            q = &((*q)->next);
         }

         /* insert new mutex in new list */
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = mutex;
         new_node->next = p;
         (*q) = new_node;

         p = lockset_table[i];
         sk_assert(i == n_lockset_table);
         n_lockset_table++;

#        if DEBUG_NEW_LOCKSETS
         VG_(printf)("new lockset vector (%d): ", i);
         print_LockSet(p);
#        endif

         goto done;

      } else {
         /* If this succeeds, the required vector (with the new mutex added)
          * already exists in the table at position i.  Otherwise, keep
          * looking. */
         if (weird_LockSet_equals(lockset_table[thread_locks[tid]],
                                  lockset_table[i], mutex)) {
            goto done;
         }
      }
      /* if we get to here, table lockset didn't match the new thread
       * lockset, so keep looking */
      i++;
   }

  done:
   /* Update the thread's lock vector */
   thread_locks[tid] = i;
#  if DEBUG_LOCKS
   VG_(printf)("tid %u now has lockset %d\n", tid, i);
#  endif

#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_lock-OUT");
#  endif

}
1604
1605
/* Called after a thread releases a mutex: mark it unlocked, then move
   the thread to the lockset (current set - mutex), either by finding
   an existing interned set or by building one ('remove' here is the
   file-local lockset helper, not stdio's remove()). */
static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
{
   Int i = 0;
   hg_mutex_t *mutex = get_mutex(void_mutex);

   set_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));

#  if DEBUG_LOCKS
   VG_(printf)("unlock(%u, %x)\n", tid, mutex->mutexp);
#  endif

#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_unlock-IN");
#  endif

   // find the lockset that is the current one minus tid, change thread to use
   // that index.

   while (True) {

      if (i == n_lockset_table) {
         /* We can't find a suitable pre-made set, so we'll have to
            make one. */
         i = remove ( thread_locks[tid], mutex );
         break;
      }

      /* Args are in opposite order to call above, for reverse effect */
      if (weird_LockSet_equals( lockset_table[i],
                                lockset_table[thread_locks[tid]], mutex) ) {
         /* found existing diminished set -- the best outcome. */
         break;
      }

      i++;
   }

   /* Update the thread's lock vector */
#  if DEBUG_LOCKS
   VG_(printf)("tid %u reverts from %d to lockset %d\n",
               tid, thread_locks[tid], i);
#  endif

   thread_locks[tid] = i;

#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_unlock-OUT");
#  endif
}
1655
1656
1657/* ---------------------------------------------------------------------
1658 Checking memory reads and writes
1659 ------------------------------------------------------------------ */
1660
1661/* Behaviour on reads and writes:
1662 *
1663 * VIR EXCL SHAR SH_MOD
1664 * ----------------------------------------------------------------
1665 * rd/wr, 1st thread | - EXCL - -
1666 * rd, new thread | - SHAR - -
1667 * wr, new thread | - SH_MOD - -
1668 * rd | error! - SHAR SH_MOD
1669 * wr | EXCL - SH_MOD SH_MOD
1670 * ----------------------------------------------------------------
1671 */
1672
#if 0
/* Debug helper (currently compiled out): dump the shadow words in a
   +/- 12-byte window around a.  Referenced by eraser_mem_read under
   DEBUG_VIRGIN_READS -- re-enable both together. */
static
void dump_around_a(Addr a)
{
   UInt i;
   shadow_word* sword;
   VG_(printf)("NEARBY:\n");
   for (i = a - 12; i <= a + 12; i += 4) {
      sword = get_sword_addr(i);
      VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
   }
}
#endif
1686
1687/* Find which word the first and last bytes are in (by shifting out bottom 2
1688 * bits) then find the difference. */
1689static __inline__
1690Int compute_num_words_accessed(Addr a, UInt size)
1691{
1692 Int x, y, n_words;
1693 x = a >> 2;
1694 y = (a + size - 1) >> 2;
1695 n_words = y - x + 1;
1696 return n_words;
1697}
1698
1699
1700#if DEBUG_ACCESSES
1701 #define DEBUG_STATE(args...) \
1702 VG_(printf)("(%u) ", size), \
1703 VG_(printf)(args)
1704#else
1705 #define DEBUG_STATE(args...)
1706#endif
1707
1708
sewardj0f811692002-10-22 04:59:26 +00001709static void eraser_mem_read(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00001710{
sewardj0f811692002-10-22 04:59:26 +00001711 ThreadId tid;
njn25e49d8e72002-09-23 09:36:25 +00001712 shadow_word* sword;
njn25e49d8e72002-09-23 09:36:25 +00001713 Addr end = a + 4*compute_num_words_accessed(a, size);
sewardj16748af2002-10-22 04:55:54 +00001714 shadow_word prevstate;
njn25e49d8e72002-09-23 09:36:25 +00001715
sewardj0f811692002-10-22 04:59:26 +00001716 tid = (tst == NULL) ? VG_(get_current_tid)() : VG_(get_tid_from_ThreadState)(tst);
1717
njn25e49d8e72002-09-23 09:36:25 +00001718 for ( ; a < end; a += 4) {
1719
1720 sword = get_sword_addr(a);
1721 if (sword == SEC_MAP_ACCESS) {
1722 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
1723 continue;
1724 }
1725
sewardj16748af2002-10-22 04:55:54 +00001726 prevstate = *sword;
1727
njn25e49d8e72002-09-23 09:36:25 +00001728 switch (sword->state) {
1729
1730 /* This looks like reading of unitialised memory, may be legit. Eg.
1731 * calloc() zeroes its values, so untouched memory may actually be
1732 * initialised. Leave that stuff to Valgrind. */
1733 case Vge_Virgin:
1734 if (TID_INDICATING_NONVIRGIN == sword->other) {
1735 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
1736# if DEBUG_VIRGIN_READS
1737 dump_around_a(a);
1738# endif
1739 } else {
1740 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
1741 }
1742 sword->state = Vge_Excl;
1743 sword->other = tid; /* remember exclusive owner */
1744 break;
1745
1746 case Vge_Excl:
1747 if (tid == sword->other) {
1748 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
sewardj16748af2002-10-22 04:55:54 +00001749 } else if (TID_INDICATING_ALL == sword->other) {
1750 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
1751 } else {
njn25e49d8e72002-09-23 09:36:25 +00001752 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sword->other, a, tid);
1753 sword->state = Vge_Shar;
1754 sword->other = thread_locks[tid];
1755# if DEBUG_MEM_LOCKSET_CHANGES
1756 print_LockSet(lockset_table[sword->other]);
1757# endif
1758 }
1759 break;
1760
1761 case Vge_Shar:
1762 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
1763 sword->other = intersect(sword->other, thread_locks[tid]);
1764 break;
1765
1766 case Vge_SharMod:
1767 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
1768 sword->other = intersect(sword->other, thread_locks[tid]);
1769
1770 if (lockset_table[sword->other] == NULL) {
sewardj0f811692002-10-22 04:59:26 +00001771 record_eraser_error(tst, a, False /* !is_write */, prevstate);
njn25e49d8e72002-09-23 09:36:25 +00001772 n_eraser_warnings++;
1773 }
1774 break;
1775
1776 default:
njne427a662002-10-02 11:08:25 +00001777 VG_(skin_panic)("Unknown eraser state");
njn25e49d8e72002-09-23 09:36:25 +00001778 }
1779 }
1780}
1781
1782
sewardj0f811692002-10-22 04:59:26 +00001783static void eraser_mem_write(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00001784{
sewardj0f811692002-10-22 04:59:26 +00001785 ThreadId tid;
njn25e49d8e72002-09-23 09:36:25 +00001786 shadow_word* sword;
njn25e49d8e72002-09-23 09:36:25 +00001787 Addr end = a + 4*compute_num_words_accessed(a, size);
sewardj16748af2002-10-22 04:55:54 +00001788 shadow_word prevstate;
njn25e49d8e72002-09-23 09:36:25 +00001789
sewardj0f811692002-10-22 04:59:26 +00001790 tid = (tst == NULL) ? VG_(get_current_tid)() : VG_(get_tid_from_ThreadState)(tst);
1791
njn25e49d8e72002-09-23 09:36:25 +00001792 for ( ; a < end; a += 4) {
1793
1794 sword = get_sword_addr(a);
1795 if (sword == SEC_MAP_ACCESS) {
1796 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
1797 continue;
1798 }
1799
sewardj16748af2002-10-22 04:55:54 +00001800 prevstate = *sword;
1801
njn25e49d8e72002-09-23 09:36:25 +00001802 switch (sword->state) {
1803 case Vge_Virgin:
1804 if (TID_INDICATING_NONVIRGIN == sword->other)
1805 DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
1806 else
1807 DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
1808 sword->state = Vge_Excl;
1809 sword->other = tid; /* remember exclusive owner */
1810 break;
1811
1812 case Vge_Excl:
1813 if (tid == sword->other) {
1814 DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
1815 break;
sewardj16748af2002-10-22 04:55:54 +00001816 } else if (TID_INDICATING_ALL == sword->other) {
1817 DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
1818 break;
njn25e49d8e72002-09-23 09:36:25 +00001819 } else {
1820 DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sword->other, a, tid);
1821 sword->state = Vge_SharMod;
1822 sword->other = thread_locks[tid];
1823# if DEBUG_MEM_LOCKSET_CHANGES
1824 print_LockSet(lockset_table[sword->other]);
1825# endif
1826 goto SHARED_MODIFIED;
1827 }
1828
1829 case Vge_Shar:
1830 DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
1831 sword->state = Vge_SharMod;
1832 sword->other = intersect(sword->other, thread_locks[tid]);
1833 goto SHARED_MODIFIED;
1834
1835 case Vge_SharMod:
1836 DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
1837 sword->other = intersect(sword->other, thread_locks[tid]);
1838 SHARED_MODIFIED:
1839 if (lockset_table[sword->other] == NULL) {
sewardj0f811692002-10-22 04:59:26 +00001840 record_eraser_error(tst, a, True /* is_write */, prevstate);
njn25e49d8e72002-09-23 09:36:25 +00001841 n_eraser_warnings++;
1842 }
1843 break;
1844
1845 default:
njne427a662002-10-02 11:08:25 +00001846 VG_(skin_panic)("Unknown eraser state");
njn25e49d8e72002-09-23 09:36:25 +00001847 }
1848 }
1849}
1850
1851#undef DEBUG_STATE
1852
sewardja5b3aec2002-10-22 05:09:36 +00001853static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00001854{
sewardja5b3aec2002-10-22 05:09:36 +00001855 eraser_mem_read(a, 1, NULL);
sewardj7ab2aca2002-10-20 19:40:32 +00001856}
1857
sewardja5b3aec2002-10-22 05:09:36 +00001858static void eraser_mem_help_read_2(Addr a)
1859{
1860 eraser_mem_read(a, 2, NULL);
1861}
1862
1863static void eraser_mem_help_read_4(Addr a)
1864{
1865 eraser_mem_read(a, 4, NULL);
1866}
1867
1868static void eraser_mem_help_read_N(Addr a, UInt size)
1869{
1870 eraser_mem_read(a, size, NULL);
1871}
1872
/* Write helpers called from instrumented code.  The sized variants
   only run the (expensive) state machine when the store actually
   changes the value in memory -- a store of the value already present
   cannot change what other threads observe. */
static void eraser_mem_help_write_1(Addr a, UInt val)
{
   if (*(UChar *)a != val)
      eraser_mem_write(a, 1, NULL);
}
static void eraser_mem_help_write_2(Addr a, UInt val)
{
   if (*(UShort *)a != val)
      eraser_mem_write(a, 2, NULL);
}
static void eraser_mem_help_write_4(Addr a, UInt val)
{
   if (*(UInt *)a != val)
      eraser_mem_write(a, 4, NULL);
}
/* Generic size (FPU writes): no old-value check, always tracked. */
static void eraser_mem_help_write_N(Addr a, UInt size)
{
   eraser_mem_write(a, size, NULL);
}
njn25e49d8e72002-09-23 09:36:25 +00001892
1893/*--------------------------------------------------------------------*/
1894/*--- Setup ---*/
1895/*--------------------------------------------------------------------*/
1896
njnd04b7c62002-10-03 14:05:52 +00001897void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
njn25e49d8e72002-09-23 09:36:25 +00001898{
1899 Int i;
1900
sewardj4aa62ba2002-10-05 15:49:27 +00001901 details->name = "Helgrind";
njnd04b7c62002-10-03 14:05:52 +00001902 details->version = NULL;
1903 details->description = "a data race detector";
1904 details->copyright_author =
1905 "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote.";
1906 details->bug_reports_to = "njn25@cam.ac.uk";
njn25e49d8e72002-09-23 09:36:25 +00001907
sewardj5481f8f2002-10-20 19:43:47 +00001908 needs->core_errors = True;
1909 needs->skin_errors = True;
1910 needs->data_syms = True;
sewardj16748af2002-10-22 04:55:54 +00001911 needs->sizeof_shadow_block = SHADOW_EXTRA;
njn25e49d8e72002-09-23 09:36:25 +00001912
njn25e49d8e72002-09-23 09:36:25 +00001913 track->new_mem_startup = & eraser_new_mem_startup;
1914 track->new_mem_heap = & eraser_new_mem_heap;
1915 track->new_mem_stack = & make_writable;
1916 track->new_mem_stack_aligned = & make_writable_aligned;
1917 track->new_mem_stack_signal = & make_writable;
1918 track->new_mem_brk = & make_writable;
sewardj40f8ebe2002-10-23 21:46:13 +00001919 track->new_mem_mmap = & eraser_new_mem_startup;
njn25e49d8e72002-09-23 09:36:25 +00001920
1921 track->copy_mem_heap = & copy_address_range_state;
1922 track->change_mem_mprotect = & eraser_set_perms;
1923
1924 track->ban_mem_heap = NULL;
1925 track->ban_mem_stack = NULL;
1926
1927 track->die_mem_heap = NULL;
1928 track->die_mem_stack = NULL;
1929 track->die_mem_stack_aligned = NULL;
1930 track->die_mem_stack_signal = NULL;
1931 track->die_mem_brk = NULL;
1932 track->die_mem_munmap = NULL;
1933
1934 track->pre_mem_read = & eraser_pre_mem_read;
1935 track->pre_mem_read_asciiz = & eraser_pre_mem_read_asciiz;
1936 track->pre_mem_write = & eraser_pre_mem_write;
1937 track->post_mem_write = NULL;
1938
1939 track->post_mutex_lock = & eraser_post_mutex_lock;
1940 track->post_mutex_unlock = & eraser_post_mutex_unlock;
1941
sewardja5b3aec2002-10-22 05:09:36 +00001942 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
1943 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
1944 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
1945 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
1946
1947 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
1948 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
1949 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
1950 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00001951
njn25e49d8e72002-09-23 09:36:25 +00001952 /* Init lock table */
1953 for (i = 0; i < VG_N_THREADS; i++)
1954 thread_locks[i] = 0 /* the empty lock set */;
1955
1956 lockset_table[0] = NULL;
1957 for (i = 1; i < M_LOCKSET_TABLE; i++)
1958 lockset_table[i] = NULL;
1959
1960 init_shadow_memory();
1961}
1962
1963
/* Called after command-line processing; Helgrind has no options of
   its own, so there is nothing to do. */
void SK_(post_clo_init)(void)
{
}
1967
1968
/* Skin shutdown: optionally dump/sanity-check the lockset table,
   then report the total number of possible races seen. */
void SK_(fini)(void)
{
#  if DEBUG_LOCK_TABLE
   pp_all_LockSets();
#  endif
#  if LOCKSET_SANITY
   sanity_check_locksets("SK_(fini)");
#  endif
   VG_(message)(Vg_UserMsg, "%u possible data races found", n_eraser_warnings);
}
1979
1980/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00001981/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00001982/*--------------------------------------------------------------------*/