/*--------------------------------------------------------------------*/
/*--- The Eraser skin: checking for data races in threaded         ---*/
/*--- programs.                                                    ---*/
/*---                                                    hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Valgrind, an x86 protected-mode emulator
   designed for debugging and profiling binaries on x86-Unixes.

   Copyright (C) 2000-2002 Nicholas Nethercote
      njn25@cam.ac.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "vg_skin.h"


static UInt n_eraser_warnings = 0;


/*------------------------------------------------------------*/
/*--- Debug guff                                            ---*/
/*------------------------------------------------------------*/

#define DEBUG_LOCK_TABLE          1  /* Print lock table at end */

#define DEBUG_MAKE_ACCESSES       0  /* Print make_access() calls */
#define DEBUG_LOCKS               0  /* Print lock()/unlock() calls and
                                        locksets */
#define DEBUG_NEW_LOCKSETS        0  /* Print new locksets when created */
#define DEBUG_ACCESSES            0  /* Print reads, writes */
#define DEBUG_MEM_LOCKSET_CHANGES 0  /* Print when an address's lockset
                                        changes; only useful with
                                        DEBUG_ACCESSES */

#define DEBUG_VIRGIN_READS        0  /* Dump around address on VIRGIN reads */

/* Heavyweight LockSet sanity checking:
   0 == never
   1 == after important ops
   2 == as 1, and also after pthread_mutex_* ops (excessively slow)
 */
#define LOCKSET_SANITY 0


/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                            ---*/
/*------------------------------------------------------------*/

// PPP: work out if I want this

#define PROF_EVENT(x)
#if 0
#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

void VGE_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

//static void init_prof_mem ( void ) { }
//       void VG_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif /* VG_PROFILE_MEMORY */

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   [PPP: snip event numbers...]
*/
#endif /* 0 */


/*------------------------------------------------------------*/
/*--- Data defns.                                           ---*/
/*------------------------------------------------------------*/

typedef
   enum { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit }
   VgeInitStatus;

/* Should add up to 32 to fit in one word */
#define OTHER_BITS 30
#define STATE_BITS 2

#define ESEC_MAP_WORDS 16384   /* Words per secondary map */

/* This is for indicating that a memory block has been initialised, but not
 * really directly by a particular thread... (eg. text/data initialised
 * automatically at startup).
 * Must be different to virgin_sword.other */
#define TID_INDICATING_NONVIRGIN 1

/* Number of entries must fit in STATE_BITS bits */
typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;

typedef
   struct {
      UInt other:OTHER_BITS;
      UInt state:STATE_BITS;
   } shadow_word;
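
/* A worked example of the packing above (illustrative only; the thread
   id is made up): in state Vge_Excl with exclusive owner tid 3, the
   shadow word holds .state == Vge_Excl in its 2 state bits and
   .other == 3 in its 30 other bits.  In the Shar/SharMod states the
   same .other field instead holds a lockset table index, which is why
   intersect() below asserts that indices stay under (1 << OTHER_BITS). */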

typedef
   struct {
      shadow_word swords[ESEC_MAP_WORDS];
   }
   ESecMap;

static ESecMap* primary_map[ 65536 ];
static ESecMap  distinguished_secondary_map;

static shadow_word virgin_sword = { 0, Vge_Virgin };

#define VGE_IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                  \
   do {                                                               \
      if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {       \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);     \
         /*VG_(printf)("new 2map because of %p\n", addr);*/           \
      }                                                               \
   } while(0)


/*------------------------------------------------------------*/
/*--- Low-level support for memory tracking.                ---*/
/*------------------------------------------------------------*/

/*
   All reads and writes are recorded in the memory map, which
   records the state of all memory in the process.  The memory map is
   organised like that for normal Valgrind, except that everything
   is done at word-level instead of byte-level, and each word has only
   one word of shadow (instead of 36 bits).

   As for normal Valgrind there is a distinguished secondary map.  But we're
   working at word-granularity, so it has 16k word entries instead of 64k byte
   entries.  Lookup is done as follows:

     bits 31..16:   primary map lookup
     bits 15.. 2:   secondary map lookup
     bits  1.. 0:   ignored
*/
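
/* A worked example of the lookup (illustrative only; the address is
   arbitrary): for a = 0x08049F62,
      primary index   = 0x08049F62 >> 16           = 0x0804
      secondary index = (0x08049F62 & 0xFFFC) >> 2 = 0x27D8
   so the shadow word is primary_map[0x0804]->swords[0x27D8], and that
   single shadow word covers all four bytes 0x08049F60..0x08049F63. */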


/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.         ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map, marking all words as virgin. */

/* Just a value that isn't a real pointer */
#define SEC_MAP_ACCESS (shadow_word*)0x99


static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt     i;
   //PROF_EVENT(10); PPP

   /* An ESecMap occupies a whole number of pages -- 16 pages of 4KB,
      since it holds 16k four-byte shadow words -- although this isn't
      important, so the following assert is spurious. */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(ESecMap), caller );

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}


/* Set a word.  The byte given by 'a' could be anywhere in the word -- the
 * whole word gets set. */
static __inline__
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;

   //PROF_EVENT(23); PPP
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}


static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt     sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}


// SSS: rename these so they're not so similar to memcheck, unless it's
// appropriate of course

static __inline__
void init_virgin_sword(Addr a)
{
   set_sword(a, virgin_sword);
}


/* 'a' is guaranteed to be 4-byte aligned here (not that that's important,
 * really) */
static
void make_writable_aligned ( Addr a, UInt size )
{
   Addr a_past_end = a + size;

   //PROF_EVENT(??) PPP
   sk_assert(IS_ALIGNED4_ADDR(a));

   for ( ; a < a_past_end; a += 4) {
      set_sword(a, virgin_sword);
   }
}

static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;

   sword.other = VG_(get_current_tid_1_if_root)();
   sword.state = Vge_Excl;
   set_sword(a, sword);
}


/* In this case, we treat it for Eraser's sake like virgin (it hasn't
 * been inited by a particular thread, it's just done automatically upon
 * startup), but we mark its .other field specially so it doesn't look
 * like an uninited read. */
static __inline__
void init_magically_inited_sword(Addr a)
{
   shadow_word sword;

   sk_assert(1 == VG_(get_current_tid_1_if_root)());
   sword.other = TID_INDICATING_NONVIRGIN;
   sword.state = Vge_Virgin;
   /* Write the word we just built, so the TID_INDICATING_NONVIRGIN
      marker is actually recorded (plain virgin_sword would lose it). */
   set_sword(a, sword);
}


/*------------------------------------------------------------*/
/*--- Implementation of lock sets.                          ---*/
/*------------------------------------------------------------*/

#define M_LOCKSET_TABLE 1000

#include <pthread.h>

typedef
   struct _LockSet {
      pthread_mutex_t* mutex;
      struct _LockSet* next;
   } LockSet;


/* Each one is an index into the lockset table. */
static UInt thread_locks[VG_N_THREADS];

/* # lockset table entries used. */
static Int n_lockset_table = 1;

/* lockset_table[0] is always NULL, representing the empty lockset */
static LockSet* lockset_table[M_LOCKSET_TABLE];
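
/* An example of the table's invariants (illustrative only, with made-up
   mutex addresses m1 < m2): after one thread locks m1 and then m2, the
   table might read
      lockset_table[0] = NULL           (the empty set)
      lockset_table[1] = {m1}
      lockset_table[2] = {m1, m2}
   Each list is kept sorted by mutex address and each set appears at
   most once, so a lockset is identified by its table index alone. */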


static __inline__
Bool is_valid_lockset_id ( Int id )
{
   return id >= 0 && id < n_lockset_table;
}


static
Int allocate_LockSet(LockSet* set)
{
   if (n_lockset_table >= M_LOCKSET_TABLE)
      VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE");
   lockset_table[n_lockset_table] = set;
   n_lockset_table++;
#  if DEBUG_MEM_LOCKSET_CHANGES || DEBUG_NEW_LOCKSETS
   VG_(printf)("allocate LOCKSET VECTOR %p to %d\n", set, n_lockset_table-1);
#  endif
   return n_lockset_table-1;
}


static
void pp_LockSet(LockSet* p)
{
   VG_(printf)("{ ");
   while (p != NULL) {
      VG_(printf)("%x ", p->mutex);
      p = p->next;
   }
   VG_(printf)("}\n");
}


static __attribute__((unused))
void pp_all_LockSets ( void )
{
   Int i;
   for (i = 0; i < n_lockset_table; i++) {
      VG_(printf)("[%d] = ", i);
      pp_LockSet(lockset_table[i]);
   }
}


static
void free_LockSet(LockSet *p)
{
   LockSet* q;
   while (NULL != p) {
      q = p;
      p = p->next;
      VG_(free)(q);
#     if DEBUG_MEM_LOCKSET_CHANGES
      VG_(printf)("free'd   %x\n", q);
#     endif
   }
}


static
Bool structural_eq_LockSet(LockSet* a, LockSet* b)
{
   while (a && b) {
      if (a->mutex != b->mutex) {
         return False;
      }
      a = a->next;
      b = b->next;
   }
   return (NULL == a && NULL == b);
}


#if LOCKSET_SANITY
/* Check invariants:
   - all locksets are unique
   - each set is a linked list in strictly increasing order of mutex addr
*/
static
void sanity_check_locksets ( Char* caller )
{
   Int              i, j, badness;
   LockSet*         v;
   pthread_mutex_t* mx_prev;

   badness = 0;
   i = j = -1;

   //VG_(printf)("sanity %s\n", caller);
   /* Check really simple things first */

   if (n_lockset_table < 1 || n_lockset_table > M_LOCKSET_TABLE)
      { badness = 1; goto baaad; }

   if (lockset_table[0] != NULL)
      { badness = 2; goto baaad; }

   for (i = 1; i < n_lockset_table; i++)
      if (lockset_table[i] == NULL)
         { badness = 3; goto baaad; }

   for (i = n_lockset_table; i < M_LOCKSET_TABLE; i++)
      if (lockset_table[i] != NULL)
         { badness = 4; goto baaad; }

   /* Check the sanity of each individual set. */
   for (i = 1; i < n_lockset_table; i++) {
      v = lockset_table[i];
      mx_prev = (pthread_mutex_t*)0;
      while (True) {
         if (v == NULL) break;
         if (mx_prev >= v->mutex)
            { badness = 5; goto baaad; }
         mx_prev = v->mutex;
         v = v->next;
      }
   }

   /* Ensure the sets are unique, both structurally and in respect of
      the address of their first nodes. */
   for (i = 1; i < n_lockset_table; i++) {
      for (j = i+1; j < n_lockset_table; j++) {
         if (lockset_table[i] == lockset_table[j])
            { badness = 6; goto baaad; }
         if (structural_eq_LockSet(lockset_table[i], lockset_table[j]))
            { badness = 7; goto baaad; }
      }
   }
   return;

  baaad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, j = %d, badness = %d, caller = %s\n",
               i, j, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}
#endif /* LOCKSET_SANITY */


/* Builds ia with mx removed.  mx should actually be in ia!
   (a checked assertion).  Resulting set should not already
   exist in the table (unchecked).
*/
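/* Example (illustrative only, with made-up contents): if
   lockset_table[ia] is {m1, m2, m3} and mx == m2, a new table entry
   holding {m1, m3} is allocated and its index returned; if mx is
   absent (a buggy client unlocking a mutex it never locked), the new
   list is freed and ia is returned unchanged. */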
static
UInt remove ( UInt ia, pthread_mutex_t* mx )
{
   Int       found, res;
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr   = &new_vector;
   LockSet*  a          = lockset_table[ia];
   sk_assert(is_valid_lockset_id(ia));

#  if DEBUG_MEM_LOCKSET_CHANGES
   VG_(printf)("Removing from %d mutex %p:\n", ia, mx);
   pp_LockSet(a);
#  endif

#  if LOCKSET_SANITY
   sanity_check_locksets("remove-IN");
#  endif

   /* Build a copy of the list with mx dropped.  Advance 'a' on every
      iteration, whether or not this node is kept, so the walk always
      terminates. */
   found = 0;
   while (a) {
      if (a->mutex != mx) {
         new_node = VG_(malloc)(sizeof(LockSet));
#        if DEBUG_MEM_LOCKSET_CHANGES
         VG_(printf)("malloc'd %x\n", new_node);
#        endif
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &((*prev_ptr)->next);
      } else {
         found++;
      }
      *prev_ptr = NULL;
      a = a->next;
   }
   sk_assert(found == 1 /* sigh .. if the client is buggy */ || found == 0);

   /* Preserve uniqueness invariants in face of client bugginess */
   if (found == 0) {
      free_LockSet(new_vector);
      return ia;
   }

   /* Add to the table. */
   res = allocate_LockSet(new_vector);

#  if LOCKSET_SANITY
   sanity_check_locksets("remove-OUT");
#  endif

   return res;
}


/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion.  Returns True if they match.
 */
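/* Example (illustrative only): with a = {m1, m3}, b = {m1, m2, m3} and
   missing_mutex = m2, this returns True, because inserting m2 into a
   would make it structurally equal to b -- but no list is built or
   modified to find that out. */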
static Bool
weird_LockSet_equals(LockSet* a, LockSet* b,
                     pthread_mutex_t* missing_mutex)
{
   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */
   while (True) {
      if (b == NULL)
         break;
      /* deal with missing already being in a */
      if (a && a->mutex == missing_mutex)
         a = a->next;
      /* match current b element either against a or missing */
      if (b->mutex == missing_mutex) {
         b = b->next;
         continue;
      }
      /* wasn't == missing, so have to match from a, or fail */
      if (a && b->mutex == a->mutex) {
         a = a->next;
         b = b->next;
         continue;
      }
      break;
   }
   return (b==NULL ? True : False);
}


/* Builds the intersection, and then unbuilds it if it's already in the
   table. */
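/* Example (illustrative only): intersecting {m1, m2} with {m2, m3}
   walks both sorted lists in step and keeps only m2.  If {m2} already
   sits at some index k, the freshly built list is freed and k is
   returned; otherwise the new list is added and its new index
   returned. */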
static UInt intersect(UInt ia, UInt ib)
{
   Int       i;
   LockSet*  a          = lockset_table[ia];
   LockSet*  b          = lockset_table[ib];
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr   = &new_vector;

#  if DEBUG_MEM_LOCKSET_CHANGES
   VG_(printf)("Intersecting %d %d:\n", ia, ib);
#  endif

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-IN");
#  endif

   /* Fast case -- when the two are the same */
   if (ia == ib) {
#     if DEBUG_MEM_LOCKSET_CHANGES
      VG_(printf)("Fast case -- both the same: %u\n", ia);
      pp_LockSet(a);
#     endif
      return ia;
   }

#  if DEBUG_MEM_LOCKSET_CHANGES
   pp_LockSet(a);
   pp_LockSet(b);
#  endif

   /* Build the intersection of the two lists */
   while (a && b) {
      if (a->mutex == b->mutex) {
         new_node = VG_(malloc)(sizeof(LockSet));
#        if DEBUG_MEM_LOCKSET_CHANGES
         VG_(printf)("malloc'd %x\n", new_node);
#        endif
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &((*prev_ptr)->next);
         a = a->next;
         b = b->next;
      } else if (a->mutex < b->mutex) {
         a = a->next;
      } else if (a->mutex > b->mutex) {
         b = b->next;
      } else VG_(skin_panic)("STOP PRESS: Laws of arithmetic broken");

      *prev_ptr = NULL;
   }

   /* Now search for it in the table, adding it if not seen before */
   for (i = 0; i < n_lockset_table; i++) {
      if (structural_eq_LockSet(lockset_table[i], new_vector))
         break;
   }

   if (i == n_lockset_table) {
      i = allocate_LockSet(new_vector);
   } else {
      free_LockSet(new_vector);
   }

   /* Check we won't overflow the OTHER_BITS bits of sword->other */
   sk_assert(i < (1 << OTHER_BITS));

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-OUT");
#  endif

   return i;
}


/*------------------------------------------------------------*/
/*--- Setting and checking permissions.                     ---*/
/*------------------------------------------------------------*/

static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr aligned_a, end, aligned_end;

#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range state: large range %d",
                   len);

   VGP_PUSHCC(VgpSARP);

   /* The block may be neither word-aligned nor a whole number of words
      long.  Round the start down and the end up to word boundaries, so
      that the loop below visits every word the block overlaps (the
      shadow map works at word granularity, so a partly covered word
      counts as covered). */
   aligned_a   = a & ~3;                /* zero bottom two bits */
   aligned_end = (a + len + 3) & ~3;    /* round up to a word boundary */
   a   = aligned_a;
   end = aligned_end;
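
   /* Example (illustrative only): a = 0x8001, len = 2 covers bytes
      0x8001..0x8002, all inside the word 0x8000..0x8003, so the loop
      runs once with a == 0x8000.  a = 0x8002, len = 4 covers bytes
      0x8002..0x8005 and spans two words, so it runs for 0x8000 and
      0x8004. */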

   /* Do it ... */
   switch (status) {

      case Vge_VirginInit:
         for ( ; a < end; a += 4) {
            //PROF_EVENT(31); PPP
            init_virgin_sword(a);
         }
         break;

      case Vge_NonVirginInit:
         for ( ; a < end; a += 4) {
            //PROF_EVENT(31); PPP
            init_nonvirgin_sword(a);
         }
         break;

      case Vge_SegmentInit:
         for ( ; a < end; a += 4) {
            //PROF_EVENT(31); PPP
            init_magically_inited_sword(a);
         }
         break;

      default:
         VG_(printf)("init_status = %u\n", status);
         VG_(skin_panic)("Unexpected Vge_InitStatus");
   }

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with
      __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSARP);
}


static void make_segment_readable ( Addr a, UInt len )
{
   //PROF_EVENT(??); PPP
   set_address_range_state ( a, len, Vge_SegmentInit );
}

static void make_writable ( Addr a, UInt len )
{
   //PROF_EVENT(36); PPP
   set_address_range_state( a, len, Vge_VirginInit );
}

static void make_readable ( Addr a, UInt len )
{
   //PROF_EVENT(37); PPP
   set_address_range_state( a, len, Vge_NonVirginInit );
}


// SSS: change name
/* Block-copy states (needed for implementing realloc()). */
static void copy_address_range_state(Addr src, Addr dst, UInt len)
{
   UInt i;

   //PROF_EVENT(40); PPP
   for (i = 0; i < len; i += 4) {
      shadow_word sword = *(get_sword_addr ( src+i ));
      //PROF_EVENT(41); PPP
      set_sword ( dst+i, sword );
   }
}

// SSS: put these somewhere better
static void eraser_mem_read (Addr a, UInt data_size);
static void eraser_mem_write(Addr a, UInt data_size);

static
void eraser_pre_mem_read(CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   eraser_mem_read(base, size);
}

static
void eraser_pre_mem_read_asciiz(CorePart part, ThreadState* tst,
                                Char* s, UInt base )
{
   eraser_mem_read(base, VG_(strlen)((Char*)base));
}

static
void eraser_pre_mem_write(CorePart part, ThreadState* tst,
                          Char* s, UInt base, UInt size )
{
   eraser_mem_write(base, size);
}


static
void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   // JJJ: this ignores the permissions and just makes it readable, like the
   // old code did, AFAICT
   make_segment_readable(a, len);
}


static
void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   if (is_inited) {
      make_readable(a, len);
   } else {
      make_writable(a, len);
   }
}

static
void eraser_set_perms (Addr a, UInt len,
                       Bool nn, Bool rr, Bool ww, Bool xx)
{
   if      (rr) make_readable(a, len);
   else if (ww) make_writable(a, len);
   /* else do nothing */
}


/*--------------------------------------------------------------*/
/*--- Initialise the memory audit system on program startup. ---*/
/*--------------------------------------------------------------*/

static
void init_shadow_memory(void)
{
   Int i;

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      distinguished_secondary_map.swords[i] = virgin_sword;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;
}


/*--------------------------------------------------------------*/
/*--- Machinery to support sanity checking                   ---*/
/*--------------------------------------------------------------*/

/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failure of
   the following does not per se indicate an internal consistency
   problem, but it is so likely to that we really want to know about
   it if so. */
Bool SK_(cheap_sanity_check) ( void )
{
   if (VGE_IS_DISTINGUISHED_SM(primary_map[0]) &&
       VGE_IS_DISTINGUISHED_SM(primary_map[65535]))
      return True;
   else
      return False;
}


Bool SK_(expensive_sanity_check)(void)
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
   for (i = 0; i < ESEC_MAP_WORDS; i++)
      if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
          distinguished_secondary_map.swords[i].state != virgin_sword.state)
         return False;

   return True;
}


/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

#define uInstr1   VG_(new_UInstr1)
#define uInstr2   VG_(new_UInstr2)
#define uLiteral  VG_(set_lit_field)
#define uCCall    VG_(set_ccall_fields)
#define newTemp   VG_(get_new_temp)

/* Create and return an instrumented version of cb_in.  Free cb_in
   before returning. */
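/* A sketch of the transformation (illustrative pseudo-UCode only; the
   temp names are invented): a 4-byte LOAD, whose address lives in val1,
        LOAD  4, t_addr, t_val
   comes out roughly as
        MOV   4, $0x4, t_size                    ; literal size -> new temp
        CCALL eraser_mem_read ( t_addr, t_size )
        LOAD  4, t_addr, t_val                   ; the original uinstr
   STORE and FPU_W get the same treatment via eraser_mem_write, and
   FPU_R via eraser_mem_read, with the address taken from val2. */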
UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
{
   UCodeBlock* cb;
   Int         i;
   UInstr*     u_in;
   Int         t_size = INVALID_TEMPREG;

   cb = VG_(alloc_UCodeBlock)();
   cb->nextTemp = cb_in->nextTemp;

   for (i = 0; i < cb_in->used; i++) {
      u_in = &cb_in->instrs[i];

      switch (u_in->opcode) {

         case NOP: case CALLM_S: case CALLM_E:
            break;

         /* For LOAD, address is in val1 */
         case LOAD:
            t_size = newTemp(cb);
            uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
            uLiteral(cb, (UInt)u_in->size);

            sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
                      8 == u_in->size || 10 == u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, u_in->val1, TempReg, t_size);
            // SSS: make regparms(2) eventually...
            uCCall(cb, (Addr) & eraser_mem_read, 2, 0, False);
            VG_(copy_UInstr)(cb, u_in);
            t_size = INVALID_TEMPREG;
            break;

         /* For others, address is in val2.  FPU_R is a memory read, so
            route it to eraser_mem_read; STORE and FPU_W are writes. */
         case STORE: case FPU_R: case FPU_W:
            t_size = newTemp(cb);
            uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
            uLiteral(cb, (UInt)u_in->size);

            sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
                      8 == u_in->size || 10 == u_in->size);
            uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
            uCCall(cb, (FPU_R == u_in->opcode)
                          ? (Addr) & eraser_mem_read
                          : (Addr) & eraser_mem_write,
                   2, 0, False);
            VG_(copy_UInstr)(cb, u_in);
            t_size = INVALID_TEMPREG;
            break;

         default:
            VG_(copy_UInstr)(cb, u_in);
            break;
      }
   }

   VG_(free_UCodeBlock)(cb_in);
   return cb;
}


/*--------------------------------------------------------------------*/
/*--- Error and suppression handling                               ---*/
/*--------------------------------------------------------------------*/

typedef
   enum {
      /* Possible data race */
      EraserSupp
   }
   EraserSuppKind;

/* What kind of error it is. */
typedef
   enum {
      EraserErr
   }
   EraserErrorKind;


static void record_eraser_error ( ThreadId tid, Addr a, Bool is_write )
{
   VG_(maybe_record_error)( VG_(get_ThreadState)(tid), EraserErr, a,
                            (is_write ? "writing" : "reading"),
                            /*extra*/NULL);
}


Bool SK_(eq_SkinError) ( VgRes not_used,
                         SkinError* e1, SkinError* e2 )
{
   sk_assert(EraserErr == e1->ekind && EraserErr == e2->ekind);
   if (e1->string != e2->string) return False;
   if (0 != VG_(strcmp)(e1->string, e2->string)) return False;
   return True;
}


void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
{
   sk_assert(EraserErr == err->ekind);
   VG_(message)(Vg_UserMsg, "Possible data race %s variable at 0x%x",
                err->string, err->addr );
   pp_ExeContext();
}


void SK_(dup_extra_and_update)(SkinError* err)
{
   /* do nothing -- extra field not used, and no need to update */
}


Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
{
   if (0 == VG_(strcmp)(name, "Eraser")) {
      *skind = EraserSupp;
      return True;
   } else {
      return False;
   }
}


Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf,
                                        Int nBuf, SkinSupp* s )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}


Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
{
   sk_assert( su->skind == EraserSupp);
   sk_assert(err->ekind == EraserErr);
   return True;
}


// SSS: copying the mutex's pointer... is that ok?  Could they get
// deallocated?  (does that make sense, deallocating a mutex?)
static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
{
   Int       i = 1;
   LockSet*  new_node;
   LockSet*  p;
   LockSet** q;
   pthread_mutex_t* mutex = (pthread_mutex_t*)void_mutex;

#  if DEBUG_LOCKS
   VG_(printf)("lock  (%u, %x)\n", tid, mutex);
#  endif

   sk_assert(tid < VG_N_THREADS &&
             thread_locks[tid] < M_LOCKSET_TABLE);
   /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_lock-IN");
#  endif

   while (True) {
      if (i == M_LOCKSET_TABLE)
         VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE");

      /* the lockset didn't already exist */
      if (i == n_lockset_table) {

         p = lockset_table[thread_locks[tid]];
         q = &lockset_table[i];

         /* copy the thread's lockset, creating a new list */
         while (p != NULL) {
            new_node = VG_(malloc)(sizeof(LockSet));
            new_node->mutex = p->mutex;
            *q = new_node;
            q = &((*q)->next);
            p = p->next;
         }
         (*q) = NULL;

         /* find spot for the new mutex in the new list */
         p = lockset_table[i];
         q = &lockset_table[i];
         while (NULL != p && mutex > p->mutex) {
            p = p->next;
            q = &((*q)->next);
         }

         /* insert new mutex in new list */
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = mutex;
         new_node->next  = p;
         (*q) = new_node;

         p = lockset_table[i];
         sk_assert(i == n_lockset_table);
         n_lockset_table++;

#        if DEBUG_NEW_LOCKSETS
         VG_(printf)("new lockset vector (%d): ", i);
         pp_LockSet(p);
#        endif

         goto done;

      } else {
         /* If this succeeds, the required vector (with the new mutex
          * added) already exists in the table at position i.
          * Otherwise, keep looking. */
         if (weird_LockSet_equals(lockset_table[thread_locks[tid]],
                                  lockset_table[i], mutex)) {
            goto done;
         }
      }
      /* if we get to here, table lockset didn't match the new thread
       * lockset, so keep looking */
      i++;
   }

  done:
   /* Update the thread's lock vector */
   thread_locks[tid] = i;
#  if DEBUG_LOCKS
   VG_(printf)("tid %u now has lockset %d\n", tid, i);
#  endif

#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_lock-OUT");
#  endif
}


static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
{
   Int i = 0;
   pthread_mutex_t* mutex = (pthread_mutex_t*)void_mutex;

#  if DEBUG_LOCKS
   VG_(printf)("unlock(%u, %x)\n", tid, mutex);
#  endif

#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_unlock-IN");
#  endif

   /* Find the lockset that is the thread's current one minus this
      mutex, and switch the thread over to that index. */

   while (True) {

      if (i == n_lockset_table) {
         /* We can't find a suitable pre-made set, so we'll have to
            make one. */
         i = remove ( thread_locks[tid], mutex );
         break;
      }

      /* Args are in opposite order to call above, for reverse effect */
      if (weird_LockSet_equals( lockset_table[i],
                                lockset_table[thread_locks[tid]], mutex) ) {
         /* found existing diminished set -- the best outcome. */
         break;
      }

      i++;
   }

   /* Update the thread's lock vector */
#  if DEBUG_LOCKS
   VG_(printf)("tid %u reverts from %d to lockset %d\n",
               tid, thread_locks[tid], i);
#  endif

   thread_locks[tid] = i;

#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_unlock-OUT");
#  endif
}


/* ---------------------------------------------------------------------
   Checking memory reads and writes
   ------------------------------------------------------------------ */

/* Behaviour on reads and writes:
 *
 *                       VIR      EXCL     SHAR     SH_MOD
 *  --------------------------------------------------------
 *  rd/wr, 1st thread |  -        EXCL     -        -
 *  rd, new thread    |  -        SHAR     -        -
 *  wr, new thread    |  -        SH_MOD   -        -
 *  rd                |  error!   -        SHAR     SH_MOD
 *  wr                |  EXCL     -        SH_MOD   SH_MOD
 *  --------------------------------------------------------
 */
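
/* A worked scenario (illustrative only): word w starts VIRGIN.  Thread
   1 writes w: it becomes EXCL with owner tid 1.  Thread 2 then reads
   w: EXCL --> SHAR, and w's lockset becomes whatever thread 2 holds.
   Thread 2 later writes w while holding no locks: SHAR --> SH_MOD with
   an empty lockset, so record_eraser_error() fires below. */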

#if 0
static
void dump_around_a(Addr a)
{
   UInt i;
   shadow_word* sword;
   VG_(printf)("NEARBY:\n");
   for (i = a - 12; i <= a + 12; i += 4) {
      sword = get_sword_addr(i);
      VG_(printf)("    %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
   }
}
#endif

/* Find which word the first and last bytes are in (by shifting out bottom 2
 * bits) then find the difference. */
static __inline__
Int compute_num_words_accessed(Addr a, UInt size)
{
   Int x, y, n_words;
   x = a            >> 2;
   y = (a + size-1) >> 2;
   n_words = y - x + 1;
   return n_words;
}
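
/* Example (illustrative only): a = 0x8003, size = 2 touches bytes
   0x8003 and 0x8004; x = 0x8003 >> 2 = 0x2000 and y = 0x8004 >> 2 =
   0x2001, so two words are accessed even though size < 4. */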

#if DEBUG_ACCESSES
   #define DEBUG_STATE(args...)   \
      VG_(printf)("(%u) ", size), \
      VG_(printf)(args)
#else
   #define DEBUG_STATE(args...)
#endif


static void eraser_mem_read(Addr a, UInt size)
{
   shadow_word* sword;
   ThreadId     tid = VG_(get_current_tid_1_if_root)();
   Addr         end = a + 4*compute_num_words_accessed(a, size);

   for ( ; a < end; a += 4) {

      sword = get_sword_addr(a);
      if (sword == SEC_MAP_ACCESS) {
         VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
         continue;
      }

      switch (sword->state) {

         /* This looks like reading of uninitialised memory; it may be
          * legit.  Eg. calloc() zeroes its values, so untouched memory
          * may actually be initialised.  Leave that stuff to Valgrind. */
         case Vge_Virgin:
            if (TID_INDICATING_NONVIRGIN == sword->other) {
               DEBUG_STATE("Read  VIRGIN --> EXCL:   %8x, %u\n", a, tid);
#              if DEBUG_VIRGIN_READS
               dump_around_a(a);
#              endif
            } else {
               DEBUG_STATE("Read  SPECIAL --> EXCL:  %8x, %u\n", a, tid);
            }
            sword->state = Vge_Excl;
            sword->other = tid;       /* remember exclusive owner */
            break;

         case Vge_Excl:
            if (tid == sword->other) {
               DEBUG_STATE("Read  EXCL:              %8x, %u\n", a, tid);

            } else {
               DEBUG_STATE("Read  EXCL(%u) --> SHAR: %8x, %u\n", sword->other, a, tid);
               sword->state = Vge_Shar;
               sword->other = thread_locks[tid];
#              if DEBUG_MEM_LOCKSET_CHANGES
               pp_LockSet(lockset_table[sword->other]);
#              endif
            }
            break;

         case Vge_Shar:
            DEBUG_STATE("Read  SHAR:              %8x, %u\n", a, tid);
            sword->other = intersect(sword->other, thread_locks[tid]);
            break;

         case Vge_SharMod:
            DEBUG_STATE("Read  SHAR_MOD:          %8x, %u\n", a, tid);
            sword->other = intersect(sword->other, thread_locks[tid]);

            if (lockset_table[sword->other] == NULL) {
               record_eraser_error(tid, a, False /* !is_write */);
               n_eraser_warnings++;
            }
            break;

         default:
            VG_(skin_panic)("Unknown eraser state");
      }
   }
}


static void eraser_mem_write(Addr a, UInt size)
{
   shadow_word* sword;
   ThreadId     tid = VG_(get_current_tid_1_if_root)();
   Addr         end = a + 4*compute_num_words_accessed(a, size);

   for ( ; a < end; a += 4) {

      sword = get_sword_addr(a);
      if (sword == SEC_MAP_ACCESS) {
         VG_(printf)("wrote distinguished 2ndary map! 0x%x\n", a);
         continue;
      }

      switch (sword->state) {
         case Vge_Virgin:
            if (TID_INDICATING_NONVIRGIN == sword->other)
               DEBUG_STATE("Write VIRGIN --> EXCL:   %8x, %u\n", a, tid);
            else
               DEBUG_STATE("Write SPECIAL --> EXCL:  %8x, %u\n", a, tid);
            sword->state = Vge_Excl;
            sword->other = tid;       /* remember exclusive owner */
            break;

         case Vge_Excl:
            if (tid == sword->other) {
               DEBUG_STATE("Write EXCL:              %8x, %u\n", a, tid);
               break;

            } else {
               DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sword->other, a, tid);
               sword->state = Vge_SharMod;
               sword->other = thread_locks[tid];
#              if DEBUG_MEM_LOCKSET_CHANGES
               pp_LockSet(lockset_table[sword->other]);
#              endif
               goto SHARED_MODIFIED;
            }

         case Vge_Shar:
            DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
            sword->state = Vge_SharMod;
            sword->other = intersect(sword->other, thread_locks[tid]);
            goto SHARED_MODIFIED;

         case Vge_SharMod:
            DEBUG_STATE("Write SHAR_MOD:          %8x, %u\n", a, tid);
            sword->other = intersect(sword->other, thread_locks[tid]);
           SHARED_MODIFIED:
            if (lockset_table[sword->other] == NULL) {
               record_eraser_error(tid, a, True /* is_write */);
               n_eraser_warnings++;
            }
            break;

         default:
            VG_(skin_panic)("Unknown eraser state");
      }
   }
}

#undef DEBUG_STATE


/*--------------------------------------------------------------------*/
/*--- Setup                                                        ---*/
/*--------------------------------------------------------------------*/

void SK_(pre_clo_init)(VgNeeds* needs, VgTrackEvents* track)
{
   Int i;

   needs->name        = "helgrind";
   needs->description = "a data race detector";   /* njn25@cam.ac.uk */

   needs->core_errors = True;
   needs->skin_errors = True;

   VG_(register_compact_helper)((Addr) & eraser_mem_read);
   VG_(register_compact_helper)((Addr) & eraser_mem_write);

   /* Events to track */
   track->new_mem_startup       = & eraser_new_mem_startup;
   track->new_mem_heap          = & eraser_new_mem_heap;
   track->new_mem_stack         = & make_writable;
   track->new_mem_stack_aligned = & make_writable_aligned;
   track->new_mem_stack_signal  = & make_writable;
   track->new_mem_brk           = & make_writable;
   track->new_mem_mmap          = & eraser_set_perms;

   track->copy_mem_heap         = & copy_address_range_state;
   track->change_mem_mprotect   = & eraser_set_perms;

   track->ban_mem_heap          = NULL;
   track->ban_mem_stack         = NULL;

   track->die_mem_heap          = NULL;
   track->die_mem_stack         = NULL;
   track->die_mem_stack_aligned = NULL;
   track->die_mem_stack_signal  = NULL;
   track->die_mem_brk           = NULL;
   track->die_mem_munmap        = NULL;

   track->pre_mem_read          = & eraser_pre_mem_read;
   track->pre_mem_read_asciiz   = & eraser_pre_mem_read_asciiz;
   track->pre_mem_write         = & eraser_pre_mem_write;
   track->post_mem_write        = NULL;

   track->post_mutex_lock       = & eraser_post_mutex_lock;
   track->post_mutex_unlock     = & eraser_post_mutex_unlock;

   /* Init lock table */
   for (i = 0; i < VG_N_THREADS; i++)
      thread_locks[i] = 0 /* the empty lock set */;

   lockset_table[0] = NULL;
   for (i = 1; i < M_LOCKSET_TABLE; i++)
      lockset_table[i] = NULL;

   init_shadow_memory();
}


void SK_(post_clo_init)(void)
{
}


void SK_(fini)(void)
{
#  if DEBUG_LOCK_TABLE
   pp_all_LockSets();
#  endif
#  if LOCKSET_SANITY
   sanity_check_locksets("SK_(fini)");
#  endif
   VG_(message)(Vg_UserMsg, "%u possible data races found", n_eraser_warnings);
}

/*--------------------------------------------------------------------*/
/*--- end                                                hg_main.c ---*/
/*--------------------------------------------------------------------*/