blob: 6cc7b7a7ec6eb2a0172edeb0299fd4f254e4029d [file] [log] [blame]
njnc9539842002-10-02 13:26:35 +00001
njn25e49d8e72002-09-23 09:36:25 +00002/*--------------------------------------------------------------------*/
njnc9539842002-10-02 13:26:35 +00003/*--- Helgrind: checking for data races in threaded programs. ---*/
njn25cac76cb2002-09-23 11:21:57 +00004/*--- hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00005/*--------------------------------------------------------------------*/
6
7/*
njnc9539842002-10-02 13:26:35 +00008 This file is part of Helgrind, a Valgrind skin for detecting
9 data races in threaded programs.
njn25e49d8e72002-09-23 09:36:25 +000010
11 Copyright (C) 2000-2002 Nicholas Nethercote
12 njn25@cam.ac.uk
13
14 This program is free software; you can redistribute it and/or
15 modify it under the terms of the GNU General Public License as
16 published by the Free Software Foundation; either version 2 of the
17 License, or (at your option) any later version.
18
19 This program is distributed in the hope that it will be useful, but
20 WITHOUT ANY WARRANTY; without even the implied warranty of
21 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
22 General Public License for more details.
23
24 You should have received a copy of the GNU General Public License
25 along with this program; if not, write to the Free Software
26 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
27 02111-1307, USA.
28
29 The GNU General Public License is contained in the file COPYING.
30*/
31
32#include "vg_skin.h"
33
34
35static UInt n_eraser_warnings = 0;
36
37
38/*------------------------------------------------------------*/
39/*--- Debug guff ---*/
40/*------------------------------------------------------------*/
41
42#define DEBUG_LOCK_TABLE 1 /* Print lock table at end */
43
44#define DEBUG_MAKE_ACCESSES 0 /* Print make_access() calls */
45#define DEBUG_LOCKS 0 /* Print lock()/unlock() calls and locksets */
46#define DEBUG_NEW_LOCKSETS 0 /* Print new locksets when created */
47#define DEBUG_ACCESSES 0 /* Print reads, writes */
48#define DEBUG_MEM_LOCKSET_CHANGES 0
49 /* Print when an address's lockset
50 changes; only useful with
51 DEBUG_ACCESSES */
52
53#define DEBUG_VIRGIN_READS 0 /* Dump around address on VIRGIN reads */
54
55/* heavyweight LockSet sanity checking:
56 0 == never
57 1 == after important ops
58 2 == As 1 and also after pthread_mutex_* ops (excessively slow)
59 */
60#define LOCKSET_SANITY 0
61
62
63/*------------------------------------------------------------*/
64/*--- Crude profiling machinery. ---*/
65/*------------------------------------------------------------*/
66
67// PPP: work out if I want this
68
69#define PROF_EVENT(x)
70#if 0
71#ifdef VG_PROFILE_MEMORY
72
73#define N_PROF_EVENTS 150
74
75static UInt event_ctr[N_PROF_EVENTS];
76
77void VGE_(done_prof_mem) ( void )
78{
79 Int i;
80 for (i = 0; i < N_PROF_EVENTS; i++) {
81 if ((i % 10) == 0)
82 VG_(printf)("\n");
83 if (event_ctr[i] > 0)
84 VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
85 }
86 VG_(printf)("\n");
87}
88
89#define PROF_EVENT(ev) \
njne427a662002-10-02 11:08:25 +000090 do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS); \
njn25e49d8e72002-09-23 09:36:25 +000091 event_ctr[ev]++; \
92 } while (False);
93
94#else
95
96//static void init_prof_mem ( void ) { }
97// void VG_(done_prof_mem) ( void ) { }
98
99#define PROF_EVENT(ev) /* */
100
101#endif /* VG_PROFILE_MEMORY */
102
103/* Event index. If just the name of the fn is given, this means the
104 number of calls to the fn. Otherwise it is the specified event.
105
106 [PPP: snip event numbers...]
107*/
108#endif /* 0 */
109
110
111/*------------------------------------------------------------*/
112/*--- Data defns. ---*/
113/*------------------------------------------------------------*/
114
115typedef enum
116 { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit }
117 VgeInitStatus;
118
119/* Should add up to 32 to fit in one word */
120#define OTHER_BITS 30
121#define STATE_BITS 2
122
123#define ESEC_MAP_WORDS 16384 /* Words per secondary map */
124
125/* This is for indicating that a memory block has been initialised but not
126 * really directly by a particular thread... (eg. text/data initialised
127 * automatically at startup).
128 * Must be different to virgin_word.other */
129#define TID_INDICATING_NONVIRGIN 1
130
sewardj16748af2002-10-22 04:55:54 +0000131/* Magic TID used for error suppression; if word state is Excl and tid
132 is this, then it means all access are OK without changing state and
133 without raising any more errors */
134#define TID_INDICATING_ALL ((1 << OTHER_BITS) - 1)
135
njn25e49d8e72002-09-23 09:36:25 +0000136/* Number of entries must fit in STATE_BITS bits */
137typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;
138
139typedef
140 struct {
141 UInt other:OTHER_BITS;
142 UInt state:STATE_BITS;
143 } shadow_word;
144
145typedef
146 struct {
147 shadow_word swords[ESEC_MAP_WORDS];
148 }
149 ESecMap;
150
151static ESecMap* primary_map[ 65536 ];
152static ESecMap distinguished_secondary_map;
153
154static shadow_word virgin_sword = { 0, Vge_Virgin };
155
156#define VGE_IS_DISTINGUISHED_SM(smap) \
157 ((smap) == &distinguished_secondary_map)
158
159#define ENSURE_MAPPABLE(addr,caller) \
160 do { \
161 if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) { \
162 primary_map[(addr) >> 16] = alloc_secondary_map(caller); \
163 /*VG_(printf)("new 2map because of %p\n", addr);*/ \
164 } \
165 } while(0)
166
167
168/*------------------------------------------------------------*/
169/*--- Low-level support for memory tracking. ---*/
170/*------------------------------------------------------------*/
171
172/*
173 All reads and writes are recorded in the memory map, which
174 records the state of all memory in the process. The memory map is
175 organised like that for normal Valgrind, except each that everything
176 is done at word-level instead of byte-level, and each word has only
177 one word of shadow (instead of 36 bits).
178
179 As for normal Valgrind there is a distinguished secondary map. But we're
180 working at word-granularity, so it has 16k word entries instead of 64k byte
181 entries. Lookup is done as follows:
182
183 bits 31..16: primary map lookup
184 bits 15.. 2: secondary map lookup
185 bits 1.. 0: ignored
186*/
187
188
189/*------------------------------------------------------------*/
190/*--- Basic bitmap management, reading and writing. ---*/
191/*------------------------------------------------------------*/
192
193/* Allocate and initialise a secondary map, marking all words as virgin. */
194
195/* Just a value that isn't a real pointer */
196#define SEC_MAP_ACCESS (shadow_word*)0x99
197
198
/* Allocate and initialise one secondary shadow map, with every word
   set to `virgin_sword`.  Called lazily (via ENSURE_MAPPABLE) the
   first time an address in a 64KB region is shadowed.  `caller` is
   only used for the core's mmap bookkeeping message.
   Memory comes from VG_(get_memory_from_mmap) and is never freed. */
static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt i;
   //PROF_EVENT(10); PPP

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. (SSS: not true for ESecMaps -- they're 16 pages) */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(ESecMap), caller );

   /* Mark every shadow word as virgin (untouched by any thread). */
   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}
217
218
219/* Set a word. The byte give by 'a' could be anywhere in the word -- the whole
220 * word gets set. */
/* Set a word. The byte given by 'a' could be anywhere in the word -- the
 * whole word gets set.  Allocates the secondary map on demand, so this
 * never writes into the distinguished (shared, read-only) secondary map. */
static __inline__
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;

   //PROF_EVENT(23); PPP
   /* Make sure a writable secondary map exists for this 64KB region. */
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   sm->swords[(a & 0xFFFC) >> 2] = sword;

   /* NOTE(review): unreachable -- the sk_assert above already rules out
      the distinguished map; kept as belt-and-braces for non-assert builds. */
   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}
240
241
/* Return the address of the shadow word covering address 'a'.
   If 'a' falls in the distinguished (never-written) secondary map,
   returns the sentinel SEC_MAP_ACCESS instead of a real pointer --
   callers must not dereference that value. */
static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt    sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}
259
260
261// SSS: rename these so they're not so similar to memcheck, unless it's
262// appropriate of course
263
/* Reset the shadow word for 'a' to the virgin (never accessed) state. */
static __inline__
void init_virgin_sword(Addr a)
{
   set_sword(a, virgin_sword);
}
269
270
271/* 'a' is guaranteed to be 4-byte aligned here (not that that's important,
272 * really) */
/* 'a' is guaranteed to be 4-byte aligned here (not that that's important,
 * really).  Marks 'size' bytes starting at 'a' as virgin, one shadow
 * word (4 bytes) at a time. */
static
void make_writable_aligned ( Addr a, UInt size )
{
   Addr a_past_end = a + size;

   //PROF_EVENT(??) PPP
   sk_assert(IS_ALIGNED4_ADDR(a));

   for ( ; a < a_past_end; a += 4) {
      set_sword(a, virgin_sword);
   }
}
285
/* Mark the word at 'a' as exclusively owned (Vge_Excl) by the current
   (or most recently running) thread -- used for memory a thread has
   directly initialised, e.g. via malloc or a write. */
static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();

   sk_assert(tid != VG_INVALID_THREADID);
   sword.other = tid;              /* owner thread */
   sword.state = Vge_Excl;         /* exclusive access so far */
   set_sword(a, sword);
}
297
298
299/* In this case, we treat it for Eraser's sake like virgin (it hasn't
300 * been inited by a particular thread, it's just done automatically upon
301 * startup), but we mark its .state specially so it doesn't look like an
302 * uninited read. */
303static __inline__
304void init_magically_inited_sword(Addr a)
305{
306 shadow_word sword;
307
sewardjb52a1b02002-10-23 21:38:22 +0000308 sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());
njn25e49d8e72002-09-23 09:36:25 +0000309 sword.other = TID_INDICATING_NONVIRGIN;
310 sword.state = Vge_Virgin;
311 set_sword(a, virgin_sword);
312}
313
sewardjc26cc252002-10-23 21:58:55 +0000314
sewardj274c6012002-10-22 04:54:55 +0000315/*------------------------------------------------------------*/
sewardjc26cc252002-10-23 21:58:55 +0000316/*--- Implementation of lock sets. ---*/
sewardj274c6012002-10-22 04:54:55 +0000317/*------------------------------------------------------------*/
318
sewardjc26cc252002-10-23 21:58:55 +0000319typedef struct hg_mutex hg_mutex_t; /* forward decl */
sewardj16748af2002-10-22 04:55:54 +0000320typedef enum MutexState {
321 MxUnknown, /* don't know */
322 MxUnlocked, /* unlocked */
323 MxLocked, /* locked */
324 MxDead /* destroyed */
325} MutexState;
326
sewardjc26cc252002-10-23 21:58:55 +0000327struct hg_mutex {
sewardj274c6012002-10-22 04:54:55 +0000328 void *mutexp;
sewardj274c6012002-10-22 04:54:55 +0000329 struct hg_mutex *next;
sewardj16748af2002-10-22 04:55:54 +0000330
331 MutexState state; /* mutex state */
332 ThreadId tid; /* owner */
333 ExeContext *location; /* where the last change happened */
sewardj274c6012002-10-22 04:54:55 +0000334
sewardjc26cc252002-10-23 21:58:55 +0000335 UInt lockdep; /* set of locks we depend on */
336 UInt mark; /* mark for graph traversal */
337};
sewardj16748af2002-10-22 04:55:54 +0000338
sewardjc26cc252002-10-23 21:58:55 +0000339static Int mutex_cmp(const hg_mutex_t *a, const hg_mutex_t *b);
njn25e49d8e72002-09-23 09:36:25 +0000340
sewardj65f13702002-10-23 22:45:08 +0000341#define M_LOCKSET_TABLE 5000
njn25e49d8e72002-09-23 09:36:25 +0000342
sewardj274c6012002-10-22 04:54:55 +0000343struct _LockSet {
344 hg_mutex_t *mutex;
345 struct _LockSet* next;
346};
sewardjc26cc252002-10-23 21:58:55 +0000347typedef struct _LockSet LockSet;
njn25e49d8e72002-09-23 09:36:25 +0000348
349/* Each one is an index into the lockset table. */
350static UInt thread_locks[VG_N_THREADS];
351
352/* # lockset table entries used. */
353static Int n_lockset_table = 1;
354
355/* lockset_table[0] is always NULL, representing the empty lockset */
356static LockSet* lockset_table[M_LOCKSET_TABLE];
357
358
359static __inline__
360Bool is_valid_lockset_id ( Int id )
361{
362 return id >= 0 && id < n_lockset_table;
363}
364
365
/* Append 'set' to lockset_table and return its new index.
   Panics (rather than degrading) if the fixed-size table is full.
   Caller must ensure 'set' is not structurally equal to an existing
   entry, to preserve the uniqueness invariant. */
static
Int allocate_LockSet(LockSet* set)
{
   static const Bool debug = False;

   if (n_lockset_table >= M_LOCKSET_TABLE)
      VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE");
   lockset_table[n_lockset_table] = set;
   n_lockset_table++;
   if (debug || DEBUG_MEM_LOCKSET_CHANGES || DEBUG_NEW_LOCKSETS)
      VG_(printf)("allocate LOCKSET VECTOR %p to %d\n", set, n_lockset_table-1);

   return n_lockset_table-1;
}
380
381
382static
383void pp_LockSet(LockSet* p)
384{
385 VG_(printf)("{ ");
386 while (p != NULL) {
sewardjc26cc252002-10-23 21:58:55 +0000387 VG_(printf)("%p%(y ", p->mutex->mutexp, p->mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +0000388 p = p->next;
389 }
390 VG_(printf)("}\n");
391}
392
393
394static __attribute__((unused))
395void pp_all_LockSets ( void )
396{
397 Int i;
398 for (i = 0; i < n_lockset_table; i++) {
399 VG_(printf)("[%d] = ", i);
400 pp_LockSet(lockset_table[i]);
401 }
402}
403
404
405static
406void free_LockSet(LockSet *p)
407{
408 LockSet* q;
409 while (NULL != p) {
410 q = p;
411 p = p->next;
412 VG_(free)(q);
413# if DEBUG_MEM_LOCKSET_CHANGES
414 VG_(printf)("free'd %x\n", q);
415# endif
416 }
417}
418
419
420static
421Bool structural_eq_LockSet(LockSet* a, LockSet* b)
422{
423 while (a && b) {
sewardj274c6012002-10-22 04:54:55 +0000424 if (mutex_cmp(a->mutex, b->mutex) != 0) {
njn25e49d8e72002-09-23 09:36:25 +0000425 return False;
426 }
427 a = a->next;
428 b = b->next;
429 }
430 return (NULL == a && NULL == b);
431}
432
433
njn25e49d8e72002-09-23 09:36:25 +0000434/* Check invariants:
435 - all locksets are unique
436 - each set is a linked list in strictly increasing order of mutex addr
437*/
/* Heavyweight invariant checker for the lockset table; panics with a
   numbered `badness` code on the first violation found.  Checks, in order:
     1/2  table bounds and the NULL empty-set at index 0
     3/4  allocated entries non-NULL, unallocated entries NULL
     5    each set strictly increasing by mutex_cmp order
     6/7  sets pairwise distinct, by pointer and structurally.
   `caller` is only used in the failure diagnostic. */
static
void sanity_check_locksets ( Char* caller )
{
   Int              i, j, badness;
   LockSet*         v;
   hg_mutex_t       mx_prev;

   badness = 0;
   i = j = -1;

   //VG_(printf)("sanity %s\n", caller);
   /* Check really simple things first */

   if (n_lockset_table < 1 || n_lockset_table > M_LOCKSET_TABLE)
      { badness = 1; goto baaad; }

   if (lockset_table[0] != NULL)
      { badness = 2; goto baaad; }

   for (i = 1; i < n_lockset_table; i++)
      if (lockset_table[i] == NULL)
         { badness = 3; goto baaad; }

   for (i = n_lockset_table; i < M_LOCKSET_TABLE; i++)
      if (lockset_table[i] != NULL)
         { badness = 4; goto baaad; }

   /* Check the sanity of each individual set: strictly ascending
      mutex order (mx_prev.mutexp == NULL sorts before everything). */
   for (i = 1; i < n_lockset_table; i++) {
      v = lockset_table[i];
      mx_prev.mutexp = NULL;
      while (True) {
         if (v == NULL) break;
         if (mutex_cmp(&mx_prev, v->mutex) >= 0)
            { badness = 5; goto baaad; }
         mx_prev = *v->mutex;
         v = v->next;
      }
   }

   /* Ensure the sets are unique, both structurally and in respect of
      the address of their first nodes. */
   for (i = 1; i < n_lockset_table; i++) {
      for (j = i+1; j < n_lockset_table; j++) {
         if (lockset_table[i] == lockset_table[j])
            { badness = 6; goto baaad; }
         if (structural_eq_LockSet(lockset_table[i], lockset_table[j]))
            { badness = 7; goto baaad; }
      }
   }
   return;

  baaad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, j = %d, badness = %d, caller = %s\n",
               i, j, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}
njn25e49d8e72002-09-23 09:36:25 +0000497
sewardjc26cc252002-10-23 21:58:55 +0000498static void print_LockSet(const char *s, LockSet *ls)
499{
500 if (!ls) {
501 VG_(printf)("%s: empty\n", s);
502 } else {
503 VG_(printf)("%s: ", s);
504 for(; ls; ls = ls->next)
505 VG_(printf)("%p%(y, ", ls->mutex->mutexp, ls->mutex->mutexp);
506 VG_(printf)("\n");
507 }
508}
njn25e49d8e72002-09-23 09:36:25 +0000509
510/* Builds ia with mx removed. mx should actually be in ia!
511 (a checked assertion). Resulting set should not already
512 exist in the table (unchecked).
513*/
/* Build (and register) the lockset that is lockset_table[ia] with mx
   removed.  mx should actually be in set ia (checked by assertion,
   tolerating 0 occurrences for buggy clients).  Returns the index of
   the new set, or ia unchanged if mx was not present.  The resulting
   set must not already exist in the table (unchecked). */
static
UInt remove ( UInt ia, hg_mutex_t *mx )
{
   static const Bool debug = False;
   Int       found, res;
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;
   LockSet*  a = lockset_table[ia];
   sk_assert(is_valid_lockset_id(ia));

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("Removing %p%(y from mutex %p%(y:\n",
                  a->mutex->mutexp, a->mutex->mutexp,
                  mx->mutexp, mx->mutexp);
      print_LockSet("remove-IN", a);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("remove-IN");

   /* Build the new list: copy every node except the one(s) equal to mx. */
   found = 0;
   while (a) {
      if (mutex_cmp(a->mutex, mx) != 0) {
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &((*prev_ptr)->next);
      } else {
         found++;
      }
      /* Keep the list NULL-terminated at every step. */
      *prev_ptr = NULL;
      a = a->next;
   }
   sk_assert(found == 1 /* sigh .. if the client is buggy */ || found == 0 );

   /* Preserve uniqueness invariants in face of client buggyness */
   if (found == 0) {
      free_LockSet(new_vector);
      return ia;
   }

   /* Add to the table. */
   res = allocate_LockSet(new_vector);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("remove-OUT", new_vector);
      sanity_check_locksets("remove-OUT");
   }
   return res;
}
566
567
568/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
569 * doesn't do the insertion. Returns True if they match.
570 */
/* Tricky: equivalent to (compare(insert(missing_mutex, a), b)), but
 * doesn't do the insertion.  Returns True if they match.  Relies on
 * both lists being sorted in ascending mutex_cmp order. */
static Bool
weird_LockSet_equals(LockSet* a, LockSet* b,
                     hg_mutex_t *missing_mutex)
{
   static const Bool debug = False;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet("                     b", b);
      VG_(printf)(  "               missing: %p%(y\n",
                    missing_mutex->mutexp, missing_mutex->mutexp);
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing mutex itself
      3 the section after missing_mutex to the end of a
    */

   /* 1: up to missing_mutex -- a and b must match pairwise. */
   while(a && mutex_cmp(a->mutex, missing_mutex) < 0) {
      if (debug) {
         print_LockSet("     1:a", a);
         print_LockSet("     1:b", b);
      }
      if (b == NULL || mutex_cmp(a->mutex, b->mutex) != 0)
         return False;

      a = a->next;
      b = b->next;
   }

   /* 2: missing_mutex itself must be the next element of b. */
   if (debug) {
      VG_(printf)(  " 2:missing: %p%(y\n",
                    missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet("       2: b", b);
   }

   sk_assert(a == NULL || mutex_cmp(a->mutex, missing_mutex) >= 0);

   if (b == NULL || mutex_cmp(missing_mutex, b->mutex) != 0)
      return False;

   b = b->next;

   /* 3: after missing_mutex to end -- remaining tails must match. */
   while(a && b) {
      if (debug) {
         print_LockSet("     3:a", a);
         print_LockSet("     3:b", b);
      }
      if (mutex_cmp(a->mutex, b->mutex) != 0)
         return False;
      a = a->next;
      b = b->next;
   }

   if (debug)
      VG_(printf)("  a=%p b=%p --> %d\n", a, b, (a == NULL) && (b == NULL));

   return (a == NULL) && (b == NULL);
}
638
639
640/* Builds the intersection, and then unbuilds it if it's already in the table.
641 */
/* Builds the intersection of locksets ia and ib, and then unbuilds it
   if it's already in the table.  Returns the table index of the
   intersection.  Both input lists are sorted, so this is a standard
   sorted-list merge keeping only common elements. */
static UInt intersect(UInt ia, UInt ib)
{
   Int       i;
   LockSet*  a = lockset_table[ia];
   LockSet*  b = lockset_table[ib];
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;

#  if DEBUG_MEM_LOCKSET_CHANGES
   VG_(printf)("Intersecting %d %d:\n", ia, ib);
#  endif

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-IN");
#  endif

   /* Fast case -- when the two are the same */
   if (ia == ib) {
#     if DEBUG_MEM_LOCKSET_CHANGES
      VG_(printf)("Fast case -- both the same: %u\n", ia);
      print_LockSet("intersect-same", a);
#     endif
      return ia;
   }

#  if DEBUG_MEM_LOCKSET_CHANGES
   print_LockSet("intersect a", a);
   print_LockSet("intersect b", b);
#  endif

   /* Build the intersection of the two lists */
   while (a && b) {
      if (mutex_cmp(a->mutex, b->mutex) == 0) {
         /* Common element: copy it into the result. */
         new_node = VG_(malloc)(sizeof(LockSet));
#        if DEBUG_MEM_LOCKSET_CHANGES
         VG_(printf)("malloc'd %x\n", new_node);
#        endif
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &((*prev_ptr)->next);
         a = a->next;
         b = b->next;
      } else if (mutex_cmp(a->mutex, b->mutex) < 0) {
         a = a->next;
      } else if (mutex_cmp(a->mutex, b->mutex) > 0) {
         b = b->next;
      } else VG_(skin_panic)("STOP PRESS: Laws of arithmetic broken");

      /* Keep the list NULL-terminated at every step. */
      *prev_ptr = NULL;
   }

   /* Now search for it in the table, adding it if not seen before */
   for (i = 0; i < n_lockset_table; i++) {
      if (structural_eq_LockSet(lockset_table[i], new_vector))
         break;
   }

   if (i == n_lockset_table) {
      i = allocate_LockSet(new_vector);
   } else {
      free_LockSet(new_vector);
   }

   /* Check we won't overflow the OTHER_BITS bits of sword->other */
   sk_assert(i < (1 << OTHER_BITS));

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-OUT");
#  endif

   return i;
}
715
sewardjc26cc252002-10-23 21:58:55 +0000716/* Builds the union, and then unbuilds it if it's already in the table.
717 */
/* Builds the union of locksets ia and ib, and then unbuilds it if it's
   already in the table.  Returns the table index of the union.  Both
   input lists are sorted, so this is a standard sorted-list merge
   keeping all elements of either list exactly once. */
static UInt ls_union(UInt ia, UInt ib)
{
   Int       i;
   LockSet*  a = lockset_table[ia];
   LockSet*  b = lockset_table[ib];
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;

   if(DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("Unionizing %d %d:\n", ia, ib);
      sanity_check_locksets("union-IN");
   }

   /* Fast case -- when the two are the same */
   if (ia == ib) {
      if(DEBUG_MEM_LOCKSET_CHANGES) {
         VG_(printf)("Fast case -- both the same: %u\n", ia);
         print_LockSet("union same", a);
      }
      return ia;
   }

   if (DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* Build the union of the two lists */
   while (a || b) {
      if (a && b && mutex_cmp(a->mutex, b->mutex) == 0) {
         /* In both: emit once, advance both. */
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &new_node->next;
         a = a->next;
         b = b->next;
      } else if (!b || (a && b && mutex_cmp(a->mutex, b->mutex) < 0)) {
         /* Only in a (or b exhausted): emit a's element. */
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &new_node->next;
         a = a->next;
      } else if (!a || (a && b && mutex_cmp(a->mutex, b->mutex) > 0)) {
         /* Only in b (or a exhausted): emit b's element. */
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = b->mutex;
         *prev_ptr = new_node;
         prev_ptr = &new_node->next;
         b = b->next;
      }

      /* Keep the list NULL-terminated at every step. */
      *prev_ptr = NULL;
   }

   /* Now search for it in the table, adding it if not seen before */
   for (i = 0; i < n_lockset_table; i++) {
      if (structural_eq_LockSet(lockset_table[i], new_vector))
         break;
   }

   if (i == n_lockset_table) {
      i = allocate_LockSet(new_vector);
   } else {
      free_LockSet(new_vector);
   }

   /* Check we won't overflow the OTHER_BITS bits of sword->other */
   sk_assert(i < (1 << OTHER_BITS));

   if (LOCKSET_SANITY)
      sanity_check_locksets("union-OUT");

   if (DEBUG_MEM_LOCKSET_CHANGES)
      VG_(printf)("union -> %d\n", i);
   return i;
}
794
795/*------------------------------------------------------------*/
796/*--- Implementation of mutex structure. ---*/
797/*------------------------------------------------------------*/
798
799#define M_MUTEX_HASHSZ 1023
800
801static UInt graph_mark; /* current mark we're using for graph traversal */
802
803static void record_mutex_error(ThreadId tid, hg_mutex_t *mutex,
804 Char *str, ExeContext *ec);
805
806static hg_mutex_t *mutex_hash[M_MUTEX_HASHSZ];
807
808static Int mutex_cmp(const hg_mutex_t *a, const hg_mutex_t *b)
809{
810 return (UInt)a->mutexp - (UInt)b->mutexp;
811}
812
813/* find or create an hg_mutex for a program's mutex use */
/* find or create an hg_mutex for a program's mutex use.
   Lookup is by the client's mutex address in a fixed-size chained hash
   table; on a miss a new record is allocated and initialised to the
   "never seen" state (MxUnknown, no owner, empty lock dependencies,
   mark set so it reads as unvisited by the current graph traversal). */
static hg_mutex_t *get_mutex(void *mutexp)
{
   UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
   hg_mutex_t *mp;

   for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
      if (mp->mutexp == mutexp)
         return mp;

   /* Not seen before: allocate and link at the head of the bucket. */
   mp = VG_(malloc)(sizeof(*mp));
   mp->mutexp = mutexp;
   mp->next = mutex_hash[bucket];
   mutex_hash[bucket] = mp;

   mp->state = MxUnknown;
   mp->tid = VG_INVALID_THREADID;
   mp->location = NULL;

   mp->lockdep = 0;                 /* empty lockset */
   mp->mark = graph_mark - 1;       /* guaranteed "not visited" */

   return mp;
}
837
838static const char *pp_MutexState(MutexState st)
839{
840 switch(st) {
841 case MxLocked: return "Locked";
842 case MxUnlocked: return "Unlocked";
843 case MxDead: return "Dead";
844 case MxUnknown: return "Unknown";
845 }
846 return "???";
847}
848
849#define MARK_LOOP (graph_mark+0)
850#define MARK_DONE (graph_mark+1)
851
/* Depth-first search over the lock-dependency graph starting at
   'mutex', following each lockset element's own lockdep set.
   Returns True iff a cycle is found.  Uses the two-colour mark scheme:
   MARK_LOOP = on the current DFS path (revisit => cycle),
   MARK_DONE = fully explored (revisit => no cycle via this node).
   Marks are invalidated wholesale by check_cycle bumping graph_mark. */
static Bool check_cycle_inner(hg_mutex_t *mutex, LockSet *ls)
{
   static const Bool debug = False;

   if (mutex->mark == MARK_LOOP)
      return True;                  /* found cycle */
   if (mutex->mark == MARK_DONE)
      return False;                 /* been here before, its OK */

   mutex->mark = MARK_LOOP;

   if (debug)
      VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
                  graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
   for(; ls != NULL; ls = ls->next) {
      if (debug)
         VG_(printf)("  %y ls=%p (ls->mutex=%p%(y)\n",
                     mutex->mutexp, ls,
                     ls->mutex ? ls->mutex->mutexp : 0,
                     ls->mutex ? ls->mutex->mutexp : 0);
      /* Recurse into each mutex this one depends on. */
      if (check_cycle_inner(ls->mutex, lockset_table[ls->mutex->lockdep]))
         return True;
   }
   mutex->mark = MARK_DONE;

   return False;
}
879
/* Would adding 'lockset' as a dependency of 'start' create a cycle in
   the lock-order graph?  Bumping graph_mark by 2 invalidates all
   previous MARK_LOOP/MARK_DONE marks in one step. */
static Bool check_cycle(hg_mutex_t *start, UInt lockset)
{
   graph_mark += 2;             /* clear all marks */

   return check_cycle_inner(start, lockset_table[lockset]);
}
886
887/* catch bad mutex state changes (though the common ones are handled
888 by core) */
/* catch bad mutex state changes (though the common ones are handled
   by core).  Transitions 'mutex' to 'state' on behalf of thread 'tid',
   reporting errors for: any operation on a destroyed mutex, taking a
   lock already held by another thread, lock-order violations (via
   check_cycle), unlocking a non-locked mutex, and unlocking a mutex
   owned by someone else.  Records the location of every state change
   so later errors can point at it. */
static void set_mutex_state(hg_mutex_t *mutex, MutexState state,
                            ThreadId tid, ThreadState *tst)
{
   static const Bool debug = False;

   if (debug)
      VG_(printf)("\ntid %d changing mutex (%p %y)->%p state %s -> %s\n",
                  tid, mutex, mutex->mutexp, mutex->mutexp,
                  pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex,
                         "operate on dead mutex", mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      if (mutex->state == MxLocked && mutex->tid != tid)
         record_mutex_error(tid, mutex, "take already held lock", mutex->location);

      /* A mutex's own lockdep set must never already contain a cycle. */
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
         print_LockSet("thread holding", lockset_table[thread_locks[tid]]);

      /* Taking this lock while holding thread_locks[tid]: does that
         invert an already-recorded lock order? */
      if (check_cycle(mutex, thread_locks[tid]))
         record_mutex_error(tid, mutex, "take lock before dependent locks", NULL);
      else {
         /* No inversion: extend this mutex's dependency set. */
         mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

         if (debug) {
            VG_(printf)("giving mutex %p%(y lockdep = %d ",
                        mutex->mutexp, mutex->mutexp, mutex->lockdep);
            print_LockSet("lockdep", lockset_table[mutex->lockdep]);
         }
      }
      mutex->tid = tid;
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", lockset_table[thread_locks[tid]]);

      if (mutex->state != MxLocked) {
         record_mutex_error(tid, mutex,
                            "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
         record_mutex_error(tid, mutex,
                            "unlock someone else's mutex", mutex->location);
      }
      mutex->tid = VG_INVALID_THREADID;
      break;

   default:
      break;
   }

   /* Remember where (and to what) the state last changed. */
   mutex->location = VG_(get_ExeContext)(tst);
   mutex->state = state;
}
njn25e49d8e72002-09-23 09:36:25 +0000952
953/*------------------------------------------------------------*/
954/*--- Setting and checking permissions. ---*/
955/*------------------------------------------------------------*/
956
/* Set the shadow state of every word in [a, a+len) to the kind of
   initialisation given by 'status' (virgin / thread-initialised /
   segment-initialised).  The range is widened to whole 4-byte words. */
static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr end;

#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   /* Suspiciously large ranges usually mean a bad caller; warn. */
   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range state: large range %d",
                   len);

   VGP_PUSHCC(VgpSARP);

   /* Memory block may not be aligned or a whole word multiple. In neat cases,
    * we have to init len/4 words (len is in bytes). In nasty cases, it's
    * len/4+1 words. This works out which it is by aligning the block and
    * seeing if the end byte is in the same word as it is for the unaligned
    * block; if not, it's the awkward case. */
   end = (a + len + 3) & ~3;    /* round up */
   a   &= ~3;                   /* round down */

   /* Do it ... */
   switch (status) {
   case Vge_VirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_virgin_sword(a);
      }
      break;

   case Vge_NonVirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_nonvirgin_sword(a);
      }
      break;

   case Vge_SegmentInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31); PPP
         init_magically_inited_sword(a);
      }
      break;

   default:
      VG_(printf)("init_status = %u\n", status);
      VG_(skin_panic)("Unexpected Vge_InitStatus");
   }

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with
      __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSARP);
}
1021
1022
/* Mark [a, a+len) as initialised by the system (segment-mapped data),
   as opposed to initialised by the program itself. */
static void make_segment_readable ( Addr a, UInt len )
{
   //PROF_EVENT(??); PPP
   set_address_range_state ( a, len, Vge_SegmentInit );
}
1028
/* Mark [a, a+len) as allocated but not yet touched (virgin state). */
static void make_writable ( Addr a, UInt len )
{
   //PROF_EVENT(36); PPP
   set_address_range_state( a, len, Vge_VirginInit );
}
1034
/* Mark [a, a+len) as already initialised (non-virgin state). */
static void make_readable ( Addr a, UInt len )
{
   //PROF_EVENT(37); PPP
   set_address_range_state( a, len, Vge_NonVirginInit );
}
1040
1041
njn25e49d8e72002-09-23 09:36:25 +00001042/* Block-copy states (needed for implementing realloc()). */
1043static void copy_address_range_state(Addr src, Addr dst, UInt len)
1044{
1045 UInt i;
1046
1047 //PROF_EVENT(40); PPP
1048 for (i = 0; i < len; i += 4) {
1049 shadow_word sword = *(get_sword_addr ( src+i ));
1050 //PROF_EVENT(41); PPP
1051 set_sword ( dst+i, sword );
1052 }
1053}
1054
1055// SSS: put these somewhere better
sewardj0f811692002-10-22 04:59:26 +00001056static void eraser_mem_read (Addr a, UInt data_size, ThreadState *tst);
1057static void eraser_mem_write(Addr a, UInt data_size, ThreadState *tst);
sewardja5b3aec2002-10-22 05:09:36 +00001058
1059#define REGPARM(x) __attribute__((regparm (x)))
1060
1061static void eraser_mem_help_read_1(Addr a) REGPARM(1);
1062static void eraser_mem_help_read_2(Addr a) REGPARM(1);
1063static void eraser_mem_help_read_4(Addr a) REGPARM(1);
1064static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);
1065
1066static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
1067static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
1068static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
1069static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);
njn25e49d8e72002-09-23 09:36:25 +00001070
/* Core callback: the core is about to read [base, base+size) on behalf
   of the client (e.g. a syscall arg); treat it as a client read. */
static
void eraser_pre_mem_read(CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   eraser_mem_read(base, size, tst);
}
1077
/* Core callback: as eraser_pre_mem_read, but for a NUL-terminated
   string at base; the length checked excludes the terminator. */
static
void eraser_pre_mem_read_asciiz(CorePart part, ThreadState* tst,
                                Char* s, UInt base )
{
   eraser_mem_read(base, VG_(strlen)((Char*)base), tst);
}
1084
/* Core callback: the core is about to write [base, base+size) on
   behalf of the client; treat it as a client write. */
static
void eraser_pre_mem_write(CorePart part, ThreadState* tst,
                          Char* s, UInt base, UInt size )
{
   eraser_mem_write(base, size, tst);
}
1091
1092
1093
/* Core callback: memory mapped in at program startup.  rr/ww/xx are
   the mapping's permissions. */
static
void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   make_segment_readable(a, len);
}
1100
1101
1102static
1103void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
1104{
1105 if (is_inited) {
1106 make_readable(a, len);
1107 } else {
1108 make_writable(a, len);
1109 }
1110}
1111
1112static
1113void eraser_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +00001114 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +00001115{
1116 if (rr) make_readable(a, len);
1117 else if (ww) make_writable(a, len);
1118 /* else do nothing */
1119}
1120
1121
1122/*--------------------------------------------------------------*/
1123/*--- Initialise the memory audit system on program startup. ---*/
1124/*--------------------------------------------------------------*/
1125
1126static
1127void init_shadow_memory(void)
1128{
1129 Int i;
1130
1131 for (i = 0; i < ESEC_MAP_WORDS; i++)
1132 distinguished_secondary_map.swords[i] = virgin_sword;
1133
1134 /* These entries gradually get overwritten as the used address
1135 space expands. */
1136 for (i = 0; i < 65536; i++)
1137 primary_map[i] = &distinguished_secondary_map;
1138}
1139
1140
1141/*--------------------------------------------------------------*/
1142/*--- Machinery to support sanity checking ---*/
1143/*--------------------------------------------------------------*/
1144
1145/* Check that nobody has spuriously claimed that the first or last 16
1146 pages (64 KB) of address space have become accessible. Failure of
1147 the following do not per se indicate an internal consistency
1148 problem, but they are so likely to that we really want to know
1149 about it if so. */
1150
1151Bool SK_(cheap_sanity_check) ( void )
1152{
1153 if (VGE_IS_DISTINGUISHED_SM(primary_map[0]) &&
1154 VGE_IS_DISTINGUISHED_SM(primary_map[65535]))
1155 return True;
1156 else
1157 return False;
1158}
1159
1160
1161Bool SK_(expensive_sanity_check)(void)
1162{
1163 Int i;
1164
1165 /* Make sure nobody changed the distinguished secondary. */
1166 for (i = 0; i < ESEC_MAP_WORDS; i++)
1167 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
1168 distinguished_secondary_map.swords[i].state != virgin_sword.state)
1169 return False;
1170
1171 return True;
1172}
1173
1174
1175/*--------------------------------------------------------------*/
1176/*--- Instrumentation ---*/
1177/*--------------------------------------------------------------*/
1178
njn25e49d8e72002-09-23 09:36:25 +00001179/* Create and return an instrumented version of cb_in. Free cb_in
1180 before returning. */
1181UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
1182{
1183 UCodeBlock* cb;
1184 Int i;
1185 UInstr* u_in;
1186 Int t_size = INVALID_TEMPREG;
1187
njn4ba5a792002-09-30 10:23:54 +00001188 cb = VG_(alloc_UCodeBlock)();
njn25e49d8e72002-09-23 09:36:25 +00001189 cb->nextTemp = cb_in->nextTemp;
1190
1191 for (i = 0; i < cb_in->used; i++) {
1192 u_in = &cb_in->instrs[i];
1193
njn25e49d8e72002-09-23 09:36:25 +00001194 switch (u_in->opcode) {
1195
1196 case NOP: case CALLM_S: case CALLM_E:
1197 break;
1198
sewardja5b3aec2002-10-22 05:09:36 +00001199 case LOAD: {
1200 void (*help)(Addr);
1201 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
1202
1203 switch(u_in->size) {
1204 case 1: help = eraser_mem_help_read_1; break;
1205 case 2: help = eraser_mem_help_read_2; break;
1206 case 4: help = eraser_mem_help_read_4; break;
1207 default:
1208 VG_(skin_panic)("bad size");
1209 }
1210
1211 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
1212 uCCall(cb, (Addr)help, 1, 1, False);
njn25e49d8e72002-09-23 09:36:25 +00001213
sewardja5b3aec2002-10-22 05:09:36 +00001214 VG_(copy_UInstr)(cb, u_in);
1215 t_size = INVALID_TEMPREG;
1216 break;
1217 }
1218
1219 case FPU_R: {
njne427a662002-10-02 11:08:25 +00001220 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00001221 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00001222
1223 t_size = newTemp(cb);
1224 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1225 uLiteral(cb, (UInt)u_in->size);
njn25e49d8e72002-09-23 09:36:25 +00001226
sewardja5b3aec2002-10-22 05:09:36 +00001227 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
1228 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
njn25e49d8e72002-09-23 09:36:25 +00001229
sewardja5b3aec2002-10-22 05:09:36 +00001230 VG_(copy_UInstr)(cb, u_in);
1231 t_size = INVALID_TEMPREG;
1232 break;
1233 }
1234
1235 case STORE: {
1236 void (*help)(Addr, UInt);
1237 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
1238
1239 switch(u_in->size) {
1240 case 1: help = eraser_mem_help_write_1; break;
1241 case 2: help = eraser_mem_help_write_2; break;
1242 case 4: help = eraser_mem_help_write_4; break;
1243 default:
1244 VG_(skin_panic)("bad size");
1245 }
1246
1247 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
1248 uCCall(cb, (Addr)help, 2, 2, False);
1249
1250 VG_(copy_UInstr)(cb, u_in);
1251 t_size = INVALID_TEMPREG;
1252 break;
1253 }
1254
1255 case FPU_W: {
njne427a662002-10-02 11:08:25 +00001256 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
njn25e49d8e72002-09-23 09:36:25 +00001257 8 == u_in->size || 10 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00001258
1259 t_size = newTemp(cb);
1260 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
1261 uLiteral(cb, (UInt)u_in->size);
1262 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
1263 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
1264
1265 VG_(copy_UInstr)(cb, u_in);
1266 t_size = INVALID_TEMPREG;
1267 break;
1268 }
njn25e49d8e72002-09-23 09:36:25 +00001269
1270 default:
njn4ba5a792002-09-30 10:23:54 +00001271 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00001272 break;
1273 }
1274 }
1275
njn4ba5a792002-09-30 10:23:54 +00001276 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00001277 return cb;
1278}
1279
1280
sewardj16748af2002-10-22 04:55:54 +00001281/*------------------------------------------------------------*/
1282/*--- Shadow chunks info ---*/
1283/*------------------------------------------------------------*/
1284
1285#define SHADOW_EXTRA 2
1286
/* Record the allocation context in the chunk's first skin-private word. */
static __inline__
void set_sc_where( ShadowChunk* sc, ExeContext* ec )
{
   sc->skin_extra[0] = (UInt)ec;
}
1292
/* Retrieve the allocation context stored by set_sc_where(). */
static __inline__
ExeContext *get_sc_where( ShadowChunk* sc )
{
   return (ExeContext*)sc->skin_extra[0];
}
1298
/* Record the allocating thread in the chunk's second skin-private word. */
static __inline__
void set_sc_tid(ShadowChunk *sc, ThreadId tid)
{
   sc->skin_extra[1] = (UInt)tid;
}
1304
/* Retrieve the allocating thread stored by set_sc_tid(). */
static __inline__
ThreadId get_sc_tid(ShadowChunk *sc)
{
   return (ThreadId)sc->skin_extra[1];
}
1310
1311void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst )
1312{
1313 set_sc_where( sc, VG_(get_ExeContext) ( tst ) );
1314 set_sc_tid(sc, VG_(get_tid_from_ThreadState(tst)));
1315}
1316
njn25e49d8e72002-09-23 09:36:25 +00001317/*--------------------------------------------------------------------*/
1318/*--- Error and suppression handling ---*/
1319/*--------------------------------------------------------------------*/
1320
/* The suppression kinds Helgrind understands: just the one. */
typedef
   enum {
      /* Possible data race */
      EraserSupp
   }
   EraserSuppKind;
1327
/* What kind of error it is. */
typedef
   enum {
      EraserErr,   /* data-race */
      MutexErr     /* mutex operations */
   }
   EraserErrorKind;
/* Note: the trailing comma after the final enumerator was removed --
   it is a constraint violation in C89, the dialect this file targets
   (C99 added permission for it). */
1335
sewardj16748af2002-10-22 04:55:54 +00001336/* The classification of a faulting address. */
1337typedef
1338 enum { Undescribed, /* as-yet unclassified */
1339 Stack,
1340 Unknown, /* classification yielded nothing useful */
1341 Mallocd,
1342 Segment
1343 }
1344 AddrKind;
/* Records info about a faulting address.  Which fields are meaningful
   depends on akind; the others are left in their cleared state. */
typedef
   struct {
      /* ALL */
      AddrKind akind;
      /* Freed, Mallocd */
      Int blksize;
      /* Freed, Mallocd */
      Int rwoffset;
      /* Freed, Mallocd */
      ExeContext* lastchange;
      ThreadId lasttid;
      /* Stack */
      ThreadId stack_tid;
      /* Segment */
      const Char* filename;
      const Char* section;
      /* True if is just-below %esp -- could be a gcc bug. */
      Bool maybe_gcc;
   }
   AddrInfo;
njn25e49d8e72002-09-23 09:36:25 +00001366
sewardj16748af2002-10-22 04:55:54 +00001367/* What kind of memory access is involved in the error? */
1368typedef
1369 enum { ReadAxs, WriteAxs, ExecAxs }
1370 AxsKind;
1371
/* Extra context for memory errors.  The EraserErr fields and the
   MutexErr fields are disjoint; which group is valid depends on the
   error's ekind. */
typedef
   struct {
      /* EraserErr */
      AxsKind axskind;
      Int size;
      AddrInfo addrinfo;
      Bool isWrite;
      /* shadow state of the word before the racing access */
      shadow_word prevstate;
      /* MutexErr */
      hg_mutex_t *mutex;
      ExeContext *lasttouched;
      ThreadId lasttid;
   }
   HelgrindError;
1386
1387static __inline__
1388void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00001389{
sewardj16748af2002-10-22 04:55:54 +00001390 ai->akind = Unknown;
1391 ai->blksize = 0;
1392 ai->rwoffset = 0;
1393 ai->lastchange = NULL;
1394 ai->lasttid = VG_INVALID_THREADID;
1395 ai->filename = NULL;
1396 ai->section = "???";
1397 ai->stack_tid = VG_INVALID_THREADID;
1398 ai->maybe_gcc = False;
njn25e49d8e72002-09-23 09:36:25 +00001399}
1400
sewardj16748af2002-10-22 04:55:54 +00001401static __inline__
1402void clear_HelgrindError ( HelgrindError* err_extra )
1403{
1404 err_extra->axskind = ReadAxs;
1405 err_extra->size = 0;
1406 err_extra->mutex = NULL;
1407 err_extra->lasttouched= NULL;
1408 err_extra->lasttid = VG_INVALID_THREADID;
1409 err_extra->prevstate.state = Vge_Virgin;
1410 err_extra->prevstate.other = 0;
1411 clear_AddrInfo ( &err_extra->addrinfo );
1412 err_extra->isWrite = False;
1413}
1414
1415
1416
1417/* Describe an address as best you can, for error messages,
1418 putting the result in ai. */
1419
/* Describe address a for error messages, filling in ai: first try the
   mapped segments, then the currently-malloc'd blocks; otherwise give
   up and mark it Unknown. */
static void describe_addr ( Addr a, AddrInfo* ai )
{
   ShadowChunk* sc;

   /* Nested functions, yeah.  Need the lexical scoping of 'a'. */

   /* Closure for searching thread stacks */
   /* NOTE(review): addr_is_in_bounds is not referenced in this
      function as it stands -- presumably left over from a stack
      search; confirm before removing. */
   Bool addr_is_in_bounds(Addr stack_min, Addr stack_max)
   {
      return (stack_min <= a && a <= stack_max);
   }
   /* Closure for searching malloc'd and free'd lists */
   Bool addr_is_in_block(ShadowChunk *sh_ch)
   {
      return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size );
   }

   /* Search for it in segments */
   {
      const SegInfo *seg;

      for(seg = VG_(next_seginfo)(NULL);
	  seg != NULL;
	  seg = VG_(next_seginfo)(seg)) {
	 Addr base = VG_(seg_start)(seg);
	 UInt size = VG_(seg_size)(seg);
	 const UChar *filename = VG_(seg_filename)(seg);

	 if (a >= base && a < base+size) {
	    ai->akind = Segment;
	    ai->blksize = size;
	    ai->rwoffset = a - base;
	    ai->filename = filename;

	    switch(VG_(seg_sect_kind)(a)) {
	    case Vg_SectText:	ai->section = "text"; break;
	    case Vg_SectData:	ai->section = "data"; break;
	    case Vg_SectBSS:	ai->section = "BSS"; break;
	    case Vg_SectGOT:	ai->section = "GOT"; break;
	    case Vg_SectPLT:	ai->section = "PLT"; break;
	    case Vg_SectUnknown:
	    default:
	       ai->section = "???"; break;
	    }

	    return;
	 }
      }
   }

   /* Search for a currently malloc'd block which might bracket it. */
   sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block);
   if (NULL != sc) {
      ai->akind      = Mallocd;
      ai->blksize    = sc->size;
      ai->rwoffset   = (Int)(a) - (Int)(sc->data);
      ai->lastchange = get_sc_where(sc);
      ai->lasttid    = get_sc_tid(sc);
      return;
   }
   /* Clueless ... */
   ai->akind = Unknown;
   return;
}
1484
1485
1486/* Creates a copy of the err_extra, updates the copy with address info if
1487 necessary, sticks the copy into the SkinError. */
/* Creates a copy of the err_extra, updates the copy with address info if
   necessary, sticks the copy into the SkinError.  Called by the core
   when an error is about to be stored permanently. */
void SK_(dup_extra_and_update)(SkinError* err)
{
   HelgrindError* err_extra;

   /* Take a private copy: the one in err->extra is stack-allocated by
      the record_* callers below. */
   err_extra  = VG_(malloc)(sizeof(HelgrindError));
   *err_extra = *((HelgrindError*)err->extra);

   /* Lazily classify the faulting address, only once the error is
      actually going to be kept. */
   if (err_extra->addrinfo.akind == Undescribed)
      describe_addr ( err->addr, &(err_extra->addrinfo) );

   err->extra = err_extra;
}
1500
sewardj0f811692002-10-22 04:59:26 +00001501static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write,
1502 shadow_word prevstate )
sewardj16748af2002-10-22 04:55:54 +00001503{
1504 HelgrindError err_extra;
sewardj1806d7f2002-10-22 05:05:49 +00001505 static const shadow_word err_sw = { TID_INDICATING_ALL, Vge_Excl };
sewardj16748af2002-10-22 04:55:54 +00001506
1507 clear_HelgrindError(&err_extra);
1508 err_extra.isWrite = is_write;
1509 err_extra.addrinfo.akind = Undescribed;
1510 err_extra.prevstate = prevstate;
1511
sewardj0f811692002-10-22 04:59:26 +00001512 VG_(maybe_record_error)( tst, EraserErr, a,
sewardj16748af2002-10-22 04:55:54 +00001513 (is_write ? "writing" : "reading"),
1514 &err_extra);
1515
sewardj1806d7f2002-10-22 05:05:49 +00001516 set_sword(a, err_sw);
sewardj16748af2002-10-22 04:55:54 +00001517}
1518
/* Report a bogus mutex operation (str describes it); ec is the context
   of the mutex's last state change, shown as "last touched" info. */
static void record_mutex_error(ThreadId tid, hg_mutex_t *mutex,
                               Char *str, ExeContext *ec)
{
   HelgrindError err_extra;

   clear_HelgrindError(&err_extra);
   err_extra.addrinfo.akind = Undescribed;
   err_extra.mutex = mutex;
   err_extra.lasttouched = ec;
   err_extra.lasttid = tid;

   VG_(maybe_record_error)(VG_(get_ThreadState)(tid), MutexErr,
                           (Addr)mutex->mutexp, str, &err_extra);
}
njn25e49d8e72002-09-23 09:36:25 +00001533
1534Bool SK_(eq_SkinError) ( VgRes not_used,
1535 SkinError* e1, SkinError* e2 )
1536{
sewardj16748af2002-10-22 04:55:54 +00001537 sk_assert(e1->ekind == e2->ekind);
1538
1539 switch(e1->ekind) {
1540 case EraserErr:
1541 return e1->addr == e2->addr;
1542
1543 case MutexErr:
1544 return e1->addr == e2->addr;
1545 }
1546
njn25e49d8e72002-09-23 09:36:25 +00001547 if (e1->string != e2->string) return False;
1548 if (0 != VG_(strcmp)(e1->string, e2->string)) return False;
1549 return True;
1550}
1551
sewardj16748af2002-10-22 04:55:54 +00001552static void pp_AddrInfo ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00001553{
sewardj16748af2002-10-22 04:55:54 +00001554 switch (ai->akind) {
1555 case Stack:
1556 VG_(message)(Vg_UserMsg,
1557 " Address %p is on thread %d's stack",
1558 a, ai->stack_tid);
1559 break;
1560 case Unknown:
1561 if (ai->maybe_gcc) {
1562 VG_(message)(Vg_UserMsg,
1563 " Address %p is just below %%esp. Possibly a bug in GCC/G++",
1564 a);
1565 VG_(message)(Vg_UserMsg,
1566 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
1567 } else {
1568 VG_(message)(Vg_UserMsg,
1569 " Address %p is not stack'd, malloc'd or free'd", a);
1570 }
1571 break;
1572 case Segment:
1573 VG_(message)(Vg_UserMsg,
1574 " Address %p is in %s section of %s",
1575 a, ai->section, ai->filename);
1576 break;
1577 case Mallocd: {
1578 UInt delta;
1579 UChar* relative;
1580 if (ai->rwoffset < 0) {
1581 delta = (UInt)(- ai->rwoffset);
1582 relative = "before";
1583 } else if (ai->rwoffset >= ai->blksize) {
1584 delta = ai->rwoffset - ai->blksize;
1585 relative = "after";
1586 } else {
1587 delta = ai->rwoffset;
1588 relative = "inside";
1589 }
1590 VG_(message)(Vg_UserMsg,
1591 " Address %p is %d bytes %s a block of size %d alloc'd by thread %d at",
1592 a, delta, relative,
1593 ai->blksize,
1594 ai->lasttid);
sewardj5481f8f2002-10-20 19:43:47 +00001595
sewardj16748af2002-10-22 04:55:54 +00001596 VG_(pp_ExeContext)(ai->lastchange);
1597 break;
1598 }
1599 default:
1600 VG_(skin_panic)("pp_AddrInfo");
1601 }
njn25e49d8e72002-09-23 09:36:25 +00001602}
1603
1604
sewardj16748af2002-10-22 04:55:54 +00001605void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) )
njn25e49d8e72002-09-23 09:36:25 +00001606{
sewardj16748af2002-10-22 04:55:54 +00001607 HelgrindError *extra = (HelgrindError *)err->extra;
1608 Char buf[100];
1609 Char *msg = buf;
1610
1611 *msg = '\0';
1612
1613 switch(err->ekind) {
1614 case EraserErr:
1615 VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
1616 err->string, err->addr, err->addr );
1617 pp_ExeContext();
1618
1619 switch(extra->prevstate.state) {
1620 case Vge_Virgin:
1621 /* shouldn't be possible to go directly from virgin -> error */
1622 VG_(sprintf)(buf, "virgin!?");
1623 break;
1624
1625 case Vge_Excl:
1626 sk_assert(extra->prevstate.other != TID_INDICATING_ALL);
1627 VG_(sprintf)(buf, "exclusively owned by thread %d", extra->prevstate.other);
1628 break;
1629
1630 case Vge_Shar:
1631 case Vge_SharMod: {
1632 LockSet *ls;
1633 UInt count;
1634 Char *cp;
1635
1636 if (lockset_table[extra->prevstate.other] == NULL) {
1637 VG_(sprintf)(buf, "shared %s, no locks",
1638 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
1639 break;
1640 }
1641
1642 for(count = 0, ls = lockset_table[extra->prevstate.other]; ls != NULL; ls = ls->next)
1643 count++;
1644 msg = VG_(malloc)(25 + (120 * count));
1645
1646 cp = msg;
1647 cp += VG_(sprintf)(cp, "shared %s, locked by: ",
1648 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
1649 for(ls = lockset_table[extra->prevstate.other]; ls != NULL; ls = ls->next)
1650 cp += VG_(sprintf)(cp, "%p%(y, ", ls->mutex->mutexp, ls->mutex->mutexp);
1651 cp[-2] = '\0';
1652 break;
1653 }
1654 }
1655
1656 if (*msg) {
1657 VG_(message)(Vg_UserMsg, " Previous state: %s", msg);
1658 if (msg != buf)
1659 VG_(free)(msg);
1660 }
1661 pp_AddrInfo(err->addr, &extra->addrinfo);
1662 break;
1663
1664 case MutexErr:
1665 VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s at",
1666 err->addr, err->addr, err->string );
1667 pp_ExeContext();
1668 if (extra->lasttouched) {
1669 VG_(message)(Vg_UserMsg, " last touched by thread %d at", extra->lasttid);
1670 VG_(pp_ExeContext)(extra->lasttouched);
1671 }
1672 pp_AddrInfo(err->addr, &extra->addrinfo);
1673 break;
1674 }
njn25e49d8e72002-09-23 09:36:25 +00001675}
1676
1677
1678Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind )
1679{
1680 if (0 == VG_(strcmp)(name, "Eraser")) {
1681 *skind = EraserSupp;
1682 return True;
1683 } else {
1684 return False;
1685 }
1686}
1687
1688
/* Eraser suppressions carry no extra lines in the suppression file. */
Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf,
                                        Int nBuf, SkinSupp* s )
{
   /* do nothing -- no extra suppression info present.  Return True to
      indicate nothing bad happened. */
   return True;
}
1696
1697
/* Any Eraser suppression matches any Eraser error unconditionally.
   NOTE(review): a MutexErr reaching here would trip the second assert
   -- presumably the core never pairs one with an EraserSupp; confirm. */
Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su)
{
   sk_assert( su->skind == EraserSupp);
   sk_assert(err->ekind == EraserErr);
   return True;
}
1704
1705
1706// SSS: copying mutex's pointer... is that ok? Could they get deallocated?
1707// (does that make sense, deallocating a mutex?)
/* Client has just locked void_mutex.  Update the mutex's own state,
   then move the thread onto the lockset "current set + mutex": scan
   the lockset table for an existing entry with that contents, building
   a new sorted entry at the end of the table if none matches. */
static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
{
   Int i = 1;
   LockSet*  new_node;
   LockSet*  p;
   LockSet** q;
   hg_mutex_t *mutex = get_mutex(void_mutex);

   set_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid));

#  if DEBUG_LOCKS
   VG_(printf)("lock (%u, %x)\n", tid, mutex->mutexp);
#  endif

   sk_assert(tid < VG_N_THREADS &&
             thread_locks[tid] < M_LOCKSET_TABLE);
   /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_lock-IN");
#  endif

   while (True) {
      if (i == M_LOCKSET_TABLE)
         VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE");

      /* the lockset didn't already exist */
      if (i == n_lockset_table) {

         p = lockset_table[thread_locks[tid]];
         q = &lockset_table[i];

         /* copy the thread's lockset, creating a new list */
         while (p != NULL) {
            new_node = VG_(malloc)(sizeof(LockSet));
            new_node->mutex = p->mutex;
            *q = new_node;
            q = &((*q)->next);
            p = p->next;
         }
         (*q) = NULL;

         /* find spot for the new mutex in the new list; lists are
            kept sorted by mutex_cmp order */
         p = lockset_table[i];
         q = &lockset_table[i];
         while (NULL != p && mutex_cmp(mutex, p->mutex) > 0) {
            p = p->next;
            q = &((*q)->next);
         }

         /* insert new mutex in new list */
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = mutex;
         new_node->next = p;
         (*q) = new_node;

         p = lockset_table[i];
         sk_assert(i == n_lockset_table);
         n_lockset_table++;

#        if DEBUG_NEW_LOCKSETS
         VG_(printf)("new lockset vector (%d): ", i);
         print_LockSet("newvec", p);
#        endif

         goto done;

      } else {
         /* If this succeeds, the required vector (with the new mutex added)
          * already exists in the table at position i.  Otherwise, keep
          * looking. */
         if (weird_LockSet_equals(lockset_table[thread_locks[tid]],
                                  lockset_table[i], mutex)) {
            goto done;
         }
      }
      /* if we get to here, table lockset didn't match the new thread
       * lockset, so keep looking */
      i++;
   }

  done:
   /* Update the thread's lock vector */
   thread_locks[tid] = i;
#  if DEBUG_LOCKS
   VG_(printf)("tid %u now has lockset %d\n", tid, i);
#  endif

#  if LOCKSET_SANITY > 1
   sanity_check_locksets("eraser_post_mutex_lock-OUT");
#  endif

}
1800
1801
/* Client has just unlocked void_mutex.  Update the mutex's own state,
   then revert the thread to the lockset "current set - mutex", reusing
   an existing table entry if one matches, otherwise building one. */
static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
{
   static const Bool debug = False;
   Int i = 0;
   hg_mutex_t *mutex = get_mutex(void_mutex);

   set_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid));

   if (debug || DEBUG_LOCKS)
      VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp);

   if (debug || LOCKSET_SANITY > 1)
      sanity_check_locksets("eraser_post_mutex_unlock-IN");

   // find the lockset that is the current one minus tid, change thread to use
   // that index.

   while (True) {

      if (i == n_lockset_table) {
	 /* We can't find a suitable pre-made set, so we'll have to
            make one. */
         /* NOTE(review): remove() here is the lockset-shrinking helper
            defined earlier in this file, not stdio's remove(). */
	 i = remove ( thread_locks[tid], mutex );
	 break;
      }

      /* Args are in opposite order to call above, for reverse effect */
      if (weird_LockSet_equals( lockset_table[i],
				lockset_table[thread_locks[tid]],
				mutex) ) {
         /* found existing diminished set -- the best outcome. */
         if (debug)
	    VG_(printf)("unlock: match found at %d\n", i);
         break;
      }

      i++;
   }

   /* Update the thread's lock vector */
   if (debug || DEBUG_LOCKS)
      VG_(printf)("tid %u reverts from %d to lockset %d\n",
		  tid, thread_locks[tid], i);

   thread_locks[tid] = i;

   if (debug || LOCKSET_SANITY > 1)
      sanity_check_locksets("eraser_post_mutex_unlock-OUT");
}
1851
1852
1853/* ---------------------------------------------------------------------
1854 Checking memory reads and writes
1855 ------------------------------------------------------------------ */
1856
1857/* Behaviour on reads and writes:
1858 *
1859 * VIR EXCL SHAR SH_MOD
1860 * ----------------------------------------------------------------
1861 * rd/wr, 1st thread | - EXCL - -
1862 * rd, new thread | - SHAR - -
1863 * wr, new thread | - SH_MOD - -
1864 * rd | error! - SHAR SH_MOD
1865 * wr | EXCL - SH_MOD SH_MOD
1866 * ----------------------------------------------------------------
1867 */
1868
1869#if 0
1870static
1871void dump_around_a(Addr a)
1872{
1873 UInt i;
1874 shadow_word* sword;
1875 VG_(printf)("NEARBY:\n");
1876 for (i = a - 12; i <= a + 12; i += 4) {
1877 sword = get_sword_addr(i);
1878 VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
1879 }
1880}
1881#endif
1882
1883/* Find which word the first and last bytes are in (by shifting out bottom 2
1884 * bits) then find the difference. */
1885static __inline__
1886Int compute_num_words_accessed(Addr a, UInt size)
1887{
1888 Int x, y, n_words;
1889 x = a >> 2;
1890 y = (a + size - 1) >> 2;
1891 n_words = y - x + 1;
1892 return n_words;
1893}
1894
1895
1896#if DEBUG_ACCESSES
1897 #define DEBUG_STATE(args...) \
1898 VG_(printf)("(%u) ", size), \
1899 VG_(printf)(args)
1900#else
1901 #define DEBUG_STATE(args...)
1902#endif
1903
1904
sewardj0f811692002-10-22 04:59:26 +00001905static void eraser_mem_read(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00001906{
sewardj0f811692002-10-22 04:59:26 +00001907 ThreadId tid;
njn25e49d8e72002-09-23 09:36:25 +00001908 shadow_word* sword;
njn25e49d8e72002-09-23 09:36:25 +00001909 Addr end = a + 4*compute_num_words_accessed(a, size);
sewardj16748af2002-10-22 04:55:54 +00001910 shadow_word prevstate;
njn25e49d8e72002-09-23 09:36:25 +00001911
sewardj0f811692002-10-22 04:59:26 +00001912 tid = (tst == NULL) ? VG_(get_current_tid)() : VG_(get_tid_from_ThreadState)(tst);
1913
njn25e49d8e72002-09-23 09:36:25 +00001914 for ( ; a < end; a += 4) {
1915
1916 sword = get_sword_addr(a);
1917 if (sword == SEC_MAP_ACCESS) {
1918 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
1919 continue;
1920 }
1921
sewardj16748af2002-10-22 04:55:54 +00001922 prevstate = *sword;
1923
njn25e49d8e72002-09-23 09:36:25 +00001924 switch (sword->state) {
1925
1926 /* This looks like reading of unitialised memory, may be legit. Eg.
1927 * calloc() zeroes its values, so untouched memory may actually be
1928 * initialised. Leave that stuff to Valgrind. */
1929 case Vge_Virgin:
1930 if (TID_INDICATING_NONVIRGIN == sword->other) {
1931 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
1932# if DEBUG_VIRGIN_READS
1933 dump_around_a(a);
1934# endif
1935 } else {
1936 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
1937 }
1938 sword->state = Vge_Excl;
1939 sword->other = tid; /* remember exclusive owner */
1940 break;
1941
1942 case Vge_Excl:
1943 if (tid == sword->other) {
1944 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
sewardj16748af2002-10-22 04:55:54 +00001945 } else if (TID_INDICATING_ALL == sword->other) {
1946 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
1947 } else {
njn25e49d8e72002-09-23 09:36:25 +00001948 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sword->other, a, tid);
1949 sword->state = Vge_Shar;
1950 sword->other = thread_locks[tid];
1951# if DEBUG_MEM_LOCKSET_CHANGES
sewardjc26cc252002-10-23 21:58:55 +00001952 print_LockSet("excl read locks", lockset_table[sword->other]);
njn25e49d8e72002-09-23 09:36:25 +00001953# endif
1954 }
1955 break;
1956
1957 case Vge_Shar:
1958 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
1959 sword->other = intersect(sword->other, thread_locks[tid]);
1960 break;
1961
1962 case Vge_SharMod:
1963 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
1964 sword->other = intersect(sword->other, thread_locks[tid]);
1965
1966 if (lockset_table[sword->other] == NULL) {
sewardj0f811692002-10-22 04:59:26 +00001967 record_eraser_error(tst, a, False /* !is_write */, prevstate);
njn25e49d8e72002-09-23 09:36:25 +00001968 n_eraser_warnings++;
1969 }
1970 break;
1971
1972 default:
njne427a662002-10-02 11:08:25 +00001973 VG_(skin_panic)("Unknown eraser state");
njn25e49d8e72002-09-23 09:36:25 +00001974 }
1975 }
1976}
1977
1978
sewardj0f811692002-10-22 04:59:26 +00001979static void eraser_mem_write(Addr a, UInt size, ThreadState *tst)
njn25e49d8e72002-09-23 09:36:25 +00001980{
sewardj0f811692002-10-22 04:59:26 +00001981 ThreadId tid;
njn25e49d8e72002-09-23 09:36:25 +00001982 shadow_word* sword;
njn25e49d8e72002-09-23 09:36:25 +00001983 Addr end = a + 4*compute_num_words_accessed(a, size);
sewardj16748af2002-10-22 04:55:54 +00001984 shadow_word prevstate;
njn25e49d8e72002-09-23 09:36:25 +00001985
sewardj0f811692002-10-22 04:59:26 +00001986 tid = (tst == NULL) ? VG_(get_current_tid)() : VG_(get_tid_from_ThreadState)(tst);
1987
njn25e49d8e72002-09-23 09:36:25 +00001988 for ( ; a < end; a += 4) {
1989
1990 sword = get_sword_addr(a);
1991 if (sword == SEC_MAP_ACCESS) {
1992 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
1993 continue;
1994 }
1995
sewardj16748af2002-10-22 04:55:54 +00001996 prevstate = *sword;
1997
njn25e49d8e72002-09-23 09:36:25 +00001998 switch (sword->state) {
1999 case Vge_Virgin:
2000 if (TID_INDICATING_NONVIRGIN == sword->other)
2001 DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
2002 else
2003 DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
2004 sword->state = Vge_Excl;
2005 sword->other = tid; /* remember exclusive owner */
2006 break;
2007
2008 case Vge_Excl:
2009 if (tid == sword->other) {
2010 DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
2011 break;
sewardj16748af2002-10-22 04:55:54 +00002012 } else if (TID_INDICATING_ALL == sword->other) {
2013 DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
2014 break;
njn25e49d8e72002-09-23 09:36:25 +00002015 } else {
2016 DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sword->other, a, tid);
2017 sword->state = Vge_SharMod;
2018 sword->other = thread_locks[tid];
2019# if DEBUG_MEM_LOCKSET_CHANGES
sewardjc26cc252002-10-23 21:58:55 +00002020 print_LockSet("excl write locks", lockset_table[sword->other]);
njn25e49d8e72002-09-23 09:36:25 +00002021# endif
2022 goto SHARED_MODIFIED;
2023 }
2024
2025 case Vge_Shar:
2026 DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
2027 sword->state = Vge_SharMod;
2028 sword->other = intersect(sword->other, thread_locks[tid]);
2029 goto SHARED_MODIFIED;
2030
2031 case Vge_SharMod:
2032 DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
2033 sword->other = intersect(sword->other, thread_locks[tid]);
2034 SHARED_MODIFIED:
2035 if (lockset_table[sword->other] == NULL) {
sewardj0f811692002-10-22 04:59:26 +00002036 record_eraser_error(tst, a, True /* is_write */, prevstate);
njn25e49d8e72002-09-23 09:36:25 +00002037 n_eraser_warnings++;
2038 }
2039 break;
2040
2041 default:
njne427a662002-10-02 11:08:25 +00002042 VG_(skin_panic)("Unknown eraser state");
njn25e49d8e72002-09-23 09:36:25 +00002043 }
2044 }
2045}
2046
2047#undef DEBUG_STATE
2048
sewardja5b3aec2002-10-22 05:09:36 +00002049static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00002050{
sewardja5b3aec2002-10-22 05:09:36 +00002051 eraser_mem_read(a, 1, NULL);
sewardj7ab2aca2002-10-20 19:40:32 +00002052}
2053
sewardja5b3aec2002-10-22 05:09:36 +00002054static void eraser_mem_help_read_2(Addr a)
2055{
2056 eraser_mem_read(a, 2, NULL);
2057}
2058
/* JIT trampoline: record a 4-byte load at a, attributed to the
   currently running thread (tst == NULL). */
static void eraser_mem_help_read_4(Addr a)
{
   eraser_mem_read(a, 4, NULL);
}
2063
/* JIT trampoline: record a load of arbitrary size at a, attributed to
   the currently running thread (tst == NULL). */
static void eraser_mem_help_read_N(Addr a, UInt size)
{
   eraser_mem_read(a, size, NULL);
}
2068
2069static void eraser_mem_help_write_1(Addr a, UInt val)
2070{
2071 if (*(UChar *)a != val)
2072 eraser_mem_write(a, 1, NULL);
2073}
2074static void eraser_mem_help_write_2(Addr a, UInt val)
2075{
2076 if (*(UShort *)a != val)
2077 eraser_mem_write(a, 2, NULL);
2078}
2079static void eraser_mem_help_write_4(Addr a, UInt val)
2080{
2081 if (*(UInt *)a != val)
2082 eraser_mem_write(a, 4, NULL);
2083}
/* JIT trampoline: record a store of arbitrary size at a, attributed to
   the currently running thread (tst == NULL).  Unlike the fixed-size
   write helpers, the new value is not passed in, so no redundant-store
   filtering is possible here. */
static void eraser_mem_help_write_N(Addr a, UInt size)
{
   eraser_mem_write(a, size, NULL);
}
njn25e49d8e72002-09-23 09:36:25 +00002088
2089/*--------------------------------------------------------------------*/
2090/*--- Setup ---*/
2091/*--------------------------------------------------------------------*/
2092
njnd04b7c62002-10-03 14:05:52 +00002093void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track)
njn25e49d8e72002-09-23 09:36:25 +00002094{
2095 Int i;
2096
sewardj4aa62ba2002-10-05 15:49:27 +00002097 details->name = "Helgrind";
njnd04b7c62002-10-03 14:05:52 +00002098 details->version = NULL;
2099 details->description = "a data race detector";
2100 details->copyright_author =
2101 "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote.";
2102 details->bug_reports_to = "njn25@cam.ac.uk";
njn25e49d8e72002-09-23 09:36:25 +00002103
sewardj5481f8f2002-10-20 19:43:47 +00002104 needs->core_errors = True;
2105 needs->skin_errors = True;
2106 needs->data_syms = True;
sewardj16748af2002-10-22 04:55:54 +00002107 needs->sizeof_shadow_block = SHADOW_EXTRA;
njn25e49d8e72002-09-23 09:36:25 +00002108
njn25e49d8e72002-09-23 09:36:25 +00002109 track->new_mem_startup = & eraser_new_mem_startup;
2110 track->new_mem_heap = & eraser_new_mem_heap;
2111 track->new_mem_stack = & make_writable;
2112 track->new_mem_stack_aligned = & make_writable_aligned;
2113 track->new_mem_stack_signal = & make_writable;
2114 track->new_mem_brk = & make_writable;
sewardj40f8ebe2002-10-23 21:46:13 +00002115 track->new_mem_mmap = & eraser_new_mem_startup;
njn25e49d8e72002-09-23 09:36:25 +00002116
2117 track->copy_mem_heap = & copy_address_range_state;
2118 track->change_mem_mprotect = & eraser_set_perms;
2119
2120 track->ban_mem_heap = NULL;
2121 track->ban_mem_stack = NULL;
2122
2123 track->die_mem_heap = NULL;
2124 track->die_mem_stack = NULL;
2125 track->die_mem_stack_aligned = NULL;
2126 track->die_mem_stack_signal = NULL;
2127 track->die_mem_brk = NULL;
2128 track->die_mem_munmap = NULL;
2129
2130 track->pre_mem_read = & eraser_pre_mem_read;
2131 track->pre_mem_read_asciiz = & eraser_pre_mem_read_asciiz;
2132 track->pre_mem_write = & eraser_pre_mem_write;
2133 track->post_mem_write = NULL;
2134
2135 track->post_mutex_lock = & eraser_post_mutex_lock;
2136 track->post_mutex_unlock = & eraser_post_mutex_unlock;
2137
sewardja5b3aec2002-10-22 05:09:36 +00002138 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
2139 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
2140 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
2141 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
2142
2143 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
2144 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
2145 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
2146 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00002147
njn25e49d8e72002-09-23 09:36:25 +00002148 /* Init lock table */
2149 for (i = 0; i < VG_N_THREADS; i++)
2150 thread_locks[i] = 0 /* the empty lock set */;
2151
2152 lockset_table[0] = NULL;
2153 for (i = 1; i < M_LOCKSET_TABLE; i++)
2154 lockset_table[i] = NULL;
2155
2156 init_shadow_memory();
2157}
2158
2159
/* Nothing to do after command-line option processing; all setup
   happens in SK_(pre_clo_init). */
void SK_(post_clo_init)(void)
{
}
2163
2164
/* Tool shutdown: optionally dump the lock table and sanity-check the
   locksets (compile-time switches), then report how many possible data
   races were found during the run. */
void SK_(fini)(void)
{
# if DEBUG_LOCK_TABLE
   pp_all_LockSets();
# endif
# if LOCKSET_SANITY
   sanity_check_locksets("SK_(fini)");
# endif
   VG_(message)(Vg_UserMsg, "%u possible data races found", n_eraser_warnings);
}
2175
2176/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00002177/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00002178/*--------------------------------------------------------------------*/