
/*--------------------------------------------------------------------*/
/*--- Helgrind: checking for data races in threaded programs.      ---*/
/*---                                                    hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting
   data races in threaded programs.

   Copyright (C) 2002-2004 Nicholas Nethercote
      njn25@cam.ac.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "vg_skin.h"
#include "helgrind.h"

static UInt n_eraser_warnings = 0;
static UInt n_lockorder_warnings = 0;

/*------------------------------------------------------------*/
/*--- Debug guff                                            ---*/
/*------------------------------------------------------------*/

#define DEBUG_LOCK_TABLE    0   /* Print lock table at end */

#define DEBUG_MAKE_ACCESSES 0   /* Print make_access() calls */
#define DEBUG_LOCKS         0   /* Print lock()/unlock() calls and locksets */
#define DEBUG_NEW_LOCKSETS  0   /* Print new locksets when created */
#define DEBUG_ACCESSES      0   /* Print reads, writes */
#define DEBUG_MEM_LOCKSET_CHANGES 0
                                /* Print when an address's lockset
                                   changes; only useful with
                                   DEBUG_ACCESSES */
#define SLOW_ASSERTS        0   /* do expensive asserts */
#define DEBUG_VIRGIN_READS  0   /* Dump around address on VIRGIN reads */

#if SLOW_ASSERTS
#define SK_ASSERT(x) sk_assert(x)
#else
#define SK_ASSERT(x)
#endif

/* heavyweight LockSet sanity checking:
   0 == never
   1 == after important ops
   2 == As 1 and also after pthread_mutex_* ops (excessively slow)
 */
#define LOCKSET_SANITY 0

/* Rotate an unsigned quantity left */
#define ROTL(x, n)      (((x) << (n)) | ((x) >> ((sizeof(x)*8)-(n))))

/* round a up to the next multiple of N.  N must be a power of 2 */
#define ROUNDUP(a, N)   ((a + N - 1) & ~(N-1))

/* Round a down to the next multiple of N.  N must be a power of 2 */
#define ROUNDDN(a, N)   ((a) & ~(N-1))
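
/* Purely as illustration of the macros above (not used anywhere):
     ROUNDUP(0x1001, 4)   == 0x1004
     ROUNDDN(0x1003, 4)   == 0x1000
     ROTL(0x80000001U, 1) == 0x00000003
   Note that ROUNDUP evaluates its arguments more than once, so they
   should not have side effects. */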

/*------------------------------------------------------------*/
/*--- Command line options                                  ---*/
/*------------------------------------------------------------*/

static enum {
   EC_None,
   EC_Some,
   EC_All
} clo_execontext = EC_None;

static Bool clo_priv_stacks = False;

/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                            ---*/
/*------------------------------------------------------------*/

// PPP: work out if I want this

#define PROF_EVENT(x)
#if 0
#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

void VGE_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                     \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);      \
        event_ctr[ev]++;                                   \
   } while (False);

#else

//static void init_prof_mem ( void ) { }
//       void VG_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif /* VG_PROFILE_MEMORY */

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   [PPP: snip event numbers...]
*/
#endif /* 0 */


/*------------------------------------------------------------*/
/*--- Data defns.                                           ---*/
/*------------------------------------------------------------*/

typedef
   struct _HG_Chunk {
      struct _HG_Chunk* next;
      Addr          data;           /* ptr to actual block */
      Int           size;           /* size requested */
      ExeContext*   where;          /* where it was allocated */
      ThreadId      tid;            /* allocating thread */
   }
   HG_Chunk;

typedef enum
   { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit, Vge_Error }
   VgeInitStatus;


/* Should add up to 32 to fit in one word */
#define OTHER_BITS 30
#define STATE_BITS 2

#define ESEC_MAP_WORDS 16384   /* Words per secondary map */

/* This is for indicating that a memory block has been initialised but not
 * really directly by a particular thread... (eg. text/data initialised
 * automatically at startup).
 * Must be different to virgin_sword.other */
#define TID_INDICATING_NONVIRGIN 1

/* Magic packed TLS used for error suppression; if word state is Excl
   and tid is this, then it means all accesses are OK without changing
   state and without raising any more errors */
#define TLSP_INDICATING_ALL    ((1 << OTHER_BITS) - 1)

/* Number of entries must fit in STATE_BITS bits */
typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;

static inline const Char *pp_state(pth_state st)
{
   const Char *ret;

   switch(st) {
   case Vge_Virgin:  ret = "virgin"; break;
   case Vge_Excl:    ret = "exclusive"; break;
   case Vge_Shar:    ret = "shared RO"; break;
   case Vge_SharMod: ret = "shared RW"; break;
   default:          ret = "???";
   }
   return ret;
}

typedef
   struct {
      /* gcc arranges this bitfield with state in the 2LSB and other
         in the 30MSB, which is what we want */
      UInt state:STATE_BITS;
      UInt other:OTHER_BITS;
   } shadow_word;

#define SW(st, other) ((shadow_word) { st, other })
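
/* Illustration only, assuming the bitfield layout described above: with
   Vge_Excl == 1, SW(Vge_Excl, 5) occupies one 32-bit word whose two
   least-significant bits hold the state and whose upper 30 bits hold
   'other', i.e. the word is (5 << STATE_BITS) | 1 == 0x15. */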

typedef
   struct {
      shadow_word swords[ESEC_MAP_WORDS];
   }
   ESecMap;

static ESecMap* primary_map[ 65536 ];
static ESecMap  distinguished_secondary_map;

static const shadow_word virgin_sword = SW(Vge_Virgin, 0);
static const shadow_word error_sword = SW(Vge_Excl, TLSP_INDICATING_ALL);

#define VGE_IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                  \
   do {                                                               \
      if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {       \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);     \
         /*VG_(printf)("new 2map because of %p\n", addr);*/           \
      }                                                               \
   } while(0)


/* Parallel map which contains execution contexts when words last
   changed state (if required) */

typedef struct EC_EIP {
   union u_ec_eip {
      Addr eip;
      ExeContext *ec;
   } uu_ec_eip;
   UInt state:STATE_BITS;
   UInt tls:OTHER_BITS;         /* packed TLS */
} EC_EIP;

#define NULL_EC_EIP ((EC_EIP){ { 0 }, 0, 0})

#define EIP(eip, prev, tls) ((EC_EIP) { (union u_ec_eip)(eip), (prev).state, packTLS(tls) })
#define EC(ec, prev, tls)   ((EC_EIP) { (union u_ec_eip)(ec),  (prev).state, packTLS(tls) })

static inline UInt packEC(ExeContext *ec)
{
   SK_ASSERT(((UInt)ec & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)ec) >> STATE_BITS;
}

static inline ExeContext *unpackEC(UInt i)
{
   return (ExeContext *)(i << STATE_BITS);
}

/* Lose 2 LSB of eip */
static inline UInt packEIP(Addr eip)
{
   return ((UInt)eip) >> STATE_BITS;
}

static inline Addr unpackEIP(UInt i)
{
   return (Addr)(i << STATE_BITS);
}
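
/* For illustration: packEIP really is lossy -- unpackEIP(packEIP(0x8048127))
   gives 0x8048124, i.e. the address rounded down to a multiple of 4.
   packEC (and packTLS below) round-trip exactly, because their SK_ASSERTs
   check the pointers are 4-byte aligned. */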

typedef struct {
   EC_EIP execontext[ESEC_MAP_WORDS];
} ExeContextMap;

static ExeContextMap** execontext_map;

static inline void setExeContext(Addr a, EC_EIP ec)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >>  2) & 0x3fff;

   if (execontext_map[idx] == NULL) {
      execontext_map[idx] = VG_(malloc)(sizeof(ExeContextMap));
      VG_(memset)(execontext_map[idx], 0, sizeof(ExeContextMap));
   }

   execontext_map[idx]->execontext[off] = ec;
}

static inline EC_EIP getExeContext(Addr a)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >>  2) & 0x3fff;
   EC_EIP ec = NULL_EC_EIP;

   if (execontext_map[idx] != NULL)
      ec = execontext_map[idx]->execontext[off];

   return ec;
}

/*------------------------------------------------------------*/
/*--- Thread lifetime segments                              ---*/
/*------------------------------------------------------------*/

/*
 * This mechanism deals with the common case of a parent thread
 * creating a structure for a child thread, and then passing ownership
 * of the structure to that thread.  It similarly copes with a child
 * thread passing information back to another thread waiting to join
 * on it.
 *
 * Each thread's lifetime can be partitioned into segments.  Those
 * segments are arranged to form an interference graph which indicates
 * whether two thread lifetime segments can possibly be concurrent.
 * If not, then memory which is exclusively accessed by one TLS can be
 * passed on to another TLS without an error occurring, and without
 * moving it from Excl state.
 *
 * At present this only considers thread creation and join as
 * synchronisation events for creating new lifetime segments, but
 * others may be possible (like mutex operations).
 */
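
/*
 * A sketch of the intended common case (illustrative only): parent T1
 * allocates a work item, so its words are Excl with T1's current TLS;
 * T1 then creates T2, which makes T1's TLS a prior of T2's new TLS.
 * When T2 later touches the memory, the old TLS is found to be a prior
 * of (hence not concurrent with) T2's TLS, so ownership passes to T2's
 * TLS, the words stay Excl, and no race is reported.
 */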

typedef struct _ThreadLifeSeg ThreadLifeSeg;

struct _ThreadLifeSeg {
   ThreadId      tid;
   ThreadLifeSeg *prior[2];     /* Previous lifetime segments */
   UInt          refcount;      /* Number of memory locations pointing here */
   UInt          mark;          /* mark used for graph traversal */
   ThreadLifeSeg *next;         /* list of all TLS */
};

static ThreadLifeSeg *all_tls;
static UInt tls_since_gc;
#define TLS_SINCE_GC 10000

/* current mark used for TLS graph traversal */
static UInt tlsmark;

static ThreadLifeSeg *thread_seg[VG_N_THREADS];


static void tls_gc(void)
{
   /* XXX later.  Walk through all TLSs and look for ones with 0
      refcount and remove them from the structure and free them.
      Could probably get rid of ThreadLifeSeg.refcount and simply use
      mark-sweep from the shadow table. */
   VG_(printf)("WRITEME: TLS GC\n");
}

static void newTLS(ThreadId tid)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls;

   /* Initial NULL */
   if (thread_seg[tid] == NULL) {
      tls = VG_(malloc)(sizeof(*tls));
      tls->tid = tid;
      tls->prior[0] = tls->prior[1] = NULL;
      tls->refcount = 0;
      tls->mark = tlsmark-1;

      tls->next = all_tls;
      all_tls = tls;
      tls_since_gc++;

      thread_seg[tid] = tls;
      return;
   }

   /* Previous TLS was unused, so just recycle */
   if (thread_seg[tid]->refcount == 0) {
      if (debug)
         VG_(printf)("newTLS; recycling TLS %p for tid %u\n",
                     thread_seg[tid], tid);
      return;
   }

   /* Use existing TLS for this tid as a prior for new TLS */
   tls = VG_(malloc)(sizeof(*tls));
   tls->tid = tid;
   tls->prior[0] = thread_seg[tid];
   tls->prior[1] = NULL;
   tls->refcount = 0;
   tls->mark = tlsmark-1;

   tls->next = all_tls;
   all_tls = tls;
   if (++tls_since_gc > TLS_SINCE_GC) {
      tls_gc();
      tls_since_gc = 0;
   }

   if (debug)
      VG_(printf)("newTLS: made new TLS %p for tid %u (prior %p(%u))\n",
                  tls, tid, tls->prior[0], tls->prior[0]->tid);

   thread_seg[tid] = tls;
}

/* clear out a TLS for a thread that's died */
static void clearTLS(ThreadId tid)
{
   newTLS(tid);

   thread_seg[tid]->prior[0] = NULL;
   thread_seg[tid]->prior[1] = NULL;
}

static void addPriorTLS(ThreadId tid, ThreadId prior)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls = thread_seg[tid];

   if (debug)
      VG_(printf)("making TLS %p(%u) prior to TLS %p(%u)\n",
                  thread_seg[prior], prior, tls, tid);

   sk_assert(thread_seg[tid] != NULL);
   sk_assert(thread_seg[prior] != NULL);

   if (tls->prior[0] == NULL)
      tls->prior[0] = thread_seg[prior];
   else {
      sk_assert(tls->prior[1] == NULL);
      tls->prior[1] = thread_seg[prior];
   }
}

/* Return True if prior is definitely not concurrent with tls */
static Bool tlsIsDisjoint(const ThreadLifeSeg *tls,
                          const ThreadLifeSeg *prior)
{
   Bool isPrior(const ThreadLifeSeg *t) {
      if (t == NULL || t->mark == tlsmark)
         return False;

      if (t == prior)
         return True;

      ((ThreadLifeSeg *)t)->mark = tlsmark;

      return isPrior(t->prior[0]) || isPrior(t->prior[1]);
   }
   tlsmark++;                   /* new traversal mark */

   return isPrior(tls);
}

static inline UInt packTLS(ThreadLifeSeg *tls)
{
   SK_ASSERT(((UInt)tls & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)tls) >> STATE_BITS;
}

static inline ThreadLifeSeg *unpackTLS(UInt i)
{
   return (ThreadLifeSeg *)(i << STATE_BITS);
}

/*------------------------------------------------------------*/
/*--- Low-level support for memory tracking.                ---*/
/*------------------------------------------------------------*/

/*
   All reads and writes are recorded in the memory map, which
   records the state of all memory in the process.  The memory map is
   organised like that for normal Valgrind, except that everything
   is done at word-level instead of byte-level, and each word has only
   one word of shadow (instead of 36 bits).

   As for normal Valgrind there is a distinguished secondary map.  But we're
   working at word-granularity, so it has 16k word entries instead of 64k byte
   entries.  Lookup is done as follows:

     bits 31..16:   primary map lookup
     bits 15.. 2:   secondary map lookup
     bits  1.. 0:   ignored
*/
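
/* Worked example of the split above (illustrative only): for address
   0xBFFF1234, the primary map index is 0xBFFF (bits 31..16) and the
   secondary index is (0x1234 & 0xFFFC) >> 2 == 0x48D, so the shadow word
   lives at primary_map[0xBFFF]->swords[0x48D]. */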


/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.         ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map, marking all words as virgin. */

/* Just a value that isn't a real pointer */
#define SEC_MAP_ACCESS (shadow_word*)0x99


static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt  i;
   //PROF_EVENT(10); PPP

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. (SSS: not true for ESecMaps -- they're 16 pages) */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = (ESecMap *)VG_(shadow_alloc)(sizeof(ESecMap));

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}


/* Set a word.  The byte given by 'a' could be anywhere in the word -- the whole
 * word gets set. */
static /* __inline__ */
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;
   shadow_word *oldsw;

   //PROF_EVENT(23); PPP
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   oldsw = &sm->swords[(a & 0xFFFC) >> 2];
   if (oldsw->state == Vge_Excl && oldsw->other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(oldsw->other);
      tls->refcount--;
   }

   if (sword.state == Vge_Excl && sword.other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(sword.other);
      tls->refcount++;
   }

   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}


static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt     sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}


// SSS: rename these so they're not so similar to memcheck, unless it's
// appropriate of course

static __inline__
void init_virgin_sword(Addr a)
{
   if (clo_execontext != EC_None)
      setExeContext(a, NULL_EC_EIP);
   set_sword(a, virgin_sword);
}

static __inline__
void init_error_sword(Addr a)
{
   set_sword(a, error_sword);
}

static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();
   ThreadLifeSeg *tls;

   sk_assert(tid != VG_INVALID_THREADID);
   tls = thread_seg[tid];

   sword = SW(Vge_Excl, packTLS(tls));
   set_sword(a, sword);
}


/* In this case, we treat it for Eraser's sake like virgin (it hasn't
 * been inited by a particular thread, it's just done automatically upon
 * startup), but we mark its .other field specially so it doesn't look like an
 * uninited read. */
static __inline__
void init_magically_inited_sword(Addr a)
{
   shadow_word sword;

   sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());

   sword = SW(Vge_Virgin, TID_INDICATING_NONVIRGIN);

   set_sword(a, sword);
}


/*------------------------------------------------------------*/
/*--- Implementation of lock sets.                          ---*/
/*------------------------------------------------------------*/

typedef struct _Mutex Mutex;    /* forward decl */
typedef struct _LockSet LockSet;

typedef enum MutexState {
   MxUnknown,                   /* don't know */
   MxUnlocked,                  /* unlocked */
   MxLocked,                    /* locked */
   MxDead                       /* destroyed */
} MutexState;

struct _Mutex {
   Addr         mutexp;
   Mutex       *next;

   MutexState   state;          /* mutex state */
   ThreadId     tid;            /* owner */
   ExeContext  *location;       /* where the last change happened */

   const LockSet *lockdep;      /* set of locks we depend on */
   UInt         mark;           /* mark for graph traversal */
};

static inline Int mutex_cmp(const Mutex *a, const Mutex *b)
{
   return a->mutexp - b->mutexp;
}

struct _LockSet {
   Int          setsize;        /* number of members */
   UInt         hash;           /* hash code */
   LockSet     *next;           /* next in hash chain */
   const Mutex *mutex[0];       /* locks */
};

static const LockSet *emptyset;

/* Each one is the lockset currently held by that thread. */
static const LockSet *thread_locks[VG_N_THREADS];

#define LOCKSET_HASH_SZ 1021

static LockSet *lockset_hash[LOCKSET_HASH_SZ];

/* Pack and unpack a LockSet pointer into shadow_word.other */
static inline UInt packLockSet(const LockSet *p)
{
   UInt id;

   SK_ASSERT(((UInt)p & ((1 << STATE_BITS)-1)) == 0);
   id = ((UInt)p) >> STATE_BITS;

   return id;
}

static inline const LockSet *unpackLockSet(UInt id)
{
   return (LockSet *)(id << STATE_BITS);
}

static
void pp_LockSet(const LockSet* p)
{
   Int i;
   VG_(printf)("{ ");
   for(i = 0; i < p->setsize; i++) {
      const Mutex *mx = p->mutex[i];

      VG_(printf)("%p%(y ", mx->mutexp, mx->mutexp);
   }
   VG_(printf)("}\n");
}


static void print_LockSet(const Char *s, const LockSet *ls)
{
   VG_(printf)("%s: ", s);
   pp_LockSet(ls);
}

/* Compute the hash of a LockSet */
static UInt hash_LockSet_w_wo(const LockSet *ls,
                              const Mutex *with,
                              const Mutex *without)
{
   Int i;
   UInt hash = ls->setsize + (with != NULL) - (without != NULL);

   sk_assert(with == NULL || with != without);

   for(i = 0; with != NULL || i < ls->setsize; i++) {
      const Mutex *mx = i >= ls->setsize ? NULL : ls->mutex[i];

      if (without && mutex_cmp(without, mx) == 0)
         continue;

      if (with && (mx == NULL || mutex_cmp(with, mx) < 0)) {
         mx = with;
         with = NULL;
         i--;
      }

      hash = ROTL(hash, 17);
      hash ^= (UInt)mx->mutexp;
   }

   return hash % LOCKSET_HASH_SZ;
}

static inline UInt hash_LockSet_with(const LockSet *ls, const Mutex *with)
{
   UInt hash = hash_LockSet_w_wo(ls, with, NULL);

   if (0)
      VG_(printf)("hash_with %p+%p -> %d\n", ls, with->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet_without(const LockSet *ls, const Mutex *without)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, without);

   if (0)
      VG_(printf)("hash_with %p-%p -> %d\n", ls, without->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet(const LockSet *ls)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, NULL);

   if (0)
      VG_(printf)("hash %p -> %d\n", ls, hash);

   return hash;
}

static
Bool structural_eq_LockSet(const LockSet* a, const LockSet* b)
{
   Int i;

   if (a == b)
      return True;
   if (a->setsize != b->setsize)
      return False;

   for(i = 0; i < a->setsize; i++) {
      if (mutex_cmp(a->mutex[i], b->mutex[i]) != 0)
         return False;
   }

   return True;
}


/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion.  Returns True if they match.
 */
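
/* Example (illustration only): with mutexes m1 < m2 < m3 < m4 by address,
   weird_LockSet_equals({m1,m3}, {m1,m2,m3}, m2) is True, while
   weird_LockSet_equals({m1,m3}, {m1,m2,m4}, m2) is False. */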
static Bool
weird_LockSet_equals(const LockSet* a, const LockSet* b,
                     const Mutex *missing_mutex)
{
   static const Bool debug = False;
   Int ia, ib;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet("                     b", b);
      VG_(printf)( "               missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
   }

   if ((a->setsize + 1) != b->setsize) {
      if (debug)
         VG_(printf)("   fastpath length mismatch -> 0\n");
      return False;
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing mutex itself
      3 the section after missing_mutex to the end of a
    */

   ia = 0;
   ib = 0;

   /* 1: up to missing_mutex */
   for(; ia < a->setsize && mutex_cmp(a->mutex[ia], missing_mutex) < 0; ia++, ib++) {
      if (debug) {
         print_LockSet("  1:a", a);
         print_LockSet("  1:b", b);
      }
      if (ib == b->setsize || mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   /* 2: missing_mutex itself */
   if (debug) {
      VG_(printf)( "  2:missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet("  2:      b", b);
   }

   sk_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);

   if (ib == b->setsize || mutex_cmp(missing_mutex, b->mutex[ib]) != 0)
      return False;

   ib++;

   /* 3: after missing_mutex to end */

   for(; ia < a->setsize && ib < b->setsize; ia++, ib++) {
      if (debug) {
         print_LockSet("  3:a", a);
         print_LockSet("  3:b", b);
      }
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   if (debug)
      VG_(printf)("  ia=%d ib=%d --> %d\n", ia, ib, ia == a->setsize && ib == b->setsize);

   return ia == a->setsize && ib == b->setsize;
}



static const LockSet *lookup_LockSet(const LockSet *set)
{
   UInt bucket = set->hash;
   LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (set == ret || structural_eq_LockSet(set, ret))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_with(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_with(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(set, ret, mutex))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_without(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_without(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(ret, set, mutex))
         return ret;

   return NULL;
}

static void insert_LockSet(LockSet *set)
{
   UInt hash = hash_LockSet(set);

   set->hash = hash;

   sk_assert(lookup_LockSet(set) == NULL);

   set->next = lockset_hash[hash];
   lockset_hash[hash] = set;
}

static inline
LockSet *alloc_LockSet(UInt setsize)
{
   LockSet *ret = VG_(malloc)(sizeof(*ret) + sizeof(Mutex *) * setsize);
   ret->setsize = setsize;
   return ret;
}

static inline
void free_LockSet(LockSet *p)
{
   /* assert: not present in hash */
   VG_(free)(p);
}

static
void pp_all_LockSets ( void )
{
   Int i;
   Int sets, buckets;

   sets = buckets = 0;
   for (i = 0; i < LOCKSET_HASH_SZ; i++) {
      const LockSet *ls = lockset_hash[i];
      Bool first = True;

      for(; ls != NULL; ls = ls->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)("         ");

         sets++;
         first = False;
         pp_LockSet(ls);
      }
   }

   VG_(printf)("%d distinct LockSets in %d buckets\n", sets, buckets);
}

static inline Bool isempty(const LockSet *ls)
{
   return ls == NULL || ls->setsize == 0;
}

static Bool ismember(const LockSet *ls, const Mutex *mx)
{
   Int i;

   /* XXX use binary search */
   for(i = 0; i < ls->setsize; i++)
      if (mutex_cmp(mx, ls->mutex[i]) == 0)
         return True;

   return False;
}

/* Check invariants:
   - all locksets are unique
   - each set is an array in strictly increasing order of mutex addr
*/
static
void sanity_check_locksets ( const Char* caller )
{
   Int i;
   const Char *badness;
   LockSet *ls;

   for(i = 0; i < LOCKSET_HASH_SZ; i++) {

      for(ls = lockset_hash[i]; ls != NULL; ls = ls->next) {
         const Mutex *prev;
         Int j;

         if (hash_LockSet(ls) != ls->hash) {
            badness = "mismatched hash";
            goto bad;
         }
         if (ls->hash != (UInt)i) {
            badness = "wrong bucket";
            goto bad;
         }
         if (lookup_LockSet(ls) != ls) {
            badness = "non-unique set";
            goto bad;
         }

         prev = ls->mutex[0];
         for(j = 1; j < ls->setsize; j++) {
            if (mutex_cmp(prev, ls->mutex[j]) >= 0) {
               badness = "mutexes out of order";
               goto bad;
            }
         }
      }
   }
   return;

 bad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, ls=%p badness = %s, caller = %s\n",
               i, ls, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}

static
LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("add-IN mutex %p%(y\n", mx->mutexp, mx->mutexp);
      print_LockSet("add-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("add-IN");

   sk_assert(!ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize+1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (debug)
         VG_(printf)("i=%d j=%d ls->mutex[i]=%p mx=%p\n",
                     i, j, ls->mutex[i]->mutexp, mx ? mx->mutexp : 0);
      if (mx && mutex_cmp(mx, ls->mutex[i]) < 0) {
         ret->mutex[j++] = mx;
         mx = NULL;
      }
      ret->mutex[j++] = ls->mutex[i];
   }

   /* not added in loop - must be after */
   if (mx)
      ret->mutex[j++] = mx;

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("add-OUT", ret);
      sanity_check_locksets("add-OUT");
   }
   return ret;
}

/* Builds ls with mx removed.  mx should actually be in ls!
   (a checked assertion).  Resulting set should not already
   exist in the table (unchecked).
*/
static
LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("remove-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("remove-IN");

   sk_assert(ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize-1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (mutex_cmp(ls->mutex[i], mx) == 0)
         continue;
      ret->mutex[j++] = ls->mutex[i];
   }

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("remove-OUT", ret);
      sanity_check_locksets("remove-OUT");
   }
   return ret;
}


/* Builds the intersection, and then unbuilds it if it's already in the table.
 */
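
/* For example (illustration only): intersect({m1,m2,m3}, {m2,m3,m4})
   yields the interned set {m2,m3}; if that set has been seen before,
   the freshly built copy is freed and the existing one is returned. */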
static const LockSet *_intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("intersect-IN");

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("intersect a", a);
      print_LockSet("intersect b", b);
   }

   /* count the size of the new set */
   size = 0;
   ia = ib = 0;
   for(size = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         size++;
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   /* Build the intersection of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         sk_assert(iret < ret->setsize);
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      free_LockSet(ret);
   } else {
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("intersect-OUT", found);
      sanity_check_locksets("intersect-OUT");
   }

   return found;
}

/* inline the fastpath */
static inline const LockSet *intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("intersect-same fastpath", a);
      }
      return a;
   }

   if (isempty(a) || isempty(b)) {
      if (debug)
         VG_(printf)("intersect empty fastpath\n");
      return emptyset;
   }

   return _intersect(a, b);
}


static const LockSet *ls_union(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("union-IN");

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("union-same fastpath", a);
      }
      return a;
   }

   if (isempty(a)) {
      if (debug)
         print_LockSet("union a=empty b", b);
      return b;
   }
   if (isempty(b)) {
      if (debug)
         print_LockSet("union b=empty a", a);
      return a;
   }

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* count the size of the new set */
   for(size = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         size++;
         ia++;
         ib++;
      } else if (cmp < 0) {
         size++;
         ia++;
      } else {
         sk_assert(cmp > 0);
         size++;
         ib++;
      }
   }

   /* Build the union of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;
      sk_assert(iret < ret->setsize);

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (cmp < 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
      } else {
         sk_assert(cmp > 0);
         ret->mutex[iret++] = b->mutex[ib];
         ib++;
      }
   }

   sk_assert(iret == ret->setsize);

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      if (debug)
         print_LockSet("union found existing set", found);
      free_LockSet(ret);
   } else {
      if (debug)
         print_LockSet("union inserting new set", ret);
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("union-OUT", found);
      sanity_check_locksets("union-OUT");
   }

   return found;
}

/*------------------------------------------------------------*/
/*--- Implementation of mutex structure.                    ---*/
/*------------------------------------------------------------*/

static UInt graph_mark;         /* current mark we're using for graph traversal */

static void record_mutex_error(ThreadId tid, Mutex *mutex,
                               Char *str, ExeContext *ec);
static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
                                   const LockSet *lockset_holding,
                                   const LockSet *lockset_prev);

static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid);

#define M_MUTEX_HASHSZ 1021

static Mutex *mutex_hash[M_MUTEX_HASHSZ];
static UInt total_mutexes;

static const Char *pp_MutexState(MutexState st)
{
   switch(st) {
   case MxLocked:   return "Locked";
   case MxUnlocked: return "Unlocked";
   case MxDead:     return "Dead";
   case MxUnknown:  return "Unknown";
   }
   return "???";
}

static void pp_all_mutexes()
{
   Int i;
   Int locks, buckets;

   locks = buckets = 0;
   for(i = 0; i < M_MUTEX_HASHSZ; i++) {
      Mutex *mx;
      Bool first = True;

      for(mx = mutex_hash[i]; mx != NULL; mx = mx->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)("         ");
         locks++;
         first = False;
         VG_(printf)("%p [%8s] -> %p%(y\n",
                     mx, pp_MutexState(mx->state), mx->mutexp, mx->mutexp);
      }
   }

   VG_(printf)("%d locks in %d buckets (%d allocated)\n",
               locks, buckets, total_mutexes);
}

/* find or create a Mutex for a program's mutex use */
static Mutex *get_mutex(Addr mutexp)
{
   UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
   Mutex *mp;

   for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
      if (mp->mutexp == mutexp)
         return mp;

   total_mutexes++;

   mp = VG_(malloc)(sizeof(*mp));
   mp->mutexp = mutexp;
   mp->next = mutex_hash[bucket];
   mutex_hash[bucket] = mp;

   mp->state = MxUnknown;
   mp->tid = VG_INVALID_THREADID;
   mp->location = NULL;

   mp->lockdep = emptyset;
   mp->mark = graph_mark - 1;

   return mp;
}

/* Find all mutexes in a range of memory, and call the callback.
   Remove the mutex from the hash if the callback returns True (mutex
   structure itself is not freed, because it may be pointed to by a
   LockSet). */
static void find_mutex_range(Addr start, Addr end, Bool (*action)(Mutex *))
{
   UInt first = start % M_MUTEX_HASHSZ;
   UInt last = (end+1) % M_MUTEX_HASHSZ;
   UInt i;

   /* Single pass over the hash table, looking for likely hashes */
   for(i = first; i != last; ) {
      Mutex *mx;
      Mutex **prev = &mutex_hash[i];

      for(mx = mutex_hash[i]; mx != NULL; prev = &mx->next, mx = mx->next) {
         if (mx->mutexp >= start && mx->mutexp < end && (*action)(mx))
            *prev = mx->next;
      }

      if (++i == M_MUTEX_HASHSZ)
         i = 0;
   }
}

#define MARK_LOOP  (graph_mark+0)
#define MARK_DONE  (graph_mark+1)

static Bool check_cycle_inner(const Mutex *mutex, const LockSet *ls)
{
   static const Bool debug = False;
   Int i;

   if (mutex->mark == MARK_LOOP)
      return True;              /* found cycle */
   if (mutex->mark == MARK_DONE)
      return False;             /* been here before, its OK */

   ((Mutex*)mutex)->mark = MARK_LOOP;

   if (debug)
      VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
                  graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
   for(i = 0; i < ls->setsize; i++) {
      const Mutex *mx = ls->mutex[i];

      if (debug)
         VG_(printf)("   %y ls=%p (ls->mutex=%p%(y)\n",
                     mutex->mutexp, ls,
                     mx->mutexp, mx->mutexp);
      if (check_cycle_inner(mx, mx->lockdep))
         return True;
   }
   ((Mutex*)mutex)->mark = MARK_DONE;

   return False;
}

static Bool check_cycle(const Mutex *start, const LockSet* lockset)
{

   graph_mark += 2;             /* clear all marks */

   return check_cycle_inner(start, lockset);
}
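
/* Sketch of how a lock-order violation shows up here (illustrative):
   if thread T1 does lock(A); lock(B), then locking B unions {A} into
   B->lockdep.  If T2 later holds B and tries to lock A,
   check_cycle(A, {B}) walks B's lockdep, reaches A again and returns
   True, which test_mutex_state() below reports as a lock-order error. */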

/* test to see if a mutex state change would be problematic; this
   makes no changes to the mutex state.  This should be called before
   the locking thread has actually blocked. */
static void test_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
   static const Bool debug = False;

   if (mutex->state == MxDead) {
      Char *str;

      switch(state) {
      case MxLocked:   str = "lock dead mutex"; break;
      case MxUnlocked: str = "unlock dead mutex"; break;
      default:         str = "operate on dead mutex"; break;
      }

      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex, str, mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (check_cycle(mutex, thread_locks[tid]))
         record_lockgraph_error(tid, mutex, thread_locks[tid], mutex->lockdep);
      else {
         mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

         if (debug) {
            VG_(printf)("giving mutex %p%(y lockdep = %p ",
                        mutex->mutexp, mutex->mutexp, mutex->lockdep);
            print_LockSet("lockdep", mutex->lockdep);
         }
      }
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked) {
         record_mutex_error(tid, mutex,
                            "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
         record_mutex_error(tid, mutex,
                            "unlock someone else's mutex", mutex->location);
      }
      break;

   case MxDead:
      break;

   default:
      break;
   }
}

/* Update a mutex state.  Expects most error testing and reporting to
   have happened in test_mutex_state().  The assumption is that no
   client code is run by thread tid between test and set, either
   because it is blocked or test and set are called together
   atomically.

   Setting state to MxDead is the exception, since that can happen as
   a result of any thread freeing memory; in this case set_mutex_state
   does all the error reporting as well.
*/
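
/* A typical sequence (sketch; assuming the core's mutex-lock wrappers
   drive these calls): test_mutex_state(mx, MxLocked, tid) runs before
   the thread may block on the lock, and set_mutex_state(mx, MxLocked,
   tid) runs once the lock is actually held, so no client code gets to
   run in between. */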
static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
   static const Bool debug = False;

   if (debug)
      VG_(printf)("\ntid %d changing mutex (%p)->%p%(y state %s -> %s\n",
                  tid, mutex, mutex->mutexp, mutex->mutexp,
                  pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      return;
   }

   switch(state) {
   case MxLocked:
      if (mutex->state == MxLocked) {
         if (mutex->tid != tid)
            record_mutex_error(tid, mutex, "take lock held by someone else",
                               mutex->location);
         else
            record_mutex_error(tid, mutex, "take lock we already hold",
                               mutex->location);

         VG_(skin_panic)("core should have checked this\n");
         break;
      }

      sk_assert(!check_cycle(mutex, mutex->lockdep));

      mutex->tid = tid;
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked || mutex->tid != tid)
         break;

      mutex->tid = VG_INVALID_THREADID;
      break;

   case MxDead:
      if (mutex->state == MxLocked) {
         /* forcibly remove offending lock from thread's lockset */
         sk_assert(ismember(thread_locks[mutex->tid], mutex));
         thread_locks[mutex->tid] = remove_LockSet(thread_locks[mutex->tid], mutex);
         mutex->tid = VG_INVALID_THREADID;

         record_mutex_error(tid, mutex,
                            "free locked mutex", mutex->location);
      }
      break;

   default:
      break;
   }

   mutex->location = VG_(get_ExeContext)(tid);
   mutex->state = state;
}

/*------------------------------------------------------------*/
/*--- Setting and checking permissions.                     ---*/
/*------------------------------------------------------------*/

/* only clean up dead mutexes */
static
Bool cleanmx(Mutex *mx) {
   return mx->state == MxDead;
}


static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr end;

#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range state: large range %d",
                   len);

   VGP_PUSHCC(VgpSARP);

   /* Remove mutexes in recycled memory range from hash */
   find_mutex_range(a, a+len, cleanmx);

   /* Memory block may not be aligned or a whole word multiple.  In neat cases,
    * we have to init len/4 words (len is in bytes).  In nasty cases, it's
    * len/4+1 words.  This works out which it is by aligning the block and
    * seeing if the end byte is in the same word as it is for the unaligned
    * block; if not, it's the awkward case. */
   end = ROUNDUP(a + len, 4);
   a   = ROUNDDN(a, 4);
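   /* Worked example of the rounding (illustration only): a = 0x8049F02,
      len = 5 covers bytes up to 0x8049F06, so a becomes 0x8049F00 and end
      becomes 0x8049F08, and the loop below initialises the two words at
      0x8049F00 and 0x8049F04. */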
njn25e49d8e72002-09-23 09:36:25 +00001622
1623 /* Do it ... */
1624 switch (status) {
1625 case Vge_VirginInit:
1626 for ( ; a < end; a += 4) {
1627 //PROF_EVENT(31); PPP
1628 init_virgin_sword(a);
1629 }
1630 break;
1631
1632 case Vge_NonVirginInit:
1633 for ( ; a < end; a += 4) {
1634 //PROF_EVENT(31); PPP
1635 init_nonvirgin_sword(a);
1636 }
1637 break;
1638
1639 case Vge_SegmentInit:
1640 for ( ; a < end; a += 4) {
1641 //PROF_EVENT(31); PPP
1642 init_magically_inited_sword(a);
1643 }
1644 break;
sewardj7f3ad222002-11-13 22:11:53 +00001645
1646 case Vge_Error:
1647 for ( ; a < end; a += 4) {
1648 //PROF_EVENT(31); PPP
1649 init_error_sword(a);
1650 }
1651 break;
njn25e49d8e72002-09-23 09:36:25 +00001652
1653 default:
1654 VG_(printf)("init_status = %u\n", status);
njne427a662002-10-02 11:08:25 +00001655 VG_(skin_panic)("Unexpected Vge_InitStatus");
njn25e49d8e72002-09-23 09:36:25 +00001656 }
1657
1658 /* Check that zero page and highest page have not been written to
1659 -- this could happen with buggy syscall wrappers. Today
1660 (2001-04-26) had precisely such a problem with
1661 __NR_setitimer. */
njne427a662002-10-02 11:08:25 +00001662 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +00001663 VGP_POPCC(VgpSARP);
1664}
1665
1666
1667static void make_segment_readable ( Addr a, UInt len )
1668{
1669 //PROF_EVENT(??); PPP
1670 set_address_range_state ( a, len, Vge_SegmentInit );
1671}
1672
1673static void make_writable ( Addr a, UInt len )
1674{
1675 //PROF_EVENT(36); PPP
1676 set_address_range_state( a, len, Vge_VirginInit );
1677}
1678
1679static void make_readable ( Addr a, UInt len )
1680{
1681 //PROF_EVENT(37); PPP
sewardj499e3de2002-11-13 22:22:25 +00001682 set_address_range_state( a, len, Vge_VirginInit );
njn25e49d8e72002-09-23 09:36:25 +00001683}
1684
1685
njn25e49d8e72002-09-23 09:36:25 +00001686/* Block-copy states (needed for implementing realloc()). */
1687static void copy_address_range_state(Addr src, Addr dst, UInt len)
1688{
1689 UInt i;
1690
1691 //PROF_EVENT(40); PPP
1692 for (i = 0; i < len; i += 4) {
1693 shadow_word sword = *(get_sword_addr ( src+i ));
1694 //PROF_EVENT(41); PPP
1695 set_sword ( dst+i, sword );
1696 }
1697}
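/* Note: only shadow words are copied here; SK_(realloc) below copies the
   user data itself.  This keeps each word's Eraser state (exclusive owner
   or lockset) attached to the data as it moves to the new block. */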
1698
1699// SSS: put these somewhere better
njn72718642003-07-24 08:45:32 +00001700static void eraser_mem_read (Addr a, UInt data_size, ThreadId tid);
1701static void eraser_mem_write(Addr a, UInt data_size, ThreadId tid);
sewardja5b3aec2002-10-22 05:09:36 +00001702
1703#define REGPARM(x) __attribute__((regparm (x)))
1704
1705static void eraser_mem_help_read_1(Addr a) REGPARM(1);
1706static void eraser_mem_help_read_2(Addr a) REGPARM(1);
1707static void eraser_mem_help_read_4(Addr a) REGPARM(1);
1708static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);
1709
1710static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
1711static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
1712static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
1713static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);
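/* The fixed-size helpers take the address in a register (regparm), which
   presumably keeps the CCALLs emitted by SK_(instrument) below cheap; the
   _N variants cover the odd FPU/MMX/SSE transfer sizes and take the size
   as a second argument. */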
njn25e49d8e72002-09-23 09:36:25 +00001714
sewardj7a5ebcf2002-11-13 22:42:13 +00001715static void bus_lock(void);
1716static void bus_unlock(void);
1717
njn25e49d8e72002-09-23 09:36:25 +00001718static
njn72718642003-07-24 08:45:32 +00001719void eraser_pre_mem_read(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001720 Char* s, UInt base, UInt size )
1721{
njn72718642003-07-24 08:45:32 +00001722   if (tid > 50) { VG_(printf)("tid = %d, s = `%s`, part = %d\n", tid, s, part); VG_(skin_panic)("a");}
1723 eraser_mem_read(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001724}
1725
1726static
njn72718642003-07-24 08:45:32 +00001727void eraser_pre_mem_read_asciiz(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001728 Char* s, UInt base )
1729{
njn72718642003-07-24 08:45:32 +00001730 eraser_mem_read(base, VG_(strlen)((Char*)base), tid);
njn25e49d8e72002-09-23 09:36:25 +00001731}
1732
1733static
njn72718642003-07-24 08:45:32 +00001734void eraser_pre_mem_write(CorePart part, ThreadId tid,
njn25e49d8e72002-09-23 09:36:25 +00001735 Char* s, UInt base, UInt size )
1736{
njn72718642003-07-24 08:45:32 +00001737 eraser_mem_write(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001738}
1739
1740
1741
1742static
1743void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
1744{
njn1f3a9092002-10-04 09:22:30 +00001745 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +00001746 make_segment_readable(a, len);
1747}
1748
1749
1750static
1751void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
1752{
1753 if (is_inited) {
1754 make_readable(a, len);
1755 } else {
1756 make_writable(a, len);
1757 }
1758}
1759
1760static
1761void eraser_set_perms (Addr a, UInt len,
sewardj40f8ebe2002-10-23 21:46:13 +00001762 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +00001763{
1764 if (rr) make_readable(a, len);
1765 else if (ww) make_writable(a, len);
1766 /* else do nothing */
1767}
1768
sewardjf6374322002-11-13 22:35:55 +00001769static
1770void eraser_new_mem_stack_private(Addr a, UInt len)
1771{
1772 set_address_range_state(a, len, Vge_NonVirginInit);
1773}
1774
1775static
1776void eraser_new_mem_stack(Addr a, UInt len)
1777{
1778 set_address_range_state(a, len, Vge_VirginInit);
1779}
njn25e49d8e72002-09-23 09:36:25 +00001780
1781/*--------------------------------------------------------------*/
1782/*--- Initialise the memory audit system on program startup. ---*/
1783/*--------------------------------------------------------------*/
1784
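/* Sketch of the shadow-memory layout set up below: primary_map has 64K
   entries (presumably indexed by the top 16 bits of an address), each
   pointing at a secondary map of ESEC_MAP_WORDS shadow words.  Initially
   every entry aliases the one distinguished secondary, whose words are all
   virgin_sword; real secondaries take their place as the address space is
   actually used. */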
1785static
1786void init_shadow_memory(void)
1787{
1788 Int i;
1789
1790 for (i = 0; i < ESEC_MAP_WORDS; i++)
1791 distinguished_secondary_map.swords[i] = virgin_sword;
1792
1793 /* These entries gradually get overwritten as the used address
1794 space expands. */
1795 for (i = 0; i < 65536; i++)
1796 primary_map[i] = &distinguished_secondary_map;
1797}
1798
1799
njn3e884182003-04-15 13:03:23 +00001800/*------------------------------------------------------------*/
1801/*--- malloc() et al replacements ---*/
1802/*------------------------------------------------------------*/
1803
njnb4aee052003-04-15 14:09:58 +00001804static VgHashTable hg_malloc_list = NULL;
njn3e884182003-04-15 13:03:23 +00001805
1806#define N_FREED_CHUNKS 2
1807static Int freechunkptr = 0;
1808static HG_Chunk *freechunks[N_FREED_CHUNKS];
1809
1810/* Use a small redzone (paranoia) */
nethercotee1efb922004-07-10 16:01:52 +00001811UInt VG_(vg_malloc_redzone_szB) = 8;
njn3e884182003-04-15 13:03:23 +00001812
1813
1814/* Allocate a user-chunk of size bytes. Also allocate its shadow
1815 block, make the shadow block point at the user block. Put the
1816 shadow chunk on the appropriate list, and set all memory
1817 protections correctly. */
1818
njn72718642003-07-24 08:45:32 +00001819static void add_HG_Chunk ( ThreadId tid, Addr p, UInt size )
njn3e884182003-04-15 13:03:23 +00001820{
1821 HG_Chunk* hc;
1822
1823 hc = VG_(malloc)(sizeof(HG_Chunk));
1824 hc->data = p;
1825 hc->size = size;
njn72718642003-07-24 08:45:32 +00001826 hc->where = VG_(get_ExeContext)(tid);
1827 hc->tid = tid;
njn3e884182003-04-15 13:03:23 +00001828
1829 VG_(HT_add_node)( hg_malloc_list, (VgHashNode*)hc );
1830}
1831
1832/* Allocate memory and note change in memory available */
1833static __inline__
njn34ac0272003-09-30 14:20:00 +00001834void* alloc_and_new_mem ( Int size, UInt alignment, Bool is_zeroed )
njn3e884182003-04-15 13:03:23 +00001835{
1836 Addr p;
1837
njn34ac0272003-09-30 14:20:00 +00001838 if (size < 0) return NULL;
1839
njn3e884182003-04-15 13:03:23 +00001840 p = (Addr)VG_(cli_malloc)(alignment, size);
nethercote57e36b32004-07-10 14:56:28 +00001841 if (!p) {
1842 return NULL;
1843 }
njn34ac0272003-09-30 14:20:00 +00001844 if (is_zeroed) VG_(memset)((void*)p, 0, size);
njn72718642003-07-24 08:45:32 +00001845 add_HG_Chunk ( VG_(get_current_or_recent_tid)(), p, size );
njn3e884182003-04-15 13:03:23 +00001846 eraser_new_mem_heap( p, size, is_zeroed );
1847
1848 return (void*)p;
1849}
1850
njn72718642003-07-24 08:45:32 +00001851void* SK_(malloc) ( Int n )
njn3e884182003-04-15 13:03:23 +00001852{
njn72718642003-07-24 08:45:32 +00001853 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001854}
1855
njn72718642003-07-24 08:45:32 +00001856void* SK_(__builtin_new) ( Int n )
njn3e884182003-04-15 13:03:23 +00001857{
njn72718642003-07-24 08:45:32 +00001858 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001859}
1860
njn72718642003-07-24 08:45:32 +00001861void* SK_(__builtin_vec_new) ( Int n )
njn3e884182003-04-15 13:03:23 +00001862{
njn72718642003-07-24 08:45:32 +00001863 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001864}
1865
njn72718642003-07-24 08:45:32 +00001866void* SK_(memalign) ( Int align, Int n )
njn3e884182003-04-15 13:03:23 +00001867{
njn72718642003-07-24 08:45:32 +00001868 return alloc_and_new_mem ( n, align, /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001869}
1870
njn34ac0272003-09-30 14:20:00 +00001871void* SK_(calloc) ( Int nmemb, Int size )
njn3e884182003-04-15 13:03:23 +00001872{
njn34ac0272003-09-30 14:20:00 +00001873 return alloc_and_new_mem ( nmemb*size, VG_(clo_alignment),
1874 /*is_zeroed*/True );
njn3e884182003-04-15 13:03:23 +00001875}
1876
thughes4ad52d02004-06-27 17:37:21 +00001877static ThreadId deadmx_tid;
1878
1879static
1880Bool deadmx(Mutex *mx) {
1881 if (mx->state != MxDead)
1882 set_mutex_state(mx, MxDead, deadmx_tid);
1883
1884 return False;
1885}
1886
njn3e884182003-04-15 13:03:23 +00001887static
njn72718642003-07-24 08:45:32 +00001888void die_and_free_mem ( ThreadId tid, HG_Chunk* hc,
njn3e884182003-04-15 13:03:23 +00001889 HG_Chunk** prev_chunks_next_ptr )
1890{
njn72718642003-07-24 08:45:32 +00001891 Addr start = hc->data;
1892 Addr end = start + hc->size;
njn3e884182003-04-15 13:03:23 +00001893
njn3e884182003-04-15 13:03:23 +00001894 /* Remove hc from the malloclist using prev_chunks_next_ptr to
1895 avoid repeating the hash table lookup. Can't remove until at least
1896 after free and free_mismatch errors are done because they use
1897 describe_addr() which looks for it in malloclist. */
1898 *prev_chunks_next_ptr = hc->next;
1899
1900 /* Record where freed */
njn72718642003-07-24 08:45:32 +00001901 hc->where = VG_(get_ExeContext) ( tid );
njn3e884182003-04-15 13:03:23 +00001902
1903 /* maintain a small window so that the error reporting machinery
1904 knows about this memory */
1905 if (freechunks[freechunkptr] != NULL) {
1906 /* free HG_Chunk */
1907 HG_Chunk* sc1 = freechunks[freechunkptr];
1908 VG_(cli_free) ( (void*)(sc1->data) );
1909 VG_(free) ( sc1 );
1910 }
1911
1912 freechunks[freechunkptr] = hc;
1913
1914 if (++freechunkptr == N_FREED_CHUNKS)
1915 freechunkptr = 0;
1916
1917 /* mark all mutexes in range dead */
thughes4ad52d02004-06-27 17:37:21 +00001918 deadmx_tid = tid;
njn3e884182003-04-15 13:03:23 +00001919 find_mutex_range(start, end, deadmx);
1920}
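/* Illustrative effect of the window above: with N_FREED_CHUNKS == 2 the two
   most recently freed blocks keep both their client memory and their
   HG_Chunk, so describe_addr() can still report an access as "freed"; only
   when a third block is freed is the oldest one really released. */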
1921
1922
1923static __inline__
njn72718642003-07-24 08:45:32 +00001924void handle_free ( void* p )
njn3e884182003-04-15 13:03:23 +00001925{
1926 HG_Chunk* hc;
1927 HG_Chunk** prev_chunks_next_ptr;
1928
1929 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1930 (VgHashNode***)&prev_chunks_next_ptr );
1931 if (hc == NULL) {
1932 return;
1933 }
njn72718642003-07-24 08:45:32 +00001934 die_and_free_mem ( VG_(get_current_or_recent_tid)(),
1935 hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001936}
1937
njn72718642003-07-24 08:45:32 +00001938void SK_(free) ( void* p )
njn3e884182003-04-15 13:03:23 +00001939{
njn72718642003-07-24 08:45:32 +00001940 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001941}
1942
njn72718642003-07-24 08:45:32 +00001943void SK_(__builtin_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001944{
njn72718642003-07-24 08:45:32 +00001945 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001946}
1947
njn72718642003-07-24 08:45:32 +00001948void SK_(__builtin_vec_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001949{
njn72718642003-07-24 08:45:32 +00001950 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001951}
1952
njn72718642003-07-24 08:45:32 +00001953void* SK_(realloc) ( void* p, Int new_size )
njn3e884182003-04-15 13:03:23 +00001954{
1955 HG_Chunk *hc;
1956 HG_Chunk **prev_chunks_next_ptr;
sewardj05bcdcb2003-05-18 10:05:38 +00001957 Int i;
njn72718642003-07-24 08:45:32 +00001958 ThreadId tid = VG_(get_current_or_recent_tid)();
njn3e884182003-04-15 13:03:23 +00001959
1960 /* First try and find the block. */
1961 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UInt)p,
1962 (VgHashNode***)&prev_chunks_next_ptr );
1963
1964 if (hc == NULL) {
1965 return NULL;
1966 }
1967
1968 if (hc->size == new_size) {
1969 /* size unchanged */
njn398044f2003-07-24 17:39:59 +00001970 hc->where = VG_(get_ExeContext)(tid);
njn3e884182003-04-15 13:03:23 +00001971 return p;
1972
1973 } else if (hc->size > new_size) {
1974 /* new size is smaller */
1975 hc->size = new_size;
njn398044f2003-07-24 17:39:59 +00001976 hc->where = VG_(get_ExeContext)(tid);
njn3e884182003-04-15 13:03:23 +00001977 return p;
1978
1979 } else {
1980 /* new size is bigger */
1981 Addr p_new;
1982
1983 /* Get new memory */
1984 p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
1985
1986 /* First half kept and copied, second half new */
1987 copy_address_range_state( (Addr)p, p_new, hc->size );
1988 eraser_new_mem_heap ( p_new+hc->size, new_size-hc->size,
1989 /*inited*/False );
1990
1991 /* Copy from old to new */
1992 for (i = 0; i < hc->size; i++)
1993 ((UChar*)p_new)[i] = ((UChar*)p)[i];
1994
1995 /* Free old memory */
njn72718642003-07-24 08:45:32 +00001996 die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001997
1998 /* this has to be after die_and_free_mem, otherwise the
1999 former succeeds in shorting out the new block, not the
2000 old, in the case when both are on the same list. */
njn72718642003-07-24 08:45:32 +00002001 add_HG_Chunk ( tid, p_new, new_size );
njn3e884182003-04-15 13:03:23 +00002002
2003 return (void*)p_new;
2004 }
2005}
2006
njn25e49d8e72002-09-23 09:36:25 +00002007/*--------------------------------------------------------------*/
2008/*--- Machinery to support sanity checking ---*/
2009/*--------------------------------------------------------------*/
2010
njn25e49d8e72002-09-23 09:36:25 +00002011Bool SK_(cheap_sanity_check) ( void )
2012{
jseward9800fd32004-01-04 23:08:04 +00002013 /* nothing useful we can rapidly check */
2014 return True;
njn25e49d8e72002-09-23 09:36:25 +00002015}
2016
njn25e49d8e72002-09-23 09:36:25 +00002017Bool SK_(expensive_sanity_check)(void)
2018{
2019 Int i;
2020
2021 /* Make sure nobody changed the distinguished secondary. */
2022 for (i = 0; i < ESEC_MAP_WORDS; i++)
2023 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
2024 distinguished_secondary_map.swords[i].state != virgin_sword.state)
2025 return False;
2026
2027 return True;
2028}
2029
2030
2031/*--------------------------------------------------------------*/
2032/*--- Instrumentation ---*/
2033/*--------------------------------------------------------------*/
2034
sewardjf6374322002-11-13 22:35:55 +00002035static UInt stk_ld, nonstk_ld, stk_st, nonstk_st;
2036
njn25e49d8e72002-09-23 09:36:25 +00002037/* Create and return an instrumented version of cb_in. Free cb_in
2038 before returning. */
2039UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
2040{
2041 UCodeBlock* cb;
2042 Int i;
2043 UInstr* u_in;
2044 Int t_size = INVALID_TEMPREG;
sewardjf6374322002-11-13 22:35:55 +00002045 Int ntemps;
2046 Bool *stackref = NULL;
sewardj7a5ebcf2002-11-13 22:42:13 +00002047 Bool locked = False; /* lock prefix */
njn25e49d8e72002-09-23 09:36:25 +00002048
njn810086f2002-11-14 12:42:47 +00002049 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002050
sewardjf6374322002-11-13 22:35:55 +00002051 /* stackref[] is used for super-simple value tracking to keep note
2052 of which tempregs currently hold a value which is derived from
2053 ESP or EBP, and is therefore likely stack-relative if used as
2054 the address for LOAD or STORE. */
njn810086f2002-11-14 12:42:47 +00002055 ntemps = VG_(get_num_temps)(cb);
sewardjf6374322002-11-13 22:35:55 +00002056 stackref = VG_(malloc)(sizeof(*stackref) * ntemps);
2057 VG_(memset)(stackref, 0, sizeof(*stackref) * ntemps);
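   /* For example: a GET of %ESP or %EBP into a tempreg sets its stackref[]
      flag; MOV, LEA1, ADD and SUB propagate the flag to derived tempregs;
      a LOAD or STORE through a flagged tempreg then counts as a stack
      access and, when clo_priv_stacks is set, skips the checking helper. */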
2058
njn810086f2002-11-14 12:42:47 +00002059 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
2060 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +00002061
njn25e49d8e72002-09-23 09:36:25 +00002062 switch (u_in->opcode) {
2063
2064 case NOP: case CALLM_S: case CALLM_E:
2065 break;
sewardjf6374322002-11-13 22:35:55 +00002066
sewardj7a5ebcf2002-11-13 22:42:13 +00002067 case LOCK:
2068 locked = True;
2069 uInstr0(cb, CCALL, 0);
2070 uCCall(cb, (Addr)bus_lock, 0, 0, False);
2071 break;
2072
2073 case JMP: case INCEIP:
2074 if (locked) {
2075 uInstr0(cb, CCALL, 0);
2076 uCCall(cb, (Addr)bus_unlock, 0, 0, False);
2077 }
2078 locked = False;
2079 VG_(copy_UInstr)(cb, u_in);
2080 break;
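	 /* The two cases above bracket a locked instruction: bus_lock() is
	    called when the LOCK prefix is seen, and the matching
	    bus_unlock() is emitted just before the next JMP/INCEIP. */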
2081
sewardjf6374322002-11-13 22:35:55 +00002082 case GET:
2083 sk_assert(u_in->tag1 == ArchReg);
2084 sk_assert(u_in->tag2 == TempReg);
2085 sk_assert(u_in->val2 < ntemps);
2086
2087 stackref[u_in->val2] = (u_in->size == 4 &&
2088 (u_in->val1 == R_ESP || u_in->val1 == R_EBP));
2089 VG_(copy_UInstr)(cb, u_in);
2090 break;
2091
2092 case MOV:
2093 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2094 sk_assert(u_in->tag2 == TempReg);
2095 stackref[u_in->val2] = stackref[u_in->val1];
2096 }
2097 VG_(copy_UInstr)(cb, u_in);
2098 break;
2099
2100 case LEA1:
2101 case ADD: case SUB:
2102 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2103 sk_assert(u_in->tag2 == TempReg);
2104 stackref[u_in->val2] |= stackref[u_in->val1];
2105 }
2106 VG_(copy_UInstr)(cb, u_in);
2107 break;
njn25e49d8e72002-09-23 09:36:25 +00002108
sewardja5b3aec2002-10-22 05:09:36 +00002109 case LOAD: {
2110 void (*help)(Addr);
2111 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002112 sk_assert(u_in->tag1 == TempReg);
2113
2114 if (!clo_priv_stacks || !stackref[u_in->val1]) {
2115 nonstk_ld++;
2116
2117 switch(u_in->size) {
2118 case 1: help = eraser_mem_help_read_1; break;
2119 case 2: help = eraser_mem_help_read_2; break;
2120 case 4: help = eraser_mem_help_read_4; break;
2121 default:
2122 VG_(skin_panic)("bad size");
2123 }
jsgfcb1d1c02003-10-14 21:55:10 +00002124
2125 /* XXX all registers should be flushed to baseblock
2126 here */
sewardjf6374322002-11-13 22:35:55 +00002127 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
2128 uCCall(cb, (Addr)help, 1, 1, False);
2129 } else
2130 stk_ld++;
njn25e49d8e72002-09-23 09:36:25 +00002131
sewardja5b3aec2002-10-22 05:09:36 +00002132 VG_(copy_UInstr)(cb, u_in);
2133 t_size = INVALID_TEMPREG;
2134 break;
2135 }
2136
fitzhardinge111c6072004-03-09 02:45:07 +00002137 case MMX2_MemRd:
sewardja5b3aec2002-10-22 05:09:36 +00002138 case FPU_R: {
njne427a662002-10-02 11:08:25 +00002139 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
fitzhardinge111c6072004-03-09 02:45:07 +00002140 8 == u_in->size || 10 == u_in->size || 108 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002141
fitzhardinge111c6072004-03-09 02:45:07 +00002142 t_size = newTemp(cb);
2143 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2144 uLiteral(cb, (UInt)u_in->size);
njn25e49d8e72002-09-23 09:36:25 +00002145
fitzhardinge111c6072004-03-09 02:45:07 +00002146 /* XXX all registers should be flushed to baseblock
2147 here */
2148 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2149 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
2150
2151 VG_(copy_UInstr)(cb, u_in);
2152 t_size = INVALID_TEMPREG;
2153 break;
sewardja5b3aec2002-10-22 05:09:36 +00002154 }
2155
thughes96b466a2004-03-15 16:43:58 +00002156 case MMX2a1_MemRd: {
2157 sk_assert(8 == u_in->size);
2158
2159 t_size = newTemp(cb);
2160 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2161 uLiteral(cb, (UInt)u_in->size);
2162
2163 /* XXX all registers should be flushed to baseblock
2164 here */
2165 uInstr2(cb, CCALL, 0, TempReg, u_in->val3, TempReg, t_size);
2166 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
2167
2168 VG_(copy_UInstr)(cb, u_in);
2169 t_size = INVALID_TEMPREG;
2170 break;
2171 }
2172
fitzhardinge111c6072004-03-09 02:45:07 +00002173 case SSE2a_MemRd:
2174 case SSE2a1_MemRd:
2175 case SSE3a_MemRd:
2176 case SSE3a1_MemRd:
2177 case SSE3ag_MemRd_RegWr: {
2178 Int addr = (u_in->opcode == SSE3ag_MemRd_RegWr) ? u_in->val1 : u_in->val3;
2179
2180 sk_assert(u_in->size == 4 || u_in->size == 8 || u_in->size == 16 || u_in->size == 512);
2181
2182 t_size = newTemp(cb);
2183 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2184 uLiteral(cb, (UInt)u_in->size);
2185
2186 uInstr2(cb, CCALL, 0, TempReg, addr, TempReg, t_size);
2187 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
2188
2189 VG_(copy_UInstr)(cb, u_in);
2190 t_size = INVALID_TEMPREG;
2191 break;
2192 }
2193
sewardja5b3aec2002-10-22 05:09:36 +00002194 case STORE: {
2195 void (*help)(Addr, UInt);
2196 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002197 sk_assert(u_in->tag2 == TempReg);
sewardja5b3aec2002-10-22 05:09:36 +00002198
sewardjf6374322002-11-13 22:35:55 +00002199 if (!clo_priv_stacks || !stackref[u_in->val2]) {
2200 nonstk_st++;
2201
2202 switch(u_in->size) {
2203 case 1: help = eraser_mem_help_write_1; break;
2204 case 2: help = eraser_mem_help_write_2; break;
2205 case 4: help = eraser_mem_help_write_4; break;
2206 default:
2207 VG_(skin_panic)("bad size");
2208 }
2209
jsgfcb1d1c02003-10-14 21:55:10 +00002210 /* XXX all registers should be flushed to baseblock
2211 here */
sewardjf6374322002-11-13 22:35:55 +00002212 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
2213 uCCall(cb, (Addr)help, 2, 2, False);
2214 } else
2215 stk_st++;
sewardja5b3aec2002-10-22 05:09:36 +00002216
2217 VG_(copy_UInstr)(cb, u_in);
2218 t_size = INVALID_TEMPREG;
2219 break;
2220 }
2221
fitzhardinge111c6072004-03-09 02:45:07 +00002222 case MMX2_MemWr:
sewardja5b3aec2002-10-22 05:09:36 +00002223 case FPU_W: {
njne427a662002-10-02 11:08:25 +00002224 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
fitzhardinge111c6072004-03-09 02:45:07 +00002225 8 == u_in->size || 10 == u_in->size || 108 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002226
2227 t_size = newTemp(cb);
2228 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2229 uLiteral(cb, (UInt)u_in->size);
jsgfcb1d1c02003-10-14 21:55:10 +00002230 /* XXX all registers should be flushed to baseblock
2231 here */
sewardja5b3aec2002-10-22 05:09:36 +00002232 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2233 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
2234
2235 VG_(copy_UInstr)(cb, u_in);
2236 t_size = INVALID_TEMPREG;
2237 break;
2238 }
njn25e49d8e72002-09-23 09:36:25 +00002239
fitzhardinge111c6072004-03-09 02:45:07 +00002240 case SSE2a_MemWr:
2241 case SSE3a_MemWr: {
2242 sk_assert(4 == u_in->size || 8 == u_in->size || 16 == u_in->size ||
2243 512 == u_in->size);
2244
2245 t_size = newTemp(cb);
2246 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2247 uLiteral(cb, (UInt)u_in->size);
2248 /* XXX all registers should be flushed to baseblock
2249 here */
2250 uInstr2(cb, CCALL, 0, TempReg, u_in->val3, TempReg, t_size);
2251 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
2252
2253 VG_(copy_UInstr)(cb, u_in);
2254 t_size = INVALID_TEMPREG;
2255 break;
2256 }
sewardj3d7c9c82003-03-26 21:08:13 +00002257
njn25e49d8e72002-09-23 09:36:25 +00002258 default:
sewardjf6374322002-11-13 22:35:55 +00002259 /* conservative tromping */
2260 if (0 && u_in->tag1 == TempReg) /* can val1 ever be dest? */
2261 stackref[u_in->val1] = False;
2262 if (u_in->tag2 == TempReg)
2263 stackref[u_in->val2] = False;
2264 if (u_in->tag3 == TempReg)
2265 stackref[u_in->val3] = False;
njn4ba5a792002-09-30 10:23:54 +00002266 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00002267 break;
2268 }
2269 }
2270
sewardjf6374322002-11-13 22:35:55 +00002271 VG_(free)(stackref);
njn4ba5a792002-09-30 10:23:54 +00002272 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002273 return cb;
2274}
2275
2276
2277/*--------------------------------------------------------------------*/
2278/*--- Error and suppression handling ---*/
2279/*--------------------------------------------------------------------*/
2280
2281typedef
2282 enum {
2283 /* Possible data race */
2284 EraserSupp
2285 }
2286 EraserSuppKind;
2287
2288/* What kind of error it is. */
2289typedef
2290 enum {
sewardj16748af2002-10-22 04:55:54 +00002291 EraserErr, /* data-race */
2292 MutexErr, /* mutex operations */
sewardjff2c9232002-11-13 21:44:39 +00002293 LockGraphErr, /* mutex order error */
njn25e49d8e72002-09-23 09:36:25 +00002294 }
2295 EraserErrorKind;
2296
sewardj16748af2002-10-22 04:55:54 +00002297/* The classification of a faulting address. */
2298typedef
2299 enum { Undescribed, /* as-yet unclassified */
2300 Stack,
2301 Unknown, /* classification yielded nothing useful */
sewardjdac0a442002-11-13 22:08:40 +00002302 Mallocd,
2303 Freed,
sewardj16748af2002-10-22 04:55:54 +00002304 Segment
2305 }
2306 AddrKind;
2307/* Records info about a faulting address. */
2308typedef
2309 struct {
2310 /* ALL */
2311 AddrKind akind;
2312 /* Freed, Mallocd */
2313 Int blksize;
2314 /* Freed, Mallocd */
2315 Int rwoffset;
2316 /* Freed, Mallocd */
2317 ExeContext* lastchange;
2318 ThreadId lasttid;
2319 /* Stack */
2320 ThreadId stack_tid;
2321 /* Segment */
2322 const Char* filename;
2323 const Char* section;
 2324       /* True if the address is just below %esp -- could be a gcc bug. */
2325 Bool maybe_gcc;
jsgfcb1d1c02003-10-14 21:55:10 +00002326 /* symbolic address description */
2327 Char *expr;
sewardj16748af2002-10-22 04:55:54 +00002328 }
2329 AddrInfo;
njn25e49d8e72002-09-23 09:36:25 +00002330
sewardj16748af2002-10-22 04:55:54 +00002331/* What kind of memory access is involved in the error? */
2332typedef
2333 enum { ReadAxs, WriteAxs, ExecAxs }
2334 AxsKind;
2335
2336/* Extra context for memory errors */
2337typedef
2338 struct {
2339 AxsKind axskind;
2340 Int size;
2341 AddrInfo addrinfo;
2342 Bool isWrite;
2343 shadow_word prevstate;
sewardjff2c9232002-11-13 21:44:39 +00002344 /* MutexErr, LockGraphErr */
sewardj39a4d842002-11-13 22:14:30 +00002345 Mutex *mutex;
sewardj499e3de2002-11-13 22:22:25 +00002346 EC_EIP lasttouched;
sewardj16748af2002-10-22 04:55:54 +00002347 ThreadId lasttid;
sewardjff2c9232002-11-13 21:44:39 +00002348 /* LockGraphErr */
sewardj4bffb232002-11-13 21:46:34 +00002349 const LockSet *held_lockset;
2350 const LockSet *prev_lockset;
sewardj16748af2002-10-22 04:55:54 +00002351 }
2352 HelgrindError;
2353
2354static __inline__
2355void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002356{
sewardj16748af2002-10-22 04:55:54 +00002357 ai->akind = Unknown;
2358 ai->blksize = 0;
2359 ai->rwoffset = 0;
2360 ai->lastchange = NULL;
2361 ai->lasttid = VG_INVALID_THREADID;
2362 ai->filename = NULL;
2363 ai->section = "???";
2364 ai->stack_tid = VG_INVALID_THREADID;
2365 ai->maybe_gcc = False;
jsgfcb1d1c02003-10-14 21:55:10 +00002366 ai->expr = NULL;
njn25e49d8e72002-09-23 09:36:25 +00002367}
2368
sewardj16748af2002-10-22 04:55:54 +00002369static __inline__
2370void clear_HelgrindError ( HelgrindError* err_extra )
2371{
2372 err_extra->axskind = ReadAxs;
2373 err_extra->size = 0;
2374 err_extra->mutex = NULL;
sewardj499e3de2002-11-13 22:22:25 +00002375   err_extra->lasttouched = NULL_EC_EIP;
sewardj16748af2002-10-22 04:55:54 +00002376 err_extra->lasttid = VG_INVALID_THREADID;
sewardjff2c9232002-11-13 21:44:39 +00002377 err_extra->prev_lockset = 0;
2378 err_extra->held_lockset = 0;
sewardj8fac99a2002-11-13 22:31:26 +00002379 err_extra->prevstate = SW(Vge_Virgin, 0);
sewardj16748af2002-10-22 04:55:54 +00002380 clear_AddrInfo ( &err_extra->addrinfo );
2381 err_extra->isWrite = False;
2382}
2383
2384
2385
2386/* Describe an address as best you can, for error messages,
2387 putting the result in ai. */
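/* The search order used below: first the program's mapped segments
   (text/data/BSS/GOT/PLT), then the live malloc list, then the small
   window of recently freed chunks; anything else is left as Unknown. */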
2388
thughes4ad52d02004-06-27 17:37:21 +00002389/* Callback for searching malloc'd and free'd lists */
2390static Bool addr_is_in_block(VgHashNode *node, void *ap)
2391{
2392 HG_Chunk* hc2 = (HG_Chunk*)node;
2393 Addr a = *(Addr *)ap;
2394
2395 return (hc2->data <= a && a < hc2->data + hc2->size);
2396}
2397
sewardj16748af2002-10-22 04:55:54 +00002398static void describe_addr ( Addr a, AddrInfo* ai )
2399{
njn3e884182003-04-15 13:03:23 +00002400 HG_Chunk* hc;
sewardjdac0a442002-11-13 22:08:40 +00002401 Int i;
sewardj16748af2002-10-22 04:55:54 +00002402
sewardj16748af2002-10-22 04:55:54 +00002403 /* Search for it in segments */
2404 {
2405 const SegInfo *seg;
2406
2407 for(seg = VG_(next_seginfo)(NULL);
2408 seg != NULL;
2409 seg = VG_(next_seginfo)(seg)) {
2410 Addr base = VG_(seg_start)(seg);
2411 UInt size = VG_(seg_size)(seg);
2412 const UChar *filename = VG_(seg_filename)(seg);
2413
2414 if (a >= base && a < base+size) {
2415 ai->akind = Segment;
2416 ai->blksize = size;
2417 ai->rwoffset = a - base;
2418 ai->filename = filename;
2419
2420 switch(VG_(seg_sect_kind)(a)) {
2421 case Vg_SectText: ai->section = "text"; break;
2422 case Vg_SectData: ai->section = "data"; break;
2423 case Vg_SectBSS: ai->section = "BSS"; break;
2424 case Vg_SectGOT: ai->section = "GOT"; break;
2425 case Vg_SectPLT: ai->section = "PLT"; break;
2426 case Vg_SectUnknown:
2427 default:
2428 ai->section = "???"; break;
2429 }
2430
2431 return;
2432 }
2433 }
2434 }
2435
2436 /* Search for a currently malloc'd block which might bracket it. */
thughes4ad52d02004-06-27 17:37:21 +00002437 hc = (HG_Chunk*)VG_(HT_first_match)(hg_malloc_list, addr_is_in_block, &a);
njn3e884182003-04-15 13:03:23 +00002438 if (NULL != hc) {
sewardj16748af2002-10-22 04:55:54 +00002439 ai->akind = Mallocd;
njn3e884182003-04-15 13:03:23 +00002440 ai->blksize = hc->size;
2441 ai->rwoffset = (Int)a - (Int)(hc->data);
2442 ai->lastchange = hc->where;
2443 ai->lasttid = hc->tid;
sewardj16748af2002-10-22 04:55:54 +00002444 return;
2445 }
sewardjdac0a442002-11-13 22:08:40 +00002446
2447 /* Look in recently freed memory */
2448 for(i = 0; i < N_FREED_CHUNKS; i++) {
njn3e884182003-04-15 13:03:23 +00002449 hc = freechunks[i];
2450 if (hc == NULL)
sewardjdac0a442002-11-13 22:08:40 +00002451 continue;
2452
njn3e884182003-04-15 13:03:23 +00002453 if (a >= hc->data && a < hc->data + hc->size) {
sewardjdac0a442002-11-13 22:08:40 +00002454 ai->akind = Freed;
njn3e884182003-04-15 13:03:23 +00002455 ai->blksize = hc->size;
2456 ai->rwoffset = a - hc->data;
2457 ai->lastchange = hc->where;
2458 ai->lasttid = hc->tid;
sewardjdac0a442002-11-13 22:08:40 +00002459 return;
2460 }
2461 }
2462
sewardj16748af2002-10-22 04:55:54 +00002463 /* Clueless ... */
2464 ai->akind = Unknown;
2465 return;
2466}
2467
2468
njn7e614812003-04-21 22:04:03 +00002469/* Updates the copy with address info if necessary. */
2470UInt SK_(update_extra)(Error* err)
sewardj16748af2002-10-22 04:55:54 +00002471{
njn7e614812003-04-21 22:04:03 +00002472 HelgrindError* extra;
sewardj16748af2002-10-22 04:55:54 +00002473
njn7e614812003-04-21 22:04:03 +00002474 extra = (HelgrindError*)VG_(get_error_extra)(err);
2475 if (extra != NULL && Undescribed == extra->addrinfo.akind) {
2476 describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
2477 }
2478 return sizeof(HelgrindError);
sewardj16748af2002-10-22 04:55:54 +00002479}
2480
njn72718642003-07-24 08:45:32 +00002481static void record_eraser_error ( ThreadId tid, Addr a, Bool is_write,
sewardj0f811692002-10-22 04:59:26 +00002482 shadow_word prevstate )
sewardj16748af2002-10-22 04:55:54 +00002483{
sewardjc4a810d2002-11-13 22:25:51 +00002484 shadow_word *sw;
sewardj16748af2002-10-22 04:55:54 +00002485 HelgrindError err_extra;
2486
sewardjff2c9232002-11-13 21:44:39 +00002487 n_eraser_warnings++;
2488
sewardj16748af2002-10-22 04:55:54 +00002489 clear_HelgrindError(&err_extra);
2490 err_extra.isWrite = is_write;
2491 err_extra.addrinfo.akind = Undescribed;
2492 err_extra.prevstate = prevstate;
sewardj499e3de2002-11-13 22:22:25 +00002493 if (clo_execontext)
2494 err_extra.lasttouched = getExeContext(a);
jsgfcb1d1c02003-10-14 21:55:10 +00002495 err_extra.addrinfo.expr = VG_(describe_addr)(tid, a);
2496
njn72718642003-07-24 08:45:32 +00002497 VG_(maybe_record_error)( tid, EraserErr, a,
sewardj16748af2002-10-22 04:55:54 +00002498 (is_write ? "writing" : "reading"),
2499 &err_extra);
2500
sewardjc4a810d2002-11-13 22:25:51 +00002501 sw = get_sword_addr(a);
2502 if (sw->state == Vge_Excl && sw->other != TLSP_INDICATING_ALL) {
2503 ThreadLifeSeg *tls = unpackTLS(sw->other);
2504 tls->refcount--;
2505 }
2506
sewardj7f3ad222002-11-13 22:11:53 +00002507 set_sword(a, error_sword);
sewardj16748af2002-10-22 04:55:54 +00002508}
2509
sewardj39a4d842002-11-13 22:14:30 +00002510static void record_mutex_error(ThreadId tid, Mutex *mutex,
sewardj16748af2002-10-22 04:55:54 +00002511 Char *str, ExeContext *ec)
2512{
2513 HelgrindError err_extra;
2514
2515 clear_HelgrindError(&err_extra);
2516 err_extra.addrinfo.akind = Undescribed;
2517 err_extra.mutex = mutex;
sewardjc808ef52002-11-13 22:43:26 +00002518 err_extra.lasttouched = EC(ec, virgin_sword, thread_seg[tid]);
sewardj16748af2002-10-22 04:55:54 +00002519 err_extra.lasttid = tid;
2520
njn72718642003-07-24 08:45:32 +00002521 VG_(maybe_record_error)(tid, MutexErr,
sewardj16748af2002-10-22 04:55:54 +00002522 (Addr)mutex->mutexp, str, &err_extra);
2523}
njn25e49d8e72002-09-23 09:36:25 +00002524
sewardj39a4d842002-11-13 22:14:30 +00002525static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
sewardj4bffb232002-11-13 21:46:34 +00002526 const LockSet *lockset_holding,
2527 const LockSet *lockset_prev)
sewardjff2c9232002-11-13 21:44:39 +00002528{
2529 HelgrindError err_extra;
2530
2531 n_lockorder_warnings++;
2532
2533 clear_HelgrindError(&err_extra);
2534 err_extra.addrinfo.akind = Undescribed;
2535 err_extra.mutex = mutex;
2536
sewardjc808ef52002-11-13 22:43:26 +00002537 err_extra.lasttouched = EC(mutex->location, virgin_sword, 0);
sewardjff2c9232002-11-13 21:44:39 +00002538 err_extra.held_lockset = lockset_holding;
2539 err_extra.prev_lockset = lockset_prev;
2540
njn72718642003-07-24 08:45:32 +00002541 VG_(maybe_record_error)(tid, LockGraphErr, mutex->mutexp, "", &err_extra);
sewardjff2c9232002-11-13 21:44:39 +00002542}
2543
njn810086f2002-11-14 12:42:47 +00002544Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
njn25e49d8e72002-09-23 09:36:25 +00002545{
njn810086f2002-11-14 12:42:47 +00002546 Char *e1s, *e2s;
sewardj16748af2002-10-22 04:55:54 +00002547
njn810086f2002-11-14 12:42:47 +00002548 sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
2549
2550 switch (VG_(get_error_kind)(e1)) {
sewardj16748af2002-10-22 04:55:54 +00002551 case EraserErr:
njn810086f2002-11-14 12:42:47 +00002552 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002553
2554 case MutexErr:
njn810086f2002-11-14 12:42:47 +00002555 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002556 }
2557
njn810086f2002-11-14 12:42:47 +00002558 e1s = VG_(get_error_string)(e1);
2559 e2s = VG_(get_error_string)(e2);
2560 if (e1s != e2s) return False;
2561 if (0 != VG_(strcmp)(e1s, e2s)) return False;
njn25e49d8e72002-09-23 09:36:25 +00002562 return True;
2563}
2564
sewardj16748af2002-10-22 04:55:54 +00002565static void pp_AddrInfo ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002566{
jsgfcb1d1c02003-10-14 21:55:10 +00002567 if (ai->expr != NULL)
2568 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002569 " Address %p == %s", a, ai->expr);
jsgfcb1d1c02003-10-14 21:55:10 +00002570
sewardj16748af2002-10-22 04:55:54 +00002571 switch (ai->akind) {
2572 case Stack:
2573 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002574 " Address %p is on thread %d's stack",
sewardj16748af2002-10-22 04:55:54 +00002575 a, ai->stack_tid);
2576 break;
2577 case Unknown:
jsgfcb1d1c02003-10-14 21:55:10 +00002578 if (ai->expr != NULL)
2579 break;
2580
nethercote3b390c72003-11-13 17:53:43 +00002581 /* maybe_gcc is never set to True! This is a hangover from code
2582 in Memcheck */
sewardj16748af2002-10-22 04:55:54 +00002583 if (ai->maybe_gcc) {
2584 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002585 " Address %p is just below %%esp. Possibly a bug in GCC/G++",
sewardj16748af2002-10-22 04:55:54 +00002586 a);
2587 VG_(message)(Vg_UserMsg,
2588 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
2589 } else {
2590 VG_(message)(Vg_UserMsg,
nethercotef798eee2004-04-13 08:36:35 +00002591 " Address %p is not stack'd, malloc'd or (recently) free'd", a);
sewardj16748af2002-10-22 04:55:54 +00002592 }
2593 break;
2594 case Segment:
2595 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002596 " Address %p is in %s section of %s",
sewardj16748af2002-10-22 04:55:54 +00002597 a, ai->section, ai->filename);
2598 break;
sewardjdac0a442002-11-13 22:08:40 +00002599 case Mallocd:
2600 case Freed: {
sewardj16748af2002-10-22 04:55:54 +00002601 UInt delta;
2602 UChar* relative;
2603 if (ai->rwoffset < 0) {
2604 delta = (UInt)(- ai->rwoffset);
2605 relative = "before";
2606 } else if (ai->rwoffset >= ai->blksize) {
2607 delta = ai->rwoffset - ai->blksize;
2608 relative = "after";
2609 } else {
2610 delta = ai->rwoffset;
2611 relative = "inside";
2612 }
2613 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002614 " Address %p is %d bytes %s a block of size %d %s by thread %d",
sewardj16748af2002-10-22 04:55:54 +00002615 a, delta, relative,
2616 ai->blksize,
sewardjdac0a442002-11-13 22:08:40 +00002617 ai->akind == Mallocd ? "alloc'd" : "freed",
sewardj16748af2002-10-22 04:55:54 +00002618 ai->lasttid);
sewardj5481f8f2002-10-20 19:43:47 +00002619
sewardj16748af2002-10-22 04:55:54 +00002620 VG_(pp_ExeContext)(ai->lastchange);
2621 break;
2622 }
2623 default:
2624 VG_(skin_panic)("pp_AddrInfo");
2625 }
njn25e49d8e72002-09-23 09:36:25 +00002626}
2627
sewardj4bffb232002-11-13 21:46:34 +00002628static Char *lockset_str(const Char *prefix, const LockSet *lockset)
sewardjff2c9232002-11-13 21:44:39 +00002629{
sewardjff2c9232002-11-13 21:44:39 +00002630 Char *buf, *cp;
sewardj4bffb232002-11-13 21:46:34 +00002631 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002632
sewardj4bffb232002-11-13 21:46:34 +00002633 buf = VG_(malloc)((prefix == NULL ? 0 : VG_(strlen)(prefix)) +
2634 lockset->setsize * 120 +
2635 1);
sewardjff2c9232002-11-13 21:44:39 +00002636
2637 cp = buf;
2638 if (prefix)
2639 cp += VG_(sprintf)(cp, "%s", prefix);
2640
sewardj4bffb232002-11-13 21:46:34 +00002641 for(i = 0; i < lockset->setsize; i++)
2642 cp += VG_(sprintf)(cp, "%p%(y, ", lockset->mutex[i]->mutexp,
2643 lockset->mutex[i]->mutexp);
sewardjff2c9232002-11-13 21:44:39 +00002644
sewardj4bffb232002-11-13 21:46:34 +00002645 if (lockset->setsize)
sewardjff2c9232002-11-13 21:44:39 +00002646 cp[-2] = '\0';
2647 else
2648 *cp = '\0';
2649
2650 return buf;
2651}
njn25e49d8e72002-09-23 09:36:25 +00002652
njn43c799e2003-04-08 00:08:52 +00002653void SK_(pp_SkinError) ( Error* err )
njn25e49d8e72002-09-23 09:36:25 +00002654{
njn810086f2002-11-14 12:42:47 +00002655 HelgrindError *extra = (HelgrindError *)VG_(get_error_extra)(err);
sewardj16748af2002-10-22 04:55:54 +00002656 Char buf[100];
2657 Char *msg = buf;
sewardj4bffb232002-11-13 21:46:34 +00002658 const LockSet *ls;
sewardj16748af2002-10-22 04:55:54 +00002659
2660 *msg = '\0';
2661
njn810086f2002-11-14 12:42:47 +00002662 switch(VG_(get_error_kind)(err)) {
2663 case EraserErr: {
2664 Addr err_addr = VG_(get_error_address)(err);
2665
sewardj16748af2002-10-22 04:55:54 +00002666 VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
njn810086f2002-11-14 12:42:47 +00002667 VG_(get_error_string)(err), err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002668 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn810086f2002-11-14 12:42:47 +00002669 pp_AddrInfo(err_addr, &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002670
2671 switch(extra->prevstate.state) {
2672 case Vge_Virgin:
2673 /* shouldn't be possible to go directly from virgin -> error */
2674 VG_(sprintf)(buf, "virgin!?");
2675 break;
2676
sewardjc4a810d2002-11-13 22:25:51 +00002677 case Vge_Excl: {
2678 ThreadLifeSeg *tls = unpackTLS(extra->prevstate.other);
2679
2680 sk_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
2681 VG_(sprintf)(buf, "exclusively owned by thread %u", tls->tid);
sewardj16748af2002-10-22 04:55:54 +00002682 break;
sewardjc4a810d2002-11-13 22:25:51 +00002683 }
sewardj16748af2002-10-22 04:55:54 +00002684
2685 case Vge_Shar:
sewardjff2c9232002-11-13 21:44:39 +00002686 case Vge_SharMod:
sewardj8fac99a2002-11-13 22:31:26 +00002687 ls = unpackLockSet(extra->prevstate.other);
sewardj4bffb232002-11-13 21:46:34 +00002688
2689 if (isempty(ls)) {
sewardj16748af2002-10-22 04:55:54 +00002690 VG_(sprintf)(buf, "shared %s, no locks",
2691 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
2692 break;
2693 }
2694
sewardjff2c9232002-11-13 21:44:39 +00002695 msg = lockset_str(extra->prevstate.state == Vge_Shar ?
2696 "shared RO, locked by:" :
sewardj4bffb232002-11-13 21:46:34 +00002697 "shared RW, locked by:", ls);
sewardj16748af2002-10-22 04:55:54 +00002698
sewardj16748af2002-10-22 04:55:54 +00002699 break;
2700 }
sewardj16748af2002-10-22 04:55:54 +00002701
sewardj499e3de2002-11-13 22:22:25 +00002702 if (*msg)
nethercote3b390c72003-11-13 17:53:43 +00002703 VG_(message)(Vg_UserMsg, " Previous state: %s", msg);
sewardj499e3de2002-11-13 22:22:25 +00002704
sewardj72baa7a2002-12-09 23:32:58 +00002705 if (clo_execontext == EC_Some
2706 && extra->lasttouched.uu_ec_eip.eip != 0) {
sewardj499e3de2002-11-13 22:22:25 +00002707 Char file[100];
2708 UInt line;
sewardj72baa7a2002-12-09 23:32:58 +00002709 Addr eip = extra->lasttouched.uu_ec_eip.eip;
sewardj499e3de2002-11-13 22:22:25 +00002710
nethercote3b390c72003-11-13 17:53:43 +00002711 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s by thread %u",
njn810086f2002-11-14 12:42:47 +00002712 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002713 pp_state(extra->lasttouched.state),
2714 unpackTLS(extra->lasttouched.tls)->tid);
sewardj499e3de2002-11-13 22:22:25 +00002715
2716 if (VG_(get_filename_linenum)(eip, file, sizeof(file), &line)) {
2717 VG_(message)(Vg_UserMsg, " at %p: %y (%s:%u)",
2718 eip, eip, file, line);
2719 } else if (VG_(get_objname)(eip, file, sizeof(file))) {
2720 VG_(message)(Vg_UserMsg, " at %p: %y (in %s)",
2721 eip, eip, file);
2722 } else {
2723 VG_(message)(Vg_UserMsg, " at %p: %y", eip, eip);
2724 }
sewardj72baa7a2002-12-09 23:32:58 +00002725 } else if (clo_execontext == EC_All
2726 && extra->lasttouched.uu_ec_eip.ec != NULL) {
nethercote3b390c72003-11-13 17:53:43 +00002727 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s in tid %u",
njn810086f2002-11-14 12:42:47 +00002728 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002729 pp_state(extra->lasttouched.state),
2730 unpackTLS(extra->lasttouched.tls)->tid);
sewardj72baa7a2002-12-09 23:32:58 +00002731 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj499e3de2002-11-13 22:22:25 +00002732 }
sewardj16748af2002-10-22 04:55:54 +00002733 break;
njn810086f2002-11-14 12:42:47 +00002734 }
sewardj16748af2002-10-22 04:55:54 +00002735
2736 case MutexErr:
sewardj499e3de2002-11-13 22:22:25 +00002737 VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s",
njn810086f2002-11-14 12:42:47 +00002738 VG_(get_error_address)(err),
2739 VG_(get_error_address)(err),
2740 VG_(get_error_string)(err));
njn43c799e2003-04-08 00:08:52 +00002741 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardj72baa7a2002-12-09 23:32:58 +00002742 if (extra->lasttouched.uu_ec_eip.ec != NULL) {
nethercote3b390c72003-11-13 17:53:43 +00002743 VG_(message)(Vg_UserMsg, " last touched by thread %d", extra->lasttid);
sewardj72baa7a2002-12-09 23:32:58 +00002744 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_eip.ec);
sewardj16748af2002-10-22 04:55:54 +00002745 }
njn810086f2002-11-14 12:42:47 +00002746 pp_AddrInfo(VG_(get_error_address)(err), &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002747 break;
sewardjff2c9232002-11-13 21:44:39 +00002748
2749 case LockGraphErr: {
sewardj4bffb232002-11-13 21:46:34 +00002750 const LockSet *heldset = extra->held_lockset;
njn810086f2002-11-14 12:42:47 +00002751 Addr err_addr = VG_(get_error_address)(err);
sewardj4bffb232002-11-13 21:46:34 +00002752 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002753
2754 msg = lockset_str(NULL, heldset);
2755
2756 VG_(message)(Vg_UserMsg, "Mutex %p%(y locked in inconsistent order",
njn810086f2002-11-14 12:42:47 +00002757 err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002758 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardjff2c9232002-11-13 21:44:39 +00002759 VG_(message)(Vg_UserMsg, " while holding locks %s", msg);
2760
sewardj4bffb232002-11-13 21:46:34 +00002761 for(i = 0; i < heldset->setsize; i++) {
sewardj39a4d842002-11-13 22:14:30 +00002762 const Mutex *lsmx = heldset->mutex[i];
sewardjff2c9232002-11-13 21:44:39 +00002763
sewardj542494b2002-11-13 22:46:13 +00002764 /* needs to be a recursive search+display */
2765 if (0 && !ismember(lsmx->lockdep, extra->mutex))
sewardjff2c9232002-11-13 21:44:39 +00002766 continue;
2767
nethercote3b390c72003-11-13 17:53:43 +00002768 VG_(message)(Vg_UserMsg, " %p%(y last locked at",
sewardjff2c9232002-11-13 21:44:39 +00002769 lsmx->mutexp, lsmx->mutexp);
2770 VG_(pp_ExeContext)(lsmx->location);
2771 VG_(free)(msg);
sewardj4bffb232002-11-13 21:46:34 +00002772 msg = lockset_str(NULL, lsmx->lockdep);
nethercote3b390c72003-11-13 17:53:43 +00002773 VG_(message)(Vg_UserMsg, " while depending on locks %s", msg);
sewardjff2c9232002-11-13 21:44:39 +00002774 }
2775
2776 break;
sewardj16748af2002-10-22 04:55:54 +00002777 }
sewardjff2c9232002-11-13 21:44:39 +00002778 }
2779
2780 if (msg != buf)
2781 VG_(free)(msg);
njn25e49d8e72002-09-23 09:36:25 +00002782}
2783
2784
njn810086f2002-11-14 12:42:47 +00002785Bool SK_(recognised_suppression) ( Char* name, Supp *su )
njn25e49d8e72002-09-23 09:36:25 +00002786{
2787 if (0 == VG_(strcmp)(name, "Eraser")) {
njn810086f2002-11-14 12:42:47 +00002788 VG_(set_supp_kind)(su, EraserSupp);
njn25e49d8e72002-09-23 09:36:25 +00002789 return True;
2790 } else {
2791 return False;
2792 }
2793}
2794
2795
njn810086f2002-11-14 12:42:47 +00002796Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +00002797{
2798 /* do nothing -- no extra suppression info present. Return True to
2799 indicate nothing bad happened. */
2800 return True;
2801}
2802
2803
njn810086f2002-11-14 12:42:47 +00002804Bool SK_(error_matches_suppression)(Error* err, Supp* su)
njn25e49d8e72002-09-23 09:36:25 +00002805{
nethercote64366b42003-12-01 13:11:47 +00002806 sk_assert(VG_(get_supp_kind)(su) == EraserSupp);
2807
2808 return (VG_(get_error_kind)(err) == EraserErr);
njn25e49d8e72002-09-23 09:36:25 +00002809}
2810
njn43c799e2003-04-08 00:08:52 +00002811extern Char* SK_(get_error_name) ( Error* err )
2812{
2813 if (EraserErr == VG_(get_error_kind)(err)) {
2814 return "Eraser";
2815 } else {
2816 return NULL; /* Other errors types can't be suppressed */
2817 }
2818}
2819
2820extern void SK_(print_extra_suppression_info) ( Error* err )
2821{
2822 /* Do nothing */
2823}
njn25e49d8e72002-09-23 09:36:25 +00002824
sewardjdca84112002-11-13 22:29:34 +00002825static void eraser_pre_mutex_lock(ThreadId tid, void* void_mutex)
2826{
2827 Mutex *mutex = get_mutex((Addr)void_mutex);
2828
njn72718642003-07-24 08:45:32 +00002829 test_mutex_state(mutex, MxLocked, tid);
sewardjdca84112002-11-13 22:29:34 +00002830}
2831
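/* Locksets are looked up before being (re)built: on lock we first search
   for an existing set equal to (current set + mutex) and only allocate and
   insert a new LockSet when the lookup fails, so each distinct combination
   of held locks is represented once.  E.g. a thread holding {A} that locks
   B ends up sharing the single {A,B} set. */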
njn25e49d8e72002-09-23 09:36:25 +00002832static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
2833{
sewardj4bffb232002-11-13 21:46:34 +00002834 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002835 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002836 const LockSet* ls;
2837
njn72718642003-07-24 08:45:32 +00002838 set_mutex_state(mutex, MxLocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002839
njn25e49d8e72002-09-23 09:36:25 +00002840# if DEBUG_LOCKS
sewardjdac0a442002-11-13 22:08:40 +00002841 VG_(printf)("lock (%u, %p)\n", tid, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002842# endif
2843
njn25e49d8e72002-09-23 09:36:25 +00002844 /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
2845# if LOCKSET_SANITY > 1
2846 sanity_check_locksets("eraser_post_mutex_lock-IN");
2847# endif
2848
sewardj4bffb232002-11-13 21:46:34 +00002849 ls = lookup_LockSet_with(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002850
sewardj4bffb232002-11-13 21:46:34 +00002851 if (ls == NULL) {
2852 LockSet *newset = add_LockSet(thread_locks[tid], mutex);
2853 insert_LockSet(newset);
2854 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002855 }
sewardj4bffb232002-11-13 21:46:34 +00002856 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002857
sewardj4bffb232002-11-13 21:46:34 +00002858 if (debug || DEBUG_LOCKS)
2859 VG_(printf)("tid %u now has lockset %p\n", tid, ls);
njn25e49d8e72002-09-23 09:36:25 +00002860
sewardj4bffb232002-11-13 21:46:34 +00002861 if (debug || LOCKSET_SANITY > 1)
2862 sanity_check_locksets("eraser_post_mutex_lock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002863}
2864
2865
2866static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
2867{
sewardjc26cc252002-10-23 21:58:55 +00002868 static const Bool debug = False;
njn25e49d8e72002-09-23 09:36:25 +00002869 Int i = 0;
sewardj39a4d842002-11-13 22:14:30 +00002870 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002871 const LockSet *ls;
2872
njn72718642003-07-24 08:45:32 +00002873 test_mutex_state(mutex, MxUnlocked, tid);
2874 set_mutex_state(mutex, MxUnlocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002875
sewardjdac0a442002-11-13 22:08:40 +00002876 if (!ismember(thread_locks[tid], mutex))
2877 return;
2878
sewardjc26cc252002-10-23 21:58:55 +00002879 if (debug || DEBUG_LOCKS)
2880 VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002881
sewardjc26cc252002-10-23 21:58:55 +00002882 if (debug || LOCKSET_SANITY > 1)
2883 sanity_check_locksets("eraser_post_mutex_unlock-IN");
njn25e49d8e72002-09-23 09:36:25 +00002884
sewardj4bffb232002-11-13 21:46:34 +00002885 ls = lookup_LockSet_without(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002886
sewardj4bffb232002-11-13 21:46:34 +00002887 if (ls == NULL) {
2888 LockSet *newset = remove_LockSet(thread_locks[tid], mutex);
2889 insert_LockSet(newset);
2890 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002891 }
2892
2893 /* Update the thread's lock vector */
sewardjc26cc252002-10-23 21:58:55 +00002894 if (debug || DEBUG_LOCKS)
sewardj4bffb232002-11-13 21:46:34 +00002895 VG_(printf)("tid %u reverts from %p to lockset %p\n",
sewardjc26cc252002-10-23 21:58:55 +00002896		  tid, thread_locks[tid], ls);
njn25e49d8e72002-09-23 09:36:25 +00002897
sewardj4bffb232002-11-13 21:46:34 +00002898 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002899
sewardjc26cc252002-10-23 21:58:55 +00002900 if (debug || LOCKSET_SANITY > 1)
2901 sanity_check_locksets("eraser_post_mutex_unlock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002902}
2903
2904
2905/* ---------------------------------------------------------------------
2906 Checking memory reads and writes
2907 ------------------------------------------------------------------ */
2908
2909/* Behaviour on reads and writes:
2910 *
2911 * VIR EXCL SHAR SH_MOD
2912 * ----------------------------------------------------------------
2913 * rd/wr, 1st thread | - EXCL - -
2914 * rd, new thread | - SHAR - -
2915 * wr, new thread | - SH_MOD - -
2916 * rd | error! - SHAR SH_MOD
2917 * wr | EXCL - SH_MOD SH_MOD
2918 * ----------------------------------------------------------------
2919 */
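/* Worked example of the table above (assuming the threads' life segments
   overlap, so the EXCL->EXCL ownership transfer below does not apply):
   thread 1 writes a fresh word (VIRGIN -> EXCL, owned by thread 1);
   thread 2 then reads it while holding lock A (EXCL -> SHAR, candidate
   lockset {A}); thread 2 later writes it holding only lock B (SHAR ->
   SH_MOD, lockset intersected down to the empty set).  An empty lockset in
   SH_MOD is what triggers the data-race report in the read/write handlers
   below. */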
2920
sewardj8fac99a2002-11-13 22:31:26 +00002921static inline
njn25e49d8e72002-09-23 09:36:25 +00002922void dump_around_a(Addr a)
2923{
2924 UInt i;
2925 shadow_word* sword;
2926 VG_(printf)("NEARBY:\n");
2927 for (i = a - 12; i <= a + 12; i += 4) {
2928 sword = get_sword_addr(i);
2929 VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
2930 }
2931}
njn25e49d8e72002-09-23 09:36:25 +00002932
2933#if DEBUG_ACCESSES
2934 #define DEBUG_STATE(args...) \
2935 VG_(printf)("(%u) ", size), \
2936 VG_(printf)(args)
2937#else
2938 #define DEBUG_STATE(args...)
2939#endif
2940
njn72718642003-07-24 08:45:32 +00002941static void eraser_mem_read_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00002942{
sewardj72baa7a2002-12-09 23:32:58 +00002943 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00002944 shadow_word prevstate;
2945 ThreadLifeSeg *tls;
2946 const LockSet *ls;
2947 Bool statechange = False;
2948
2949 static const void *const states[4] = {
2950 [Vge_Virgin] &&st_virgin,
2951 [Vge_Excl] &&st_excl,
2952 [Vge_Shar] &&st_shar,
2953 [Vge_SharMod] &&st_sharmod,
2954 };
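   /* Dispatch on the current word state with a gcc computed goto: states[]
      maps each Vge_* value to the label that handles it (using the
      gcc-specific "[index] &&label" initialiser and label-value syntax). */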
2955
2956 tls = thread_seg[tid];
2957 sk_assert(tls != NULL && tls->tid == tid);
2958
2959 sword = get_sword_addr(a);
2960 if (sword == SEC_MAP_ACCESS) {
2961 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
2962 return;
2963 }
2964
2965 prevstate = *sword;
2966
2967 goto *states[sword->state];
2968
 2969   /* This looks like reading of uninitialised memory, which may be legit. Eg.
2970 * calloc() zeroes its values, so untouched memory may actually be
2971 * initialised. Leave that stuff to Valgrind. */
2972 st_virgin:
2973 if (TID_INDICATING_NONVIRGIN == sword->other) {
2974 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
2975 if (DEBUG_VIRGIN_READS)
2976 dump_around_a(a);
2977 } else {
2978 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
2979 }
2980 statechange = True;
2981 *sword = SW(Vge_Excl, packTLS(tls)); /* remember exclusive owner */
2982 tls->refcount++;
2983 goto done;
2984
2985 st_excl: {
2986 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
2987
2988 if (tls == sw_tls) {
2989 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
2990 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
2991 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
2992 } else if (tlsIsDisjoint(tls, sw_tls)) {
2993 DEBUG_STATE("Read EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
2994 statechange = True;
2995 sword->other = packTLS(tls);
2996 sw_tls->refcount--;
2997 tls->refcount++;
2998 } else {
2999 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sw_tls->tid, a, tid);
3000 sw_tls->refcount--;
3001 statechange = True;
3002 *sword = SW(Vge_Shar, packLockSet(thread_locks[tid]));
3003
3004 if (DEBUG_MEM_LOCKSET_CHANGES)
3005 print_LockSet("excl read locks", unpackLockSet(sword->other));
3006 }
3007 goto done;
3008 }
3009
3010 st_shar:
3011 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
3012 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3013 thread_locks[tid]));
3014 statechange = sword->other != prevstate.other;
3015 goto done;
3016
3017 st_sharmod:
3018 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
3019 ls = intersect(unpackLockSet(sword->other),
3020 thread_locks[tid]);
3021 sword->other = packLockSet(ls);
3022
3023 statechange = sword->other != prevstate.other;
3024
3025 if (isempty(ls)) {
njn72718642003-07-24 08:45:32 +00003026 record_eraser_error(tid, a, False /* !is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00003027 }
3028 goto done;
3029
3030 done:
3031 if (clo_execontext != EC_None && statechange) {
3032 EC_EIP eceip;
3033
3034 if (clo_execontext == EC_Some)
njn72718642003-07-24 08:45:32 +00003035 eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003036 else
njn72718642003-07-24 08:45:32 +00003037 eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003038 setExeContext(a, eceip);
3039 }
3040}
njn25e49d8e72002-09-23 09:36:25 +00003041
njn72718642003-07-24 08:45:32 +00003042static void eraser_mem_read(Addr a, UInt size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00003043{
njn72718642003-07-24 08:45:32 +00003044 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003045
sewardj8fac99a2002-11-13 22:31:26 +00003046 end = ROUNDUP(a+size, 4);
3047 a = ROUNDDN(a, 4);
3048
sewardj18cd4a52002-11-13 22:37:41 +00003049 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00003050 eraser_mem_read_word(a, tid);
sewardj18cd4a52002-11-13 22:37:41 +00003051}
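
/* Shadow state is kept per 32-bit word, so eraser_mem_read() widens an
   unaligned or sub-word access to every word it touches.  Hypothetical
   example: a 2-byte read at address 0x1003 gives
       a   = ROUNDDN(0x1003, 4)     = 0x1000
       end = ROUNDUP(0x1003 + 2, 4) = 0x1008
   so eraser_mem_read_word() is applied to both 0x1000 and 0x1004. */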
3052
njn72718642003-07-24 08:45:32 +00003053static void eraser_mem_write_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00003054{
3055 ThreadLifeSeg *tls;
sewardj72baa7a2002-12-09 23:32:58 +00003056 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00003057 shadow_word prevstate;
3058 Bool statechange = False;
3059 static const void *const states[4] = {
3060      [Vge_Virgin]  = &&st_virgin,
3061      [Vge_Excl]    = &&st_excl,
3062      [Vge_Shar]    = &&st_shar,
3063      [Vge_SharMod] = &&st_sharmod,
3064 };
3065
sewardjc4a810d2002-11-13 22:25:51 +00003066 tls = thread_seg[tid];
3067 sk_assert(tls != NULL && tls->tid == tid);
3068
sewardj18cd4a52002-11-13 22:37:41 +00003069 sword = get_sword_addr(a);
3070 if (sword == SEC_MAP_ACCESS) {
3071    VG_(printf)("write distinguished 2ndary map! 0x%x\n", a);
3072 return;
3073 }
njn25e49d8e72002-09-23 09:36:25 +00003074
sewardj18cd4a52002-11-13 22:37:41 +00003075 prevstate = *sword;
njn25e49d8e72002-09-23 09:36:25 +00003076
sewardj18cd4a52002-11-13 22:37:41 +00003077 goto *states[sword->state];
sewardj16748af2002-10-22 04:55:54 +00003078
sewardj18cd4a52002-11-13 22:37:41 +00003079 st_virgin:
3080 if (TID_INDICATING_NONVIRGIN == sword->other)
3081 DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
3082 else
3083 DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
3084 statechange = True;
3085    *sword = SW(Vge_Excl, packTLS(tls)); /* remember exclusive owner */
3086 tls->refcount++;
3087 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003088
sewardj18cd4a52002-11-13 22:37:41 +00003089 st_excl: {
3090 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
3091
3092 if (tls == sw_tls) {
3093 DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
3094 goto done;
3095 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
3096 DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
3097 goto done;
3098 } else if (tlsIsDisjoint(tls, sw_tls)) {
3099 DEBUG_STATE("Write EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
3100 sword->other = packTLS(tls);
3101 sw_tls->refcount--;
sewardjc4a810d2002-11-13 22:25:51 +00003102 tls->refcount++;
sewardj8fac99a2002-11-13 22:31:26 +00003103 goto done;
sewardj18cd4a52002-11-13 22:37:41 +00003104 } else {
3105 DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sw_tls->tid, a, tid);
3106 statechange = True;
3107 sw_tls->refcount--;
3108 *sword = SW(Vge_SharMod, packLockSet(thread_locks[tid]));
3109	 if (DEBUG_MEM_LOCKSET_CHANGES)
3110 print_LockSet("excl write locks", unpackLockSet(sword->other));
3111 goto SHARED_MODIFIED;
sewardjc4a810d2002-11-13 22:25:51 +00003112 }
sewardj18cd4a52002-11-13 22:37:41 +00003113 }
njn25e49d8e72002-09-23 09:36:25 +00003114
sewardj18cd4a52002-11-13 22:37:41 +00003115 st_shar:
3116 DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
3117 sword->state = Vge_SharMod;
3118 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3119 thread_locks[tid]));
3120 statechange = True;
3121 goto SHARED_MODIFIED;
njn25e49d8e72002-09-23 09:36:25 +00003122
sewardj18cd4a52002-11-13 22:37:41 +00003123 st_sharmod:
3124 DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
3125 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3126 thread_locks[tid]));
3127 statechange = sword->other != prevstate.other;
njn25e49d8e72002-09-23 09:36:25 +00003128
sewardj18cd4a52002-11-13 22:37:41 +00003129 SHARED_MODIFIED:
3130 if (isempty(unpackLockSet(sword->other))) {
njn72718642003-07-24 08:45:32 +00003131 record_eraser_error(tid, a, True /* is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00003132 }
3133 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003134
sewardj18cd4a52002-11-13 22:37:41 +00003135 done:
3136 if (clo_execontext != EC_None && statechange) {
3137 EC_EIP eceip;
sewardj499e3de2002-11-13 22:22:25 +00003138
sewardj18cd4a52002-11-13 22:37:41 +00003139 if (clo_execontext == EC_Some)
njn72718642003-07-24 08:45:32 +00003140 eceip = EIP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003141 else
njn72718642003-07-24 08:45:32 +00003142 eceip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003143 setExeContext(a, eceip);
njn25e49d8e72002-09-23 09:36:25 +00003144 }
3145}
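
/* The write-side machine differs from the read side in that every route
   into SHAR_MOD funnels through SHARED_MODIFIED, where an empty candidate
   lockset is reported at once.  Continuing the hypothetical example given
   after eraser_mem_read_word(): if T1, holding only {mu1}, now writes the
   SHAR word whose candidate set is {mu2},
       SHAR --> SHAR_MOD, candidate := intersect({mu2}, {mu1}) = {}
   and record_eraser_error() fires with is_write == True. */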
3146
njn72718642003-07-24 08:45:32 +00003147static void eraser_mem_write(Addr a, UInt size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00003148{
sewardj8fac99a2002-11-13 22:31:26 +00003149 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003150
sewardj8fac99a2002-11-13 22:31:26 +00003151 end = ROUNDUP(a+size, 4);
3152 a = ROUNDDN(a, 4);
3153
sewardj18cd4a52002-11-13 22:37:41 +00003154 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00003155 eraser_mem_write_word(a, tid);
njn25e49d8e72002-09-23 09:36:25 +00003156}
3157
3158#undef DEBUG_STATE
3159
nethercote31212bc2004-02-29 15:50:04 +00003160REGPARM(1) static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00003161{
njn72718642003-07-24 08:45:32 +00003162 eraser_mem_read(a, 1, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003163}
3164
nethercote31212bc2004-02-29 15:50:04 +00003165REGPARM(1) static void eraser_mem_help_read_2(Addr a)
sewardja5b3aec2002-10-22 05:09:36 +00003166{
njn72718642003-07-24 08:45:32 +00003167 eraser_mem_read(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003168}
3169
nethercote31212bc2004-02-29 15:50:04 +00003170REGPARM(1) static void eraser_mem_help_read_4(Addr a)
sewardja5b3aec2002-10-22 05:09:36 +00003171{
njn72718642003-07-24 08:45:32 +00003172 eraser_mem_read(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003173}
3174
nethercote31212bc2004-02-29 15:50:04 +00003175REGPARM(2) static void eraser_mem_help_read_N(Addr a, UInt size)
sewardja5b3aec2002-10-22 05:09:36 +00003176{
njn72718642003-07-24 08:45:32 +00003177 eraser_mem_read(a, size, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003178}
3179
nethercote31212bc2004-02-29 15:50:04 +00003180REGPARM(2) static void eraser_mem_help_write_1(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003181{
3182 if (*(UChar *)a != val)
njn72718642003-07-24 08:45:32 +00003183 eraser_mem_write(a, 1, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003184}
nethercote31212bc2004-02-29 15:50:04 +00003185REGPARM(2) static void eraser_mem_help_write_2(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003186{
3187 if (*(UShort *)a != val)
njn72718642003-07-24 08:45:32 +00003188 eraser_mem_write(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003189}
nethercote31212bc2004-02-29 15:50:04 +00003190REGPARM(2) static void eraser_mem_help_write_4(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003191{
3192 if (*(UInt *)a != val)
njn72718642003-07-24 08:45:32 +00003193 eraser_mem_write(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003194}
nethercote31212bc2004-02-29 15:50:04 +00003195REGPARM(2) static void eraser_mem_help_write_N(Addr a, UInt size)
sewardj7ab2aca2002-10-20 19:40:32 +00003196{
njn72718642003-07-24 08:45:32 +00003197 eraser_mem_write(a, size, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003198}
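
/* Note that the 1-, 2- and 4-byte write helpers are handed the value being
   stored and skip eraser_mem_write() entirely when the store would not
   change memory; this both saves work and avoids treating idempotent
   stores (e.g., hypothetically, a flag several threads keep re-setting to
   1) as fresh writes.  The N-byte helper has no value to compare against,
   so it always goes through the full path. */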
njn25e49d8e72002-09-23 09:36:25 +00003199
sewardjc4a810d2002-11-13 22:25:51 +00003200static void hg_thread_create(ThreadId parent, ThreadId child)
3201{
3202 if (0)
3203 VG_(printf)("CREATE: %u creating %u\n", parent, child);
3204
3205 newTLS(child);
3206 addPriorTLS(child, parent);
3207
3208 newTLS(parent);
3209}
3210
3211static void hg_thread_join(ThreadId joiner, ThreadId joinee)
3212{
3213 if (0)
3214 VG_(printf)("JOIN: %u joining on %u\n", joiner, joinee);
3215
3216 newTLS(joiner);
3217 addPriorTLS(joiner, joinee);
3218
3219 clearTLS(joinee);
3220}
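
/* A hedged sketch of how thread life segments (TLSs) capture the ordering
   that create/join impose (thread numbers are made up):

     T1 writes word x, then creates T2:
         hg_thread_create(1, 2) gives T2 a first segment whose prior is
         T1's segment at the time of the create, and moves T1 itself onto
         a fresh segment.
     T2 then accesses x with no lock held:
         the EXCL handoff in st_excl should find tlsIsDisjoint() true
         (T1's old segment is ordered before T2's), so ownership simply
         passes to T2 and no race is reported.
     hg_thread_join(1, 2) records the reverse ordering, so T1 may safely
     reuse whatever T2 wrote before being joined. */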
3221
sewardj7a5ebcf2002-11-13 22:42:13 +00003222static Int __BUS_HARDWARE_LOCK__;
3223
3224static void bus_lock(void)
3225{
3226 ThreadId tid = VG_(get_current_tid)();
3227 eraser_pre_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3228 eraser_post_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3229}
3230
3231static void bus_unlock(void)
3232{
3233 ThreadId tid = VG_(get_current_tid)();
3234 eraser_post_mutex_unlock(tid, &__BUS_HARDWARE_LOCK__);
3235}
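
/* __BUS_HARDWARE_LOCK__ is a pseudo-mutex used to model bus-locked
   (LOCK-prefixed) instructions: bus_lock()/bus_unlock() go through the
   ordinary mutex machinery, and are registered as helpers in
   SK_(pre_clo_init) below so the instrumentation can bracket such
   instructions with them.  Hypothetical effect: if two threads only ever
   update a counter with locked instructions, its candidate lockset always
   contains &__BUS_HARDWARE_LOCK__ and no race is reported, whereas a
   plain unlocked read of the same (SHAR_MOD) counter empties the
   intersection and produces the usual report. */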
3236
njn25e49d8e72002-09-23 09:36:25 +00003237/*--------------------------------------------------------------------*/
sewardj7f3ad222002-11-13 22:11:53 +00003238/*--- Client requests ---*/
3239/*--------------------------------------------------------------------*/
3240
njn72718642003-07-24 08:45:32 +00003241Bool SK_(handle_client_request)(ThreadId tid, UInt *args, UInt *ret)
sewardj7f3ad222002-11-13 22:11:53 +00003242{
3243 if (!VG_IS_SKIN_USERREQ('H','G',args[0]))
3244 return False;
3245
3246 switch(args[0]) {
3247 case VG_USERREQ__HG_CLEAN_MEMORY:
3248 set_address_range_state(args[1], args[2], Vge_VirginInit);
3249 *ret = 0; /* meaningless */
3250 break;
3251
3252 case VG_USERREQ__HG_KNOWN_RACE:
3253 set_address_range_state(args[1], args[2], Vge_Error);
3254 *ret = 0; /* meaningless */
3255 break;
3256
3257 default:
3258 return False;
3259 }
3260
3261 return True;
3262}
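
/* A hedged client-side sketch of the two requests handled above, assuming
   helgrind.h wraps them in the macros named below (the macro names, the
   buffer and its size are illustrative assumptions):

       #include "helgrind.h"

       char buf[256];
       ...
       VALGRIND_HG_CLEAN_MEMORY(buf, sizeof(buf));   (range back to virgin)
       VALGRIND_HG_KNOWN_RACE(buf, sizeof(buf));     (mark as known race)

   Both act on the address range [args[1], args[1] + args[2]). */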
3263
3264
3265/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003266/*--- Setup ---*/
3267/*--------------------------------------------------------------------*/
3268
njn810086f2002-11-14 12:42:47 +00003269void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00003270{
3271 Int i;
sewardj4bffb232002-11-13 21:46:34 +00003272 LockSet *empty;
njn25e49d8e72002-09-23 09:36:25 +00003273
njn810086f2002-11-14 12:42:47 +00003274 VG_(details_name) ("Helgrind");
3275 VG_(details_version) (NULL);
3276 VG_(details_description) ("a data race detector");
3277 VG_(details_copyright_author)(
nethercotebb1c9912004-01-04 16:43:23 +00003278 "Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote.");
nethercote421281e2003-11-20 16:20:55 +00003279 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj78210aa2002-12-01 02:55:46 +00003280 VG_(details_avg_translation_sizeB) ( 115 );
njn25e49d8e72002-09-23 09:36:25 +00003281
njn810086f2002-11-14 12:42:47 +00003282 VG_(needs_core_errors)();
3283 VG_(needs_skin_errors)();
3284 VG_(needs_data_syms)();
njn810086f2002-11-14 12:42:47 +00003285 VG_(needs_client_requests)();
3286 VG_(needs_command_line_options)();
fitzhardinge98abfc72003-12-16 02:05:15 +00003287 VG_(needs_shadow_memory)();
njn25e49d8e72002-09-23 09:36:25 +00003288
fitzhardinge98abfc72003-12-16 02:05:15 +00003289 VG_(init_new_mem_startup) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003290
njn810086f2002-11-14 12:42:47 +00003291 /* stack ones not decided until VG_(post_clo_init)() */
njn25e49d8e72002-09-23 09:36:25 +00003292
fitzhardinge98abfc72003-12-16 02:05:15 +00003293 VG_(init_new_mem_brk) (& make_writable);
3294 VG_(init_new_mem_mmap) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003295
fitzhardinge98abfc72003-12-16 02:05:15 +00003296 VG_(init_change_mem_mprotect) (& eraser_set_perms);
njn25e49d8e72002-09-23 09:36:25 +00003297
fitzhardinge98abfc72003-12-16 02:05:15 +00003298 VG_(init_ban_mem_stack) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003299
fitzhardinge98abfc72003-12-16 02:05:15 +00003300 VG_(init_die_mem_stack) (NULL);
3301 VG_(init_die_mem_stack_signal) (NULL);
3302 VG_(init_die_mem_brk) (NULL);
3303 VG_(init_die_mem_munmap) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003304
fitzhardinge98abfc72003-12-16 02:05:15 +00003305 VG_(init_pre_mem_read) (& eraser_pre_mem_read);
3306 VG_(init_pre_mem_read_asciiz) (& eraser_pre_mem_read_asciiz);
3307 VG_(init_pre_mem_write) (& eraser_pre_mem_write);
3308 VG_(init_post_mem_write) (NULL);
njn810086f2002-11-14 12:42:47 +00003309
fitzhardinge98abfc72003-12-16 02:05:15 +00003310 VG_(init_post_thread_create) (& hg_thread_create);
3311 VG_(init_post_thread_join) (& hg_thread_join);
njn810086f2002-11-14 12:42:47 +00003312
fitzhardinge98abfc72003-12-16 02:05:15 +00003313 VG_(init_pre_mutex_lock) (& eraser_pre_mutex_lock);
3314 VG_(init_post_mutex_lock) (& eraser_post_mutex_lock);
3315 VG_(init_post_mutex_unlock) (& eraser_post_mutex_unlock);
sewardjc4a810d2002-11-13 22:25:51 +00003316
sewardja5b3aec2002-10-22 05:09:36 +00003317 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
3318 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
3319 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
3320 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
3321
3322 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
3323 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
3324 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
3325 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00003326
sewardj7a5ebcf2002-11-13 22:42:13 +00003327 VG_(register_noncompact_helper)((Addr) & bus_lock);
3328 VG_(register_noncompact_helper)((Addr) & bus_unlock);
3329
sewardj4bffb232002-11-13 21:46:34 +00003330 for(i = 0; i < LOCKSET_HASH_SZ; i++)
3331 lockset_hash[i] = NULL;
3332
3333 empty = alloc_LockSet(0);
3334 insert_LockSet(empty);
3335 emptyset = empty;
3336
sewardjc4a810d2002-11-13 22:25:51 +00003337 /* Init lock table and thread segments */
3338 for (i = 0; i < VG_N_THREADS; i++) {
sewardjdac0a442002-11-13 22:08:40 +00003339 thread_locks[i] = empty;
njn25e49d8e72002-09-23 09:36:25 +00003340
sewardjc4a810d2002-11-13 22:25:51 +00003341 newTLS(i);
3342 }
3343
njn25e49d8e72002-09-23 09:36:25 +00003344 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00003345 hg_malloc_list = VG_(HT_construct)();
njn25e49d8e72002-09-23 09:36:25 +00003346}
3347
sewardj406270b2002-11-13 22:18:09 +00003348Bool SK_(process_cmd_line_option)(Char* arg)
3349{
nethercote27fec902004-06-16 21:26:32 +00003350 if (VG_CLO_STREQ(arg, "--show-last-access=no"))
3351 clo_execontext = EC_None;
3352 else if (VG_CLO_STREQ(arg, "--show-last-access=some"))
3353 clo_execontext = EC_Some;
3354 else if (VG_CLO_STREQ(arg, "--show-last-access=all"))
3355 clo_execontext = EC_All;
sewardj499e3de2002-11-13 22:22:25 +00003356
nethercote27fec902004-06-16 21:26:32 +00003357 else VG_BOOL_CLO("--private-stacks", clo_priv_stacks)
sewardj499e3de2002-11-13 22:22:25 +00003358
nethercote27fec902004-06-16 21:26:32 +00003359 else
3360 return VG_(replacement_malloc_process_cmd_line_option)(arg);
sewardj499e3de2002-11-13 22:22:25 +00003361
nethercote27fec902004-06-16 21:26:32 +00003362 return True;
sewardj406270b2002-11-13 22:18:09 +00003363}
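
/* Example invocation (the program name is made up):

       valgrind --tool=helgrind --private-stacks=yes \
                --show-last-access=some ./myprog

   Anything not matched above falls through to the replacement-malloc
   option handler, so the common malloc-related options still work. */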
3364
njn3e884182003-04-15 13:03:23 +00003365void SK_(print_usage)(void)
sewardj406270b2002-11-13 22:18:09 +00003366{
njn3e884182003-04-15 13:03:23 +00003367 VG_(printf)(
sewardje11d6c82002-12-15 02:00:41 +00003368" --private-stacks=yes|no assume thread stacks are used privately [no]\n"
3369" --show-last-access=no|some|all\n"
3370" show location of last word access on error [no]\n"
njn3e884182003-04-15 13:03:23 +00003371 );
3372 VG_(replacement_malloc_print_usage)();
sewardj406270b2002-11-13 22:18:09 +00003373}
3374
njn3e884182003-04-15 13:03:23 +00003375void SK_(print_debug_usage)(void)
3376{
3377 VG_(replacement_malloc_print_debug_usage)();
3378}
njn25e49d8e72002-09-23 09:36:25 +00003379
3380void SK_(post_clo_init)(void)
3381{
njn810086f2002-11-14 12:42:47 +00003382 void (*stack_tracker)(Addr a, UInt len);
3383
sewardj499e3de2002-11-13 22:22:25 +00003384 if (clo_execontext) {
3385 execontext_map = VG_(malloc)(sizeof(ExeContextMap *) * 65536);
3386 VG_(memset)(execontext_map, 0, sizeof(ExeContextMap *) * 65536);
3387 }
sewardjf6374322002-11-13 22:35:55 +00003388
njn810086f2002-11-14 12:42:47 +00003389 if (clo_priv_stacks)
3390 stack_tracker = & eraser_new_mem_stack_private;
3391 else
3392 stack_tracker = & eraser_new_mem_stack;
sewardjf6374322002-11-13 22:35:55 +00003393
fitzhardinge98abfc72003-12-16 02:05:15 +00003394 VG_(init_new_mem_stack) (stack_tracker);
3395 VG_(init_new_mem_stack_signal) (stack_tracker);
njn25e49d8e72002-09-23 09:36:25 +00003396}
3397
3398
njn7d9f94d2003-04-22 21:41:40 +00003399void SK_(fini)(Int exitcode)
njn25e49d8e72002-09-23 09:36:25 +00003400{
sewardjdac0a442002-11-13 22:08:40 +00003401 if (DEBUG_LOCK_TABLE) {
sewardj4bffb232002-11-13 21:46:34 +00003402 pp_all_LockSets();
sewardjdac0a442002-11-13 22:08:40 +00003403 pp_all_mutexes();
3404 }
sewardj4bffb232002-11-13 21:46:34 +00003405
3406 if (LOCKSET_SANITY)
3407 sanity_check_locksets("SK_(fini)");
3408
fitzhardinge111c6072004-03-09 02:45:07 +00003409 if (VG_(clo_verbosity) > 0)
3410 VG_(message)(Vg_UserMsg, "%u possible data races found; %u lock order problems",
3411 n_eraser_warnings, n_lockorder_warnings);
sewardjf6374322002-11-13 22:35:55 +00003412
3413 if (0)
3414 VG_(printf)("stk_ld:%u+stk_st:%u = %u nonstk_ld:%u+nonstk_st:%u = %u %u%%\n",
3415 stk_ld, stk_st, stk_ld + stk_st,
3416 nonstk_ld, nonstk_st, nonstk_ld + nonstk_st,
3417 ((stk_ld+stk_st)*100) / (stk_ld + stk_st + nonstk_ld + nonstk_st));
njn25e49d8e72002-09-23 09:36:25 +00003418}
3419
fitzhardinge98abfc72003-12-16 02:05:15 +00003420/* Uses a 1:1 mapping */
3421VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 1.0)
3422
njn25e49d8e72002-09-23 09:36:25 +00003423/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00003424/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00003425/*--------------------------------------------------------------------*/