
/*--------------------------------------------------------------------*/
/*--- Helgrind: checking for data races in threaded programs.      ---*/
/*---                                                   hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind tool for detecting
   data races in threaded programs.

   Copyright (C) 2002-2004 Nicholas Nethercote
      njn25@cam.ac.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "tool.h"
#include "helgrind.h"

static UInt n_eraser_warnings = 0;
static UInt n_lockorder_warnings = 0;

/*------------------------------------------------------------*/
/*--- Debug guff                                            ---*/
/*------------------------------------------------------------*/

#define DEBUG_LOCK_TABLE    0   /* Print lock table at end */

#define DEBUG_MAKE_ACCESSES 0   /* Print make_access() calls */
#define DEBUG_LOCKS         0   /* Print lock()/unlock() calls and locksets */
#define DEBUG_NEW_LOCKSETS  0   /* Print new locksets when created */
#define DEBUG_ACCESSES      0   /* Print reads, writes */
#define DEBUG_MEM_LOCKSET_CHANGES 0
                                /* Print when an address's lockset
                                   changes; only useful with
                                   DEBUG_ACCESSES */
#define SLOW_ASSERTS        0   /* do expensive asserts */
#define DEBUG_VIRGIN_READS  0   /* Dump around address on VIRGIN reads */

#if SLOW_ASSERTS
#define SK_ASSERT(x) sk_assert(x)
#else
#define SK_ASSERT(x)
#endif

/* heavyweight LockSet sanity checking:
   0 == never
   1 == after important ops
   2 == As 1 and also after pthread_mutex_* ops (excessively slow)
 */
#define LOCKSET_SANITY 0

/* Rotate an unsigned quantity left */
#define ROTL(x, n) (((x) << (n)) | ((x) >> ((sizeof(x)*8)-(n))))

/* round a up to the next multiple of N.  N must be a power of 2 */
#define ROUNDUP(a, N)   ((a + N - 1) & ~(N-1))

/* Round a down to the next multiple of N.  N must be a power of 2 */
#define ROUNDDN(a, N)   ((a) & ~(N-1))
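
/* Worked example (illustrative, hypothetical values): with N = 4,
   ROUNDUP(0x1001, 4) == 0x1004 and ROUNDDN(0x1003, 4) == 0x1000, so an
   arbitrary byte range [a, a+len) expands to the enclosing word-aligned
   range [ROUNDDN(a,4), ROUNDUP(a+len,4)), which is how
   set_address_range_state() below walks memory one word at a time. */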

/*------------------------------------------------------------*/
/*--- Command line options                                  ---*/
/*------------------------------------------------------------*/

static enum {
   EC_None,
   EC_Some,
   EC_All
} clo_execontext = EC_None;

static Bool clo_priv_stacks = False;

/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                            ---*/
/*------------------------------------------------------------*/

// PPP: work out if I want this

#define PROF_EVENT(x)
#if 0
#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

void VGE_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

//static void init_prof_mem ( void ) { }
//       void VG_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif /* VG_PROFILE_MEMORY */

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   [PPP: snip event numbers...]
*/
#endif /* 0 */


/*------------------------------------------------------------*/
/*--- Data defns.                                           ---*/
/*------------------------------------------------------------*/

typedef
   struct _HG_Chunk {
      struct _HG_Chunk* next;
      Addr          data;            /* ptr to actual block */
      SizeT         size;            /* size requested */
      ExeContext*   where;           /* where it was allocated */
      ThreadId      tid;             /* allocating thread */
   }
   HG_Chunk;

typedef enum
   { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit, Vge_Error }
   VgeInitStatus;


/* Should add up to 32 to fit in one word */
#define OTHER_BITS      30
#define STATE_BITS      2

#define ESEC_MAP_WORDS  16384   /* Words per secondary map */

/* This is for indicating that a memory block has been initialised but not
 * really directly by a particular thread... (eg. text/data initialised
 * automatically at startup).
 * Must be different to virgin_word.other */
#define TID_INDICATING_NONVIRGIN    1

/* Magic packed TLS used for error suppression; if word state is Excl
   and tid is this, then it means all access are OK without changing
   state and without raising any more errors */
#define TLSP_INDICATING_ALL    ((1 << OTHER_BITS) - 1)

/* Number of entries must fit in STATE_BITS bits */
typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;

static inline const Char *pp_state(pth_state st)
{
   const Char *ret;

   switch(st) {
   case Vge_Virgin:  ret = "virgin"; break;
   case Vge_Excl:    ret = "exclusive"; break;
   case Vge_Shar:    ret = "shared RO"; break;
   case Vge_SharMod: ret = "shared RW"; break;
   default:          ret = "???";
   }
   return ret;
}

typedef
   struct {
      /* gcc arranges this bitfield with state in the 2LSB and other
         in the 30MSB, which is what we want */
      UInt state:STATE_BITS;
      UInt other:OTHER_BITS;
   } shadow_word;

#define SW(st, other)   ((shadow_word) { st, other })
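
/* Example of how a shadow word is built (illustrative): virgin memory is
   SW(Vge_Virgin, 0) (see virgin_sword below), while a word exclusively
   owned by the thread lifetime segment 'tls' is SW(Vge_Excl, packTLS(tls)),
   as done in init_nonvirgin_sword() later in this file. */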

typedef
   struct {
      shadow_word swords[ESEC_MAP_WORDS];
   }
   ESecMap;

static ESecMap* primary_map[ 65536 ];
static ESecMap  distinguished_secondary_map;

static const shadow_word virgin_sword = SW(Vge_Virgin, 0);
static const shadow_word error_sword = SW(Vge_Excl, TLSP_INDICATING_ALL);

#define VGE_IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr, caller)                                   \
   do {                                                                 \
      if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {         \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);       \
         /*VG_(printf)("new 2map because of %p\n", addr);*/             \
      }                                                                 \
   } while(0)


/* Parallel map which contains execution contexts when words last
   changed state (if required) */

typedef struct EC_IP {
   union u_ec_ip {
      Addr        ip;
      ExeContext *ec;
   } uu_ec_ip;
   UInt state:STATE_BITS;
   UInt tls:OTHER_BITS;         /* packed TLS */
} EC_IP;

#define NULL_EC_IP ((EC_IP){ { 0 }, 0, 0})

#define IP(ip, prev, tls) ((EC_IP) { (union u_ec_ip)(ip), (prev).state, packTLS(tls) })
#define EC(ec, prev, tls) ((EC_IP) { (union u_ec_ip)(ec), (prev).state, packTLS(tls) })

static inline UInt packEC(ExeContext *ec)
{
   SK_ASSERT(((UInt)ec & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)ec) >> STATE_BITS;
}

static inline ExeContext *unpackEC(UInt i)
{
   return (ExeContext *)(i << STATE_BITS);
}

/* Lose 2 LSB of IP */
static inline UInt packIP(Addr ip)
{
   return ((UInt)ip) >> STATE_BITS;
}

static inline Addr unpackIP(UInt i)
{
   return (Addr)(i << STATE_BITS);
}

typedef struct {
   EC_IP execontext[ESEC_MAP_WORDS];
} ExeContextMap;

static ExeContextMap** execontext_map;

static inline void setExeContext(Addr a, EC_IP ec)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >>  2) & 0x3fff;

   if (execontext_map[idx] == NULL) {
      execontext_map[idx] = VG_(malloc)(sizeof(ExeContextMap));
      VG_(memset)(execontext_map[idx], 0, sizeof(ExeContextMap));
   }

   execontext_map[idx]->execontext[off] = ec;
}

static inline EC_IP getExeContext(Addr a)
{
   UInt idx = (a >> 16) & 0xffff;
   UInt off = (a >>  2) & 0x3fff;
   EC_IP ec = NULL_EC_IP;

   if (execontext_map[idx] != NULL)
      ec = execontext_map[idx]->execontext[off];

   return ec;
}

/*------------------------------------------------------------*/
/*--- Thread lifetime segments                              ---*/
/*------------------------------------------------------------*/

/*
 * This mechanism deals with the common case of a parent thread
 * creating a structure for a child thread, and then passing ownership
 * of the structure to that thread.  It similarly copes with a child
 * thread passing information back to another thread waiting to join
 * on it.
 *
 * Each thread's lifetime can be partitioned into segments.  Those
 * segments are arranged to form an interference graph which indicates
 * whether two thread lifetime segments can possibly be concurrent.
 * If not, then memory which is exclusively accessed by one TLS can be
 * passed on to another TLS without an error occurring, and without
 * moving it from Excl state.
 *
 * At present this only considers thread creation and join as
 * synchronisation events for creating new lifetime segments, but
 * others may be possible (like mutex operations).
 */
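
/* Illustrative scenario (a sketch, not taken from a real trace): parent P
   allocates a structure, so its words become Excl in P's current segment;
   P then creates child C.  Thread creation starts fresh segments, and
   addPriorTLS() records the creating thread's segment as a prior of the
   child's segment, so tlsIsDisjoint() can prove the two segments are never
   concurrent.  When C later touches the structure, ownership can be handed
   over to C's segment while the words stay in the Excl state, and no race
   is reported. */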

typedef struct _ThreadLifeSeg ThreadLifeSeg;

struct _ThreadLifeSeg {
   ThreadId      tid;
   ThreadLifeSeg *prior[2];     /* Previous lifetime segments */
   UInt          refcount;      /* Number of memory locations pointing here */
   UInt          mark;          /* mark used for graph traversal */
   ThreadLifeSeg *next;         /* list of all TLS */
};

static ThreadLifeSeg *all_tls;
static UInt tls_since_gc;
#define TLS_SINCE_GC    10000

/* current mark used for TLS graph traversal */
static UInt tlsmark;

static ThreadLifeSeg *thread_seg[VG_N_THREADS];


static void tls_gc(void)
{
   /* XXX later.  Walk through all TLSs and look for ones with 0
      refcount and remove them from the structure and free them.
      Could probably get rid of ThreadLifeSeg.refcount and simply use
      mark-sweep from the shadow table. */
   VG_(printf)("WRITEME: TLS GC\n");
}

static void newTLS(ThreadId tid)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls;

   /* Initial NULL */
   if (thread_seg[tid] == NULL) {
      tls = VG_(malloc)(sizeof(*tls));
      tls->tid = tid;
      tls->prior[0] = tls->prior[1] = NULL;
      tls->refcount = 0;
      tls->mark = tlsmark-1;

      tls->next = all_tls;
      all_tls = tls;
      tls_since_gc++;

      thread_seg[tid] = tls;
      return;
   }

   /* Previous TLS was unused, so just recycle */
   if (thread_seg[tid]->refcount == 0) {
      if (debug)
         VG_(printf)("newTLS; recycling TLS %p for tid %u\n",
                     thread_seg[tid], tid);
      return;
   }

   /* Use existing TLS for this tid as a prior for new TLS */
   tls = VG_(malloc)(sizeof(*tls));
   tls->tid = tid;
   tls->prior[0] = thread_seg[tid];
   tls->prior[1] = NULL;
   tls->refcount = 0;
   tls->mark = tlsmark-1;

   tls->next = all_tls;
   all_tls = tls;
   if (++tls_since_gc > TLS_SINCE_GC) {
      tls_gc();
      tls_since_gc = 0;
   }

   if (debug)
      VG_(printf)("newTLS: made new TLS %p for tid %u (prior %p(%u))\n",
                  tls, tid, tls->prior[0], tls->prior[0]->tid);

   thread_seg[tid] = tls;
}

/* clear out a TLS for a thread that's died */
static void clearTLS(ThreadId tid)
{
   newTLS(tid);

   thread_seg[tid]->prior[0] = NULL;
   thread_seg[tid]->prior[1] = NULL;
}

static void addPriorTLS(ThreadId tid, ThreadId prior)
{
   static const Bool debug = False;
   ThreadLifeSeg *tls = thread_seg[tid];

   if (debug)
      VG_(printf)("making TLS %p(%u) prior to TLS %p(%u)\n",
                  thread_seg[prior], prior, tls, tid);

   sk_assert(thread_seg[tid] != NULL);
   sk_assert(thread_seg[prior] != NULL);

   if (tls->prior[0] == NULL)
      tls->prior[0] = thread_seg[prior];
   else {
      sk_assert(tls->prior[1] == NULL);
      tls->prior[1] = thread_seg[prior];
   }
}

/* Return True if prior is definitely not concurrent with tls */
static Bool tlsIsDisjoint(const ThreadLifeSeg *tls,
                          const ThreadLifeSeg *prior)
{
   Bool isPrior(const ThreadLifeSeg *t) {
      if (t == NULL || t->mark == tlsmark)
         return False;

      if (t == prior)
         return True;

      ((ThreadLifeSeg *)t)->mark = tlsmark;

      return isPrior(t->prior[0]) || isPrior(t->prior[1]);
   }
   tlsmark++;                   /* new traversal mark */

   return isPrior(tls);
}

static inline UInt packTLS(ThreadLifeSeg *tls)
{
   SK_ASSERT(((UInt)tls & ((1 << STATE_BITS)-1)) == 0);
   return ((UInt)tls) >> STATE_BITS;
}

static inline ThreadLifeSeg *unpackTLS(UInt i)
{
   return (ThreadLifeSeg *)(i << STATE_BITS);
}

/*------------------------------------------------------------*/
/*--- Low-level support for memory tracking.                ---*/
/*------------------------------------------------------------*/

/*
   All reads and writes are recorded in the memory map, which
   records the state of all memory in the process.  The memory map is
   organised like that for normal Valgrind, except that everything
   is done at word-level instead of byte-level, and each word has only
   one word of shadow (instead of 36 bits).

   As for normal Valgrind there is a distinguished secondary map.  But we're
   working at word-granularity, so it has 16k word entries instead of 64k byte
   entries.  Lookup is done as follows:

     bits 31..16:   primary map lookup
     bits 15.. 2:   secondary map lookup
     bits  1.. 0:   ignored
*/
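
/* Worked example (hypothetical address): for a = 0x40123457,
     primary index   = a >> 16           = 0x4012
     secondary index = (a & 0xFFFC) >> 2 = 0x0D15
   so the shadow word is primary_map[0x4012]->swords[0x0D15], exactly the
   calculation done by get_sword_addr() below; the bottom two bits are
   ignored because shadowing is per 4-byte word. */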


/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.         ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map, marking all words as virgin. */

/* Just a value that isn't a real pointer */
#define SEC_MAP_ACCESS  (shadow_word*)0x99


static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt  i;
   //PROF_EVENT(10); PPP

   // Mark all words as virgin.
   map = (ESecMap *)VG_(shadow_alloc)(sizeof(ESecMap));
   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}

/* Set a word.  The byte given by 'a' could be anywhere in the word -- the whole
 * word gets set. */
static /* __inline__ */
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;
   shadow_word *oldsw;

   //PROF_EVENT(23); PPP
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   oldsw = &sm->swords[(a & 0xFFFC) >> 2];
   if (oldsw->state == Vge_Excl && oldsw->other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(oldsw->other);
      tls->refcount--;
   }

   if (sword.state == Vge_Excl && sword.other != TLSP_INDICATING_ALL) {
      ThreadLifeSeg *tls = unpackTLS(sword.other);
      tls->refcount++;
   }

   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}


static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt     sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}


// SSS: rename these so they're not so similar to memcheck, unless it's
// appropriate of course

static __inline__
void init_virgin_sword(Addr a)
{
   if (clo_execontext != EC_None)
      setExeContext(a, NULL_EC_IP);
   set_sword(a, virgin_sword);
}

static __inline__
void init_error_sword(Addr a)
{
   set_sword(a, error_sword);
}

static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();
   ThreadLifeSeg *tls;

   sk_assert(tid != VG_INVALID_THREADID);
   tls = thread_seg[tid];

   sword = SW(Vge_Excl, packTLS(tls));
   set_sword(a, sword);
}


/* In this case, we treat it for Eraser's sake like virgin (it hasn't
 * been inited by a particular thread, it's just done automatically upon
 * startup), but we mark its .state specially so it doesn't look like an
 * uninited read. */
static __inline__
void init_magically_inited_sword(Addr a)
{
   shadow_word sword;

   sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());

   sword = SW(Vge_Virgin, TID_INDICATING_NONVIRGIN);

   set_sword(a, virgin_sword);
}


/*------------------------------------------------------------*/
/*--- Implementation of lock sets.                          ---*/
/*------------------------------------------------------------*/

typedef struct _Mutex Mutex;    /* forward decl */
typedef struct _LockSet LockSet;

typedef enum MutexState {
   MxUnknown,                   /* don't know */
   MxUnlocked,                  /* unlocked */
   MxLocked,                    /* locked */
   MxDead                       /* destroyed */
} MutexState;

struct _Mutex {
   Addr               mutexp;
   Mutex              *next;

   MutexState         state;     /* mutex state */
   ThreadId           tid;       /* owner */
   ExeContext         *location; /* where the last change happened */

   const LockSet      *lockdep;  /* set of locks we depend on */
   UInt               mark;      /* mark for graph traversal */
};

static inline Int mutex_cmp(const Mutex *a, const Mutex *b)
{
   return a->mutexp - b->mutexp;
}

struct _LockSet {
   Int                setsize;   /* number of members */
   UInt               hash;      /* hash code */
   LockSet            *next;     /* next in hash chain */
   const Mutex        *mutex[0]; /* locks */
};

static const LockSet *emptyset;

/* Each one is an index into the lockset table. */
static const LockSet *thread_locks[VG_N_THREADS];

#define LOCKSET_HASH_SZ 1021

static LockSet *lockset_hash[LOCKSET_HASH_SZ];

/* Pack and unpack a LockSet pointer into shadow_word.other */
static inline UInt packLockSet(const LockSet *p)
{
   UInt id;

   SK_ASSERT(((UInt)p & ((1 << STATE_BITS)-1)) == 0);
   id = ((UInt)p) >> STATE_BITS;

   return id;
}

static inline const LockSet *unpackLockSet(UInt id)
{
   return (LockSet *)(id << STATE_BITS);
}

static
void pp_LockSet(const LockSet* p)
{
   Int i;
   VG_(printf)("{ ");
   for(i = 0; i < p->setsize; i++) {
      const Mutex *mx = p->mutex[i];

      VG_(printf)("%p%(y ", mx->mutexp, mx->mutexp);
   }
   VG_(printf)("}\n");
}


static void print_LockSet(const Char *s, const LockSet *ls)
{
   VG_(printf)("%s: ", s);
   pp_LockSet(ls);
}

/* Compute the hash of a LockSet */
static UInt hash_LockSet_w_wo(const LockSet *ls,
                              const Mutex *with,
                              const Mutex *without)
{
   Int  i;
   UInt hash = ls->setsize + (with != NULL) - (without != NULL);

   sk_assert(with == NULL || with != without);

   for(i = 0; with != NULL || i < ls->setsize; i++) {
      const Mutex *mx = i >= ls->setsize ? NULL : ls->mutex[i];

      if (without && mutex_cmp(without, mx) == 0)
         continue;

      if (with && (mx == NULL || mutex_cmp(with, mx) < 0)) {
         mx = with;
         with = NULL;
         i--;
      }

      hash = ROTL(hash, 17);
      hash ^= (UInt)mx->mutexp;
   }

   return hash % LOCKSET_HASH_SZ;
}

static inline UInt hash_LockSet_with(const LockSet *ls, const Mutex *with)
{
   UInt hash = hash_LockSet_w_wo(ls, with, NULL);

   if (0)
      VG_(printf)("hash_with %p+%p -> %d\n", ls, with->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet_without(const LockSet *ls, const Mutex *without)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, without);

   if (0)
      VG_(printf)("hash_with %p-%p -> %d\n", ls, without->mutexp, hash);

   return hash;
}

static inline UInt hash_LockSet(const LockSet *ls)
{
   UInt hash = hash_LockSet_w_wo(ls, NULL, NULL);

   if (0)
      VG_(printf)("hash %p -> %d\n", ls, hash);

   return hash;
}

static
Bool structural_eq_LockSet(const LockSet* a, const LockSet* b)
{
   Int i;

   if (a == b)
      return True;
   if (a->setsize != b->setsize)
      return False;

   for(i = 0; i < a->setsize; i++) {
      if (mutex_cmp(a->mutex[i], b->mutex[i]) != 0)
         return False;
   }

   return True;
}


/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion.  Returns True if they match.
 */
static Bool
weird_LockSet_equals(const LockSet* a, const LockSet* b,
                     const Mutex *missing_mutex)
{
   static const Bool debug = False;
   Int ia, ib;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet(" b", b);
      VG_(printf)( " missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
   }

   if ((a->setsize + 1) != b->setsize) {
      if (debug)
         VG_(printf)(" fastpath length mismatch -> 0\n");
      return False;
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing mutex itself
      3 the section after missing_mutex to the end of a
    */

   ia = 0;
   ib = 0;

   /* 1: up to missing_mutex */
   for(; ia < a->setsize && mutex_cmp(a->mutex[ia], missing_mutex) < 0; ia++, ib++) {
      if (debug) {
         print_LockSet(" 1:a", a);
         print_LockSet(" 1:b", b);
      }
      if (ib == b->setsize || mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   /* 2: missing_mutex itself */
   if (debug) {
      VG_(printf)( " 2:missing: %p%(y\n",
                   missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet(" 2: b", b);
   }

   sk_assert(ia == a->setsize || mutex_cmp(a->mutex[ia], missing_mutex) >= 0);

   if (ib == b->setsize || mutex_cmp(missing_mutex, b->mutex[ib]) != 0)
      return False;

   ib++;

   /* 3: after missing_mutex to end */

   for(; ia < a->setsize && ib < b->setsize; ia++, ib++) {
      if (debug) {
         print_LockSet(" 3:a", a);
         print_LockSet(" 3:b", b);
      }
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) != 0)
         return False;
   }

   if (debug)
      VG_(printf)(" ia=%d ib=%d --> %d\n", ia, ib, ia == a->setsize && ib == b->setsize);

   return ia == a->setsize && ib == b->setsize;
}



static const LockSet *lookup_LockSet(const LockSet *set)
{
   UInt bucket = set->hash;
   LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (set == ret || structural_eq_LockSet(set, ret))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_with(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_with(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(set, ret, mutex))
         return ret;

   return NULL;
}

static const LockSet *lookup_LockSet_without(const LockSet *set, Mutex *mutex)
{
   UInt bucket = hash_LockSet_without(set, mutex);
   const LockSet *ret;

   for(ret = lockset_hash[bucket]; ret != NULL; ret = ret->next)
      if (weird_LockSet_equals(ret, set, mutex))
         return ret;

   return NULL;
}

static void insert_LockSet(LockSet *set)
{
   UInt hash = hash_LockSet(set);

   set->hash = hash;

   sk_assert(lookup_LockSet(set) == NULL);

   set->next = lockset_hash[hash];
   lockset_hash[hash] = set;
}

static inline
LockSet *alloc_LockSet(UInt setsize)
{
   LockSet *ret = VG_(malloc)(sizeof(*ret) + sizeof(Mutex *) * setsize);
   ret->setsize = setsize;
   return ret;
}

static inline
void free_LockSet(LockSet *p)
{
   /* assert: not present in hash */
   VG_(free)(p);
}

static
void pp_all_LockSets ( void )
{
   Int i;
   Int sets, buckets;

   sets = buckets = 0;
   for (i = 0; i < LOCKSET_HASH_SZ; i++) {
      const LockSet *ls = lockset_hash[i];
      Bool first = True;

      for(; ls != NULL; ls = ls->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)(" ");

         sets++;
         first = False;
         pp_LockSet(ls);
      }
   }

   VG_(printf)("%d distinct LockSets in %d buckets\n", sets, buckets);
}

static inline Bool isempty(const LockSet *ls)
{
   return ls == NULL || ls->setsize == 0;
}

static Bool ismember(const LockSet *ls, const Mutex *mx)
{
   Int i;

   /* XXX use binary search */
   for(i = 0; i < ls->setsize; i++)
      if (mutex_cmp(mx, ls->mutex[i]) == 0)
         return True;

   return False;
}

/* Check invariants:
   - all locksets are unique
   - each set is an array in strictly increasing order of mutex addr
*/
static
void sanity_check_locksets ( const Char* caller )
{
   Int              i;
   const Char       *badness;
   LockSet          *ls;

   for(i = 0; i < LOCKSET_HASH_SZ; i++) {

      for(ls = lockset_hash[i]; ls != NULL; ls = ls->next) {
         const Mutex *prev;
         Int j;

         if (hash_LockSet(ls) != ls->hash) {
            badness = "mismatched hash";
            goto bad;
         }
         if (ls->hash != (UInt)i) {
            badness = "wrong bucket";
            goto bad;
         }
         if (lookup_LockSet(ls) != ls) {
            badness = "non-unique set";
            goto bad;
         }

         prev = ls->mutex[0];
         for(j = 1; j < ls->setsize; j++) {
            if (mutex_cmp(prev, ls->mutex[j]) >= 0) {
               badness = "mutexes out of order";
               goto bad;
            }
         }
      }
   }
   return;

  bad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, ls=%p badness = %s, caller = %s\n",
               i, ls, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}

static
LockSet *add_LockSet(const LockSet *ls, const Mutex *mx)
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("add-IN mutex %p%(y\n", mx->mutexp, mx->mutexp);
      print_LockSet("add-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("add-IN");

   sk_assert(!ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize+1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (debug)
         VG_(printf)("i=%d j=%d ls->mutex[i]=%p mx=%p\n",
                     i, j, ls->mutex[i]->mutexp, mx ? mx->mutexp : 0);
      if (mx && mutex_cmp(mx, ls->mutex[i]) < 0) {
         ret->mutex[j++] = mx;
         mx = NULL;
      }
      ret->mutex[j++] = ls->mutex[i];
   }

   /* not added in loop - must be after */
   if (mx)
      ret->mutex[j++] = mx;

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("add-OUT", ret);
      sanity_check_locksets("add-OUT");
   }
   return ret;
}

/* Builds ls with mx removed.  mx should actually be in ls!
   (a checked assertion).  Resulting set should not already
   exist in the table (unchecked).
*/
static
LockSet *remove_LockSet ( const LockSet *ls, const Mutex *mx )
{
   static const Bool debug = False;
   LockSet *ret = NULL;
   Int i, j;

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("remove-IN", ls);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("remove-IN");

   sk_assert(ismember(ls, mx));

   ret = alloc_LockSet(ls->setsize-1);

   for(i = j = 0; i < ls->setsize; i++) {
      if (mutex_cmp(ls->mutex[i], mx) == 0)
         continue;
      ret->mutex[j++] = ls->mutex[i];
   }

   sk_assert(j == ret->setsize);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("remove-OUT", ret);
      sanity_check_locksets("remove-OUT");
   }
   return ret;
}


/* Builds the intersection, and then unbuilds it if it's already in the table.
 */
static const LockSet *_intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("intersect-IN");

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("intersect a", a);
      print_LockSet("intersect b", b);
   }

   /* count the size of the new set */
   size = 0;
   ia = ib = 0;
   for(size = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         size++;
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   /* Build the intersection of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; ia < a->setsize && ib < b->setsize; ) {
      if (mutex_cmp(a->mutex[ia], b->mutex[ib]) == 0) {
         sk_assert(iret < ret->setsize);
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (mutex_cmp(a->mutex[ia], b->mutex[ib]) < 0) {
         ia++;
      } else {
         sk_assert(mutex_cmp(a->mutex[ia], b->mutex[ib]) > 0);
         ib++;
      }
   }

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      free_LockSet(ret);
   } else {
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("intersect-OUT", found);
      sanity_check_locksets("intersect-OUT");
   }

   return found;
}

/* inline the fastpath */
static inline const LockSet *intersect(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("intersect-same fastpath", a);
      }
      return a;
   }

   if (isempty(a) || isempty(b)) {
      if (debug)
         VG_(printf)("intersect empty fastpath\n");
      return emptyset;
   }

   return _intersect(a, b);
}


static const LockSet *ls_union(const LockSet *a, const LockSet *b)
{
   static const Bool debug = False;
   Int iret;
   Int ia, ib;
   Int size;
   LockSet *ret;
   const LockSet *found;

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("union-IN");

   /* Fast case -- when the two are the same */
   if (a == b) {
      if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
         print_LockSet("union-same fastpath", a);
      }
      return a;
   }

   if (isempty(a)) {
      if (debug)
         print_LockSet("union a=empty b", b);
      return b;
   }
   if (isempty(b)) {
      if (debug)
         print_LockSet("union b=empty a", a);
      return a;
   }

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* count the size of the new set */
   for(size = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         size++;
         ia++;
         ib++;
      } else if (cmp < 0) {
         size++;
         ia++;
      } else {
         sk_assert(cmp > 0);
         size++;
         ib++;
      }
   }

   /* Build the union of the two sets */
   ret = alloc_LockSet(size);
   for (iret = ia = ib = 0; (ia < a->setsize) || (ib < b->setsize); ) {
      Int cmp;
      sk_assert(iret < ret->setsize);

      if ((ia < a->setsize) && (ib < b->setsize))
         cmp = mutex_cmp(a->mutex[ia], b->mutex[ib]);
      else if (ia == a->setsize)
         cmp = 1;
      else
         cmp = -1;

      if (cmp == 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
         ib++;
      } else if (cmp < 0) {
         ret->mutex[iret++] = a->mutex[ia];
         ia++;
      } else {
         sk_assert(cmp > 0);
         ret->mutex[iret++] = b->mutex[ib];
         ib++;
      }
   }

   sk_assert(iret == ret->setsize);

   ret->hash = hash_LockSet(ret);

   /* Now search for it in the table, adding it if not seen before */
   found = lookup_LockSet(ret);

   if (found != NULL) {
      if (debug)
         print_LockSet("union found existing set", found);
      free_LockSet(ret);
   } else {
      if (debug)
         print_LockSet("union inserting new set", ret);
      insert_LockSet(ret);
      found = ret;
   }

   if (debug || LOCKSET_SANITY) {
      print_LockSet("union-OUT", found);
      sanity_check_locksets("union-OUT");
   }

   return found;
}

/*------------------------------------------------------------*/
/*--- Implementation of mutex structure.                    ---*/
/*------------------------------------------------------------*/

static UInt graph_mark;         /* current mark we're using for graph traversal */

static void record_mutex_error(ThreadId tid, Mutex *mutex,
                               Char *str, ExeContext *ec);
static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
                                   const LockSet *lockset_holding,
                                   const LockSet *lockset_prev);

static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid);

#define M_MUTEX_HASHSZ  1021

static Mutex *mutex_hash[M_MUTEX_HASHSZ];
static UInt total_mutexes;

static const Char *pp_MutexState(MutexState st)
{
   switch(st) {
   case MxLocked:   return "Locked";
   case MxUnlocked: return "Unlocked";
   case MxDead:     return "Dead";
   case MxUnknown:  return "Unknown";
   }
   return "???";
}

static void pp_all_mutexes()
{
   Int i;
   Int locks, buckets;

   locks = buckets = 0;
   for(i = 0; i < M_MUTEX_HASHSZ; i++) {
      Mutex *mx;
      Bool first = True;

      for(mx = mutex_hash[i]; mx != NULL; mx = mx->next) {
         if (first) {
            buckets++;
            VG_(printf)("[%4d] = ", i);
         } else
            VG_(printf)(" ");
         locks++;
         first = False;
         VG_(printf)("%p [%8s] -> %p%(y\n",
                     mx, pp_MutexState(mx->state), mx->mutexp, mx->mutexp);
      }
   }

   VG_(printf)("%d locks in %d buckets (%d allocated)\n",
               locks, buckets, total_mutexes);
}

/* find or create a Mutex for a program's mutex use */
static Mutex *get_mutex(Addr mutexp)
{
   UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
   Mutex *mp;

   for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
      if (mp->mutexp == mutexp)
         return mp;

   total_mutexes++;

   mp = VG_(malloc)(sizeof(*mp));
   mp->mutexp = mutexp;
   mp->next = mutex_hash[bucket];
   mutex_hash[bucket] = mp;

   mp->state = MxUnknown;
   mp->tid = VG_INVALID_THREADID;
   mp->location = NULL;

   mp->lockdep = emptyset;
   mp->mark = graph_mark - 1;

   return mp;
}

/* Find all mutexes in a range of memory, and call the callback.
   Remove the mutex from the hash if the callback returns True (mutex
   structure itself is not freed, because it may be pointed to by a
   LockSet). */
static void find_mutex_range(Addr start, Addr end, Bool (*action)(Mutex *))
{
   UInt first = start % M_MUTEX_HASHSZ;
   UInt last = (end+1) % M_MUTEX_HASHSZ;
   UInt i;

   /* Single pass over the hash table, looking for likely hashes */
   for(i = first; i != last; ) {
      Mutex *mx;
      Mutex **prev = &mutex_hash[i];

      for(mx = mutex_hash[i]; mx != NULL; prev = &mx->next, mx = mx->next) {
         if (mx->mutexp >= start && mx->mutexp < end && (*action)(mx))
            *prev = mx->next;
      }

      if (++i == M_MUTEX_HASHSZ)
         i = 0;
   }
}

#define MARK_LOOP       (graph_mark+0)
#define MARK_DONE       (graph_mark+1)

static Bool check_cycle_inner(const Mutex *mutex, const LockSet *ls)
{
   static const Bool debug = False;
   Int i;

   if (mutex->mark == MARK_LOOP)
      return True;              /* found cycle */
   if (mutex->mark == MARK_DONE)
      return False;             /* been here before, it's OK */

   ((Mutex*)mutex)->mark = MARK_LOOP;

   if (debug)
      VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
                  graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
   for(i = 0; i < ls->setsize; i++) {
      const Mutex *mx = ls->mutex[i];

      if (debug)
         VG_(printf)(" %y ls=%p (ls->mutex=%p%(y)\n",
                     mutex->mutexp, ls,
                     mx->mutexp, mx->mutexp);
      if (check_cycle_inner(mx, mx->lockdep))
         return True;
   }
   ((Mutex*)mutex)->mark = MARK_DONE;

   return False;
}

static Bool check_cycle(const Mutex *start, const LockSet* lockset)
{

   graph_mark += 2;             /* clear all marks */

   return check_cycle_inner(start, lockset);
}

/* test to see if a mutex state change would be problematic; this
   makes no changes to the mutex state.  This should be called before
   the locking thread has actually blocked. */
static void test_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
   static const Bool debug = False;

   if (mutex->state == MxDead) {
      Char *str;

      switch(state) {
      case MxLocked:   str = "lock dead mutex"; break;
      case MxUnlocked: str = "unlock dead mutex"; break;
      default:         str = "operate on dead mutex"; break;
      }

      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex, str, mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (check_cycle(mutex, thread_locks[tid]))
         record_lockgraph_error(tid, mutex, thread_locks[tid], mutex->lockdep);
      else {
         mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

         if (debug) {
            VG_(printf)("giving mutex %p%(y lockdep = %p ",
                        mutex->mutexp, mutex->mutexp, mutex->lockdep);
            print_LockSet("lockdep", mutex->lockdep);
         }
      }
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked) {
         record_mutex_error(tid, mutex,
                            "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
         record_mutex_error(tid, mutex,
                            "unlock someone else's mutex", mutex->location);
      }
      break;

   case MxDead:
      break;

   default:
      break;
   }
}
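
/* Expected calling pattern (a sketch; the call sites are elsewhere in this
   file): for a lock acquisition, first call test_mutex_state(mx, MxLocked,
   tid) before the thread blocks, so errors are reported against the
   attempt, and then set_mutex_state(mx, MxLocked, tid) once the lock is
   actually held, with no client code run in between (see the comment
   below). */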

/* Update a mutex state.  Expects most error testing and reporting to
   have happened in test_mutex_state().  The assumption is that no
   client code is run by thread tid between test and set, either
   because it is blocked or test and set are called together
   atomically.

   Setting state to MxDead is the exception, since that can happen as
   a result of any thread freeing memory; in this case set_mutex_state
   does all the error reporting as well.
*/
static void set_mutex_state(Mutex *mutex, MutexState state, ThreadId tid)
{
   static const Bool debug = False;

   if (debug)
      VG_(printf)("\ntid %d changing mutex (%p)->%p%(y state %s -> %s\n",
                  tid, mutex, mutex->mutexp, mutex->mutexp,
                  pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      return;
   }

   switch(state) {
   case MxLocked:
      if (mutex->state == MxLocked) {
         if (mutex->tid != tid)
            record_mutex_error(tid, mutex, "take lock held by someone else",
                               mutex->location);
         else
            record_mutex_error(tid, mutex, "take lock we already hold",
                               mutex->location);

         VG_(skin_panic)("core should have checked this\n");
         break;
      }

      sk_assert(!check_cycle(mutex, mutex->lockdep));

      mutex->tid = tid;
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", thread_locks[tid]);

      if (mutex->state != MxLocked || mutex->tid != tid)
         break;

      mutex->tid = VG_INVALID_THREADID;
      break;

   case MxDead:
      if (mutex->state == MxLocked) {
         /* forcibly remove offending lock from thread's lockset */
1560 sk_assert(ismember(thread_locks[mutex->tid], mutex));
1561 thread_locks[mutex->tid] = remove_LockSet(thread_locks[mutex->tid], mutex);
1562 mutex->tid = VG_INVALID_THREADID;
1563
1564 record_mutex_error(tid, mutex,
1565 "free locked mutex", mutex->location);
1566 }
1567 break;
1568
sewardjc26cc252002-10-23 21:58:55 +00001569 default:
1570 break;
1571 }
1572
njn72718642003-07-24 08:45:32 +00001573 mutex->location = VG_(get_ExeContext)(tid);
sewardjc26cc252002-10-23 21:58:55 +00001574 mutex->state = state;
1575}
njn25e49d8e72002-09-23 09:36:25 +00001576
1577/*------------------------------------------------------------*/
1578/*--- Setting and checking permissions. ---*/
1579/*------------------------------------------------------------*/
1580
thughes4ad52d02004-06-27 17:37:21 +00001581/* only clean up dead mutexes */
1582static
1583Bool cleanmx(Mutex *mx) {
1584 return mx->state == MxDead;
1585}
1586
njn25e49d8e72002-09-23 09:36:25 +00001587static
nethercote451eae92004-11-02 13:06:32 +00001588void set_address_range_state ( Addr a, SizeT len /* in bytes */,
njn25e49d8e72002-09-23 09:36:25 +00001589 VgeInitStatus status )
1590{
sewardj1806d7f2002-10-22 05:05:49 +00001591 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00001592
1593# if DEBUG_MAKE_ACCESSES
1594 VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
1595# endif
1596 //PROF_EVENT(30); PPP
1597
1598 if (len == 0)
1599 return;
1600
1601 if (len > 100 * 1000 * 1000)
1602 VG_(message)(Vg_UserMsg,
1603 "Warning: set address range state: large range %d",
1604 len);
1605
1606 VGP_PUSHCC(VgpSARP);
1607
sewardjdac0a442002-11-13 22:08:40 +00001608 /* Remove mutexes in recycled memory range from hash */
1609 find_mutex_range(a, a+len, cleanmx);
1610
njn25e49d8e72002-09-23 09:36:25 +00001611 /* Memory block may not be aligned or a whole word multiple. In neat cases,
1612 * we have to init len/4 words (len is in bytes). In nasty cases, it's
1613 * len/4+1 words. This works out which it is by aligning the block and
1614 * seeing if the end byte is in the same word as it is for the unaligned
1615 * block; if not, it's the awkward case. */
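   /* For example (illustrative numbers): a == 0x1003, len == 2 touches
      bytes 0x1003..0x1004, which straddle a word boundary.  ROUNDDN gives
      a == 0x1000 and ROUNDUP(0x1005, 4) gives end == 0x1008, so the loops
      below initialise two whole shadow words even though len/4 == 0. */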
sewardj8fac99a2002-11-13 22:31:26 +00001616 end = ROUNDUP(a + len, 4);
1617 a = ROUNDDN(a, 4);
njn25e49d8e72002-09-23 09:36:25 +00001618
1619 /* Do it ... */
1620 switch (status) {
1621 case Vge_VirginInit:
1622 for ( ; a < end; a += 4) {
1623 //PROF_EVENT(31); PPP
1624 init_virgin_sword(a);
1625 }
1626 break;
1627
1628 case Vge_NonVirginInit:
1629 for ( ; a < end; a += 4) {
1630 //PROF_EVENT(31); PPP
1631 init_nonvirgin_sword(a);
1632 }
1633 break;
1634
1635 case Vge_SegmentInit:
1636 for ( ; a < end; a += 4) {
1637 //PROF_EVENT(31); PPP
1638 init_magically_inited_sword(a);
1639 }
1640 break;
sewardj7f3ad222002-11-13 22:11:53 +00001641
1642 case Vge_Error:
1643 for ( ; a < end; a += 4) {
1644 //PROF_EVENT(31); PPP
1645 init_error_sword(a);
1646 }
1647 break;
njn25e49d8e72002-09-23 09:36:25 +00001648
1649 default:
1650 VG_(printf)("init_status = %u\n", status);
njne427a662002-10-02 11:08:25 +00001651 VG_(skin_panic)("Unexpected Vge_InitStatus");
njn25e49d8e72002-09-23 09:36:25 +00001652 }
1653
1654 /* Check that zero page and highest page have not been written to
1655 -- this could happen with buggy syscall wrappers. Today
1656 (2001-04-26) had precisely such a problem with
1657 __NR_setitimer. */
njne427a662002-10-02 11:08:25 +00001658 sk_assert(SK_(cheap_sanity_check)());
njn25e49d8e72002-09-23 09:36:25 +00001659 VGP_POPCC(VgpSARP);
1660}
1661
1662
nethercote451eae92004-11-02 13:06:32 +00001663static void make_segment_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001664{
1665 //PROF_EVENT(??); PPP
1666 set_address_range_state ( a, len, Vge_SegmentInit );
1667}
1668
nethercote451eae92004-11-02 13:06:32 +00001669static void make_writable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001670{
1671 //PROF_EVENT(36); PPP
1672 set_address_range_state( a, len, Vge_VirginInit );
1673}
1674
nethercote451eae92004-11-02 13:06:32 +00001675static void make_readable ( Addr a, SizeT len )
njn25e49d8e72002-09-23 09:36:25 +00001676{
1677 //PROF_EVENT(37); PPP
sewardj499e3de2002-11-13 22:22:25 +00001678 set_address_range_state( a, len, Vge_VirginInit );
njn25e49d8e72002-09-23 09:36:25 +00001679}
1680
1681
njn25e49d8e72002-09-23 09:36:25 +00001682/* Block-copy states (needed for implementing realloc()). */
nethercote451eae92004-11-02 13:06:32 +00001683static void copy_address_range_state(Addr src, Addr dst, SizeT len)
njn25e49d8e72002-09-23 09:36:25 +00001684{
1685 UInt i;
1686
1687 //PROF_EVENT(40); PPP
1688 for (i = 0; i < len; i += 4) {
1689 shadow_word sword = *(get_sword_addr ( src+i ));
1690 //PROF_EVENT(41); PPP
1691 set_sword ( dst+i, sword );
1692 }
1693}
1694
1695// SSS: put these somewhere better
nethercote451eae92004-11-02 13:06:32 +00001696static void eraser_mem_read (Addr a, SizeT data_size, ThreadId tid);
1697static void eraser_mem_write(Addr a, SizeT data_size, ThreadId tid);
sewardja5b3aec2002-10-22 05:09:36 +00001698
sewardja5b3aec2002-10-22 05:09:36 +00001699static void eraser_mem_help_read_1(Addr a) REGPARM(1);
1700static void eraser_mem_help_read_2(Addr a) REGPARM(1);
1701static void eraser_mem_help_read_4(Addr a) REGPARM(1);
nethercote451eae92004-11-02 13:06:32 +00001702static void eraser_mem_help_read_N(Addr a, SizeT size) REGPARM(2);
sewardja5b3aec2002-10-22 05:09:36 +00001703
1704static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
1705static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
1706static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
nethercote451eae92004-11-02 13:06:32 +00001707static void eraser_mem_help_write_N(Addr a, SizeT size) REGPARM(2);
njn25e49d8e72002-09-23 09:36:25 +00001708
sewardj7a5ebcf2002-11-13 22:42:13 +00001709static void bus_lock(void);
1710static void bus_unlock(void);
1711
njn25e49d8e72002-09-23 09:36:25 +00001712static
njn72718642003-07-24 08:45:32 +00001713void eraser_pre_mem_read(CorePart part, ThreadId tid,
nethercote451eae92004-11-02 13:06:32 +00001714 Char* s, Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001715{
njn72718642003-07-24 08:45:32 +00001716   if (tid > 50) { VG_(printf)("tid = %d, s = `%s`, part = %d\n", tid, s, part); VG_(skin_panic)("a");}
1717 eraser_mem_read(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001718}
1719
1720static
njn72718642003-07-24 08:45:32 +00001721void eraser_pre_mem_read_asciiz(CorePart part, ThreadId tid,
nethercote6a27d832004-09-07 10:17:02 +00001722 Char* s, Addr base )
njn25e49d8e72002-09-23 09:36:25 +00001723{
njn72718642003-07-24 08:45:32 +00001724 eraser_mem_read(base, VG_(strlen)((Char*)base), tid);
njn25e49d8e72002-09-23 09:36:25 +00001725}
1726
1727static
njn72718642003-07-24 08:45:32 +00001728void eraser_pre_mem_write(CorePart part, ThreadId tid,
nethercote451eae92004-11-02 13:06:32 +00001729 Char* s, Addr base, SizeT size )
njn25e49d8e72002-09-23 09:36:25 +00001730{
njn72718642003-07-24 08:45:32 +00001731 eraser_mem_write(base, size, tid);
njn25e49d8e72002-09-23 09:36:25 +00001732}
1733
1734
1735
1736static
nethercote451eae92004-11-02 13:06:32 +00001737void eraser_new_mem_startup( Addr a, SizeT len, Bool rr, Bool ww, Bool xx )
njn25e49d8e72002-09-23 09:36:25 +00001738{
njn1f3a9092002-10-04 09:22:30 +00001739 /* Ignore the permissions, just make it readable. Seems to work... */
njn25e49d8e72002-09-23 09:36:25 +00001740 make_segment_readable(a, len);
1741}
1742
1743
1744static
nethercote451eae92004-11-02 13:06:32 +00001745void eraser_new_mem_heap ( Addr a, SizeT len, Bool is_inited )
njn25e49d8e72002-09-23 09:36:25 +00001746{
1747 if (is_inited) {
1748 make_readable(a, len);
1749 } else {
1750 make_writable(a, len);
1751 }
1752}
1753
1754static
nethercote451eae92004-11-02 13:06:32 +00001755void eraser_set_perms (Addr a, SizeT len,
sewardj40f8ebe2002-10-23 21:46:13 +00001756 Bool rr, Bool ww, Bool xx)
njn25e49d8e72002-09-23 09:36:25 +00001757{
1758 if (rr) make_readable(a, len);
1759 else if (ww) make_writable(a, len);
1760 /* else do nothing */
1761}
1762
sewardjf6374322002-11-13 22:35:55 +00001763static
nethercote451eae92004-11-02 13:06:32 +00001764void eraser_new_mem_stack_private(Addr a, SizeT len)
sewardjf6374322002-11-13 22:35:55 +00001765{
1766 set_address_range_state(a, len, Vge_NonVirginInit);
1767}
1768
1769static
nethercote451eae92004-11-02 13:06:32 +00001770void eraser_new_mem_stack(Addr a, SizeT len)
sewardjf6374322002-11-13 22:35:55 +00001771{
1772 set_address_range_state(a, len, Vge_VirginInit);
1773}
njn25e49d8e72002-09-23 09:36:25 +00001774
1775/*--------------------------------------------------------------*/
1776/*--- Initialise the memory audit system on program startup. ---*/
1777/*--------------------------------------------------------------*/
1778
1779static
1780void init_shadow_memory(void)
1781{
1782 Int i;
1783
1784 for (i = 0; i < ESEC_MAP_WORDS; i++)
1785 distinguished_secondary_map.swords[i] = virgin_sword;
1786
1787 /* These entries gradually get overwritten as the used address
1788 space expands. */
1789 for (i = 0; i < 65536; i++)
1790 primary_map[i] = &distinguished_secondary_map;
1791}
1792
1793
njn3e884182003-04-15 13:03:23 +00001794/*------------------------------------------------------------*/
1795/*--- malloc() et al replacements ---*/
1796/*------------------------------------------------------------*/
1797
njnb4aee052003-04-15 14:09:58 +00001798static VgHashTable hg_malloc_list = NULL;
njn3e884182003-04-15 13:03:23 +00001799
1800#define N_FREED_CHUNKS 2
1801static Int freechunkptr = 0;
1802static HG_Chunk *freechunks[N_FREED_CHUNKS];
1803
1804/* Use a small redzone (paranoia) */
nethercotee1efb922004-07-10 16:01:52 +00001805UInt VG_(vg_malloc_redzone_szB) = 8;
njn3e884182003-04-15 13:03:23 +00001806
1807
1808/* Allocate a user-chunk of size bytes. Also allocate its shadow
1809 block, make the shadow block point at the user block. Put the
1810 shadow chunk on the appropriate list, and set all memory
1811 protections correctly. */
1812
nethercote7ac7f7b2004-11-02 12:36:02 +00001813static void add_HG_Chunk ( ThreadId tid, Addr p, SizeT size )
njn3e884182003-04-15 13:03:23 +00001814{
1815 HG_Chunk* hc;
1816
1817 hc = VG_(malloc)(sizeof(HG_Chunk));
1818 hc->data = p;
1819 hc->size = size;
njn72718642003-07-24 08:45:32 +00001820 hc->where = VG_(get_ExeContext)(tid);
1821 hc->tid = tid;
njn3e884182003-04-15 13:03:23 +00001822
1823 VG_(HT_add_node)( hg_malloc_list, (VgHashNode*)hc );
1824}
1825
1826/* Allocate memory and note change in memory available */
1827static __inline__
nethercote7ac7f7b2004-11-02 12:36:02 +00001828void* alloc_and_new_mem ( SizeT size, SizeT alignment, Bool is_zeroed )
njn3e884182003-04-15 13:03:23 +00001829{
1830 Addr p;
1831
njn34ac0272003-09-30 14:20:00 +00001832 if (size < 0) return NULL;
1833
njn3e884182003-04-15 13:03:23 +00001834 p = (Addr)VG_(cli_malloc)(alignment, size);
nethercote57e36b32004-07-10 14:56:28 +00001835 if (!p) {
1836 return NULL;
1837 }
njn34ac0272003-09-30 14:20:00 +00001838 if (is_zeroed) VG_(memset)((void*)p, 0, size);
njn72718642003-07-24 08:45:32 +00001839 add_HG_Chunk ( VG_(get_current_or_recent_tid)(), p, size );
njn3e884182003-04-15 13:03:23 +00001840 eraser_new_mem_heap( p, size, is_zeroed );
1841
1842 return (void*)p;
1843}
1844
nethercote7ac7f7b2004-11-02 12:36:02 +00001845void* SK_(malloc) ( SizeT n )
njn3e884182003-04-15 13:03:23 +00001846{
njn72718642003-07-24 08:45:32 +00001847 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001848}
1849
nethercote7ac7f7b2004-11-02 12:36:02 +00001850void* SK_(__builtin_new) ( SizeT n )
njn3e884182003-04-15 13:03:23 +00001851{
njn72718642003-07-24 08:45:32 +00001852 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001853}
1854
nethercote7ac7f7b2004-11-02 12:36:02 +00001855void* SK_(__builtin_vec_new) ( SizeT n )
njn3e884182003-04-15 13:03:23 +00001856{
njn72718642003-07-24 08:45:32 +00001857 return alloc_and_new_mem ( n, VG_(clo_alignment), /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001858}
1859
nethercote7ac7f7b2004-11-02 12:36:02 +00001860void* SK_(memalign) ( SizeT align, SizeT n )
njn3e884182003-04-15 13:03:23 +00001861{
njn72718642003-07-24 08:45:32 +00001862 return alloc_and_new_mem ( n, align, /*is_zeroed*/False );
njn3e884182003-04-15 13:03:23 +00001863}
1864
nethercote7ac7f7b2004-11-02 12:36:02 +00001865void* SK_(calloc) ( SizeT nmemb, SizeT size )
njn3e884182003-04-15 13:03:23 +00001866{
njn34ac0272003-09-30 14:20:00 +00001867 return alloc_and_new_mem ( nmemb*size, VG_(clo_alignment),
1868 /*is_zeroed*/True );
njn3e884182003-04-15 13:03:23 +00001869}
1870
thughes4ad52d02004-06-27 17:37:21 +00001871static ThreadId deadmx_tid;
1872
1873static
1874Bool deadmx(Mutex *mx) {
1875 if (mx->state != MxDead)
1876 set_mutex_state(mx, MxDead, deadmx_tid);
1877
1878 return False;
1879}
1880
njn3e884182003-04-15 13:03:23 +00001881static
njn72718642003-07-24 08:45:32 +00001882void die_and_free_mem ( ThreadId tid, HG_Chunk* hc,
njn3e884182003-04-15 13:03:23 +00001883 HG_Chunk** prev_chunks_next_ptr )
1884{
njn72718642003-07-24 08:45:32 +00001885 Addr start = hc->data;
1886 Addr end = start + hc->size;
njn3e884182003-04-15 13:03:23 +00001887
njn3e884182003-04-15 13:03:23 +00001888 /* Remove hc from the malloclist using prev_chunks_next_ptr to
1889 avoid repeating the hash table lookup. Can't remove until at least
1890 after free and free_mismatch errors are done because they use
1891 describe_addr() which looks for it in malloclist. */
1892 *prev_chunks_next_ptr = hc->next;
1893
1894 /* Record where freed */
njn72718642003-07-24 08:45:32 +00001895 hc->where = VG_(get_ExeContext) ( tid );
njn3e884182003-04-15 13:03:23 +00001896
1897 /* maintain a small window so that the error reporting machinery
1898 knows about this memory */
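   /* For example: with N_FREED_CHUNKS == 2, the two most recently freed
      blocks are parked here and their client memory is not passed to
      VG_(cli_free) until they are evicted below, so describe_addr() can
      still classify a racy access into them as "Freed" (with the freeing
      thread and stack) rather than falling through to "Unknown". */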
1899 if (freechunks[freechunkptr] != NULL) {
1900 /* free HG_Chunk */
1901 HG_Chunk* sc1 = freechunks[freechunkptr];
1902 VG_(cli_free) ( (void*)(sc1->data) );
1903 VG_(free) ( sc1 );
1904 }
1905
1906 freechunks[freechunkptr] = hc;
1907
1908 if (++freechunkptr == N_FREED_CHUNKS)
1909 freechunkptr = 0;
1910
1911 /* mark all mutexes in range dead */
thughes4ad52d02004-06-27 17:37:21 +00001912 deadmx_tid = tid;
njn3e884182003-04-15 13:03:23 +00001913 find_mutex_range(start, end, deadmx);
1914}
1915
1916
1917static __inline__
njn72718642003-07-24 08:45:32 +00001918void handle_free ( void* p )
njn3e884182003-04-15 13:03:23 +00001919{
1920 HG_Chunk* hc;
1921 HG_Chunk** prev_chunks_next_ptr;
1922
nethercote3d6b6112004-11-04 16:39:43 +00001923 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UWord)p,
njn3e884182003-04-15 13:03:23 +00001924 (VgHashNode***)&prev_chunks_next_ptr );
1925 if (hc == NULL) {
1926 return;
1927 }
njn72718642003-07-24 08:45:32 +00001928 die_and_free_mem ( VG_(get_current_or_recent_tid)(),
1929 hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001930}
1931
njn72718642003-07-24 08:45:32 +00001932void SK_(free) ( void* p )
njn3e884182003-04-15 13:03:23 +00001933{
njn72718642003-07-24 08:45:32 +00001934 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001935}
1936
njn72718642003-07-24 08:45:32 +00001937void SK_(__builtin_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001938{
njn72718642003-07-24 08:45:32 +00001939 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001940}
1941
njn72718642003-07-24 08:45:32 +00001942void SK_(__builtin_vec_delete) ( void* p )
njn3e884182003-04-15 13:03:23 +00001943{
njn72718642003-07-24 08:45:32 +00001944 handle_free(p);
njn3e884182003-04-15 13:03:23 +00001945}
1946
nethercote7ac7f7b2004-11-02 12:36:02 +00001947void* SK_(realloc) ( void* p, SizeT new_size )
njn3e884182003-04-15 13:03:23 +00001948{
1949 HG_Chunk *hc;
1950 HG_Chunk **prev_chunks_next_ptr;
sewardj05bcdcb2003-05-18 10:05:38 +00001951 Int i;
njn72718642003-07-24 08:45:32 +00001952 ThreadId tid = VG_(get_current_or_recent_tid)();
njn3e884182003-04-15 13:03:23 +00001953
1954 /* First try and find the block. */
nethercote3d6b6112004-11-04 16:39:43 +00001955 hc = (HG_Chunk*)VG_(HT_get_node) ( hg_malloc_list, (UWord)p,
njn3e884182003-04-15 13:03:23 +00001956 (VgHashNode***)&prev_chunks_next_ptr );
1957
1958 if (hc == NULL) {
1959 return NULL;
1960 }
1961
1962 if (hc->size == new_size) {
1963 /* size unchanged */
njn398044f2003-07-24 17:39:59 +00001964 hc->where = VG_(get_ExeContext)(tid);
njn3e884182003-04-15 13:03:23 +00001965 return p;
1966
1967 } else if (hc->size > new_size) {
1968 /* new size is smaller */
1969 hc->size = new_size;
njn398044f2003-07-24 17:39:59 +00001970 hc->where = VG_(get_ExeContext)(tid);
njn3e884182003-04-15 13:03:23 +00001971 return p;
1972
1973 } else {
1974 /* new size is bigger */
1975 Addr p_new;
1976
1977 /* Get new memory */
1978 p_new = (Addr)VG_(cli_malloc)(VG_(clo_alignment), new_size);
1979
1980 /* First half kept and copied, second half new */
1981 copy_address_range_state( (Addr)p, p_new, hc->size );
1982 eraser_new_mem_heap ( p_new+hc->size, new_size-hc->size,
1983 /*inited*/False );
1984
1985 /* Copy from old to new */
1986 for (i = 0; i < hc->size; i++)
1987 ((UChar*)p_new)[i] = ((UChar*)p)[i];
1988
1989 /* Free old memory */
njn72718642003-07-24 08:45:32 +00001990 die_and_free_mem ( tid, hc, prev_chunks_next_ptr );
njn3e884182003-04-15 13:03:23 +00001991
1992 /* this has to be after die_and_free_mem, otherwise the
1993 former succeeds in shorting out the new block, not the
1994 old, in the case when both are on the same list. */
njn72718642003-07-24 08:45:32 +00001995 add_HG_Chunk ( tid, p_new, new_size );
njn3e884182003-04-15 13:03:23 +00001996
1997 return (void*)p_new;
1998 }
1999}
2000
njn25e49d8e72002-09-23 09:36:25 +00002001/*--------------------------------------------------------------*/
2002/*--- Machinery to support sanity checking ---*/
2003/*--------------------------------------------------------------*/
2004
njn25e49d8e72002-09-23 09:36:25 +00002005Bool SK_(cheap_sanity_check) ( void )
2006{
jseward9800fd32004-01-04 23:08:04 +00002007 /* nothing useful we can rapidly check */
2008 return True;
njn25e49d8e72002-09-23 09:36:25 +00002009}
2010
njn25e49d8e72002-09-23 09:36:25 +00002011Bool SK_(expensive_sanity_check)(void)
2012{
2013 Int i;
2014
2015 /* Make sure nobody changed the distinguished secondary. */
2016 for (i = 0; i < ESEC_MAP_WORDS; i++)
2017 if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
2018 distinguished_secondary_map.swords[i].state != virgin_sword.state)
2019 return False;
2020
2021 return True;
2022}
2023
2024
2025/*--------------------------------------------------------------*/
2026/*--- Instrumentation ---*/
2027/*--------------------------------------------------------------*/
2028
sewardjf6374322002-11-13 22:35:55 +00002029static UInt stk_ld, nonstk_ld, stk_st, nonstk_st;
2030
njn25e49d8e72002-09-23 09:36:25 +00002031/* Create and return an instrumented version of cb_in. Free cb_in
2032 before returning. */
2033UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used )
2034{
2035 UCodeBlock* cb;
2036 Int i;
2037 UInstr* u_in;
2038 Int t_size = INVALID_TEMPREG;
sewardjf6374322002-11-13 22:35:55 +00002039 Int ntemps;
2040 Bool *stackref = NULL;
sewardj7a5ebcf2002-11-13 22:42:13 +00002041 Bool locked = False; /* lock prefix */
njn25e49d8e72002-09-23 09:36:25 +00002042
njn810086f2002-11-14 12:42:47 +00002043 cb = VG_(setup_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002044
sewardjf6374322002-11-13 22:35:55 +00002045 /* stackref[] is used for super-simple value tracking to keep note
2046 of which tempregs currently hold a value which is derived from
nethercoteca788ff2004-10-20 10:58:09 +00002047 the stack pointer or frame pointer, and is therefore likely
2048 stack-relative if used as the address for LOAD or STORE. */
njn810086f2002-11-14 12:42:47 +00002049 ntemps = VG_(get_num_temps)(cb);
sewardjf6374322002-11-13 22:35:55 +00002050 stackref = VG_(malloc)(sizeof(*stackref) * ntemps);
2051 VG_(memset)(stackref, 0, sizeof(*stackref) * ntemps);
2052
njn810086f2002-11-14 12:42:47 +00002053 for (i = 0; i < VG_(get_num_instrs)(cb_in); i++) {
2054 u_in = VG_(get_instr)(cb_in, i);
njn25e49d8e72002-09-23 09:36:25 +00002055
njn25e49d8e72002-09-23 09:36:25 +00002056 switch (u_in->opcode) {
2057
2058 case NOP: case CALLM_S: case CALLM_E:
2059 break;
sewardjf6374322002-11-13 22:35:55 +00002060
sewardj7a5ebcf2002-11-13 22:42:13 +00002061 case LOCK:
2062 locked = True;
2063 uInstr0(cb, CCALL, 0);
2064 uCCall(cb, (Addr)bus_lock, 0, 0, False);
2065 break;
2066
2067 case JMP: case INCEIP:
2068 if (locked) {
2069 uInstr0(cb, CCALL, 0);
2070 uCCall(cb, (Addr)bus_unlock, 0, 0, False);
2071 }
2072 locked = False;
2073 VG_(copy_UInstr)(cb, u_in);
2074 break;
2075
sewardjf6374322002-11-13 22:35:55 +00002076 case GET:
2077 sk_assert(u_in->tag1 == ArchReg);
2078 sk_assert(u_in->tag2 == TempReg);
2079 sk_assert(u_in->val2 < ntemps);
2080
2081 stackref[u_in->val2] = (u_in->size == 4 &&
nethercoteca788ff2004-10-20 10:58:09 +00002082 (u_in->val1 == R_STACK_PTR ||
2083 u_in->val1 == R_FRAME_PTR));
sewardjf6374322002-11-13 22:35:55 +00002084 VG_(copy_UInstr)(cb, u_in);
2085 break;
2086
2087 case MOV:
2088 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2089 sk_assert(u_in->tag2 == TempReg);
2090 stackref[u_in->val2] = stackref[u_in->val1];
2091 }
2092 VG_(copy_UInstr)(cb, u_in);
2093 break;
2094
2095 case LEA1:
2096 case ADD: case SUB:
2097 if (u_in->size == 4 && u_in->tag1 == TempReg) {
2098 sk_assert(u_in->tag2 == TempReg);
2099 stackref[u_in->val2] |= stackref[u_in->val1];
2100 }
2101 VG_(copy_UInstr)(cb, u_in);
2102 break;
njn25e49d8e72002-09-23 09:36:25 +00002103
sewardja5b3aec2002-10-22 05:09:36 +00002104 case LOAD: {
2105 void (*help)(Addr);
2106 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002107 sk_assert(u_in->tag1 == TempReg);
2108
2109 if (!clo_priv_stacks || !stackref[u_in->val1]) {
2110 nonstk_ld++;
2111
2112 switch(u_in->size) {
2113 case 1: help = eraser_mem_help_read_1; break;
2114 case 2: help = eraser_mem_help_read_2; break;
2115 case 4: help = eraser_mem_help_read_4; break;
2116 default:
2117 VG_(skin_panic)("bad size");
2118 }
jsgfcb1d1c02003-10-14 21:55:10 +00002119
2120 /* XXX all registers should be flushed to baseblock
2121 here */
sewardjf6374322002-11-13 22:35:55 +00002122 uInstr1(cb, CCALL, 0, TempReg, u_in->val1);
2123 uCCall(cb, (Addr)help, 1, 1, False);
2124 } else
2125 stk_ld++;
njn25e49d8e72002-09-23 09:36:25 +00002126
sewardja5b3aec2002-10-22 05:09:36 +00002127 VG_(copy_UInstr)(cb, u_in);
2128 t_size = INVALID_TEMPREG;
2129 break;
2130 }
2131
fitzhardinge111c6072004-03-09 02:45:07 +00002132 case MMX2_MemRd:
sewardja5b3aec2002-10-22 05:09:36 +00002133 case FPU_R: {
njne427a662002-10-02 11:08:25 +00002134 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
fitzhardinge111c6072004-03-09 02:45:07 +00002135 8 == u_in->size || 10 == u_in->size || 108 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002136
fitzhardinge111c6072004-03-09 02:45:07 +00002137 t_size = newTemp(cb);
2138 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2139 uLiteral(cb, (UInt)u_in->size);
njn25e49d8e72002-09-23 09:36:25 +00002140
fitzhardinge111c6072004-03-09 02:45:07 +00002141 /* XXX all registers should be flushed to baseblock
2142 here */
2143 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2144 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
2145
2146 VG_(copy_UInstr)(cb, u_in);
2147 t_size = INVALID_TEMPREG;
2148 break;
sewardja5b3aec2002-10-22 05:09:36 +00002149 }
2150
thughes96b466a2004-03-15 16:43:58 +00002151 case MMX2a1_MemRd: {
2152 sk_assert(8 == u_in->size);
2153
2154 t_size = newTemp(cb);
2155 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2156 uLiteral(cb, (UInt)u_in->size);
2157
2158 /* XXX all registers should be flushed to baseblock
2159 here */
2160 uInstr2(cb, CCALL, 0, TempReg, u_in->val3, TempReg, t_size);
2161 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
2162
2163 VG_(copy_UInstr)(cb, u_in);
2164 t_size = INVALID_TEMPREG;
2165 break;
2166 }
2167
fitzhardinge111c6072004-03-09 02:45:07 +00002168 case SSE2a_MemRd:
2169 case SSE2a1_MemRd:
2170 case SSE3a_MemRd:
2171 case SSE3a1_MemRd:
2172 case SSE3ag_MemRd_RegWr: {
2173 Int addr = (u_in->opcode == SSE3ag_MemRd_RegWr) ? u_in->val1 : u_in->val3;
2174
2175 sk_assert(u_in->size == 4 || u_in->size == 8 || u_in->size == 16 || u_in->size == 512);
2176
2177 t_size = newTemp(cb);
2178 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2179 uLiteral(cb, (UInt)u_in->size);
2180
2181 uInstr2(cb, CCALL, 0, TempReg, addr, TempReg, t_size);
2182 uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False);
2183
2184 VG_(copy_UInstr)(cb, u_in);
2185 t_size = INVALID_TEMPREG;
2186 break;
2187 }
2188
sewardja5b3aec2002-10-22 05:09:36 +00002189 case STORE: {
2190 void (*help)(Addr, UInt);
2191 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size);
sewardjf6374322002-11-13 22:35:55 +00002192 sk_assert(u_in->tag2 == TempReg);
sewardja5b3aec2002-10-22 05:09:36 +00002193
sewardjf6374322002-11-13 22:35:55 +00002194 if (!clo_priv_stacks || !stackref[u_in->val2]) {
2195 nonstk_st++;
2196
2197 switch(u_in->size) {
2198 case 1: help = eraser_mem_help_write_1; break;
2199 case 2: help = eraser_mem_help_write_2; break;
2200 case 4: help = eraser_mem_help_write_4; break;
2201 default:
2202 VG_(skin_panic)("bad size");
2203 }
2204
jsgfcb1d1c02003-10-14 21:55:10 +00002205 /* XXX all registers should be flushed to baseblock
2206 here */
sewardjf6374322002-11-13 22:35:55 +00002207 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1);
2208 uCCall(cb, (Addr)help, 2, 2, False);
2209 } else
2210 stk_st++;
sewardja5b3aec2002-10-22 05:09:36 +00002211
2212 VG_(copy_UInstr)(cb, u_in);
2213 t_size = INVALID_TEMPREG;
2214 break;
2215 }
2216
fitzhardinge111c6072004-03-09 02:45:07 +00002217 case MMX2_MemWr:
sewardja5b3aec2002-10-22 05:09:36 +00002218 case FPU_W: {
njne427a662002-10-02 11:08:25 +00002219 sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size ||
fitzhardinge111c6072004-03-09 02:45:07 +00002220 8 == u_in->size || 10 == u_in->size || 108 == u_in->size);
sewardja5b3aec2002-10-22 05:09:36 +00002221
2222 t_size = newTemp(cb);
2223 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2224 uLiteral(cb, (UInt)u_in->size);
jsgfcb1d1c02003-10-14 21:55:10 +00002225 /* XXX all registers should be flushed to baseblock
2226 here */
sewardja5b3aec2002-10-22 05:09:36 +00002227 uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size);
2228 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
2229
2230 VG_(copy_UInstr)(cb, u_in);
2231 t_size = INVALID_TEMPREG;
2232 break;
2233 }
njn25e49d8e72002-09-23 09:36:25 +00002234
fitzhardinge111c6072004-03-09 02:45:07 +00002235 case SSE2a_MemWr:
2236 case SSE3a_MemWr: {
2237 sk_assert(4 == u_in->size || 8 == u_in->size || 16 == u_in->size ||
2238 512 == u_in->size);
2239
2240 t_size = newTemp(cb);
2241 uInstr2(cb, MOV, 4, Literal, 0, TempReg, t_size);
2242 uLiteral(cb, (UInt)u_in->size);
2243 /* XXX all registers should be flushed to baseblock
2244 here */
2245 uInstr2(cb, CCALL, 0, TempReg, u_in->val3, TempReg, t_size);
2246 uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False);
2247
2248 VG_(copy_UInstr)(cb, u_in);
2249 t_size = INVALID_TEMPREG;
2250 break;
2251 }
sewardj3d7c9c82003-03-26 21:08:13 +00002252
njn25e49d8e72002-09-23 09:36:25 +00002253 default:
sewardjf6374322002-11-13 22:35:55 +00002254 /* conservative tromping */
2255 if (0 && u_in->tag1 == TempReg) /* can val1 ever be dest? */
2256 stackref[u_in->val1] = False;
2257 if (u_in->tag2 == TempReg)
2258 stackref[u_in->val2] = False;
2259 if (u_in->tag3 == TempReg)
2260 stackref[u_in->val3] = False;
njn4ba5a792002-09-30 10:23:54 +00002261 VG_(copy_UInstr)(cb, u_in);
njn25e49d8e72002-09-23 09:36:25 +00002262 break;
2263 }
2264 }
2265
sewardjf6374322002-11-13 22:35:55 +00002266 VG_(free)(stackref);
njn4ba5a792002-09-30 10:23:54 +00002267 VG_(free_UCodeBlock)(cb_in);
njn25e49d8e72002-09-23 09:36:25 +00002268 return cb;
2269}
2270
2271
2272/*--------------------------------------------------------------------*/
2273/*--- Error and suppression handling ---*/
2274/*--------------------------------------------------------------------*/
2275
2276typedef
2277 enum {
2278 /* Possible data race */
2279 EraserSupp
2280 }
2281 EraserSuppKind;
2282
2283/* What kind of error it is. */
2284typedef
2285 enum {
sewardj16748af2002-10-22 04:55:54 +00002286 EraserErr, /* data-race */
2287 MutexErr, /* mutex operations */
sewardjff2c9232002-11-13 21:44:39 +00002288 LockGraphErr, /* mutex order error */
njn25e49d8e72002-09-23 09:36:25 +00002289 }
2290 EraserErrorKind;
2291
sewardj16748af2002-10-22 04:55:54 +00002292/* The classification of a faulting address. */
2293typedef
2294 enum { Undescribed, /* as-yet unclassified */
2295 Stack,
2296 Unknown, /* classification yielded nothing useful */
sewardjdac0a442002-11-13 22:08:40 +00002297 Mallocd,
2298 Freed,
sewardj16748af2002-10-22 04:55:54 +00002299 Segment
2300 }
2301 AddrKind;
2302/* Records info about a faulting address. */
2303typedef
2304 struct {
2305 /* ALL */
2306 AddrKind akind;
2307 /* Freed, Mallocd */
2308 Int blksize;
2309 /* Freed, Mallocd */
2310 Int rwoffset;
2311 /* Freed, Mallocd */
2312 ExeContext* lastchange;
2313 ThreadId lasttid;
2314 /* Stack */
2315 ThreadId stack_tid;
2316 /* Segment */
2317 const Char* filename;
2318 const Char* section;
nethercoteca788ff2004-10-20 10:58:09 +00002319 /* True if is just-below the stack pointer -- could be a gcc bug. */
sewardj16748af2002-10-22 04:55:54 +00002320 Bool maybe_gcc;
jsgfcb1d1c02003-10-14 21:55:10 +00002321 /* symbolic address description */
2322 Char *expr;
sewardj16748af2002-10-22 04:55:54 +00002323 }
2324 AddrInfo;
njn25e49d8e72002-09-23 09:36:25 +00002325
sewardj16748af2002-10-22 04:55:54 +00002326/* What kind of memory access is involved in the error? */
2327typedef
2328 enum { ReadAxs, WriteAxs, ExecAxs }
2329 AxsKind;
2330
2331/* Extra context for memory errors */
2332typedef
2333 struct {
2334 AxsKind axskind;
2335 Int size;
2336 AddrInfo addrinfo;
2337 Bool isWrite;
2338 shadow_word prevstate;
sewardjff2c9232002-11-13 21:44:39 +00002339 /* MutexErr, LockGraphErr */
sewardj39a4d842002-11-13 22:14:30 +00002340 Mutex *mutex;
nethercoteca788ff2004-10-20 10:58:09 +00002341 EC_IP lasttouched;
sewardj16748af2002-10-22 04:55:54 +00002342 ThreadId lasttid;
sewardjff2c9232002-11-13 21:44:39 +00002343 /* LockGraphErr */
sewardj4bffb232002-11-13 21:46:34 +00002344 const LockSet *held_lockset;
2345 const LockSet *prev_lockset;
sewardj16748af2002-10-22 04:55:54 +00002346 }
2347 HelgrindError;
2348
2349static __inline__
2350void clear_AddrInfo ( AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002351{
sewardj16748af2002-10-22 04:55:54 +00002352 ai->akind = Unknown;
2353 ai->blksize = 0;
2354 ai->rwoffset = 0;
2355 ai->lastchange = NULL;
2356 ai->lasttid = VG_INVALID_THREADID;
2357 ai->filename = NULL;
2358 ai->section = "???";
2359 ai->stack_tid = VG_INVALID_THREADID;
2360 ai->maybe_gcc = False;
jsgfcb1d1c02003-10-14 21:55:10 +00002361 ai->expr = NULL;
njn25e49d8e72002-09-23 09:36:25 +00002362}
2363
sewardj16748af2002-10-22 04:55:54 +00002364static __inline__
2365void clear_HelgrindError ( HelgrindError* err_extra )
2366{
2367 err_extra->axskind = ReadAxs;
2368 err_extra->size = 0;
2369 err_extra->mutex = NULL;
nethercoteca788ff2004-10-20 10:58:09 +00002370 err_extra->lasttouched= NULL_EC_IP;
sewardj16748af2002-10-22 04:55:54 +00002371 err_extra->lasttid = VG_INVALID_THREADID;
sewardjff2c9232002-11-13 21:44:39 +00002372 err_extra->prev_lockset = 0;
2373 err_extra->held_lockset = 0;
sewardj8fac99a2002-11-13 22:31:26 +00002374 err_extra->prevstate = SW(Vge_Virgin, 0);
sewardj16748af2002-10-22 04:55:54 +00002375 clear_AddrInfo ( &err_extra->addrinfo );
2376 err_extra->isWrite = False;
2377}
2378
2379
2380
2381/* Describe an address as best you can, for error messages,
2382 putting the result in ai. */
2383
thughes4ad52d02004-06-27 17:37:21 +00002384/* Callback for searching malloc'd and free'd lists */
2385static Bool addr_is_in_block(VgHashNode *node, void *ap)
2386{
2387 HG_Chunk* hc2 = (HG_Chunk*)node;
2388 Addr a = *(Addr *)ap;
2389
2390 return (hc2->data <= a && a < hc2->data + hc2->size);
2391}
2392
sewardj16748af2002-10-22 04:55:54 +00002393static void describe_addr ( Addr a, AddrInfo* ai )
2394{
njn3e884182003-04-15 13:03:23 +00002395 HG_Chunk* hc;
sewardjdac0a442002-11-13 22:08:40 +00002396 Int i;
sewardj16748af2002-10-22 04:55:54 +00002397
sewardj16748af2002-10-22 04:55:54 +00002398 /* Search for it in segments */
2399 {
2400 const SegInfo *seg;
2401
2402 for(seg = VG_(next_seginfo)(NULL);
2403 seg != NULL;
2404 seg = VG_(next_seginfo)(seg)) {
2405 Addr base = VG_(seg_start)(seg);
nethercote928a5f72004-11-03 18:10:37 +00002406 SizeT size = VG_(seg_size)(seg);
sewardj16748af2002-10-22 04:55:54 +00002407 const UChar *filename = VG_(seg_filename)(seg);
2408
2409 if (a >= base && a < base+size) {
2410 ai->akind = Segment;
2411 ai->blksize = size;
2412 ai->rwoffset = a - base;
2413 ai->filename = filename;
2414
2415 switch(VG_(seg_sect_kind)(a)) {
2416 case Vg_SectText: ai->section = "text"; break;
2417 case Vg_SectData: ai->section = "data"; break;
2418 case Vg_SectBSS: ai->section = "BSS"; break;
2419 case Vg_SectGOT: ai->section = "GOT"; break;
2420 case Vg_SectPLT: ai->section = "PLT"; break;
2421 case Vg_SectUnknown:
2422 default:
2423 ai->section = "???"; break;
2424 }
2425
2426 return;
2427 }
2428 }
2429 }
2430
2431 /* Search for a currently malloc'd block which might bracket it. */
thughes4ad52d02004-06-27 17:37:21 +00002432 hc = (HG_Chunk*)VG_(HT_first_match)(hg_malloc_list, addr_is_in_block, &a);
njn3e884182003-04-15 13:03:23 +00002433 if (NULL != hc) {
sewardj16748af2002-10-22 04:55:54 +00002434 ai->akind = Mallocd;
njn3e884182003-04-15 13:03:23 +00002435 ai->blksize = hc->size;
2436 ai->rwoffset = (Int)a - (Int)(hc->data);
2437 ai->lastchange = hc->where;
2438 ai->lasttid = hc->tid;
sewardj16748af2002-10-22 04:55:54 +00002439 return;
2440 }
sewardjdac0a442002-11-13 22:08:40 +00002441
2442 /* Look in recently freed memory */
2443 for(i = 0; i < N_FREED_CHUNKS; i++) {
njn3e884182003-04-15 13:03:23 +00002444 hc = freechunks[i];
2445 if (hc == NULL)
sewardjdac0a442002-11-13 22:08:40 +00002446 continue;
2447
njn3e884182003-04-15 13:03:23 +00002448 if (a >= hc->data && a < hc->data + hc->size) {
sewardjdac0a442002-11-13 22:08:40 +00002449 ai->akind = Freed;
njn3e884182003-04-15 13:03:23 +00002450 ai->blksize = hc->size;
2451 ai->rwoffset = a - hc->data;
2452 ai->lastchange = hc->where;
2453 ai->lasttid = hc->tid;
sewardjdac0a442002-11-13 22:08:40 +00002454 return;
2455 }
2456 }
2457
sewardj16748af2002-10-22 04:55:54 +00002458 /* Clueless ... */
2459 ai->akind = Unknown;
2460 return;
2461}
2462
2463
njn7e614812003-04-21 22:04:03 +00002464/* Updates the copy with address info if necessary. */
2465UInt SK_(update_extra)(Error* err)
sewardj16748af2002-10-22 04:55:54 +00002466{
njn7e614812003-04-21 22:04:03 +00002467 HelgrindError* extra;
sewardj16748af2002-10-22 04:55:54 +00002468
njn7e614812003-04-21 22:04:03 +00002469 extra = (HelgrindError*)VG_(get_error_extra)(err);
2470 if (extra != NULL && Undescribed == extra->addrinfo.akind) {
2471 describe_addr ( VG_(get_error_address)(err), &(extra->addrinfo) );
2472 }
2473 return sizeof(HelgrindError);
sewardj16748af2002-10-22 04:55:54 +00002474}
2475
njn72718642003-07-24 08:45:32 +00002476static void record_eraser_error ( ThreadId tid, Addr a, Bool is_write,
sewardj0f811692002-10-22 04:59:26 +00002477 shadow_word prevstate )
sewardj16748af2002-10-22 04:55:54 +00002478{
sewardjc4a810d2002-11-13 22:25:51 +00002479 shadow_word *sw;
sewardj16748af2002-10-22 04:55:54 +00002480 HelgrindError err_extra;
2481
sewardjff2c9232002-11-13 21:44:39 +00002482 n_eraser_warnings++;
2483
sewardj16748af2002-10-22 04:55:54 +00002484 clear_HelgrindError(&err_extra);
2485 err_extra.isWrite = is_write;
2486 err_extra.addrinfo.akind = Undescribed;
2487 err_extra.prevstate = prevstate;
sewardj499e3de2002-11-13 22:22:25 +00002488 if (clo_execontext)
2489 err_extra.lasttouched = getExeContext(a);
jsgfcb1d1c02003-10-14 21:55:10 +00002490 err_extra.addrinfo.expr = VG_(describe_addr)(tid, a);
2491
njn72718642003-07-24 08:45:32 +00002492 VG_(maybe_record_error)( tid, EraserErr, a,
sewardj16748af2002-10-22 04:55:54 +00002493 (is_write ? "writing" : "reading"),
2494 &err_extra);
2495
sewardjc4a810d2002-11-13 22:25:51 +00002496 sw = get_sword_addr(a);
2497 if (sw->state == Vge_Excl && sw->other != TLSP_INDICATING_ALL) {
2498 ThreadLifeSeg *tls = unpackTLS(sw->other);
2499 tls->refcount--;
2500 }
2501
sewardj7f3ad222002-11-13 22:11:53 +00002502 set_sword(a, error_sword);
sewardj16748af2002-10-22 04:55:54 +00002503}
2504
sewardj39a4d842002-11-13 22:14:30 +00002505static void record_mutex_error(ThreadId tid, Mutex *mutex,
sewardj16748af2002-10-22 04:55:54 +00002506 Char *str, ExeContext *ec)
2507{
2508 HelgrindError err_extra;
2509
2510 clear_HelgrindError(&err_extra);
2511 err_extra.addrinfo.akind = Undescribed;
2512 err_extra.mutex = mutex;
sewardjc808ef52002-11-13 22:43:26 +00002513 err_extra.lasttouched = EC(ec, virgin_sword, thread_seg[tid]);
sewardj16748af2002-10-22 04:55:54 +00002514 err_extra.lasttid = tid;
2515
njn72718642003-07-24 08:45:32 +00002516 VG_(maybe_record_error)(tid, MutexErr,
sewardj16748af2002-10-22 04:55:54 +00002517 (Addr)mutex->mutexp, str, &err_extra);
2518}
njn25e49d8e72002-09-23 09:36:25 +00002519
sewardj39a4d842002-11-13 22:14:30 +00002520static void record_lockgraph_error(ThreadId tid, Mutex *mutex,
sewardj4bffb232002-11-13 21:46:34 +00002521 const LockSet *lockset_holding,
2522 const LockSet *lockset_prev)
sewardjff2c9232002-11-13 21:44:39 +00002523{
2524 HelgrindError err_extra;
2525
2526 n_lockorder_warnings++;
2527
2528 clear_HelgrindError(&err_extra);
2529 err_extra.addrinfo.akind = Undescribed;
2530 err_extra.mutex = mutex;
2531
sewardjc808ef52002-11-13 22:43:26 +00002532 err_extra.lasttouched = EC(mutex->location, virgin_sword, 0);
sewardjff2c9232002-11-13 21:44:39 +00002533 err_extra.held_lockset = lockset_holding;
2534 err_extra.prev_lockset = lockset_prev;
2535
njn72718642003-07-24 08:45:32 +00002536 VG_(maybe_record_error)(tid, LockGraphErr, mutex->mutexp, "", &err_extra);
sewardjff2c9232002-11-13 21:44:39 +00002537}
2538
njn810086f2002-11-14 12:42:47 +00002539Bool SK_(eq_SkinError) ( VgRes not_used, Error* e1, Error* e2 )
njn25e49d8e72002-09-23 09:36:25 +00002540{
njn810086f2002-11-14 12:42:47 +00002541 Char *e1s, *e2s;
sewardj16748af2002-10-22 04:55:54 +00002542
njn810086f2002-11-14 12:42:47 +00002543 sk_assert(VG_(get_error_kind)(e1) == VG_(get_error_kind)(e2));
2544
2545 switch (VG_(get_error_kind)(e1)) {
sewardj16748af2002-10-22 04:55:54 +00002546 case EraserErr:
njn810086f2002-11-14 12:42:47 +00002547 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002548
2549 case MutexErr:
njn810086f2002-11-14 12:42:47 +00002550 return VG_(get_error_address)(e1) == VG_(get_error_address)(e2);
sewardj16748af2002-10-22 04:55:54 +00002551 }
2552
njn810086f2002-11-14 12:42:47 +00002553 e1s = VG_(get_error_string)(e1);
2554 e2s = VG_(get_error_string)(e2);
2555 if (e1s != e2s) return False;
2556 if (0 != VG_(strcmp)(e1s, e2s)) return False;
njn25e49d8e72002-09-23 09:36:25 +00002557 return True;
2558}
2559
sewardj16748af2002-10-22 04:55:54 +00002560static void pp_AddrInfo ( Addr a, AddrInfo* ai )
njn25e49d8e72002-09-23 09:36:25 +00002561{
jsgfcb1d1c02003-10-14 21:55:10 +00002562 if (ai->expr != NULL)
2563 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002564 " Address %p == %s", a, ai->expr);
jsgfcb1d1c02003-10-14 21:55:10 +00002565
sewardj16748af2002-10-22 04:55:54 +00002566 switch (ai->akind) {
2567 case Stack:
2568 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002569 " Address %p is on thread %d's stack",
sewardj16748af2002-10-22 04:55:54 +00002570 a, ai->stack_tid);
2571 break;
2572 case Unknown:
jsgfcb1d1c02003-10-14 21:55:10 +00002573 if (ai->expr != NULL)
2574 break;
2575
nethercote3b390c72003-11-13 17:53:43 +00002576 /* maybe_gcc is never set to True! This is a hangover from code
2577 in Memcheck */
sewardj16748af2002-10-22 04:55:54 +00002578 if (ai->maybe_gcc) {
2579 VG_(message)(Vg_UserMsg,
nethercoteca788ff2004-10-20 10:58:09 +00002580 " Address %p is just below the stack pointer. Possibly a bug in GCC/G++",
sewardj16748af2002-10-22 04:55:54 +00002581 a);
2582 VG_(message)(Vg_UserMsg,
2583 " v 2.96 or 3.0.X. To suppress, use: --workaround-gcc296-bugs=yes");
2584 } else {
2585 VG_(message)(Vg_UserMsg,
nethercotef798eee2004-04-13 08:36:35 +00002586 " Address %p is not stack'd, malloc'd or (recently) free'd", a);
sewardj16748af2002-10-22 04:55:54 +00002587 }
2588 break;
2589 case Segment:
2590 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002591 " Address %p is in %s section of %s",
sewardj16748af2002-10-22 04:55:54 +00002592 a, ai->section, ai->filename);
2593 break;
sewardjdac0a442002-11-13 22:08:40 +00002594 case Mallocd:
2595 case Freed: {
sewardj16748af2002-10-22 04:55:54 +00002596 UInt delta;
2597 UChar* relative;
2598 if (ai->rwoffset < 0) {
2599 delta = (UInt)(- ai->rwoffset);
2600 relative = "before";
2601 } else if (ai->rwoffset >= ai->blksize) {
2602 delta = ai->rwoffset - ai->blksize;
2603 relative = "after";
2604 } else {
2605 delta = ai->rwoffset;
2606 relative = "inside";
2607 }
2608 VG_(message)(Vg_UserMsg,
nethercote3b390c72003-11-13 17:53:43 +00002609 " Address %p is %d bytes %s a block of size %d %s by thread %d",
sewardj16748af2002-10-22 04:55:54 +00002610 a, delta, relative,
2611 ai->blksize,
sewardjdac0a442002-11-13 22:08:40 +00002612 ai->akind == Mallocd ? "alloc'd" : "freed",
sewardj16748af2002-10-22 04:55:54 +00002613 ai->lasttid);
sewardj5481f8f2002-10-20 19:43:47 +00002614
sewardj16748af2002-10-22 04:55:54 +00002615 VG_(pp_ExeContext)(ai->lastchange);
2616 break;
2617 }
2618 default:
2619 VG_(skin_panic)("pp_AddrInfo");
2620 }
njn25e49d8e72002-09-23 09:36:25 +00002621}
2622
sewardj4bffb232002-11-13 21:46:34 +00002623static Char *lockset_str(const Char *prefix, const LockSet *lockset)
sewardjff2c9232002-11-13 21:44:39 +00002624{
sewardjff2c9232002-11-13 21:44:39 +00002625 Char *buf, *cp;
sewardj4bffb232002-11-13 21:46:34 +00002626 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002627
sewardj4bffb232002-11-13 21:46:34 +00002628 buf = VG_(malloc)((prefix == NULL ? 0 : VG_(strlen)(prefix)) +
2629 lockset->setsize * 120 +
2630 1);
sewardjff2c9232002-11-13 21:44:39 +00002631
2632 cp = buf;
2633 if (prefix)
2634 cp += VG_(sprintf)(cp, "%s", prefix);
2635
sewardj4bffb232002-11-13 21:46:34 +00002636 for(i = 0; i < lockset->setsize; i++)
2637 cp += VG_(sprintf)(cp, "%p%(y, ", lockset->mutex[i]->mutexp,
2638 lockset->mutex[i]->mutexp);
sewardjff2c9232002-11-13 21:44:39 +00002639
sewardj4bffb232002-11-13 21:46:34 +00002640 if (lockset->setsize)
sewardjff2c9232002-11-13 21:44:39 +00002641 cp[-2] = '\0';
2642 else
2643 *cp = '\0';
2644
2645 return buf;
2646}
njn25e49d8e72002-09-23 09:36:25 +00002647
njn43c799e2003-04-08 00:08:52 +00002648void SK_(pp_SkinError) ( Error* err )
njn25e49d8e72002-09-23 09:36:25 +00002649{
njn810086f2002-11-14 12:42:47 +00002650 HelgrindError *extra = (HelgrindError *)VG_(get_error_extra)(err);
sewardj16748af2002-10-22 04:55:54 +00002651 Char buf[100];
2652 Char *msg = buf;
sewardj4bffb232002-11-13 21:46:34 +00002653 const LockSet *ls;
sewardj16748af2002-10-22 04:55:54 +00002654
2655 *msg = '\0';
2656
njn810086f2002-11-14 12:42:47 +00002657 switch(VG_(get_error_kind)(err)) {
2658 case EraserErr: {
2659 Addr err_addr = VG_(get_error_address)(err);
2660
sewardj16748af2002-10-22 04:55:54 +00002661 VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y",
njn810086f2002-11-14 12:42:47 +00002662 VG_(get_error_string)(err), err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002663 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
njn810086f2002-11-14 12:42:47 +00002664 pp_AddrInfo(err_addr, &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002665
2666 switch(extra->prevstate.state) {
2667 case Vge_Virgin:
2668 /* shouldn't be possible to go directly from virgin -> error */
2669 VG_(sprintf)(buf, "virgin!?");
2670 break;
2671
sewardjc4a810d2002-11-13 22:25:51 +00002672 case Vge_Excl: {
2673 ThreadLifeSeg *tls = unpackTLS(extra->prevstate.other);
2674
2675 sk_assert(tls != unpackTLS(TLSP_INDICATING_ALL));
2676 VG_(sprintf)(buf, "exclusively owned by thread %u", tls->tid);
sewardj16748af2002-10-22 04:55:54 +00002677 break;
sewardjc4a810d2002-11-13 22:25:51 +00002678 }
sewardj16748af2002-10-22 04:55:54 +00002679
2680 case Vge_Shar:
sewardjff2c9232002-11-13 21:44:39 +00002681 case Vge_SharMod:
sewardj8fac99a2002-11-13 22:31:26 +00002682 ls = unpackLockSet(extra->prevstate.other);
sewardj4bffb232002-11-13 21:46:34 +00002683
2684 if (isempty(ls)) {
sewardj16748af2002-10-22 04:55:54 +00002685 VG_(sprintf)(buf, "shared %s, no locks",
2686 extra->prevstate.state == Vge_Shar ? "RO" : "RW");
2687 break;
2688 }
2689
sewardjff2c9232002-11-13 21:44:39 +00002690 msg = lockset_str(extra->prevstate.state == Vge_Shar ?
2691 "shared RO, locked by:" :
sewardj4bffb232002-11-13 21:46:34 +00002692 "shared RW, locked by:", ls);
sewardj16748af2002-10-22 04:55:54 +00002693
sewardj16748af2002-10-22 04:55:54 +00002694 break;
2695 }
sewardj16748af2002-10-22 04:55:54 +00002696
sewardj499e3de2002-11-13 22:22:25 +00002697 if (*msg)
nethercote3b390c72003-11-13 17:53:43 +00002698 VG_(message)(Vg_UserMsg, " Previous state: %s", msg);
sewardj499e3de2002-11-13 22:22:25 +00002699
sewardj72baa7a2002-12-09 23:32:58 +00002700 if (clo_execontext == EC_Some
nethercoteca788ff2004-10-20 10:58:09 +00002701 && extra->lasttouched.uu_ec_ip.ip != 0) {
sewardj499e3de2002-11-13 22:22:25 +00002702 Char file[100];
2703 UInt line;
nethercoteca788ff2004-10-20 10:58:09 +00002704 Addr ip = extra->lasttouched.uu_ec_ip.ip;
sewardj499e3de2002-11-13 22:22:25 +00002705
nethercote3b390c72003-11-13 17:53:43 +00002706 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s by thread %u",
njn810086f2002-11-14 12:42:47 +00002707 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002708 pp_state(extra->lasttouched.state),
2709 unpackTLS(extra->lasttouched.tls)->tid);
sewardj499e3de2002-11-13 22:22:25 +00002710
nethercoteca788ff2004-10-20 10:58:09 +00002711 if (VG_(get_filename_linenum)(ip, file, sizeof(file), &line)) {
sewardj499e3de2002-11-13 22:22:25 +00002712 VG_(message)(Vg_UserMsg, " at %p: %y (%s:%u)",
nethercoteca788ff2004-10-20 10:58:09 +00002713 ip, ip, file, line);
2714 } else if (VG_(get_objname)(ip, file, sizeof(file))) {
sewardj499e3de2002-11-13 22:22:25 +00002715 VG_(message)(Vg_UserMsg, " at %p: %y (in %s)",
nethercoteca788ff2004-10-20 10:58:09 +00002716 ip, ip, file);
sewardj499e3de2002-11-13 22:22:25 +00002717 } else {
nethercoteca788ff2004-10-20 10:58:09 +00002718 VG_(message)(Vg_UserMsg, " at %p: %y", ip, ip);
sewardj499e3de2002-11-13 22:22:25 +00002719 }
sewardj72baa7a2002-12-09 23:32:58 +00002720 } else if (clo_execontext == EC_All
nethercoteca788ff2004-10-20 10:58:09 +00002721 && extra->lasttouched.uu_ec_ip.ec != NULL) {
nethercote3b390c72003-11-13 17:53:43 +00002722 VG_(message)(Vg_UserMsg, " Word at %p last changed state from %s in tid %u",
njn810086f2002-11-14 12:42:47 +00002723 err_addr,
sewardjc808ef52002-11-13 22:43:26 +00002724 pp_state(extra->lasttouched.state),
2725 unpackTLS(extra->lasttouched.tls)->tid);
nethercoteca788ff2004-10-20 10:58:09 +00002726 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_ip.ec);
sewardj499e3de2002-11-13 22:22:25 +00002727 }
sewardj16748af2002-10-22 04:55:54 +00002728 break;
njn810086f2002-11-14 12:42:47 +00002729 }
sewardj16748af2002-10-22 04:55:54 +00002730
2731 case MutexErr:
sewardj499e3de2002-11-13 22:22:25 +00002732 VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s",
njn810086f2002-11-14 12:42:47 +00002733 VG_(get_error_address)(err),
2734 VG_(get_error_address)(err),
2735 VG_(get_error_string)(err));
njn43c799e2003-04-08 00:08:52 +00002736 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
nethercoteca788ff2004-10-20 10:58:09 +00002737 if (extra->lasttouched.uu_ec_ip.ec != NULL) {
nethercote3b390c72003-11-13 17:53:43 +00002738 VG_(message)(Vg_UserMsg, " last touched by thread %d", extra->lasttid);
nethercoteca788ff2004-10-20 10:58:09 +00002739 VG_(pp_ExeContext)(extra->lasttouched.uu_ec_ip.ec);
sewardj16748af2002-10-22 04:55:54 +00002740 }
njn810086f2002-11-14 12:42:47 +00002741 pp_AddrInfo(VG_(get_error_address)(err), &extra->addrinfo);
sewardj16748af2002-10-22 04:55:54 +00002742 break;
sewardjff2c9232002-11-13 21:44:39 +00002743
2744 case LockGraphErr: {
sewardj4bffb232002-11-13 21:46:34 +00002745 const LockSet *heldset = extra->held_lockset;
njn810086f2002-11-14 12:42:47 +00002746 Addr err_addr = VG_(get_error_address)(err);
sewardj4bffb232002-11-13 21:46:34 +00002747 Int i;
sewardjff2c9232002-11-13 21:44:39 +00002748
2749 msg = lockset_str(NULL, heldset);
2750
2751 VG_(message)(Vg_UserMsg, "Mutex %p%(y locked in inconsistent order",
njn810086f2002-11-14 12:42:47 +00002752 err_addr, err_addr);
njn43c799e2003-04-08 00:08:52 +00002753 VG_(pp_ExeContext)( VG_(get_error_where)(err) );
sewardjff2c9232002-11-13 21:44:39 +00002754 VG_(message)(Vg_UserMsg, " while holding locks %s", msg);
2755
sewardj4bffb232002-11-13 21:46:34 +00002756 for(i = 0; i < heldset->setsize; i++) {
sewardj39a4d842002-11-13 22:14:30 +00002757 const Mutex *lsmx = heldset->mutex[i];
sewardjff2c9232002-11-13 21:44:39 +00002758
sewardj542494b2002-11-13 22:46:13 +00002759 /* needs to be a recursive search+display */
2760 if (0 && !ismember(lsmx->lockdep, extra->mutex))
sewardjff2c9232002-11-13 21:44:39 +00002761 continue;
2762
nethercote3b390c72003-11-13 17:53:43 +00002763 VG_(message)(Vg_UserMsg, " %p%(y last locked at",
sewardjff2c9232002-11-13 21:44:39 +00002764 lsmx->mutexp, lsmx->mutexp);
2765 VG_(pp_ExeContext)(lsmx->location);
2766 VG_(free)(msg);
sewardj4bffb232002-11-13 21:46:34 +00002767 msg = lockset_str(NULL, lsmx->lockdep);
nethercote3b390c72003-11-13 17:53:43 +00002768 VG_(message)(Vg_UserMsg, " while depending on locks %s", msg);
sewardjff2c9232002-11-13 21:44:39 +00002769 }
2770
2771 break;
sewardj16748af2002-10-22 04:55:54 +00002772 }
sewardjff2c9232002-11-13 21:44:39 +00002773 }
2774
2775 if (msg != buf)
2776 VG_(free)(msg);
njn25e49d8e72002-09-23 09:36:25 +00002777}
2778
2779
njn810086f2002-11-14 12:42:47 +00002780Bool SK_(recognised_suppression) ( Char* name, Supp *su )
njn25e49d8e72002-09-23 09:36:25 +00002781{
2782 if (0 == VG_(strcmp)(name, "Eraser")) {
njn810086f2002-11-14 12:42:47 +00002783 VG_(set_supp_kind)(su, EraserSupp);
njn25e49d8e72002-09-23 09:36:25 +00002784 return True;
2785 } else {
2786 return False;
2787 }
2788}
2789
2790
njn810086f2002-11-14 12:42:47 +00002791Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, Int nBuf, Supp* su )
njn25e49d8e72002-09-23 09:36:25 +00002792{
2793 /* do nothing -- no extra suppression info present. Return True to
2794 indicate nothing bad happened. */
2795 return True;
2796}
2797
2798
njn810086f2002-11-14 12:42:47 +00002799Bool SK_(error_matches_suppression)(Error* err, Supp* su)
njn25e49d8e72002-09-23 09:36:25 +00002800{
nethercote64366b42003-12-01 13:11:47 +00002801 sk_assert(VG_(get_supp_kind)(su) == EraserSupp);
2802
2803 return (VG_(get_error_kind)(err) == EraserErr);
njn25e49d8e72002-09-23 09:36:25 +00002804}
2805
njn43c799e2003-04-08 00:08:52 +00002806extern Char* SK_(get_error_name) ( Error* err )
2807{
2808 if (EraserErr == VG_(get_error_kind)(err)) {
2809 return "Eraser";
2810 } else {
2811 return NULL; /* Other errors types can't be suppressed */
2812 }
2813}
2814
2815extern void SK_(print_extra_suppression_info) ( Error* err )
2816{
2817 /* Do nothing */
2818}
njn25e49d8e72002-09-23 09:36:25 +00002819
sewardjdca84112002-11-13 22:29:34 +00002820static void eraser_pre_mutex_lock(ThreadId tid, void* void_mutex)
2821{
2822 Mutex *mutex = get_mutex((Addr)void_mutex);
2823
njn72718642003-07-24 08:45:32 +00002824 test_mutex_state(mutex, MxLocked, tid);
sewardjdca84112002-11-13 22:29:34 +00002825}
2826
njn25e49d8e72002-09-23 09:36:25 +00002827static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex)
2828{
sewardj4bffb232002-11-13 21:46:34 +00002829 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002830 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002831 const LockSet* ls;
2832
njn72718642003-07-24 08:45:32 +00002833 set_mutex_state(mutex, MxLocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002834
njn25e49d8e72002-09-23 09:36:25 +00002835# if DEBUG_LOCKS
sewardjdac0a442002-11-13 22:08:40 +00002836 VG_(printf)("lock (%u, %p)\n", tid, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002837# endif
2838
njn25e49d8e72002-09-23 09:36:25 +00002839 /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */
2840# if LOCKSET_SANITY > 1
2841 sanity_check_locksets("eraser_post_mutex_lock-IN");
2842# endif
2843
sewardj4bffb232002-11-13 21:46:34 +00002844 ls = lookup_LockSet_with(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002845
sewardj4bffb232002-11-13 21:46:34 +00002846 if (ls == NULL) {
2847 LockSet *newset = add_LockSet(thread_locks[tid], mutex);
2848 insert_LockSet(newset);
2849 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002850 }
sewardj4bffb232002-11-13 21:46:34 +00002851 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002852
sewardj4bffb232002-11-13 21:46:34 +00002853 if (debug || DEBUG_LOCKS)
2854 VG_(printf)("tid %u now has lockset %p\n", tid, ls);
njn25e49d8e72002-09-23 09:36:25 +00002855
sewardj4bffb232002-11-13 21:46:34 +00002856 if (debug || LOCKSET_SANITY > 1)
2857 sanity_check_locksets("eraser_post_mutex_lock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002858}
2859
2860
2861static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex)
2862{
sewardjc26cc252002-10-23 21:58:55 +00002863 static const Bool debug = False;
sewardj39a4d842002-11-13 22:14:30 +00002865 Mutex *mutex = get_mutex((Addr)void_mutex);
sewardj4bffb232002-11-13 21:46:34 +00002866 const LockSet *ls;
2867
njn72718642003-07-24 08:45:32 +00002868 test_mutex_state(mutex, MxUnlocked, tid);
2869 set_mutex_state(mutex, MxUnlocked, tid);
sewardj16748af2002-10-22 04:55:54 +00002870
sewardjdac0a442002-11-13 22:08:40 +00002871 if (!ismember(thread_locks[tid], mutex))
2872 return;
2873
sewardjc26cc252002-10-23 21:58:55 +00002874 if (debug || DEBUG_LOCKS)
2875 VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp);
njn25e49d8e72002-09-23 09:36:25 +00002876
sewardjc26cc252002-10-23 21:58:55 +00002877 if (debug || LOCKSET_SANITY > 1)
2878 sanity_check_locksets("eraser_post_mutex_unlock-IN");
njn25e49d8e72002-09-23 09:36:25 +00002879
sewardj4bffb232002-11-13 21:46:34 +00002880 ls = lookup_LockSet_without(thread_locks[tid], mutex);
njn25e49d8e72002-09-23 09:36:25 +00002881
sewardj4bffb232002-11-13 21:46:34 +00002882 if (ls == NULL) {
2883 LockSet *newset = remove_LockSet(thread_locks[tid], mutex);
2884 insert_LockSet(newset);
2885 ls = newset;
njn25e49d8e72002-09-23 09:36:25 +00002886 }
2887
2888 /* Update the thread's lock vector */
sewardjc26cc252002-10-23 21:58:55 +00002889 if (debug || DEBUG_LOCKS)
sewardj4bffb232002-11-13 21:46:34 +00002890 VG_(printf)("tid %u reverts from %p to lockset %p\n",
sewardjc26cc252002-10-23 21:58:55 +00002891		  tid, thread_locks[tid], ls);
njn25e49d8e72002-09-23 09:36:25 +00002892
sewardj4bffb232002-11-13 21:46:34 +00002893 thread_locks[tid] = ls;
njn25e49d8e72002-09-23 09:36:25 +00002894
sewardjc26cc252002-10-23 21:58:55 +00002895 if (debug || LOCKSET_SANITY > 1)
2896 sanity_check_locksets("eraser_post_mutex_unlock-OUT");
njn25e49d8e72002-09-23 09:36:25 +00002897}
2898
2899
2900/* ---------------------------------------------------------------------
2901 Checking memory reads and writes
2902 ------------------------------------------------------------------ */
2903
2904/* Behaviour on reads and writes:
2905 *
2906 * VIR EXCL SHAR SH_MOD
2907 * ----------------------------------------------------------------
2908 * rd/wr, 1st thread | - EXCL - -
2909 * rd, new thread | - SHAR - -
2910 * wr, new thread | - SH_MOD - -
2911 * rd | error! - SHAR SH_MOD
2912 * wr | EXCL - SH_MOD SH_MOD
2913 * ----------------------------------------------------------------
2914 */
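/* A worked example (roughly -- this ignores the thread-lifetime-segment
   refinement of the EXCL state): thread T1 writes a fresh word, taking it
   VIR -> EXCL(T1) with nothing reported.  Thread T2 later reads it while
   holding mutex M: EXCL -> SHAR, and the word's lockset becomes T2's held
   locks, {M}.  A later write moves it to SH_MOD; from then on each access
   intersects the word's lockset with the accessor's held locks, and once
   that intersection is empty a "possible data race" is reported and the
   word is parked in the error state. */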
2915
sewardj8fac99a2002-11-13 22:31:26 +00002916static inline
njn25e49d8e72002-09-23 09:36:25 +00002917void dump_around_a(Addr a)
2918{
2919 UInt i;
2920 shadow_word* sword;
2921 VG_(printf)("NEARBY:\n");
2922 for (i = a - 12; i <= a + 12; i += 4) {
2923 sword = get_sword_addr(i);
2924 VG_(printf)(" %x -- tid: %u, state: %u\n", i, sword->other, sword->state);
2925 }
2926}
njn25e49d8e72002-09-23 09:36:25 +00002927
2928#if DEBUG_ACCESSES
2929 #define DEBUG_STATE(args...) \
2930 VG_(printf)("(%u) ", size), \
2931 VG_(printf)(args)
2932#else
2933 #define DEBUG_STATE(args...)
2934#endif
2935
njn72718642003-07-24 08:45:32 +00002936static void eraser_mem_read_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00002937{
sewardj72baa7a2002-12-09 23:32:58 +00002938 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00002939 shadow_word prevstate;
2940 ThreadLifeSeg *tls;
2941 const LockSet *ls;
2942 Bool statechange = False;
2943
2944 static const void *const states[4] = {
2945 [Vge_Virgin] &&st_virgin,
2946 [Vge_Excl] &&st_excl,
2947 [Vge_Shar] &&st_shar,
2948 [Vge_SharMod] &&st_sharmod,
2949 };
2950
2951 tls = thread_seg[tid];
2952 sk_assert(tls != NULL && tls->tid == tid);
2953
2954 sword = get_sword_addr(a);
2955 if (sword == SEC_MAP_ACCESS) {
2956 VG_(printf)("read distinguished 2ndary map! 0x%x\n", a);
2957 return;
2958 }
2959
2960 prevstate = *sword;
2961
2962 goto *states[sword->state];
2963
  2964   /* This looks like reading of uninitialised memory, may be legit. Eg.
2965 * calloc() zeroes its values, so untouched memory may actually be
2966 * initialised. Leave that stuff to Valgrind. */
2967 st_virgin:
2968 if (TID_INDICATING_NONVIRGIN == sword->other) {
2969 DEBUG_STATE("Read VIRGIN --> EXCL: %8x, %u\n", a, tid);
2970 if (DEBUG_VIRGIN_READS)
2971 dump_around_a(a);
2972 } else {
2973 DEBUG_STATE("Read SPECIAL --> EXCL: %8x, %u\n", a, tid);
2974 }
2975 statechange = True;
2976 *sword = SW(Vge_Excl, packTLS(tls)); /* remember exclusive owner */
2977 tls->refcount++;
2978 goto done;
2979
2980 st_excl: {
2981 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
2982
2983 if (tls == sw_tls) {
2984 DEBUG_STATE("Read EXCL: %8x, %u\n", a, tid);
2985 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
2986 DEBUG_STATE("Read EXCL/ERR: %8x, %u\n", a, tid);
2987 } else if (tlsIsDisjoint(tls, sw_tls)) {
2988 DEBUG_STATE("Read EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
2989 statechange = True;
2990 sword->other = packTLS(tls);
2991 sw_tls->refcount--;
2992 tls->refcount++;
2993 } else {
2994 DEBUG_STATE("Read EXCL(%u) --> SHAR: %8x, %u\n", sw_tls->tid, a, tid);
2995 sw_tls->refcount--;
2996 statechange = True;
2997 *sword = SW(Vge_Shar, packLockSet(thread_locks[tid]));
2998
2999 if (DEBUG_MEM_LOCKSET_CHANGES)
3000 print_LockSet("excl read locks", unpackLockSet(sword->other));
3001 }
3002 goto done;
3003 }
3004
3005 st_shar:
3006 DEBUG_STATE("Read SHAR: %8x, %u\n", a, tid);
3007 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3008 thread_locks[tid]));
3009 statechange = sword->other != prevstate.other;
3010 goto done;
3011
3012 st_sharmod:
3013 DEBUG_STATE("Read SHAR_MOD: %8x, %u\n", a, tid);
3014 ls = intersect(unpackLockSet(sword->other),
3015 thread_locks[tid]);
3016 sword->other = packLockSet(ls);
3017
3018 statechange = sword->other != prevstate.other;
3019
3020 if (isempty(ls)) {
njn72718642003-07-24 08:45:32 +00003021 record_eraser_error(tid, a, False /* !is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00003022 }
3023 goto done;
3024
3025 done:
3026 if (clo_execontext != EC_None && statechange) {
nethercoteca788ff2004-10-20 10:58:09 +00003027 EC_IP ecip;
sewardj18cd4a52002-11-13 22:37:41 +00003028
3029 if (clo_execontext == EC_Some)
nethercoteca788ff2004-10-20 10:58:09 +00003030 ecip = IP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003031 else
nethercoteca788ff2004-10-20 10:58:09 +00003032 ecip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
3033 setExeContext(a, ecip);
sewardj18cd4a52002-11-13 22:37:41 +00003034 }
3035}
njn25e49d8e72002-09-23 09:36:25 +00003036
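/* The two wrappers below track memory at word (4-byte) granularity: the
   requested range is widened to word boundaries with ROUNDDN/ROUNDUP and
   each word is then run through the state machine independently. */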
nethercote451eae92004-11-02 13:06:32 +00003037static void eraser_mem_read(Addr a, SizeT size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00003038{
njn72718642003-07-24 08:45:32 +00003039 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003040
sewardj8fac99a2002-11-13 22:31:26 +00003041 end = ROUNDUP(a+size, 4);
3042 a = ROUNDDN(a, 4);
3043
sewardj18cd4a52002-11-13 22:37:41 +00003044 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00003045 eraser_mem_read_word(a, tid);
sewardj18cd4a52002-11-13 22:37:41 +00003046}
3047
njn72718642003-07-24 08:45:32 +00003048static void eraser_mem_write_word(Addr a, ThreadId tid)
sewardj18cd4a52002-11-13 22:37:41 +00003049{
3050 ThreadLifeSeg *tls;
sewardj72baa7a2002-12-09 23:32:58 +00003051 shadow_word* sword /* egcs-2.91.66 complains uninit */ = NULL;
sewardj18cd4a52002-11-13 22:37:41 +00003052 shadow_word prevstate;
3053 Bool statechange = False;
3054 static const void *const states[4] = {
 3055 [Vge_Virgin] = &&st_virgin,
 3056 [Vge_Excl] = &&st_excl,
 3057 [Vge_Shar] = &&st_shar,
 3058 [Vge_SharMod] = &&st_sharmod,
3059 };
3060
sewardjc4a810d2002-11-13 22:25:51 +00003061 tls = thread_seg[tid];
3062 sk_assert(tls != NULL && tls->tid == tid);
3063
sewardj18cd4a52002-11-13 22:37:41 +00003064 sword = get_sword_addr(a);
3065 if (sword == SEC_MAP_ACCESS) {
 3066 VG_(printf)("write distinguished 2ndary map! 0x%x\n", a);
3067 return;
3068 }
njn25e49d8e72002-09-23 09:36:25 +00003069
sewardj18cd4a52002-11-13 22:37:41 +00003070 prevstate = *sword;
njn25e49d8e72002-09-23 09:36:25 +00003071
sewardj18cd4a52002-11-13 22:37:41 +00003072 goto *states[sword->state];
sewardj16748af2002-10-22 04:55:54 +00003073
sewardj18cd4a52002-11-13 22:37:41 +00003074 st_virgin:
3075 if (TID_INDICATING_NONVIRGIN == sword->other)
3076 DEBUG_STATE("Write VIRGIN --> EXCL: %8x, %u\n", a, tid);
3077 else
3078 DEBUG_STATE("Write SPECIAL --> EXCL: %8x, %u\n", a, tid);
3079 statechange = True;
3080 *sword = SW(Vge_Excl, packTLS(tls));/* remember exclusive owner */
3081 tls->refcount++;
3082 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003083
sewardj18cd4a52002-11-13 22:37:41 +00003084 st_excl: {
3085 ThreadLifeSeg *sw_tls = unpackTLS(sword->other);
3086
3087 if (tls == sw_tls) {
3088 DEBUG_STATE("Write EXCL: %8x, %u\n", a, tid);
3089 goto done;
3090 } else if (unpackTLS(TLSP_INDICATING_ALL) == sw_tls) {
3091 DEBUG_STATE("Write EXCL/ERR: %8x, %u\n", a, tid);
3092 goto done;
3093 } else if (tlsIsDisjoint(tls, sw_tls)) {
3094 DEBUG_STATE("Write EXCL(%u) --> EXCL: %8x, %u\n", sw_tls->tid, a, tid);
3095 sword->other = packTLS(tls);
3096 sw_tls->refcount--;
sewardjc4a810d2002-11-13 22:25:51 +00003097 tls->refcount++;
sewardj8fac99a2002-11-13 22:31:26 +00003098 goto done;
sewardj18cd4a52002-11-13 22:37:41 +00003099 } else {
3100 DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sw_tls->tid, a, tid);
3101 statechange = True;
3102 sw_tls->refcount--;
3103 *sword = SW(Vge_SharMod, packLockSet(thread_locks[tid]));
 3104 if (DEBUG_MEM_LOCKSET_CHANGES)
3105 print_LockSet("excl write locks", unpackLockSet(sword->other));
3106 goto SHARED_MODIFIED;
sewardjc4a810d2002-11-13 22:25:51 +00003107 }
sewardj18cd4a52002-11-13 22:37:41 +00003108 }
njn25e49d8e72002-09-23 09:36:25 +00003109
sewardj18cd4a52002-11-13 22:37:41 +00003110 st_shar:
3111 DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid);
3112 sword->state = Vge_SharMod;
3113 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3114 thread_locks[tid]));
3115 statechange = True;
3116 goto SHARED_MODIFIED;
njn25e49d8e72002-09-23 09:36:25 +00003117
sewardj18cd4a52002-11-13 22:37:41 +00003118 st_sharmod:
3119 DEBUG_STATE("Write SHAR_MOD: %8x, %u\n", a, tid);
3120 sword->other = packLockSet(intersect(unpackLockSet(sword->other),
3121 thread_locks[tid]));
3122 statechange = sword->other != prevstate.other;
njn25e49d8e72002-09-23 09:36:25 +00003123
sewardj18cd4a52002-11-13 22:37:41 +00003124 SHARED_MODIFIED:
3125 if (isempty(unpackLockSet(sword->other))) {
njn72718642003-07-24 08:45:32 +00003126 record_eraser_error(tid, a, True /* is_write */, prevstate);
sewardj18cd4a52002-11-13 22:37:41 +00003127 }
3128 goto done;
njn25e49d8e72002-09-23 09:36:25 +00003129
sewardj18cd4a52002-11-13 22:37:41 +00003130 done:
3131 if (clo_execontext != EC_None && statechange) {
nethercoteca788ff2004-10-20 10:58:09 +00003132 EC_IP ecip;
sewardj499e3de2002-11-13 22:22:25 +00003133
sewardj18cd4a52002-11-13 22:37:41 +00003134 if (clo_execontext == EC_Some)
nethercoteca788ff2004-10-20 10:58:09 +00003135 ecip = IP(VG_(get_EIP)(tid), prevstate, tls);
sewardj18cd4a52002-11-13 22:37:41 +00003136 else
nethercoteca788ff2004-10-20 10:58:09 +00003137 ecip = EC(VG_(get_ExeContext)(tid), prevstate, tls);
3138 setExeContext(a, ecip);
njn25e49d8e72002-09-23 09:36:25 +00003139 }
3140}
3141
nethercote451eae92004-11-02 13:06:32 +00003142static void eraser_mem_write(Addr a, SizeT size, ThreadId tid)
njn25e49d8e72002-09-23 09:36:25 +00003143{
sewardj8fac99a2002-11-13 22:31:26 +00003144 Addr end;
njn25e49d8e72002-09-23 09:36:25 +00003145
sewardj8fac99a2002-11-13 22:31:26 +00003146 end = ROUNDUP(a+size, 4);
3147 a = ROUNDDN(a, 4);
3148
sewardj18cd4a52002-11-13 22:37:41 +00003149 for ( ; a < end; a += 4)
njn72718642003-07-24 08:45:32 +00003150 eraser_mem_write_word(a, tid);
njn25e49d8e72002-09-23 09:36:25 +00003151}
3152
3153#undef DEBUG_STATE
3154
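/* Helpers called from the instrumented code (registered as compact or
   non-compact helpers in SK_(pre_clo_init)() below).  The sized write
   helpers receive the value about to be stored and skip the state-machine
   update when the store would leave memory unchanged. */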
nethercote31212bc2004-02-29 15:50:04 +00003155REGPARM(1) static void eraser_mem_help_read_1(Addr a)
sewardj7ab2aca2002-10-20 19:40:32 +00003156{
njn72718642003-07-24 08:45:32 +00003157 eraser_mem_read(a, 1, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003158}
3159
nethercote31212bc2004-02-29 15:50:04 +00003160REGPARM(1) static void eraser_mem_help_read_2(Addr a)
sewardja5b3aec2002-10-22 05:09:36 +00003161{
njn72718642003-07-24 08:45:32 +00003162 eraser_mem_read(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003163}
3164
nethercote31212bc2004-02-29 15:50:04 +00003165REGPARM(1) static void eraser_mem_help_read_4(Addr a)
sewardja5b3aec2002-10-22 05:09:36 +00003166{
njn72718642003-07-24 08:45:32 +00003167 eraser_mem_read(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003168}
3169
nethercote451eae92004-11-02 13:06:32 +00003170REGPARM(2) static void eraser_mem_help_read_N(Addr a, SizeT size)
sewardja5b3aec2002-10-22 05:09:36 +00003171{
njn72718642003-07-24 08:45:32 +00003172 eraser_mem_read(a, size, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003173}
3174
nethercote31212bc2004-02-29 15:50:04 +00003175REGPARM(2) static void eraser_mem_help_write_1(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003176{
3177 if (*(UChar *)a != val)
njn72718642003-07-24 08:45:32 +00003178 eraser_mem_write(a, 1, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003179}
nethercote31212bc2004-02-29 15:50:04 +00003180REGPARM(2) static void eraser_mem_help_write_2(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003181{
3182 if (*(UShort *)a != val)
njn72718642003-07-24 08:45:32 +00003183 eraser_mem_write(a, 2, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003184}
nethercote31212bc2004-02-29 15:50:04 +00003185REGPARM(2) static void eraser_mem_help_write_4(Addr a, UInt val)
sewardja5b3aec2002-10-22 05:09:36 +00003186{
3187 if (*(UInt *)a != val)
njn72718642003-07-24 08:45:32 +00003188 eraser_mem_write(a, 4, VG_(get_current_tid)());
sewardja5b3aec2002-10-22 05:09:36 +00003189}
nethercote451eae92004-11-02 13:06:32 +00003190REGPARM(2) static void eraser_mem_help_write_N(Addr a, SizeT size)
sewardj7ab2aca2002-10-20 19:40:32 +00003191{
njn72718642003-07-24 08:45:32 +00003192 eraser_mem_write(a, size, VG_(get_current_tid)());
sewardj7ab2aca2002-10-20 19:40:32 +00003193}
njn25e49d8e72002-09-23 09:36:25 +00003194
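/* Thread lifetime events.  Both creation and joining start fresh
   thread-life-segments (TLSs): on create, the child's new segment has the
   parent's old segment as a prior and the parent also begins a new segment;
   on join, the joiner begins a new segment with the joinee's as a prior and
   the joinee's segment is retired.  Accesses made in segments that are
   ordered this way are treated as non-concurrent by tlsIsDisjoint(). */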
sewardjc4a810d2002-11-13 22:25:51 +00003195static void hg_thread_create(ThreadId parent, ThreadId child)
3196{
3197 if (0)
3198 VG_(printf)("CREATE: %u creating %u\n", parent, child);
3199
3200 newTLS(child);
3201 addPriorTLS(child, parent);
3202
3203 newTLS(parent);
3204}
3205
3206static void hg_thread_join(ThreadId joiner, ThreadId joinee)
3207{
3208 if (0)
3209 VG_(printf)("JOIN: %u joining on %u\n", joiner, joinee);
3210
3211 newTLS(joiner);
3212 addPriorTLS(joiner, joinee);
3213
3214 clearTLS(joinee);
3215}
3216
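/* Bus-locked (LOCK-prefixed) instructions are modelled as acquiring and
   releasing this synthetic mutex, so atomic read-modify-write sequences
   look like ordinary lock-protected accesses to the lockset machinery.
   (The calls are inserted by the instrumentation pass, not shown in this
   part of the file.) */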
sewardj7a5ebcf2002-11-13 22:42:13 +00003217static Int __BUS_HARDWARE_LOCK__;
3218
3219static void bus_lock(void)
3220{
3221 ThreadId tid = VG_(get_current_tid)();
3222 eraser_pre_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3223 eraser_post_mutex_lock(tid, &__BUS_HARDWARE_LOCK__);
3224}
3225
3226static void bus_unlock(void)
3227{
3228 ThreadId tid = VG_(get_current_tid)();
3229 eraser_post_mutex_unlock(tid, &__BUS_HARDWARE_LOCK__);
3230}
3231
njn25e49d8e72002-09-23 09:36:25 +00003232/*--------------------------------------------------------------------*/
sewardj7f3ad222002-11-13 22:11:53 +00003233/*--- Client requests ---*/
3234/*--------------------------------------------------------------------*/
3235
njn72718642003-07-24 08:45:32 +00003236Bool SK_(handle_client_request)(ThreadId tid, UInt *args, UInt *ret)
sewardj7f3ad222002-11-13 22:11:53 +00003237{
3238 if (!VG_IS_SKIN_USERREQ('H','G',args[0]))
3239 return False;
3240
3241 switch(args[0]) {
3242 case VG_USERREQ__HG_CLEAN_MEMORY:
3243 set_address_range_state(args[1], args[2], Vge_VirginInit);
3244 *ret = 0; /* meaningless */
3245 break;
3246
3247 case VG_USERREQ__HG_KNOWN_RACE:
3248 set_address_range_state(args[1], args[2], Vge_Error);
3249 *ret = 0; /* meaningless */
3250 break;
3251
3252 default:
3253 return False;
3254 }
3255
3256 return True;
3257}
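/* Example of how a client program might issue these requests (an
   illustrative sketch only, not compiled here).  It assumes helgrind.h
   provides wrapper macros along the lines of VALGRIND_HG_CLEAN_MEMORY /
   VALGRIND_HG_KNOWN_RACE; check helgrind.h for the exact names and
   argument order. */
#if 0
#include "helgrind.h"

static char scratch[4096];

/* Reusing a buffer for a new, unrelated purpose: tell Helgrind to forget
   its access history, so earlier accesses by other threads are not
   reported against the new use. */
static void recycle_scratch(void)
{
   VALGRIND_HG_CLEAN_MEMORY(scratch, sizeof(scratch));
}
#endif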
3258
3259
3260/*--------------------------------------------------------------------*/
njn25e49d8e72002-09-23 09:36:25 +00003261/*--- Setup ---*/
3262/*--------------------------------------------------------------------*/
3263
njn810086f2002-11-14 12:42:47 +00003264void SK_(pre_clo_init)(void)
njn25e49d8e72002-09-23 09:36:25 +00003265{
3266 Int i;
sewardj4bffb232002-11-13 21:46:34 +00003267 LockSet *empty;
njn25e49d8e72002-09-23 09:36:25 +00003268
njn810086f2002-11-14 12:42:47 +00003269 VG_(details_name) ("Helgrind");
3270 VG_(details_version) (NULL);
3271 VG_(details_description) ("a data race detector");
3272 VG_(details_copyright_author)(
nethercote08fa9a72004-07-16 17:44:00 +00003273 "Copyright (C) 2002-2004, and GNU GPL'd, by Nicholas Nethercote et al.");
nethercote421281e2003-11-20 16:20:55 +00003274 VG_(details_bug_reports_to) (VG_BUGS_TO);
sewardj78210aa2002-12-01 02:55:46 +00003275 VG_(details_avg_translation_sizeB) ( 115 );
njn25e49d8e72002-09-23 09:36:25 +00003276
njn810086f2002-11-14 12:42:47 +00003277 VG_(needs_core_errors)();
3278 VG_(needs_skin_errors)();
3279 VG_(needs_data_syms)();
njn810086f2002-11-14 12:42:47 +00003280 VG_(needs_client_requests)();
3281 VG_(needs_command_line_options)();
fitzhardinge98abfc72003-12-16 02:05:15 +00003282 VG_(needs_shadow_memory)();
njn25e49d8e72002-09-23 09:36:25 +00003283
fitzhardinge98abfc72003-12-16 02:05:15 +00003284 VG_(init_new_mem_startup) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003285
njn810086f2002-11-14 12:42:47 +00003286 /* stack ones not decided until VG_(post_clo_init)() */
njn25e49d8e72002-09-23 09:36:25 +00003287
fitzhardinge98abfc72003-12-16 02:05:15 +00003288 VG_(init_new_mem_brk) (& make_writable);
3289 VG_(init_new_mem_mmap) (& eraser_new_mem_startup);
njn25e49d8e72002-09-23 09:36:25 +00003290
fitzhardinge98abfc72003-12-16 02:05:15 +00003291 VG_(init_change_mem_mprotect) (& eraser_set_perms);
njn25e49d8e72002-09-23 09:36:25 +00003292
fitzhardinge98abfc72003-12-16 02:05:15 +00003293 VG_(init_ban_mem_stack) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003294
fitzhardinge98abfc72003-12-16 02:05:15 +00003295 VG_(init_die_mem_stack) (NULL);
3296 VG_(init_die_mem_stack_signal) (NULL);
3297 VG_(init_die_mem_brk) (NULL);
3298 VG_(init_die_mem_munmap) (NULL);
njn25e49d8e72002-09-23 09:36:25 +00003299
fitzhardinge98abfc72003-12-16 02:05:15 +00003300 VG_(init_pre_mem_read) (& eraser_pre_mem_read);
3301 VG_(init_pre_mem_read_asciiz) (& eraser_pre_mem_read_asciiz);
3302 VG_(init_pre_mem_write) (& eraser_pre_mem_write);
3303 VG_(init_post_mem_write) (NULL);
njn810086f2002-11-14 12:42:47 +00003304
fitzhardinge98abfc72003-12-16 02:05:15 +00003305 VG_(init_post_thread_create) (& hg_thread_create);
3306 VG_(init_post_thread_join) (& hg_thread_join);
njn810086f2002-11-14 12:42:47 +00003307
fitzhardinge98abfc72003-12-16 02:05:15 +00003308 VG_(init_pre_mutex_lock) (& eraser_pre_mutex_lock);
3309 VG_(init_post_mutex_lock) (& eraser_post_mutex_lock);
3310 VG_(init_post_mutex_unlock) (& eraser_post_mutex_unlock);
sewardjc4a810d2002-11-13 22:25:51 +00003311
sewardja5b3aec2002-10-22 05:09:36 +00003312 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1);
3313 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2);
3314 VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4);
3315 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N);
3316
3317 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1);
3318 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2);
3319 VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4);
3320 VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N);
njnd04b7c62002-10-03 14:05:52 +00003321
sewardj7a5ebcf2002-11-13 22:42:13 +00003322 VG_(register_noncompact_helper)((Addr) & bus_lock);
3323 VG_(register_noncompact_helper)((Addr) & bus_unlock);
3324
sewardj4bffb232002-11-13 21:46:34 +00003325 for(i = 0; i < LOCKSET_HASH_SZ; i++)
3326 lockset_hash[i] = NULL;
3327
3328 empty = alloc_LockSet(0);
3329 insert_LockSet(empty);
3330 emptyset = empty;
3331
sewardjc4a810d2002-11-13 22:25:51 +00003332 /* Init lock table and thread segments */
3333 for (i = 0; i < VG_N_THREADS; i++) {
sewardjdac0a442002-11-13 22:08:40 +00003334 thread_locks[i] = empty;
njn25e49d8e72002-09-23 09:36:25 +00003335
sewardjc4a810d2002-11-13 22:25:51 +00003336 newTLS(i);
3337 }
3338
njn25e49d8e72002-09-23 09:36:25 +00003339 init_shadow_memory();
njn3e884182003-04-15 13:03:23 +00003340 hg_malloc_list = VG_(HT_construct)();
njn25e49d8e72002-09-23 09:36:25 +00003341}
3342
sewardj406270b2002-11-13 22:18:09 +00003343Bool SK_(process_cmd_line_option)(Char* arg)
3344{
nethercote27fec902004-06-16 21:26:32 +00003345 if (VG_CLO_STREQ(arg, "--show-last-access=no"))
3346 clo_execontext = EC_None;
3347 else if (VG_CLO_STREQ(arg, "--show-last-access=some"))
3348 clo_execontext = EC_Some;
3349 else if (VG_CLO_STREQ(arg, "--show-last-access=all"))
3350 clo_execontext = EC_All;
sewardj499e3de2002-11-13 22:22:25 +00003351
nethercote27fec902004-06-16 21:26:32 +00003352 else VG_BOOL_CLO("--private-stacks", clo_priv_stacks)
sewardj499e3de2002-11-13 22:22:25 +00003353
nethercote27fec902004-06-16 21:26:32 +00003354 else
3355 return VG_(replacement_malloc_process_cmd_line_option)(arg);
sewardj499e3de2002-11-13 22:22:25 +00003356
nethercote27fec902004-06-16 21:26:32 +00003357 return True;
sewardj406270b2002-11-13 22:18:09 +00003358}
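/* For reference, these options are supplied on the Valgrind command line,
   for example (the exact invocation depends on the Valgrind version in use):

       valgrind --tool=helgrind --private-stacks=yes \
                --show-last-access=some ./myprog
*/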
3359
njn3e884182003-04-15 13:03:23 +00003360void SK_(print_usage)(void)
sewardj406270b2002-11-13 22:18:09 +00003361{
njn3e884182003-04-15 13:03:23 +00003362 VG_(printf)(
sewardje11d6c82002-12-15 02:00:41 +00003363" --private-stacks=yes|no assume thread stacks are used privately [no]\n"
3364" --show-last-access=no|some|all\n"
3365" show location of last word access on error [no]\n"
njn3e884182003-04-15 13:03:23 +00003366 );
3367 VG_(replacement_malloc_print_usage)();
sewardj406270b2002-11-13 22:18:09 +00003368}
3369
njn3e884182003-04-15 13:03:23 +00003370void SK_(print_debug_usage)(void)
3371{
3372 VG_(replacement_malloc_print_debug_usage)();
3373}
njn25e49d8e72002-09-23 09:36:25 +00003374
3375void SK_(post_clo_init)(void)
3376{
nethercote451eae92004-11-02 13:06:32 +00003377 void (*stack_tracker)(Addr a, SizeT len);
njn810086f2002-11-14 12:42:47 +00003378
sewardj499e3de2002-11-13 22:22:25 +00003379 if (clo_execontext) {
3380 execontext_map = VG_(malloc)(sizeof(ExeContextMap *) * 65536);
3381 VG_(memset)(execontext_map, 0, sizeof(ExeContextMap *) * 65536);
3382 }
sewardjf6374322002-11-13 22:35:55 +00003383
njn810086f2002-11-14 12:42:47 +00003384 if (clo_priv_stacks)
3385 stack_tracker = & eraser_new_mem_stack_private;
3386 else
3387 stack_tracker = & eraser_new_mem_stack;
sewardjf6374322002-11-13 22:35:55 +00003388
fitzhardinge98abfc72003-12-16 02:05:15 +00003389 VG_(init_new_mem_stack) (stack_tracker);
3390 VG_(init_new_mem_stack_signal) (stack_tracker);
njn25e49d8e72002-09-23 09:36:25 +00003391}
3392
3393
njn7d9f94d2003-04-22 21:41:40 +00003394void SK_(fini)(Int exitcode)
njn25e49d8e72002-09-23 09:36:25 +00003395{
sewardjdac0a442002-11-13 22:08:40 +00003396 if (DEBUG_LOCK_TABLE) {
sewardj4bffb232002-11-13 21:46:34 +00003397 pp_all_LockSets();
sewardjdac0a442002-11-13 22:08:40 +00003398 pp_all_mutexes();
3399 }
sewardj4bffb232002-11-13 21:46:34 +00003400
3401 if (LOCKSET_SANITY)
3402 sanity_check_locksets("SK_(fini)");
3403
fitzhardinge111c6072004-03-09 02:45:07 +00003404 if (VG_(clo_verbosity) > 0)
3405 VG_(message)(Vg_UserMsg, "%u possible data races found; %u lock order problems",
3406 n_eraser_warnings, n_lockorder_warnings);
sewardjf6374322002-11-13 22:35:55 +00003407
3408 if (0)
3409 VG_(printf)("stk_ld:%u+stk_st:%u = %u nonstk_ld:%u+nonstk_st:%u = %u %u%%\n",
3410 stk_ld, stk_st, stk_ld + stk_st,
3411 nonstk_ld, nonstk_st, nonstk_ld + nonstk_st,
3412 ((stk_ld+stk_st)*100) / (stk_ld + stk_st + nonstk_ld + nonstk_st));
njn25e49d8e72002-09-23 09:36:25 +00003413}
3414
fitzhardinge98abfc72003-12-16 02:05:15 +00003415/* Uses a 1:1 mapping */
3416VG_DETERMINE_INTERFACE_VERSION(SK_(pre_clo_init), 1.0)
3417
njn25e49d8e72002-09-23 09:36:25 +00003418/*--------------------------------------------------------------------*/
njn25cac76cb2002-09-23 11:21:57 +00003419/*--- end hg_main.c ---*/
njn25e49d8e72002-09-23 09:36:25 +00003420/*--------------------------------------------------------------------*/