/*--------------------------------------------------------------------*/
/*--- Helgrind: checking for data races in threaded programs.      ---*/
/*---                                                    hg_main.c ---*/
/*--------------------------------------------------------------------*/

/*
   This file is part of Helgrind, a Valgrind skin for detecting
   data races in threaded programs.

   Copyright (C) 2000-2002 Nicholas Nethercote
      njn25@cam.ac.uk

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307, USA.

   The GNU General Public License is contained in the file COPYING.
*/

#include "vg_skin.h"

VG_DETERMINE_INTERFACE_VERSION
static UInt n_eraser_warnings = 0;


/*------------------------------------------------------------*/
/*--- Debug guff                                           ---*/
/*------------------------------------------------------------*/

#define DEBUG_LOCK_TABLE    1   /* Print lock table at end */

#define DEBUG_MAKE_ACCESSES 0   /* Print make_access() calls */
#define DEBUG_LOCKS         0   /* Print lock()/unlock() calls and locksets */
#define DEBUG_NEW_LOCKSETS  0   /* Print new locksets when created */
#define DEBUG_ACCESSES      0   /* Print reads, writes */
#define DEBUG_MEM_LOCKSET_CHANGES 0
                                /* Print when an address's lockset
                                   changes; only useful with
                                   DEBUG_ACCESSES */

#define DEBUG_VIRGIN_READS  0   /* Dump around address on VIRGIN reads */

/* heavyweight LockSet sanity checking:
   0 == never
   1 == after important ops
   2 == as 1, and also after pthread_mutex_* ops (excessively slow)
*/
#define LOCKSET_SANITY 0


/*------------------------------------------------------------*/
/*--- Crude profiling machinery.                           ---*/
/*------------------------------------------------------------*/

// PPP: work out if I want this

#define PROF_EVENT(x)
#if 0
#ifdef VG_PROFILE_MEMORY

#define N_PROF_EVENTS 150

static UInt event_ctr[N_PROF_EVENTS];

void VGE_(done_prof_mem) ( void )
{
   Int i;
   for (i = 0; i < N_PROF_EVENTS; i++) {
      if ((i % 10) == 0)
         VG_(printf)("\n");
      if (event_ctr[i] > 0)
         VG_(printf)( "prof mem event %2d: %d\n", i, event_ctr[i] );
   }
   VG_(printf)("\n");
}

#define PROF_EVENT(ev)                                  \
   do { sk_assert((ev) >= 0 && (ev) < N_PROF_EVENTS);   \
        event_ctr[ev]++;                                \
   } while (False);

#else

//static void init_prof_mem ( void ) { }
//       void VG_(done_prof_mem) ( void ) { }

#define PROF_EVENT(ev) /* */

#endif /* VG_PROFILE_MEMORY */

/* Event index.  If just the name of the fn is given, this means the
   number of calls to the fn.  Otherwise it is the specified event.

   [PPP: snip event numbers...]
*/
#endif /* 0 */


/*------------------------------------------------------------*/
/*--- Data defns.                                          ---*/
/*------------------------------------------------------------*/

typedef enum
   { Vge_VirginInit, Vge_NonVirginInit, Vge_SegmentInit }
   VgeInitStatus;

/* Should add up to 32 to fit in one word */
#define OTHER_BITS      30
#define STATE_BITS      2

#define ESEC_MAP_WORDS  16384   /* Words per secondary map */

/* This is for indicating that a memory block has been initialised but not
 * really directly by a particular thread... (eg. text/data initialised
 * automatically at startup).
 * Must be different to virgin_sword.other */
#define TID_INDICATING_NONVIRGIN    1

/* Magic TID used for error suppression; if the word state is Excl and the
   tid is this, then all accesses are OK without changing state and
   without raising any more errors  */
#define TID_INDICATING_ALL          ((1 << OTHER_BITS) - 1)

/* Number of entries must fit in STATE_BITS bits */
typedef enum { Vge_Virgin, Vge_Excl, Vge_Shar, Vge_SharMod } pth_state;

typedef
   struct {
      UInt other:OTHER_BITS;
      UInt state:STATE_BITS;
   } shadow_word;
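
/* Illustrative examples of how the two fields are used elsewhere in this
   file (see init_nonvirgin_sword() and intersect()):
      { other = owning tid,    state = Vge_Excl }               -- exclusive
      { other = lockset index, state = Vge_Shar or Vge_SharMod } -- shared
   i.e. 'other' holds a ThreadId while a word is exclusively owned, and an
   index into lockset_table once the word becomes shared. */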

typedef
   struct {
      shadow_word swords[ESEC_MAP_WORDS];
   }
   ESecMap;

static ESecMap* primary_map[ 65536 ];
static ESecMap  distinguished_secondary_map;

static shadow_word virgin_sword = { 0, Vge_Virgin };

#define VGE_IS_DISTINGUISHED_SM(smap) \
   ((smap) == &distinguished_secondary_map)

#define ENSURE_MAPPABLE(addr,caller)                                  \
   do {                                                               \
      if (VGE_IS_DISTINGUISHED_SM(primary_map[(addr) >> 16])) {       \
         primary_map[(addr) >> 16] = alloc_secondary_map(caller);     \
         /*VG_(printf)("new 2map because of %p\n", addr);*/           \
      }                                                               \
   } while(0)


/*------------------------------------------------------------*/
/*--- Low-level support for memory tracking.               ---*/
/*------------------------------------------------------------*/

/*
   All reads and writes are recorded in the memory map, which
   records the state of all memory in the process.  The memory map is
   organised like that for normal Valgrind, except that everything
   is done at word-level instead of byte-level, and each word has only
   one word of shadow (instead of 36 bits).

   As for normal Valgrind there is a distinguished secondary map.  But we're
   working at word-granularity, so it has 16k word entries instead of 64k byte
   entries.  Lookup is done as follows:

     bits 31..16:   primary map lookup
     bits 15.. 2:   secondary map lookup
     bits  1.. 0:   ignored
*/
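
/* Worked example (illustrative only): for the address 0x40123456,
      bits 31..16 = 0x4012                    -> primary_map[0x4012]
      bits 15.. 2 = (0x3456 & 0xFFFC) >> 2
                  = 0xD15                     -> that ESecMap's swords[0xD15]
      bits  1.. 0 = ignored (shadowing is word-granular)
   which is exactly the lookup performed by set_sword() and get_sword_addr()
   below. */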


/*------------------------------------------------------------*/
/*--- Basic bitmap management, reading and writing.        ---*/
/*------------------------------------------------------------*/

/* Allocate and initialise a secondary map, marking all words as virgin. */

/* Just a value that isn't a real pointer */
#define SEC_MAP_ACCESS  (shadow_word*)0x99


static
ESecMap* alloc_secondary_map ( __attribute__ ((unused)) Char* caller )
{
   ESecMap* map;
   UInt  i;
   //PROF_EVENT(10); PPP

   /* It just happens that a SecMap occupies exactly 18 pages --
      although this isn't important, so the following assert is
      spurious. (SSS: not true for ESecMaps -- they're 16 pages) */
   sk_assert(0 == (sizeof(ESecMap) % VKI_BYTES_PER_PAGE));
   map = VG_(get_memory_from_mmap)( sizeof(ESecMap), caller );

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      map->swords[i] = virgin_sword;

   return map;
}


/* Set a word.  The byte given by 'a' could be anywhere in the word -- the
 * whole word gets set. */
static __inline__
void set_sword ( Addr a, shadow_word sword )
{
   ESecMap* sm;

   //PROF_EVENT(23); PPP
   ENSURE_MAPPABLE(a, "VGE_(set_sword)");

   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   sm     = primary_map[a >> 16];
   sk_assert(sm != &distinguished_secondary_map);
   sm->swords[(a & 0xFFFC) >> 2] = sword;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("wrote to distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      VG_(skin_panic)("wrote to distinguished 2ndary map!");
   }
}


static __inline__
shadow_word* get_sword_addr ( Addr a )
{
   /* Use bits 31..16 for primary, 15..2 for secondary lookup */
   ESecMap* sm     = primary_map[a >> 16];
   UInt     sm_off = (a & 0xFFFC) >> 2;

   if (VGE_IS_DISTINGUISHED_SM(sm)) {
      VG_(printf)("accessed distinguished 2ndary map! 0x%x\n", a);
      // XXX: may be legit, but I want to know when it happens --njn
      //VG_(skin_panic)("accessed distinguished 2ndary map!");
      return SEC_MAP_ACCESS;
   }

   //PROF_EVENT(21); PPP
   return & (sm->swords[sm_off]);
}


// SSS: rename these so they're not so similar to memcheck, unless it's
// appropriate of course

static __inline__
void init_virgin_sword(Addr a)
{
   set_sword(a, virgin_sword);
}


/* 'a' is guaranteed to be 4-byte aligned here (not that that's important,
 * really) */
static
void make_writable_aligned ( Addr a, UInt size )
{
   Addr a_past_end = a + size;

   //PROF_EVENT(??)  PPP
   sk_assert(IS_ALIGNED4_ADDR(a));

   for ( ; a < a_past_end; a += 4) {
      set_sword(a, virgin_sword);
   }
}

static __inline__
void init_nonvirgin_sword(Addr a)
{
   shadow_word sword;
   ThreadId tid = VG_(get_current_or_recent_tid)();

   sk_assert(tid != VG_INVALID_THREADID);
   sword.other = tid;
   sword.state = Vge_Excl;
   set_sword(a, sword);
}

/* In this case, we treat it for Eraser's sake like virgin (it hasn't
 * been inited by a particular thread, it's just done automatically upon
 * startup), but we mark its .other field specially so it doesn't look like
 * an uninited read. */
static __inline__
void init_magically_inited_sword(Addr a)
{
   shadow_word sword;

   sk_assert(VG_INVALID_THREADID == VG_(get_current_tid)());
   sword.other = TID_INDICATING_NONVIRGIN;
   sword.state = Vge_Virgin;
   set_sword(a, sword);
}


/*------------------------------------------------------------*/
/*--- Implementation of lock sets.                         ---*/
/*------------------------------------------------------------*/

typedef struct hg_mutex hg_mutex_t;     /* forward decl */

typedef enum MutexState {
   MxUnknown,                   /* don't know */
   MxUnlocked,                  /* unlocked */
   MxLocked,                    /* locked */
   MxDead                       /* destroyed */
} MutexState;

struct hg_mutex {
   void              *mutexp;
   struct hg_mutex   *next;

   MutexState         state;    /* mutex state */
   ThreadId           tid;      /* owner */
   ExeContext        *location; /* where the last change happened */

   UInt               lockdep;  /* set of locks we depend on */
   UInt               mark;     /* mark for graph traversal */
};

static Int mutex_cmp(const hg_mutex_t *a, const hg_mutex_t *b);

#define M_LOCKSET_TABLE 5000

struct _LockSet {
   hg_mutex_t      *mutex;
   struct _LockSet *next;
};
typedef struct _LockSet LockSet;

/* Each one is an index into the lockset table. */
static UInt thread_locks[VG_N_THREADS];

/* # lockset table entries used. */
static Int n_lockset_table = 1;

/* lockset_table[0] is always NULL, representing the empty lockset */
static LockSet* lockset_table[M_LOCKSET_TABLE];

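/* For illustration: with the declarations above, a thread holding no locks
   has thread_locks[tid] == 0 (the empty set, lockset_table[0] == NULL),
   while a thread that has taken mutexes A and B (A's address < B's) has
   thread_locks[tid] == i for some i where lockset_table[i] is the
   two-element list A -> B -> NULL, kept in increasing address order. */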

static __inline__
Bool is_valid_lockset_id ( Int id )
{
   return id >= 0 && id < n_lockset_table;
}


static
Int allocate_LockSet(LockSet* set)
{
   static const Bool debug = False;

   if (n_lockset_table >= M_LOCKSET_TABLE)
      VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE");
   lockset_table[n_lockset_table] = set;
   n_lockset_table++;
   if (debug || DEBUG_MEM_LOCKSET_CHANGES || DEBUG_NEW_LOCKSETS)
      VG_(printf)("allocate LOCKSET VECTOR %p to %d\n", set, n_lockset_table-1);

   return n_lockset_table-1;
}


static
void pp_LockSet(LockSet* p)
{
   VG_(printf)("{ ");
   while (p != NULL) {
      VG_(printf)("%p%(y ", p->mutex->mutexp, p->mutex->mutexp);
      p = p->next;
   }
   VG_(printf)("}\n");
}


static __attribute__((unused))
void pp_all_LockSets ( void )
{
   Int i;
   for (i = 0; i < n_lockset_table; i++) {
      VG_(printf)("[%d] = ", i);
      pp_LockSet(lockset_table[i]);
   }
}


static
void free_LockSet(LockSet *p)
{
   LockSet* q;
   while (NULL != p) {
      q = p;
      p = p->next;
      VG_(free)(q);
#     if DEBUG_MEM_LOCKSET_CHANGES
      VG_(printf)("free'd   %x\n", q);
#     endif
   }
}


static
Bool structural_eq_LockSet(LockSet* a, LockSet* b)
{
   while (a && b) {
      if (mutex_cmp(a->mutex, b->mutex) != 0) {
         return False;
      }
      a = a->next;
      b = b->next;
   }
   return (NULL == a && NULL == b);
}


/* Check invariants:
   - all locksets are unique
   - each set is a linked list in strictly increasing order of mutex addr
*/
static
void sanity_check_locksets ( Char* caller )
{
   Int              i, j, badness;
   LockSet*         v;
   hg_mutex_t       mx_prev;

   badness = 0;
   i = j = -1;

   //VG_(printf)("sanity %s\n", caller);
   /* Check really simple things first */

   if (n_lockset_table < 1 || n_lockset_table > M_LOCKSET_TABLE)
      { badness = 1; goto baaad; }

   if (lockset_table[0] != NULL)
      { badness = 2; goto baaad; }

   for (i = 1; i < n_lockset_table; i++)
      if (lockset_table[i] == NULL)
         { badness = 3; goto baaad; }

   for (i = n_lockset_table; i < M_LOCKSET_TABLE; i++)
      if (lockset_table[i] != NULL)
         { badness = 4; goto baaad; }

   /* Check the sanity of each individual set. */
   for (i = 1; i < n_lockset_table; i++) {
      v = lockset_table[i];
      mx_prev.mutexp = NULL;
      while (True) {
         if (v == NULL) break;
         if (mutex_cmp(&mx_prev, v->mutex) >= 0)
            { badness = 5; goto baaad; }
         mx_prev = *v->mutex;
         v = v->next;
      }
   }

   /* Ensure the sets are unique, both structurally and in respect of
      the address of their first nodes. */
   for (i = 1; i < n_lockset_table; i++) {
      for (j = i+1; j < n_lockset_table; j++) {
         if (lockset_table[i] == lockset_table[j])
            { badness = 6; goto baaad; }
         if (structural_eq_LockSet(lockset_table[i], lockset_table[j]))
            { badness = 7; goto baaad; }
      }
   }
   return;

  baaad:
   VG_(printf)("sanity_check_locksets: "
               "i = %d, j = %d, badness = %d, caller = %s\n",
               i, j, badness, caller);
   pp_all_LockSets();
   VG_(skin_panic)("sanity_check_locksets");
}

static void print_LockSet(const char *s, LockSet *ls)
{
   if (!ls) {
      VG_(printf)("%s: empty\n", s);
   } else {
      VG_(printf)("%s: ", s);
      for(; ls; ls = ls->next)
         VG_(printf)("%p%(y, ", ls->mutex->mutexp, ls->mutex->mutexp);
      VG_(printf)("\n");
   }
}

/* Builds ia with mx removed.  mx should actually be in ia!
   (a checked assertion).  Resulting set should not already
   exist in the table (unchecked).
*/
static
UInt remove ( UInt ia, hg_mutex_t *mx )
{
   static const Bool debug = False;
   Int       found, res;
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;
   LockSet*  a = lockset_table[ia];
   sk_assert(is_valid_lockset_id(ia));

   if (debug || DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("Removing mutex %p%(y from lockset %d:\n",
                  mx->mutexp, mx->mutexp, ia);
      print_LockSet("remove-IN", a);
   }

   if (debug || LOCKSET_SANITY)
      sanity_check_locksets("remove-IN");

   /* Build the new list */
   found = 0;
   while (a) {
      if (mutex_cmp(a->mutex, mx) != 0) {
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &((*prev_ptr)->next);
      } else {
         found++;
      }
      *prev_ptr = NULL;
      a = a->next;
   }
   sk_assert(found == 1 /* sigh .. if the client is buggy */ || found == 0 );

   /* Preserve uniqueness invariants in the face of client bugginess */
   if (found == 0) {
      free_LockSet(new_vector);
      return ia;
   }

   /* Add to the table. */
   res = allocate_LockSet(new_vector);

   if (debug || LOCKSET_SANITY) {
      print_LockSet("remove-OUT", new_vector);
      sanity_check_locksets("remove-OUT");
   }
   return res;
}


/* Tricky: equivalent to (compare(insert(missing_elem, a), b)), but
 * doesn't do the insertion.  Returns True if they match.
 */
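/* For example (illustrative only): with mutexes M1 < M2 < M3 by address,
   weird_LockSet_equals({M1,M3}, {M1,M2,M3}, M2) is True, since inserting
   M2 into the first set would make the two sets identical, whereas
   weird_LockSet_equals({M1,M3}, {M1,M3}, M2) is False, since the second
   set lacks M2. */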
static Bool
weird_LockSet_equals(LockSet* a, LockSet* b,
                     hg_mutex_t *missing_mutex)
{
   static const Bool debug = False;

   /* Idea is to try and match each element of b against either an
      element of a, or missing_mutex. */

   if (debug) {
      print_LockSet("weird_LockSet_equals a", a);
      print_LockSet("                     b", b);
      VG_(printf)(  "               missing: %p%(y\n",
                    missing_mutex->mutexp, missing_mutex->mutexp);
   }

   /* There are three phases to this compare:
      1 the section from the start of a up to missing_mutex
      2 missing_mutex itself
      3 the section after missing_mutex to the end of a
    */

   /* 1: up to missing_mutex */
   while(a && mutex_cmp(a->mutex, missing_mutex) < 0) {
      if (debug) {
         print_LockSet("     1:a", a);
         print_LockSet("     1:b", b);
      }
      if (b == NULL || mutex_cmp(a->mutex, b->mutex) != 0)
         return False;

      a = a->next;
      b = b->next;
   }

   /* 2: missing_mutex itself */
   if (debug) {
      VG_(printf)(  "     2:missing: %p%(y\n",
                    missing_mutex->mutexp, missing_mutex->mutexp);
      print_LockSet("     2:      b", b);
   }

   sk_assert(a == NULL || mutex_cmp(a->mutex, missing_mutex) >= 0);

   if (b == NULL || mutex_cmp(missing_mutex, b->mutex) != 0)
      return False;

   b = b->next;

   /* 3: after missing_mutex to end */

   while(a && b) {
      if (debug) {
         print_LockSet("     3:a", a);
         print_LockSet("     3:b", b);
      }
      if (mutex_cmp(a->mutex, b->mutex) != 0)
         return False;
      a = a->next;
      b = b->next;
   }

   if (debug)
      VG_(printf)("  a=%p b=%p --> %d\n", a, b, (a == NULL) && (b == NULL));

   return (a == NULL) && (b == NULL);
}


/* Builds the intersection, and then unbuilds it if it's already in the table.
 */
static UInt intersect(UInt ia, UInt ib)
{
   Int       i;
   LockSet*  a = lockset_table[ia];
   LockSet*  b = lockset_table[ib];
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;

#  if DEBUG_MEM_LOCKSET_CHANGES
   VG_(printf)("Intersecting %d %d:\n", ia, ib);
#  endif

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-IN");
#  endif

   /* Fast case -- when the two are the same */
   if (ia == ib) {
#     if DEBUG_MEM_LOCKSET_CHANGES
      VG_(printf)("Fast case -- both the same: %u\n", ia);
      print_LockSet("intersect-same", a);
#     endif
      return ia;
   }

#  if DEBUG_MEM_LOCKSET_CHANGES
   print_LockSet("intersect a", a);
   print_LockSet("intersect b", b);
#  endif

   /* Build the intersection of the two lists */
   while (a && b) {
      if (mutex_cmp(a->mutex, b->mutex) == 0) {
         new_node = VG_(malloc)(sizeof(LockSet));
#        if DEBUG_MEM_LOCKSET_CHANGES
         VG_(printf)("malloc'd %x\n", new_node);
#        endif
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &((*prev_ptr)->next);
         a = a->next;
         b = b->next;
      } else if (mutex_cmp(a->mutex, b->mutex) < 0) {
         a = a->next;
      } else if (mutex_cmp(a->mutex, b->mutex) > 0) {
         b = b->next;
      } else VG_(skin_panic)("STOP PRESS: Laws of arithmetic broken");

      *prev_ptr = NULL;
   }

   /* Now search for it in the table, adding it if not seen before */
   for (i = 0; i < n_lockset_table; i++) {
      if (structural_eq_LockSet(lockset_table[i], new_vector))
         break;
   }

   if (i == n_lockset_table) {
      i = allocate_LockSet(new_vector);
   } else {
      free_LockSet(new_vector);
   }

   /* Check we won't overflow the OTHER_BITS bits of sword->other */
   sk_assert(i < (1 << OTHER_BITS));

#  if LOCKSET_SANITY
   sanity_check_locksets("intersect-OUT");
#  endif

   return i;
}

/* Builds the union, and then unbuilds it if it's already in the table.
 */
static UInt ls_union(UInt ia, UInt ib)
{
   Int       i;
   LockSet*  a = lockset_table[ia];
   LockSet*  b = lockset_table[ib];
   LockSet*  new_vector = NULL;
   LockSet*  new_node;
   LockSet** prev_ptr = &new_vector;

   if(DEBUG_MEM_LOCKSET_CHANGES) {
      VG_(printf)("Unionizing %d %d:\n", ia, ib);
      sanity_check_locksets("union-IN");
   }

   /* Fast case -- when the two are the same */
   if (ia == ib) {
      if(DEBUG_MEM_LOCKSET_CHANGES) {
         VG_(printf)("Fast case -- both the same: %u\n", ia);
         print_LockSet("union same", a);
      }
      return ia;
   }

   if (DEBUG_MEM_LOCKSET_CHANGES) {
      print_LockSet("union a", a);
      print_LockSet("union b", b);
   }

   /* Build the union of the two lists */
   while (a || b) {
      if (a && b && mutex_cmp(a->mutex, b->mutex) == 0) {
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &new_node->next;
         a = a->next;
         b = b->next;
      } else if (!b || (a && b && mutex_cmp(a->mutex, b->mutex) < 0)) {
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = a->mutex;
         *prev_ptr = new_node;
         prev_ptr = &new_node->next;
         a = a->next;
      } else if (!a || (a && b && mutex_cmp(a->mutex, b->mutex) > 0)) {
         new_node = VG_(malloc)(sizeof(LockSet));
         new_node->mutex = b->mutex;
         *prev_ptr = new_node;
         prev_ptr = &new_node->next;
         b = b->next;
      }

      *prev_ptr = NULL;
   }

   /* Now search for it in the table, adding it if not seen before */
   for (i = 0; i < n_lockset_table; i++) {
      if (structural_eq_LockSet(lockset_table[i], new_vector))
         break;
   }

   if (i == n_lockset_table) {
      i = allocate_LockSet(new_vector);
   } else {
      free_LockSet(new_vector);
   }

   /* Check we won't overflow the OTHER_BITS bits of sword->other */
   sk_assert(i < (1 << OTHER_BITS));

   if (LOCKSET_SANITY)
      sanity_check_locksets("union-OUT");

   if (DEBUG_MEM_LOCKSET_CHANGES)
      VG_(printf)("union -> %d\n", i);
   return i;
}

/*------------------------------------------------------------*/
/*--- Implementation of mutex structure.                   ---*/
/*------------------------------------------------------------*/

#define M_MUTEX_HASHSZ  1023

static UInt graph_mark;   /* current mark we're using for graph traversal */

static void record_mutex_error(ThreadId tid, hg_mutex_t *mutex,
                               Char *str, ExeContext *ec);

static hg_mutex_t *mutex_hash[M_MUTEX_HASHSZ];

static Int mutex_cmp(const hg_mutex_t *a, const hg_mutex_t *b)
{
   return (UInt)a->mutexp - (UInt)b->mutexp;
}

/* find or create an hg_mutex for a program's mutex use */
static hg_mutex_t *get_mutex(void *mutexp)
{
   UInt bucket = ((UInt)mutexp) % M_MUTEX_HASHSZ;
   hg_mutex_t *mp;

   for(mp = mutex_hash[bucket]; mp != NULL; mp = mp->next)
      if (mp->mutexp == mutexp)
         return mp;

   mp = VG_(malloc)(sizeof(*mp));
   mp->mutexp = mutexp;
   mp->next = mutex_hash[bucket];
   mutex_hash[bucket] = mp;

   mp->state = MxUnknown;
   mp->tid = VG_INVALID_THREADID;
   mp->location = NULL;

   mp->lockdep = 0;
   mp->mark = graph_mark - 1;

   return mp;
}

static const char *pp_MutexState(MutexState st)
{
   switch(st) {
   case MxLocked:   return "Locked";
   case MxUnlocked: return "Unlocked";
   case MxDead:     return "Dead";
   case MxUnknown:  return "Unknown";
   }
   return "???";
}

#define MARK_LOOP  (graph_mark+0)
#define MARK_DONE  (graph_mark+1)

static Bool check_cycle_inner(hg_mutex_t *mutex, LockSet *ls)
{
   static const Bool debug = False;

   if (mutex->mark == MARK_LOOP)
      return True;           /* found cycle */
   if (mutex->mark == MARK_DONE)
      return False;          /* been here before, it's OK */

   mutex->mark = MARK_LOOP;

   if (debug)
      VG_(printf)("mark=%d visiting %p%(y mutex->lockset=%d\n",
                  graph_mark, mutex->mutexp, mutex->mutexp, mutex->lockdep);
   for(; ls != NULL; ls = ls->next) {
      if (debug)
         VG_(printf)("   %y ls=%p (ls->mutex=%p%(y)\n",
                     mutex->mutexp, ls,
                     ls->mutex ? ls->mutex->mutexp : 0,
                     ls->mutex ? ls->mutex->mutexp : 0);
      if (check_cycle_inner(ls->mutex, lockset_table[ls->mutex->lockdep]))
         return True;
   }
   mutex->mark = MARK_DONE;

   return False;
}

static Bool check_cycle(hg_mutex_t *start, UInt lockset)
{
   graph_mark += 2;          /* clear all marks */

   return check_cycle_inner(start, lockset_table[lockset]);
}
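
/* How the cycle check catches lock-order inversions (illustrative only):
   suppose thread 1 does lock(A); lock(B), so B's lockdep set becomes {A}.
   If thread 2 later holds B and tries to lock A, set_mutex_state() below
   calls check_cycle(A, {B}); the traversal marks A, follows B, follows B's
   lockdep back to A, finds A already marked, and the "take lock before
   dependent locks" error is reported.  Bumping graph_mark by 2 in
   check_cycle() invalidates all marks left by earlier traversals, since
   MARK_LOOP and MARK_DONE are always the two most recent values. */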

/* catch bad mutex state changes (though the common ones are handled
   by core) */
static void set_mutex_state(hg_mutex_t *mutex, MutexState state,
                            ThreadId tid, ThreadState *tst)
{
   static const Bool debug = False;

   if (debug)
      VG_(printf)("\ntid %d changing mutex (%p %y)->%p state %s -> %s\n",
                  tid, mutex, mutex->mutexp, mutex->mutexp,
                  pp_MutexState(mutex->state), pp_MutexState(state));

   if (mutex->state == MxDead) {
      /* can't do anything legal to a destroyed mutex */
      record_mutex_error(tid, mutex,
                         "operate on dead mutex", mutex->location);
      return;
   }

   switch(state) {
   case MxLocked:
      if (mutex->state == MxLocked && mutex->tid != tid)
         record_mutex_error(tid, mutex, "take already held lock",
                            mutex->location);

      sk_assert(!check_cycle(mutex, mutex->lockdep));

      if (debug)
         print_LockSet("thread holding", lockset_table[thread_locks[tid]]);

      if (check_cycle(mutex, thread_locks[tid]))
         record_mutex_error(tid, mutex, "take lock before dependent locks",
                            NULL);
      else {
         mutex->lockdep = ls_union(mutex->lockdep, thread_locks[tid]);

         if (debug) {
            VG_(printf)("giving mutex %p%(y lockdep = %d ",
                        mutex->mutexp, mutex->mutexp, mutex->lockdep);
            print_LockSet("lockdep", lockset_table[mutex->lockdep]);
         }
      }
      mutex->tid = tid;
      break;

   case MxUnlocked:
      if (debug)
         print_LockSet("thread holding", lockset_table[thread_locks[tid]]);

      if (mutex->state != MxLocked) {
         record_mutex_error(tid, mutex,
                            "unlock non-locked mutex", mutex->location);
      }
      if (mutex->tid != tid) {
         record_mutex_error(tid, mutex,
                            "unlock someone else's mutex", mutex->location);
      }
      mutex->tid = VG_INVALID_THREADID;
      break;

   default:
      break;
   }

   mutex->location = VG_(get_ExeContext)(tst);
   mutex->state = state;
}

/*------------------------------------------------------------*/
/*--- Setting and checking permissions.                    ---*/
/*------------------------------------------------------------*/

static
void set_address_range_state ( Addr a, UInt len /* in bytes */,
                               VgeInitStatus status )
{
   Addr end;

#  if DEBUG_MAKE_ACCESSES
   VG_(printf)("make_access: 0x%x, %u, status=%u\n", a, len, status);
#  endif
   //PROF_EVENT(30); PPP

   if (len == 0)
      return;

   if (len > 100 * 1000 * 1000)
      VG_(message)(Vg_UserMsg,
                   "Warning: set address range state: large range %d",
                   len);

   VGP_PUSHCC(VgpSARP);

   /* Memory block may not be aligned or a whole number of words.  Round
    * the start address down and the end address up to word boundaries, so
    * that every word the block touches gets initialised. */
   end = (a + len + 3) & ~3;    /* round up */
   a   &= ~3;                   /* round down */
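   /* Worked example (illustrative only): a = 0x1002, len = 5 gives
      end = (0x1002 + 5 + 3) & ~3 = 0x1008 and a = 0x1000, so the loop
      below initialises the words at 0x1000 and 0x1004, covering the
      requested bytes 0x1002..0x1006. */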

   /* Do it ... */
   switch (status) {
   case Vge_VirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31);  PPP
         init_virgin_sword(a);
      }
      break;

   case Vge_NonVirginInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31);  PPP
         init_nonvirgin_sword(a);
      }
      break;

   case Vge_SegmentInit:
      for ( ; a < end; a += 4) {
         //PROF_EVENT(31);  PPP
         init_magically_inited_sword(a);
      }
      break;

   default:
      VG_(printf)("init_status = %u\n", status);
      VG_(skin_panic)("Unexpected Vge_InitStatus");
   }

   /* Check that zero page and highest page have not been written to
      -- this could happen with buggy syscall wrappers.  Today
      (2001-04-26) had precisely such a problem with
      __NR_setitimer. */
   sk_assert(SK_(cheap_sanity_check)());
   VGP_POPCC(VgpSARP);
}


static void make_segment_readable ( Addr a, UInt len )
{
   //PROF_EVENT(??);    PPP
   set_address_range_state ( a, len, Vge_SegmentInit );
}

static void make_writable ( Addr a, UInt len )
{
   //PROF_EVENT(36);  PPP
   set_address_range_state( a, len, Vge_VirginInit );
}

static void make_readable ( Addr a, UInt len )
{
   //PROF_EVENT(37);  PPP
   set_address_range_state( a, len, Vge_NonVirginInit );
}

/* Block-copy states (needed for implementing realloc()). */
static void copy_address_range_state(Addr src, Addr dst, UInt len)
{
   UInt i;

   //PROF_EVENT(40); PPP
   for (i = 0; i < len; i += 4) {
      shadow_word sword = *(get_sword_addr ( src+i ));
      //PROF_EVENT(41);  PPP
      set_sword ( dst+i, sword );
   }
}

// SSS: put these somewhere better
static void eraser_mem_read (Addr a, UInt data_size, ThreadState *tst);
static void eraser_mem_write(Addr a, UInt data_size, ThreadState *tst);

#define REGPARM(x)      __attribute__((regparm (x)))

static void eraser_mem_help_read_1(Addr a) REGPARM(1);
static void eraser_mem_help_read_2(Addr a) REGPARM(1);
static void eraser_mem_help_read_4(Addr a) REGPARM(1);
static void eraser_mem_help_read_N(Addr a, UInt size) REGPARM(2);

static void eraser_mem_help_write_1(Addr a, UInt val) REGPARM(2);
static void eraser_mem_help_write_2(Addr a, UInt val) REGPARM(2);
static void eraser_mem_help_write_4(Addr a, UInt val) REGPARM(2);
static void eraser_mem_help_write_N(Addr a, UInt size) REGPARM(2);

static
void eraser_pre_mem_read(CorePart part, ThreadState* tst,
                         Char* s, UInt base, UInt size )
{
   eraser_mem_read(base, size, tst);
}

static
void eraser_pre_mem_read_asciiz(CorePart part, ThreadState* tst,
                                Char* s, UInt base )
{
   eraser_mem_read(base, VG_(strlen)((Char*)base), tst);
}

static
void eraser_pre_mem_write(CorePart part, ThreadState* tst,
                          Char* s, UInt base, UInt size )
{
   eraser_mem_write(base, size, tst);
}



static
void eraser_new_mem_startup( Addr a, UInt len, Bool rr, Bool ww, Bool xx )
{
   /* Ignore the permissions, just make it readable.  Seems to work... */
   make_segment_readable(a, len);
}


static
void eraser_new_mem_heap ( Addr a, UInt len, Bool is_inited )
{
   if (is_inited) {
      make_readable(a, len);
   } else {
      make_writable(a, len);
   }
}

static
void eraser_set_perms (Addr a, UInt len,
                       Bool rr, Bool ww, Bool xx)
{
   if      (rr) make_readable(a, len);
   else if (ww) make_writable(a, len);
   /* else do nothing */
}


/*--------------------------------------------------------------*/
/*--- Initialise the memory audit system on program startup. ---*/
/*--------------------------------------------------------------*/

static
void init_shadow_memory(void)
{
   Int i;

   for (i = 0; i < ESEC_MAP_WORDS; i++)
      distinguished_secondary_map.swords[i] = virgin_sword;

   /* These entries gradually get overwritten as the used address
      space expands. */
   for (i = 0; i < 65536; i++)
      primary_map[i] = &distinguished_secondary_map;
}


/*--------------------------------------------------------------*/
/*--- Machinery to support sanity checking                   ---*/
/*--------------------------------------------------------------*/

/* Check that nobody has spuriously claimed that the first or last 16
   pages (64 KB) of address space have become accessible.  Failure of
   the following does not per se indicate an internal consistency
   problem, but it is so likely to that we really want to know
   about it if so. */

Bool SK_(cheap_sanity_check) ( void )
{
   if (VGE_IS_DISTINGUISHED_SM(primary_map[0]) &&
       VGE_IS_DISTINGUISHED_SM(primary_map[65535]))
      return True;
   else
      return False;
}


Bool SK_(expensive_sanity_check)(void)
{
   Int i;

   /* Make sure nobody changed the distinguished secondary. */
   for (i = 0; i < ESEC_MAP_WORDS; i++)
      if (distinguished_secondary_map.swords[i].other != virgin_sword.other ||
          distinguished_secondary_map.swords[i].state != virgin_sword.state)
         return False;

   return True;
}


/*--------------------------------------------------------------*/
/*--- Instrumentation                                        ---*/
/*--------------------------------------------------------------*/

/* Create and return an instrumented version of cb_in.  Free cb_in
   before returning. */
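/* Roughly, the transformation performed below (illustrative only): for a
   one-byte LOAD from the address held in TempReg t, the emitted ucode is

      CCALL eraser_mem_help_read_1(t)
      LOAD  1, (t), ...            <- original instruction, copied through

   and similarly for STOREs; FPU reads/writes instead call the _N helpers
   with an explicit size argument passed in a fresh temporary. */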
|  | 1182 | UCodeBlock* SK_(instrument) ( UCodeBlock* cb_in, Addr not_used ) | 
|  | 1183 | { | 
|  | 1184 | UCodeBlock* cb; | 
|  | 1185 | Int         i; | 
|  | 1186 | UInstr*     u_in; | 
|  | 1187 | Int         t_size = INVALID_TEMPREG; | 
|  | 1188 |  | 
| njn | 4ba5a79 | 2002-09-30 10:23:54 +0000 | [diff] [blame] | 1189 | cb = VG_(alloc_UCodeBlock)(); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1190 | cb->nextTemp = cb_in->nextTemp; | 
|  | 1191 |  | 
|  | 1192 | for (i = 0; i < cb_in->used; i++) { | 
|  | 1193 | u_in = &cb_in->instrs[i]; | 
|  | 1194 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1195 | switch (u_in->opcode) { | 
|  | 1196 |  | 
|  | 1197 | case NOP: case CALLM_S: case CALLM_E: | 
|  | 1198 | break; | 
|  | 1199 |  | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 1200 | case LOAD: { | 
|  | 1201 | void (*help)(Addr); | 
|  | 1202 | sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size); | 
|  | 1203 |  | 
|  | 1204 | switch(u_in->size) { | 
|  | 1205 | case 1: help = eraser_mem_help_read_1; break; | 
|  | 1206 | case 2: help = eraser_mem_help_read_2; break; | 
|  | 1207 | case 4: help = eraser_mem_help_read_4; break; | 
|  | 1208 | default: | 
|  | 1209 | VG_(skin_panic)("bad size"); | 
|  | 1210 | } | 
|  | 1211 |  | 
|  | 1212 | uInstr1(cb, CCALL, 0, TempReg, u_in->val1); | 
|  | 1213 | uCCall(cb, (Addr)help, 1, 1, False); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1214 |  | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 1215 | VG_(copy_UInstr)(cb, u_in); | 
|  | 1216 | t_size = INVALID_TEMPREG; | 
|  | 1217 | break; | 
|  | 1218 | } | 
|  | 1219 |  | 
|  | 1220 | case FPU_R: { | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 1221 | sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size || | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1222 | 8 == u_in->size || 10 == u_in->size); | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 1223 |  | 
|  | 1224 | t_size = newTemp(cb); | 
|  | 1225 | uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size); | 
|  | 1226 | uLiteral(cb, (UInt)u_in->size); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1227 |  | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 1228 | uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size); | 
|  | 1229 | uCCall(cb, (Addr) & eraser_mem_help_read_N, 2, 2, False); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1230 |  | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 1231 | VG_(copy_UInstr)(cb, u_in); | 
|  | 1232 | t_size = INVALID_TEMPREG; | 
|  | 1233 | break; | 
|  | 1234 | } | 
|  | 1235 |  | 
|  | 1236 | case STORE: { | 
|  | 1237 | void (*help)(Addr, UInt); | 
|  | 1238 | sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size); | 
|  | 1239 |  | 
|  | 1240 | switch(u_in->size) { | 
|  | 1241 | case 1: help = eraser_mem_help_write_1; break; | 
|  | 1242 | case 2: help = eraser_mem_help_write_2; break; | 
|  | 1243 | case 4: help = eraser_mem_help_write_4; break; | 
|  | 1244 | default: | 
|  | 1245 | VG_(skin_panic)("bad size"); | 
|  | 1246 | } | 
|  | 1247 |  | 
|  | 1248 | uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, u_in->val1); | 
|  | 1249 | uCCall(cb, (Addr)help, 2, 2, False); | 
|  | 1250 |  | 
|  | 1251 | VG_(copy_UInstr)(cb, u_in); | 
|  | 1252 | t_size = INVALID_TEMPREG; | 
|  | 1253 | break; | 
|  | 1254 | } | 
|  | 1255 |  | 
|  | 1256 | case FPU_W: { | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 1257 | sk_assert(1 == u_in->size || 2 == u_in->size || 4 == u_in->size || | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1258 | 8 == u_in->size || 10 == u_in->size); | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 1259 |  | 
|  | 1260 | t_size = newTemp(cb); | 
|  | 1261 | uInstr2(cb, MOV,   4, Literal, 0, TempReg, t_size); | 
|  | 1262 | uLiteral(cb, (UInt)u_in->size); | 
|  | 1263 | uInstr2(cb, CCALL, 0, TempReg, u_in->val2, TempReg, t_size); | 
|  | 1264 | uCCall(cb, (Addr) & eraser_mem_help_write_N, 2, 2, False); | 
|  | 1265 |  | 
|  | 1266 | VG_(copy_UInstr)(cb, u_in); | 
|  | 1267 | t_size = INVALID_TEMPREG; | 
|  | 1268 | break; | 
|  | 1269 | } | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1270 |  | 
|  | 1271 | default: | 
| njn | 4ba5a79 | 2002-09-30 10:23:54 +0000 | [diff] [blame] | 1272 | VG_(copy_UInstr)(cb, u_in); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1273 | break; | 
|  | 1274 | } | 
|  | 1275 | } | 
|  | 1276 |  | 
| njn | 4ba5a79 | 2002-09-30 10:23:54 +0000 | [diff] [blame] | 1277 | VG_(free_UCodeBlock)(cb_in); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1278 | return cb; | 
|  | 1279 | } | 
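
/* A minimal sketch (not part of the tool) of what the instrumentation above
   amounts to at the C level: for a 4-byte LOAD the generated code calls the
   read helper on the address first, and only then performs the load itself.
   'traced_load4' is a made-up name used purely for illustration. */
#if 0
static UInt traced_load4 ( UInt* p )
{
   eraser_mem_help_read_4( (Addr)p );   /* update shadow state, maybe report */
   return *p;                           /* the original load */
}
#endif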
|  | 1280 |  | 
|  | 1281 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1282 | /*------------------------------------------------------------*/ | 
|  | 1283 | /*--- Shadow chunks info                                   ---*/ | 
|  | 1284 | /*------------------------------------------------------------*/ | 
|  | 1285 |  | 
|  | 1286 | #define SHADOW_EXTRA	2 | 
|  | 1287 |  | 
|  | 1288 | static __inline__ | 
|  | 1289 | void set_sc_where( ShadowChunk* sc, ExeContext* ec ) | 
|  | 1290 | { | 
|  | 1291 | sc->skin_extra[0] = (UInt)ec; | 
|  | 1292 | } | 
|  | 1293 |  | 
|  | 1294 | static __inline__ | 
|  | 1295 | ExeContext *get_sc_where( ShadowChunk* sc ) | 
|  | 1296 | { | 
|  | 1297 | return (ExeContext*)sc->skin_extra[0]; | 
|  | 1298 | } | 
|  | 1299 |  | 
|  | 1300 | static __inline__ | 
|  | 1301 | void set_sc_tid(ShadowChunk *sc, ThreadId tid) | 
|  | 1302 | { | 
|  | 1303 | sc->skin_extra[1] = (UInt)tid; | 
|  | 1304 | } | 
|  | 1305 |  | 
|  | 1306 | static __inline__ | 
|  | 1307 | ThreadId get_sc_tid(ShadowChunk *sc) | 
|  | 1308 | { | 
|  | 1309 | return (ThreadId)sc->skin_extra[1]; | 
|  | 1310 | } | 
|  | 1311 |  | 
|  | 1312 | void SK_(complete_shadow_chunk) ( ShadowChunk* sc, ThreadState* tst ) | 
|  | 1313 | { | 
|  | 1314 | set_sc_where( sc, VG_(get_ExeContext) ( tst ) ); | 
|  | 1315 | set_sc_tid(sc, VG_(get_tid_from_ThreadState(tst))); | 
|  | 1316 | } | 
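
/* A small usage sketch (illustration only): when an error report needs the
   allocation site of a malloc'd block, the two extra words stored above can
   be read back via the accessors.  'example_report_alloc_site' is a made-up
   name; describe_addr() below does the real work. */
#if 0
static void example_report_alloc_site ( ShadowChunk* sc )
{
   ExeContext* where = get_sc_where(sc);   /* stack trace at allocation */
   ThreadId    tid   = get_sc_tid(sc);     /* thread that allocated it  */
   VG_(message)(Vg_UserMsg, "  block alloc'd by thread %d at", tid);
   VG_(pp_ExeContext)(where);
}
#endif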
|  | 1317 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1318 | /*--------------------------------------------------------------------*/ | 
|  | 1319 | /*--- Error and suppression handling                               ---*/ | 
|  | 1320 | /*--------------------------------------------------------------------*/ | 
|  | 1321 |  | 
|  | 1322 | typedef | 
|  | 1323 | enum { | 
|  | 1324 | /* Possible data race */ | 
|  | 1325 | EraserSupp | 
|  | 1326 | } | 
|  | 1327 | EraserSuppKind; | 
|  | 1328 |  | 
|  | 1329 | /* What kind of error it is. */ | 
|  | 1330 | typedef | 
|  | 1331 | enum { | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1332 | EraserErr,		/* data-race */ | 
|  | 1333 | MutexErr			/* mutex operations */ | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1334 | } | 
|  | 1335 | EraserErrorKind; | 
|  | 1336 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1337 | /* The classification of a faulting address. */ | 
|  | 1338 | typedef | 
|  | 1339 | enum { Undescribed, /* as-yet unclassified */ | 
|  | 1340 | Stack, | 
|  | 1341 | Unknown, /* classification yielded nothing useful */ | 
|  | 1342 | Mallocd, | 
|  | 1343 | Segment | 
|  | 1344 | } | 
|  | 1345 | AddrKind; | 
|  | 1346 | /* Records info about a faulting address. */ | 
|  | 1347 | typedef | 
|  | 1348 | struct { | 
|  | 1349 | /* ALL */ | 
|  | 1350 | AddrKind akind; | 
|  | 1351 | /* Mallocd */ | 
|  | 1352 | Int blksize; | 
|  | 1353 | /* Mallocd */ | 
|  | 1354 | Int rwoffset; | 
|  | 1355 | /* Mallocd */ | 
|  | 1356 | ExeContext* lastchange; | 
|  | 1357 | ThreadId lasttid; | 
|  | 1358 | /* Stack */ | 
|  | 1359 | ThreadId stack_tid; | 
|  | 1360 | /* Segment */ | 
|  | 1361 | const Char* filename; | 
|  | 1362 | const Char* section; | 
|  | 1363 | /* True if it is just below %esp -- could be a gcc bug. */ | 
|  | 1364 | Bool maybe_gcc; | 
|  | 1365 | } | 
|  | 1366 | AddrInfo; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1367 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1368 | /* What kind of memory access is involved in the error? */ | 
|  | 1369 | typedef | 
|  | 1370 | enum { ReadAxs, WriteAxs, ExecAxs } | 
|  | 1371 | AxsKind; | 
|  | 1372 |  | 
|  | 1373 | /* Extra context for memory errors */ | 
|  | 1374 | typedef | 
|  | 1375 | struct { | 
|  | 1376 | AxsKind axskind; | 
|  | 1377 | Int size; | 
|  | 1378 | AddrInfo addrinfo; | 
|  | 1379 | Bool isWrite; | 
|  | 1380 | shadow_word prevstate; | 
|  | 1381 | /* MutexErr */ | 
|  | 1382 | hg_mutex_t *mutex; | 
|  | 1383 | ExeContext *lasttouched; | 
|  | 1384 | ThreadId    lasttid; | 
|  | 1385 | } | 
|  | 1386 | HelgrindError; | 
|  | 1387 |  | 
|  | 1388 | static __inline__ | 
|  | 1389 | void clear_AddrInfo ( AddrInfo* ai ) | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1390 | { | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1391 | ai->akind      = Unknown; | 
|  | 1392 | ai->blksize    = 0; | 
|  | 1393 | ai->rwoffset   = 0; | 
|  | 1394 | ai->lastchange = NULL; | 
|  | 1395 | ai->lasttid    = VG_INVALID_THREADID; | 
|  | 1396 | ai->filename   = NULL; | 
|  | 1397 | ai->section    = "???"; | 
|  | 1398 | ai->stack_tid  = VG_INVALID_THREADID; | 
|  | 1399 | ai->maybe_gcc  = False; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1400 | } | 
|  | 1401 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1402 | static __inline__ | 
|  | 1403 | void clear_HelgrindError ( HelgrindError* err_extra ) | 
|  | 1404 | { | 
|  | 1405 | err_extra->axskind    = ReadAxs; | 
|  | 1406 | err_extra->size       = 0; | 
|  | 1407 | err_extra->mutex      = NULL; | 
|  | 1408 | err_extra->lasttouched= NULL; | 
|  | 1409 | err_extra->lasttid    = VG_INVALID_THREADID; | 
|  | 1410 | err_extra->prevstate.state  = Vge_Virgin; | 
|  | 1411 | err_extra->prevstate.other  = 0; | 
|  | 1412 | clear_AddrInfo ( &err_extra->addrinfo ); | 
|  | 1413 | err_extra->isWrite    = False; | 
|  | 1414 | } | 
|  | 1415 |  | 
|  | 1416 |  | 
|  | 1417 |  | 
|  | 1418 | /* Describe an address as best you can, for error messages, | 
|  | 1419 | putting the result in ai. */ | 
|  | 1420 |  | 
|  | 1421 | static void describe_addr ( Addr a, AddrInfo* ai ) | 
|  | 1422 | { | 
|  | 1423 | ShadowChunk* sc; | 
|  | 1424 |  | 
|  | 1425 | /* Nested functions (a GCC extension) -- they need the lexical scoping of 'a'. */ | 
|  | 1426 |  | 
|  | 1427 | /* Closure for searching thread stacks */ | 
|  | 1428 | Bool addr_is_in_bounds(Addr stack_min, Addr stack_max) | 
|  | 1429 | { | 
|  | 1430 | return (stack_min <= a && a <= stack_max); | 
|  | 1431 | } | 
|  | 1432 | /* Closure for searching malloc'd and free'd lists */ | 
|  | 1433 | Bool addr_is_in_block(ShadowChunk *sh_ch) | 
|  | 1434 | { | 
|  | 1435 | return VG_(addr_is_in_block) ( a, sh_ch->data, sh_ch->size ); | 
|  | 1436 | } | 
|  | 1437 |  | 
|  | 1438 | /* Search for it in segments */ | 
|  | 1439 | { | 
|  | 1440 | const SegInfo *seg; | 
|  | 1441 |  | 
|  | 1442 | for(seg = VG_(next_seginfo)(NULL); | 
|  | 1443 | seg != NULL; | 
|  | 1444 | seg = VG_(next_seginfo)(seg)) { | 
|  | 1445 | Addr base = VG_(seg_start)(seg); | 
|  | 1446 | UInt size = VG_(seg_size)(seg); | 
|  | 1447 | const UChar *filename = VG_(seg_filename)(seg); | 
|  | 1448 |  | 
|  | 1449 | if (a >= base && a < base+size) { | 
|  | 1450 | ai->akind = Segment; | 
|  | 1451 | ai->blksize = size; | 
|  | 1452 | ai->rwoffset = a - base; | 
|  | 1453 | ai->filename = filename; | 
|  | 1454 |  | 
|  | 1455 | switch(VG_(seg_sect_kind)(a)) { | 
|  | 1456 | case Vg_SectText:	ai->section = "text"; break; | 
|  | 1457 | case Vg_SectData:	ai->section = "data"; break; | 
|  | 1458 | case Vg_SectBSS:	ai->section = "BSS"; break; | 
|  | 1459 | case Vg_SectGOT:	ai->section = "GOT"; break; | 
|  | 1460 | case Vg_SectPLT:	ai->section = "PLT"; break; | 
|  | 1461 | case Vg_SectUnknown: | 
|  | 1462 | default: | 
|  | 1463 | ai->section = "???"; break; | 
|  | 1464 | } | 
|  | 1465 |  | 
|  | 1466 | return; | 
|  | 1467 | } | 
|  | 1468 | } | 
|  | 1469 | } | 
|  | 1470 |  | 
|  | 1471 | /* Search for a currently malloc'd block which might bracket it. */ | 
|  | 1472 | sc = VG_(any_matching_mallocd_ShadowChunks)(addr_is_in_block); | 
|  | 1473 | if (NULL != sc) { | 
|  | 1474 | ai->akind      = Mallocd; | 
|  | 1475 | ai->blksize    = sc->size; | 
|  | 1476 | ai->rwoffset   = (Int)(a) - (Int)(sc->data); | 
|  | 1477 | ai->lastchange = get_sc_where(sc); | 
|  | 1478 | ai->lasttid    = get_sc_tid(sc); | 
|  | 1479 | return; | 
|  | 1480 | } | 
|  | 1481 | /* Clueless ... */ | 
|  | 1482 | ai->akind = Unknown; | 
|  | 1483 | return; | 
|  | 1484 | } | 
|  | 1485 |  | 
|  | 1486 |  | 
|  | 1487 | /* Creates a copy of the err_extra, updates the copy with address info if | 
|  | 1488 | necessary, sticks the copy into the SkinError. */ | 
|  | 1489 | void SK_(dup_extra_and_update)(SkinError* err) | 
|  | 1490 | { | 
|  | 1491 | HelgrindError* err_extra; | 
|  | 1492 |  | 
|  | 1493 | err_extra  = VG_(malloc)(sizeof(HelgrindError)); | 
|  | 1494 | *err_extra = *((HelgrindError*)err->extra); | 
|  | 1495 |  | 
|  | 1496 | if (err_extra->addrinfo.akind == Undescribed) | 
|  | 1497 | describe_addr ( err->addr, &(err_extra->addrinfo) ); | 
|  | 1498 |  | 
|  | 1499 | err->extra = err_extra; | 
|  | 1500 | } | 
|  | 1501 |  | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1502 | static void record_eraser_error ( ThreadState *tst, Addr a, Bool is_write, | 
|  | 1503 | shadow_word prevstate ) | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1504 | { | 
|  | 1505 | HelgrindError err_extra; | 
| sewardj | 1806d7f | 2002-10-22 05:05:49 +0000 | [diff] [blame] | 1506 | static const shadow_word err_sw = { TID_INDICATING_ALL, Vge_Excl }; | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1507 |  | 
|  | 1508 | clear_HelgrindError(&err_extra); | 
|  | 1509 | err_extra.isWrite = is_write; | 
|  | 1510 | err_extra.addrinfo.akind = Undescribed; | 
|  | 1511 | err_extra.prevstate = prevstate; | 
|  | 1512 |  | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1513 | VG_(maybe_record_error)( tst, EraserErr, a, | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1514 | (is_write ? "writing" : "reading"), | 
|  | 1515 | &err_extra); | 
|  | 1516 |  | 
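/* Mark the word as exclusively owned by the pseudo-tid TID_INDICATING_ALL,
   so that later accesses by any thread hit the EXCL/ERR cases in
   eraser_mem_read/write and the same location is not reported again. */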
| sewardj | 1806d7f | 2002-10-22 05:05:49 +0000 | [diff] [blame] | 1517 | set_sword(a, err_sw); | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1518 | } | 
|  | 1519 |  | 
|  | 1520 | static void record_mutex_error(ThreadId tid, hg_mutex_t *mutex, | 
|  | 1521 | Char *str, ExeContext *ec) | 
|  | 1522 | { | 
|  | 1523 | HelgrindError err_extra; | 
|  | 1524 |  | 
|  | 1525 | clear_HelgrindError(&err_extra); | 
|  | 1526 | err_extra.addrinfo.akind = Undescribed; | 
|  | 1527 | err_extra.mutex = mutex; | 
|  | 1528 | err_extra.lasttouched = ec; | 
|  | 1529 | err_extra.lasttid = tid; | 
|  | 1530 |  | 
|  | 1531 | VG_(maybe_record_error)(VG_(get_ThreadState)(tid), MutexErr, | 
|  | 1532 | (Addr)mutex->mutexp, str, &err_extra); | 
|  | 1533 | } | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1534 |  | 
|  | 1535 | Bool SK_(eq_SkinError) ( VgRes not_used, | 
|  | 1536 | SkinError* e1, SkinError* e2 ) | 
|  | 1537 | { | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1538 | sk_assert(e1->ekind == e2->ekind); | 
|  | 1539 |  | 
|  | 1540 | switch(e1->ekind) { | 
|  | 1541 | case EraserErr: | 
|  | 1542 | return e1->addr == e2->addr; | 
|  | 1543 |  | 
|  | 1544 | case MutexErr: | 
|  | 1545 | return e1->addr == e2->addr; | 
|  | 1546 | } | 
|  | 1547 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1548 | if (e1->string != e2->string) return False; | 
|  | 1549 | if (0 != VG_(strcmp)(e1->string, e2->string)) return False; | 
|  | 1550 | return True; | 
|  | 1551 | } | 
|  | 1552 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1553 | static void pp_AddrInfo ( Addr a, AddrInfo* ai ) | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1554 | { | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1555 | switch (ai->akind) { | 
|  | 1556 | case Stack: | 
|  | 1557 | VG_(message)(Vg_UserMsg, | 
|  | 1558 | "  Address %p is on thread %d's stack", | 
|  | 1559 | a, ai->stack_tid); | 
|  | 1560 | break; | 
|  | 1561 | case Unknown: | 
|  | 1562 | if (ai->maybe_gcc) { | 
|  | 1563 | VG_(message)(Vg_UserMsg, | 
|  | 1564 | "  Address %p is just below %%esp.  Possibly a bug in GCC/G++", | 
|  | 1565 | a); | 
|  | 1566 | VG_(message)(Vg_UserMsg, | 
|  | 1567 | "   v 2.96 or 3.0.X.  To suppress, use: --workaround-gcc296-bugs=yes"); | 
|  | 1568 | } else { | 
|  | 1569 | VG_(message)(Vg_UserMsg, | 
|  | 1570 | "  Address %p is not stack'd, malloc'd or free'd", a); | 
|  | 1571 | } | 
|  | 1572 | break; | 
|  | 1573 | case Segment: | 
|  | 1574 | VG_(message)(Vg_UserMsg, | 
|  | 1575 | "  Address %p is in %s section of %s", | 
|  | 1576 | a, ai->section, ai->filename); | 
|  | 1577 | break; | 
|  | 1578 | case Mallocd: { | 
|  | 1579 | UInt delta; | 
|  | 1580 | UChar* relative; | 
|  | 1581 | if (ai->rwoffset < 0) { | 
|  | 1582 | delta    = (UInt)(- ai->rwoffset); | 
|  | 1583 | relative = "before"; | 
|  | 1584 | } else if (ai->rwoffset >= ai->blksize) { | 
|  | 1585 | delta    = ai->rwoffset - ai->blksize; | 
|  | 1586 | relative = "after"; | 
|  | 1587 | } else { | 
|  | 1588 | delta    = ai->rwoffset; | 
|  | 1589 | relative = "inside"; | 
|  | 1590 | } | 
|  | 1591 | VG_(message)(Vg_UserMsg, | 
|  | 1592 | "  Address %p is %d bytes %s a block of size %d alloc'd by thread %d at", | 
|  | 1593 | a, delta, relative, | 
|  | 1594 | ai->blksize, | 
|  | 1595 | ai->lasttid); | 
| sewardj | 5481f8f | 2002-10-20 19:43:47 +0000 | [diff] [blame] | 1596 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1597 | VG_(pp_ExeContext)(ai->lastchange); | 
|  | 1598 | break; | 
|  | 1599 | } | 
|  | 1600 | default: | 
|  | 1601 | VG_(skin_panic)("pp_AddrInfo"); | 
|  | 1602 | } | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1603 | } | 
|  | 1604 |  | 
|  | 1605 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1606 | void SK_(pp_SkinError) ( SkinError* err, void (*pp_ExeContext)(void) ) | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1607 | { | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1608 | HelgrindError *extra = (HelgrindError *)err->extra; | 
|  | 1609 | Char buf[100]; | 
|  | 1610 | Char *msg = buf; | 
|  | 1611 |  | 
|  | 1612 | *msg = '\0'; | 
|  | 1613 |  | 
|  | 1614 | switch(err->ekind) { | 
|  | 1615 | case EraserErr: | 
|  | 1616 | VG_(message)(Vg_UserMsg, "Possible data race %s variable at %p %(y", | 
|  | 1617 | err->string, err->addr, err->addr ); | 
|  | 1618 | pp_ExeContext(); | 
|  | 1619 |  | 
|  | 1620 | switch(extra->prevstate.state) { | 
|  | 1621 | case Vge_Virgin: | 
|  | 1622 | /* shouldn't be possible to go directly from virgin -> error */ | 
|  | 1623 | VG_(sprintf)(buf, "virgin!?"); | 
|  | 1624 | break; | 
|  | 1625 |  | 
|  | 1626 | case Vge_Excl: | 
|  | 1627 | sk_assert(extra->prevstate.other != TID_INDICATING_ALL); | 
|  | 1628 | VG_(sprintf)(buf, "exclusively owned by thread %d", extra->prevstate.other); | 
|  | 1629 | break; | 
|  | 1630 |  | 
|  | 1631 | case Vge_Shar: | 
|  | 1632 | case Vge_SharMod: { | 
|  | 1633 | LockSet *ls; | 
|  | 1634 | UInt count; | 
|  | 1635 | Char *cp; | 
|  | 1636 |  | 
|  | 1637 | if (lockset_table[extra->prevstate.other] == NULL) { | 
|  | 1638 | VG_(sprintf)(buf, "shared %s, no locks", | 
|  | 1639 | extra->prevstate.state == Vge_Shar ? "RO" : "RW"); | 
|  | 1640 | break; | 
|  | 1641 | } | 
|  | 1642 |  | 
|  | 1643 | for(count = 0, ls = lockset_table[extra->prevstate.other]; ls != NULL; ls = ls->next) | 
|  | 1644 | count++; | 
|  | 1645 | msg = VG_(malloc)(25 + (120 * count)); | 
|  | 1646 |  | 
|  | 1647 | cp = msg; | 
|  | 1648 | cp += VG_(sprintf)(cp, "shared %s, locked by: ", | 
|  | 1649 | extra->prevstate.state == Vge_Shar ? "RO" : "RW"); | 
|  | 1650 | for(ls = lockset_table[extra->prevstate.other]; ls != NULL; ls = ls->next) | 
|  | 1651 | cp += VG_(sprintf)(cp, "%p%(y, ", ls->mutex->mutexp, ls->mutex->mutexp); | 
|  | 1652 | cp[-2] = '\0'; | 
|  | 1653 | break; | 
|  | 1654 | } | 
|  | 1655 | } | 
|  | 1656 |  | 
|  | 1657 | if (*msg) { | 
|  | 1658 | VG_(message)(Vg_UserMsg, "  Previous state: %s", msg); | 
|  | 1659 | if (msg != buf) | 
|  | 1660 | VG_(free)(msg); | 
|  | 1661 | } | 
|  | 1662 | pp_AddrInfo(err->addr, &extra->addrinfo); | 
|  | 1663 | break; | 
|  | 1664 |  | 
|  | 1665 | case MutexErr: | 
|  | 1666 | VG_(message)(Vg_UserMsg, "Mutex problem at %p%(y trying to %s at", | 
|  | 1667 | err->addr, err->addr, err->string ); | 
|  | 1668 | pp_ExeContext(); | 
|  | 1669 | if (extra->lasttouched) { | 
|  | 1670 | VG_(message)(Vg_UserMsg, "  last touched by thread %d at", extra->lasttid); | 
|  | 1671 | VG_(pp_ExeContext)(extra->lasttouched); | 
|  | 1672 | } | 
|  | 1673 | pp_AddrInfo(err->addr, &extra->addrinfo); | 
|  | 1674 | break; | 
|  | 1675 | } | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1676 | } | 
|  | 1677 |  | 
|  | 1678 |  | 
|  | 1679 | Bool SK_(recognised_suppression) ( Char* name, SuppKind *skind ) | 
|  | 1680 | { | 
|  | 1681 | if (0 == VG_(strcmp)(name, "Eraser")) { | 
|  | 1682 | *skind = EraserSupp; | 
|  | 1683 | return True; | 
|  | 1684 | } else { | 
|  | 1685 | return False; | 
|  | 1686 | } | 
|  | 1687 | } | 
|  | 1688 |  | 
|  | 1689 |  | 
|  | 1690 | Bool SK_(read_extra_suppression_info) ( Int fd, Char* buf, | 
|  | 1691 | Int nBuf, SkinSupp* s ) | 
|  | 1692 | { | 
|  | 1693 | /* do nothing -- no extra suppression info present.  Return True to | 
|  | 1694 | indicate nothing bad happened. */ | 
|  | 1695 | return True; | 
|  | 1696 | } | 
|  | 1697 |  | 
|  | 1698 |  | 
|  | 1699 | Bool SK_(error_matches_suppression)(SkinError* err, SkinSupp* su) | 
|  | 1700 | { | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 1701 | sk_assert( su->skind == EraserSupp); | 
|  | 1702 | sk_assert(err->ekind == EraserErr); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1703 | return True; | 
|  | 1704 | } | 
|  | 1705 |  | 
|  | 1706 |  | 
|  | 1707 | // SSS: copying mutex's pointer... is that ok?  Could they get deallocated? | 
|  | 1708 | // (does that make sense, deallocating a mutex?) | 
|  | 1709 | static void eraser_post_mutex_lock(ThreadId tid, void* void_mutex) | 
|  | 1710 | { | 
|  | 1711 | Int i = 1; | 
|  | 1712 | LockSet*  new_node; | 
|  | 1713 | LockSet*  p; | 
|  | 1714 | LockSet** q; | 
| sewardj | 274c601 | 2002-10-22 04:54:55 +0000 | [diff] [blame] | 1715 | hg_mutex_t *mutex = get_mutex(void_mutex); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1716 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1717 | set_mutex_state(mutex, MxLocked, tid, VG_(get_ThreadState)(tid)); | 
|  | 1718 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1719 | #  if DEBUG_LOCKS | 
| sewardj | 274c601 | 2002-10-22 04:54:55 +0000 | [diff] [blame] | 1720 | VG_(printf)("lock  (%u, %x)\n", tid, mutex->mutexp); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1721 | #  endif | 
|  | 1722 |  | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 1723 | sk_assert(tid < VG_N_THREADS && | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1724 | thread_locks[tid] < M_LOCKSET_TABLE); | 
|  | 1725 | /* VG_(printf)("LOCK: held %d, new %p\n", thread_locks[tid], mutex); */ | 
|  | 1726 | #  if LOCKSET_SANITY > 1 | 
|  | 1727 | sanity_check_locksets("eraser_post_mutex_lock-IN"); | 
|  | 1728 | #  endif | 
|  | 1729 |  | 
|  | 1730 | while (True) { | 
|  | 1731 | if (i == M_LOCKSET_TABLE) | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 1732 | VG_(skin_panic)("lockset table full -- increase M_LOCKSET_TABLE"); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1733 |  | 
|  | 1734 | /* we have scanned the whole table without a match, so the lockset doesn't already exist -- create it */ | 
|  | 1735 | if (i == n_lockset_table) { | 
|  | 1736 |  | 
|  | 1737 | p = lockset_table[thread_locks[tid]]; | 
|  | 1738 | q = &lockset_table[i]; | 
|  | 1739 |  | 
|  | 1740 | /* copy the thread's lockset, creating a new list */ | 
|  | 1741 | while (p != NULL) { | 
|  | 1742 | new_node = VG_(malloc)(sizeof(LockSet)); | 
|  | 1743 | new_node->mutex = p->mutex; | 
|  | 1744 | *q = new_node; | 
|  | 1745 | q = &((*q)->next); | 
|  | 1746 | p = p->next; | 
|  | 1747 | } | 
|  | 1748 | (*q) = NULL; | 
|  | 1749 |  | 
|  | 1750 | /* find spot for the new mutex in the new list */ | 
|  | 1751 | p = lockset_table[i]; | 
|  | 1752 | q = &lockset_table[i]; | 
| sewardj | 274c601 | 2002-10-22 04:54:55 +0000 | [diff] [blame] | 1753 | while (NULL != p && mutex_cmp(mutex, p->mutex) > 0) { | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1754 | p = p->next; | 
|  | 1755 | q = &((*q)->next); | 
|  | 1756 | } | 
|  | 1757 |  | 
|  | 1758 | /* insert new mutex in new list */ | 
|  | 1759 | new_node = VG_(malloc)(sizeof(LockSet)); | 
|  | 1760 | new_node->mutex = mutex; | 
|  | 1761 | new_node->next = p; | 
|  | 1762 | (*q) = new_node; | 
|  | 1763 |  | 
|  | 1764 | p = lockset_table[i]; | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 1765 | sk_assert(i == n_lockset_table); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1766 | n_lockset_table++; | 
|  | 1767 |  | 
|  | 1768 | #        if DEBUG_NEW_LOCKSETS | 
|  | 1769 | VG_(printf)("new lockset vector (%d): ", i); | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1770 | print_LockSet("newvec", p); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1771 | #        endif | 
|  | 1772 |  | 
|  | 1773 | goto done; | 
|  | 1774 |  | 
|  | 1775 | } else { | 
|  | 1776 | /* If this succeeds, the required vector (with the new mutex added) | 
|  | 1777 | * already exists in the table at position i.  Otherwise, keep | 
|  | 1778 | * looking. */ | 
|  | 1779 | if (weird_LockSet_equals(lockset_table[thread_locks[tid]], | 
|  | 1780 | lockset_table[i], mutex)) { | 
|  | 1781 | goto done; | 
|  | 1782 | } | 
|  | 1783 | } | 
|  | 1784 | /* if we get to here, table lockset didn't match the new thread | 
|  | 1785 | * lockset, so keep looking */ | 
|  | 1786 | i++; | 
|  | 1787 | } | 
|  | 1788 |  | 
|  | 1789 | done: | 
|  | 1790 | /* Update the thread's lock vector */ | 
|  | 1791 | thread_locks[tid] = i; | 
|  | 1792 | #  if DEBUG_LOCKS | 
|  | 1793 | VG_(printf)("tid %u now has lockset %d\n", tid, i); | 
|  | 1794 | #  endif | 
|  | 1795 |  | 
|  | 1796 | #  if LOCKSET_SANITY > 1 | 
|  | 1797 | sanity_check_locksets("eraser_post_mutex_lock-OUT"); | 
|  | 1798 | #  endif | 
|  | 1799 |  | 
|  | 1800 | } | 
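
/* Worked example (illustration only): suppose thread 3 already holds lockset
   {mu1}, stored at some index k in lockset_table, and now locks mu2.  The
   loop above first scans the table for an existing vector equal to {mu1}
   plus mu2; if none is found, it appends a new sorted list {mu1, mu2} at
   index n_lockset_table.  Either way, thread_locks[3] ends up pointing at
   that entry. */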
|  | 1801 |  | 
|  | 1802 |  | 
|  | 1803 | static void eraser_post_mutex_unlock(ThreadId tid, void* void_mutex) | 
|  | 1804 | { | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1805 | static const Bool debug = False; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1806 | Int i = 0; | 
| sewardj | 274c601 | 2002-10-22 04:54:55 +0000 | [diff] [blame] | 1807 | hg_mutex_t *mutex = get_mutex(void_mutex); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1808 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1809 | set_mutex_state(mutex, MxUnlocked, tid, VG_(get_ThreadState)(tid)); | 
|  | 1810 |  | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1811 | if (debug || DEBUG_LOCKS) | 
|  | 1812 | VG_(printf)("unlock(%u, %p%(y)\n", tid, mutex->mutexp, mutex->mutexp); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1813 |  | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1814 | if (debug || LOCKSET_SANITY > 1) | 
|  | 1815 | sanity_check_locksets("eraser_post_mutex_unlock-IN"); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1816 |  | 
|  | 1817 | // find the lockset that is the current one minus this mutex, and change | 
|  | 1818 | // the thread to use that index. | 
|  | 1819 |  | 
|  | 1820 | while (True) { | 
|  | 1821 |  | 
|  | 1822 | if (i == n_lockset_table) { | 
|  | 1823 | /* We can't find a suitable pre-made set, so we'll have to | 
|  | 1824 | make one. */ | 
|  | 1825 | i = remove ( thread_locks[tid], mutex ); | 
|  | 1826 | break; | 
|  | 1827 | } | 
|  | 1828 |  | 
|  | 1829 | /* Args are in opposite order to call above, for reverse effect */ | 
|  | 1830 | if (weird_LockSet_equals( lockset_table[i], | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1831 | lockset_table[thread_locks[tid]], | 
|  | 1832 | mutex) ) { | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1833 | /* found existing diminished set -- the best outcome. */ | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1834 | if (debug) | 
|  | 1835 | VG_(printf)("unlock: match found at %d\n", i); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1836 | break; | 
|  | 1837 | } | 
|  | 1838 |  | 
|  | 1839 | i++; | 
|  | 1840 | } | 
|  | 1841 |  | 
|  | 1842 | /* Update the thread's lock vector */ | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1843 | if (debug || DEBUG_LOCKS) | 
|  | 1844 | VG_(printf)("tid %u reverts from %d to lockset %d\n", | 
|  | 1845 | tid, thread_locks[tid], i); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1846 |  | 
|  | 1847 | thread_locks[tid] = i; | 
|  | 1848 |  | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1849 | if (debug || LOCKSET_SANITY > 1) | 
|  | 1850 | sanity_check_locksets("eraser_post_mutex_unlock-OUT"); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1851 | } | 
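
/* Worked example (illustration only): continuing the example above, when
   thread 3 unlocks mu2 the loop looks for an existing table entry equal to
   {mu1, mu2} with mu2 removed, i.e. {mu1}.  It finds the old index k and
   thread_locks[3] simply reverts to it; only if no such entry exists does
   remove() build one. */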
|  | 1852 |  | 
|  | 1853 |  | 
|  | 1854 | /* --------------------------------------------------------------------- | 
|  | 1855 | Checking memory reads and writes | 
|  | 1856 | ------------------------------------------------------------------ */ | 
|  | 1857 |  | 
|  | 1858 | /* Behaviour on reads and writes: | 
|  | 1859 | * | 
|  | 1860 | *                      VIR          EXCL        SHAR        SH_MOD | 
|  | 1861 | * ---------------------------------------------------------------- | 
|  | 1862 | * rd/wr, 1st thread |  -            EXCL        -           - | 
|  | 1863 | * rd, new thread    |  -            SHAR        -           - | 
|  | 1864 | * wr, new thread    |  -            SH_MOD      -           - | 
|  | 1865 | * rd                |  error!       -           SHAR        SH_MOD | 
|  | 1866 | * wr                |  EXCL         -           SH_MOD      SH_MOD | 
|  | 1867 | * ---------------------------------------------------------------- | 
|  | 1868 | */ | 
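
/* A condensed sketch of the table above as straight-line C (illustration
   only; the real transitions, including the TID_INDICATING_* special cases,
   lockset refinement and error reporting, live in eraser_mem_read() and
   eraser_mem_write() below). */
#if 0
static void example_transition ( shadow_word* sw, ThreadId tid, Bool is_write )
{
   switch (sw->state) {
      case Vge_Virgin:                    /* first access: becomes exclusive */
         sw->state = Vge_Excl;
         sw->other = tid;
         break;
      case Vge_Excl:
         if (tid != sw->other) {          /* a second thread has joined in */
            sw->state = is_write ? Vge_SharMod : Vge_Shar;
            sw->other = thread_locks[tid];   /* 'other' now holds a lockset */
         }
         break;
      case Vge_Shar:
         if (is_write) sw->state = Vge_SharMod;
         sw->other = intersect(sw->other, thread_locks[tid]);
         break;
      case Vge_SharMod:                   /* empty lockset here => report race */
         sw->other = intersect(sw->other, thread_locks[tid]);
         break;
   }
}
#endif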
|  | 1869 |  | 
|  | 1870 | #if 0 | 
|  | 1871 | static | 
|  | 1872 | void dump_around_a(Addr a) | 
|  | 1873 | { | 
|  | 1874 | UInt i; | 
|  | 1875 | shadow_word* sword; | 
|  | 1876 | VG_(printf)("NEARBY:\n"); | 
|  | 1877 | for (i = a - 12; i <= a + 12; i += 4) { | 
|  | 1878 | sword = get_sword_addr(i); | 
|  | 1879 | VG_(printf)("    %x -- tid: %u, state: %u\n", i, sword->other, sword->state); | 
|  | 1880 | } | 
|  | 1881 | } | 
|  | 1882 | #endif | 
|  | 1883 |  | 
|  | 1884 | /* Find which words the first and last bytes are in (by shifting out the | 
|  | 1885 |  * bottom 2 bits), then the count is the difference plus one. */ | 
|  | 1886 | static __inline__ | 
|  | 1887 | Int compute_num_words_accessed(Addr a, UInt size) | 
|  | 1888 | { | 
|  | 1889 | Int x, y, n_words; | 
|  | 1890 | x =  a             >> 2; | 
|  | 1891 | y = (a + size - 1) >> 2; | 
|  | 1892 | n_words = y - x + 1; | 
|  | 1893 | return n_words; | 
|  | 1894 | } | 
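
/* Worked example (a sketch): a 2-byte access at address 0x8003 touches bytes
   0x8003 and 0x8004, which lie in the aligned words at 0x8000 and 0x8004, so
   compute_num_words_accessed(0x8003, 2) == 2; a 4-byte access at 0x8000 stays
   within one word and gives 1. */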
|  | 1895 |  | 
|  | 1896 |  | 
|  | 1897 | #if DEBUG_ACCESSES | 
|  | 1898 | #define DEBUG_STATE(args...)   \ | 
|  | 1899 | VG_(printf)("(%u) ", size), \ | 
|  | 1900 | VG_(printf)(args) | 
|  | 1901 | #else | 
|  | 1902 | #define DEBUG_STATE(args...) | 
|  | 1903 | #endif | 
|  | 1904 |  | 
|  | 1905 |  | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1906 | static void eraser_mem_read(Addr a, UInt size, ThreadState *tst) | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1907 | { | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1908 | ThreadId tid; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1909 | shadow_word* sword; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1910 | Addr     end = a + 4*compute_num_words_accessed(a, size); | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1911 | shadow_word  prevstate; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1912 |  | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1913 | tid = (tst == NULL) ? VG_(get_current_tid)() : VG_(get_tid_from_ThreadState)(tst); | 
|  | 1914 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1915 | for ( ; a < end; a += 4) { | 
|  | 1916 |  | 
|  | 1917 | sword = get_sword_addr(a); | 
|  | 1918 | if (sword == SEC_MAP_ACCESS) { | 
|  | 1919 | VG_(printf)("read distinguished 2ndary map! 0x%x\n", a); | 
|  | 1920 | continue; | 
|  | 1921 | } | 
|  | 1922 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1923 | prevstate = *sword; | 
|  | 1924 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1925 | switch (sword->state) { | 
|  | 1926 |  | 
|  | 1927 | /* This looks like a read of uninitialised memory, which may be legit.  Eg. | 
|  | 1928 | * calloc() zeroes its values, so untouched memory may actually be | 
|  | 1929 | * initialised.  Leave that stuff to Valgrind.  */ | 
|  | 1930 | case Vge_Virgin: | 
|  | 1931 | if (TID_INDICATING_NONVIRGIN == sword->other) { | 
|  | 1932 | DEBUG_STATE("Read  VIRGIN --> EXCL:   %8x, %u\n", a, tid); | 
|  | 1933 | #           if DEBUG_VIRGIN_READS | 
|  | 1934 | dump_around_a(a); | 
|  | 1935 | #           endif | 
|  | 1936 | } else { | 
|  | 1937 | DEBUG_STATE("Read  SPECIAL --> EXCL:  %8x, %u\n", a, tid); | 
|  | 1938 | } | 
|  | 1939 | sword->state = Vge_Excl; | 
|  | 1940 | sword->other = tid;       /* remember exclusive owner */ | 
|  | 1941 | break; | 
|  | 1942 |  | 
|  | 1943 | case Vge_Excl: | 
|  | 1944 | if (tid == sword->other) { | 
|  | 1945 | DEBUG_STATE("Read  EXCL:              %8x, %u\n", a, tid); | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1946 | } else if (TID_INDICATING_ALL == sword->other) { | 
|  | 1947 | DEBUG_STATE("Read  EXCL/ERR:          %8x, %u\n", a, tid); | 
|  | 1948 | } else { | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1949 | DEBUG_STATE("Read  EXCL(%u) --> SHAR:  %8x, %u\n", sword->other, a, tid); | 
|  | 1950 | sword->state = Vge_Shar; | 
|  | 1951 | sword->other = thread_locks[tid]; | 
|  | 1952 | #           if DEBUG_MEM_LOCKSET_CHANGES | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 1953 | print_LockSet("excl read locks", lockset_table[sword->other]); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1954 | #           endif | 
|  | 1955 | } | 
|  | 1956 | break; | 
|  | 1957 |  | 
|  | 1958 | case Vge_Shar: | 
|  | 1959 | DEBUG_STATE("Read  SHAR:              %8x, %u\n", a, tid); | 
|  | 1960 | sword->other = intersect(sword->other, thread_locks[tid]); | 
|  | 1961 | break; | 
|  | 1962 |  | 
|  | 1963 | case Vge_SharMod: | 
|  | 1964 | DEBUG_STATE("Read  SHAR_MOD:          %8x, %u\n", a, tid); | 
|  | 1965 | sword->other = intersect(sword->other, thread_locks[tid]); | 
|  | 1966 |  | 
|  | 1967 | if (lockset_table[sword->other] == NULL) { | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1968 | record_eraser_error(tst, a, False /* !is_write */, prevstate); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1969 | n_eraser_warnings++; | 
|  | 1970 | } | 
|  | 1971 | break; | 
|  | 1972 |  | 
|  | 1973 | default: | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 1974 | VG_(skin_panic)("Unknown eraser state"); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1975 | } | 
|  | 1976 | } | 
|  | 1977 | } | 
|  | 1978 |  | 
|  | 1979 |  | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1980 | static void eraser_mem_write(Addr a, UInt size, ThreadState *tst) | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1981 | { | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1982 | ThreadId tid; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1983 | shadow_word* sword; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1984 | Addr     end = a + 4*compute_num_words_accessed(a, size); | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1985 | shadow_word  prevstate; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1986 |  | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 1987 | tid = (tst == NULL) ? VG_(get_current_tid)() : VG_(get_tid_from_ThreadState)(tst); | 
|  | 1988 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1989 | for ( ; a < end; a += 4) { | 
|  | 1990 |  | 
|  | 1991 | sword = get_sword_addr(a); | 
|  | 1992 | if (sword == SEC_MAP_ACCESS) { | 
|  | 1993 | VG_(printf)("read distinguished 2ndary map! 0x%x\n", a); | 
|  | 1994 | continue; | 
|  | 1995 | } | 
|  | 1996 |  | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 1997 | prevstate = *sword; | 
|  | 1998 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 1999 | switch (sword->state) { | 
|  | 2000 | case Vge_Virgin: | 
|  | 2001 | if (TID_INDICATING_NONVIRGIN == sword->other) | 
|  | 2002 | DEBUG_STATE("Write VIRGIN --> EXCL:   %8x, %u\n", a, tid); | 
|  | 2003 | else | 
|  | 2004 | DEBUG_STATE("Write SPECIAL --> EXCL:  %8x, %u\n", a, tid); | 
|  | 2005 | sword->state = Vge_Excl; | 
|  | 2006 | sword->other = tid;       /* remember exclusive owner */ | 
|  | 2007 | break; | 
|  | 2008 |  | 
|  | 2009 | case Vge_Excl: | 
|  | 2010 | if (tid == sword->other) { | 
|  | 2011 | DEBUG_STATE("Write EXCL:              %8x, %u\n", a, tid); | 
|  | 2012 | break; | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 2013 | } else if (TID_INDICATING_ALL == sword->other) { | 
|  | 2014 | DEBUG_STATE("Write EXCL/ERR:          %8x, %u\n", a, tid); | 
|  | 2015 | break; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2016 | } else { | 
|  | 2017 | DEBUG_STATE("Write EXCL(%u) --> SHAR_MOD: %8x, %u\n", sword->other, a, tid); | 
|  | 2018 | sword->state = Vge_SharMod; | 
|  | 2019 | sword->other = thread_locks[tid]; | 
|  | 2020 | #           if DEBUG_MEM_LOCKSET_CHANGES | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 2021 | print_LockSet("excl write locks", lockset_table[sword->other]); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2022 | #           endif | 
|  | 2023 | goto SHARED_MODIFIED; | 
|  | 2024 | } | 
|  | 2025 |  | 
|  | 2026 | case Vge_Shar: | 
|  | 2027 | DEBUG_STATE("Write SHAR --> SHAR_MOD: %8x, %u\n", a, tid); | 
|  | 2028 | sword->state = Vge_SharMod; | 
|  | 2029 | sword->other = intersect(sword->other, thread_locks[tid]); | 
|  | 2030 | goto SHARED_MODIFIED; | 
|  | 2031 |  | 
|  | 2032 | case Vge_SharMod: | 
|  | 2033 | DEBUG_STATE("Write SHAR_MOD:          %8x, %u\n", a, tid); | 
|  | 2034 | sword->other = intersect(sword->other, thread_locks[tid]); | 
|  | 2035 | SHARED_MODIFIED: | 
|  | 2036 | if (lockset_table[sword->other] == NULL) { | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 2037 | record_eraser_error(tst, a, True /* is_write */, prevstate); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2038 | n_eraser_warnings++; | 
|  | 2039 | } | 
|  | 2040 | break; | 
|  | 2041 |  | 
|  | 2042 | default: | 
| njn | e427a66 | 2002-10-02 11:08:25 +0000 | [diff] [blame] | 2043 | VG_(skin_panic)("Unknown eraser state"); | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2044 | } | 
|  | 2045 | } | 
|  | 2046 | } | 
|  | 2047 |  | 
|  | 2048 | #undef DEBUG_STATE | 
|  | 2049 |  | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 2050 | static void eraser_mem_help_read_1(Addr a) | 
| sewardj | 7ab2aca | 2002-10-20 19:40:32 +0000 | [diff] [blame] | 2051 | { | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 2052 | eraser_mem_read(a, 1, NULL); | 
| sewardj | 7ab2aca | 2002-10-20 19:40:32 +0000 | [diff] [blame] | 2053 | } | 
|  | 2054 |  | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 2055 | static void eraser_mem_help_read_2(Addr a) | 
|  | 2056 | { | 
|  | 2057 | eraser_mem_read(a, 2, NULL); | 
|  | 2058 | } | 
|  | 2059 |  | 
|  | 2060 | static void eraser_mem_help_read_4(Addr a) | 
|  | 2061 | { | 
|  | 2062 | eraser_mem_read(a, 4, NULL); | 
|  | 2063 | } | 
|  | 2064 |  | 
|  | 2065 | static void eraser_mem_help_read_N(Addr a, UInt size) | 
|  | 2066 | { | 
| sewardj | c26cc25 | 2002-10-23 21:58:55 +0000 | [diff] [blame] | 2067 | eraser_mem_read(a, size, NULL); | 
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 2068 | } | 
|  | 2069 |  | 
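/* Note that the write helpers below only fall through to eraser_mem_write()
   when the store would actually change the value in memory; value-preserving
   stores are ignored. */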
|  | 2070 | static void eraser_mem_help_write_1(Addr a, UInt val) | 
|  | 2071 | { | 
|  | 2072 | if (*(UChar *)a != val) | 
|  | 2073 | eraser_mem_write(a, 1, NULL); | 
|  | 2074 | } | 
|  | 2075 | static void eraser_mem_help_write_2(Addr a, UInt val) | 
|  | 2076 | { | 
|  | 2077 | if (*(UShort *)a != val) | 
|  | 2078 | eraser_mem_write(a, 2, NULL); | 
|  | 2079 | } | 
|  | 2080 | static void eraser_mem_help_write_4(Addr a, UInt val) | 
|  | 2081 | { | 
|  | 2082 | if (*(UInt *)a != val) | 
|  | 2083 | eraser_mem_write(a, 4, NULL); | 
|  | 2084 | } | 
|  | 2085 | static void eraser_mem_help_write_N(Addr a, UInt size) | 
| sewardj | 7ab2aca | 2002-10-20 19:40:32 +0000 | [diff] [blame] | 2086 | { | 
| sewardj | 0f81169 | 2002-10-22 04:59:26 +0000 | [diff] [blame] | 2087 | eraser_mem_write(a, size, NULL); | 
| sewardj | 7ab2aca | 2002-10-20 19:40:32 +0000 | [diff] [blame] | 2088 | } | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2089 |  | 
|  | 2090 | /*--------------------------------------------------------------------*/ | 
|  | 2091 | /*--- Setup                                                        ---*/ | 
|  | 2092 | /*--------------------------------------------------------------------*/ | 
|  | 2093 |  | 
| njn | d04b7c6 | 2002-10-03 14:05:52 +0000 | [diff] [blame] | 2094 | void SK_(pre_clo_init)(VgDetails* details, VgNeeds* needs, VgTrackEvents* track) | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2095 | { | 
|  | 2096 | Int i; | 
|  | 2097 |  | 
| sewardj | 4aa62ba | 2002-10-05 15:49:27 +0000 | [diff] [blame] | 2098 | details->name             = "Helgrind"; | 
| njn | d04b7c6 | 2002-10-03 14:05:52 +0000 | [diff] [blame] | 2099 | details->version          = NULL; | 
|  | 2100 | details->description      = "a data race detector"; | 
|  | 2101 | details->copyright_author = | 
|  | 2102 | "Copyright (C) 2002, and GNU GPL'd, by Nicholas Nethercote."; | 
|  | 2103 | details->bug_reports_to   = "njn25@cam.ac.uk"; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2104 |  | 
| sewardj | 5481f8f | 2002-10-20 19:43:47 +0000 | [diff] [blame] | 2105 | needs->core_errors           = True; | 
|  | 2106 | needs->skin_errors           = True; | 
|  | 2107 | needs->data_syms             = True; | 
| sewardj | 16748af | 2002-10-22 04:55:54 +0000 | [diff] [blame] | 2108 | needs->sizeof_shadow_block	= SHADOW_EXTRA; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2109 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2110 | track->new_mem_startup       = & eraser_new_mem_startup; | 
|  | 2111 | track->new_mem_heap          = & eraser_new_mem_heap; | 
|  | 2112 | track->new_mem_stack         = & make_writable; | 
|  | 2113 | track->new_mem_stack_aligned = & make_writable_aligned; | 
|  | 2114 | track->new_mem_stack_signal  = & make_writable; | 
|  | 2115 | track->new_mem_brk           = & make_writable; | 
| sewardj | 40f8ebe | 2002-10-23 21:46:13 +0000 | [diff] [blame] | 2116 | track->new_mem_mmap          = & eraser_new_mem_startup; | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2117 |  | 
|  | 2118 | track->copy_mem_heap         = & copy_address_range_state; | 
|  | 2119 | track->change_mem_mprotect   = & eraser_set_perms; | 
|  | 2120 |  | 
|  | 2121 | track->ban_mem_heap          = NULL; | 
|  | 2122 | track->ban_mem_stack         = NULL; | 
|  | 2123 |  | 
|  | 2124 | track->die_mem_heap          = NULL; | 
|  | 2125 | track->die_mem_stack         = NULL; | 
|  | 2126 | track->die_mem_stack_aligned = NULL; | 
|  | 2127 | track->die_mem_stack_signal  = NULL; | 
|  | 2128 | track->die_mem_brk           = NULL; | 
|  | 2129 | track->die_mem_munmap        = NULL; | 
|  | 2130 |  | 
|  | 2131 | track->pre_mem_read          = & eraser_pre_mem_read; | 
|  | 2132 | track->pre_mem_read_asciiz   = & eraser_pre_mem_read_asciiz; | 
|  | 2133 | track->pre_mem_write         = & eraser_pre_mem_write; | 
|  | 2134 | track->post_mem_write        = NULL; | 
|  | 2135 |  | 
|  | 2136 | track->post_mutex_lock       = & eraser_post_mutex_lock; | 
|  | 2137 | track->post_mutex_unlock     = & eraser_post_mutex_unlock; | 
|  | 2138 |  | 
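/* The 1/2/4-byte helpers run on every load and store, so they are registered
   as compact helpers; the rarer N-byte (FPU-sized) helpers are registered as
   non-compact.  (Presumably the compact form gets the cheaper call sequence
   and only a limited number of compact slots is available.) */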
| sewardj | a5b3aec | 2002-10-22 05:09:36 +0000 | [diff] [blame] | 2139 | VG_(register_compact_helper)((Addr) & eraser_mem_help_read_1); | 
|  | 2140 | VG_(register_compact_helper)((Addr) & eraser_mem_help_read_2); | 
|  | 2141 | VG_(register_compact_helper)((Addr) & eraser_mem_help_read_4); | 
|  | 2142 | VG_(register_noncompact_helper)((Addr) & eraser_mem_help_read_N); | 
|  | 2143 |  | 
|  | 2144 | VG_(register_compact_helper)((Addr) & eraser_mem_help_write_1); | 
|  | 2145 | VG_(register_compact_helper)((Addr) & eraser_mem_help_write_2); | 
|  | 2146 | VG_(register_compact_helper)((Addr) & eraser_mem_help_write_4); | 
|  | 2147 | VG_(register_noncompact_helper)((Addr) & eraser_mem_help_write_N); | 
| njn | d04b7c6 | 2002-10-03 14:05:52 +0000 | [diff] [blame] | 2148 |  | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2149 | /* Init lock table */ | 
|  | 2150 | for (i = 0; i < VG_N_THREADS; i++) | 
|  | 2151 | thread_locks[i] = 0 /* the empty lock set */; | 
|  | 2152 |  | 
|  | 2153 | lockset_table[0] = NULL; | 
|  | 2154 | for (i = 1; i < M_LOCKSET_TABLE; i++) | 
|  | 2155 | lockset_table[i] = NULL; | 
|  | 2156 |  | 
|  | 2157 | init_shadow_memory(); | 
|  | 2158 | } | 
|  | 2159 |  | 
|  | 2160 |  | 
|  | 2161 | void SK_(post_clo_init)(void) | 
|  | 2162 | { | 
|  | 2163 | } | 
|  | 2164 |  | 
|  | 2165 |  | 
|  | 2166 | void SK_(fini)(void) | 
|  | 2167 | { | 
|  | 2168 | #  if DEBUG_LOCK_TABLE | 
|  | 2169 | pp_all_LockSets(); | 
|  | 2170 | #  endif | 
|  | 2171 | #  if LOCKSET_SANITY | 
|  | 2172 | sanity_check_locksets("SK_(fini)"); | 
|  | 2173 | #  endif | 
|  | 2174 | VG_(message)(Vg_UserMsg, "%u possible data races found", n_eraser_warnings); | 
|  | 2175 | } | 
|  | 2176 |  | 
|  | 2177 | /*--------------------------------------------------------------------*/ | 
| njn25 | cac76cb | 2002-09-23 11:21:57 +0000 | [diff] [blame] | 2178 | /*--- end                                                hg_main.c ---*/ | 
| njn25 | e49d8e7 | 2002-09-23 09:36:25 +0000 | [diff] [blame] | 2179 | /*--------------------------------------------------------------------*/ |