/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2008 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_segment.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_errormgr.h"    // VG_(unique_error)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strlen)()
#include "pub_tool_libcprint.h"   // VG_(printf)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()


// Defines.

#define DRD_N_THREADS VG_N_THREADS


// Type definitions.

typedef struct
{
   Segment*  first;
   Segment*  last;
   ThreadId  vg_threadid;
   PThreadId pt_threadid;
   Addr      stack_min_min;
   Addr      stack_min;
   Addr      stack_startup;
   Addr      stack_max;
   char      name[32];
   /// Indicates whether the Valgrind core knows about this thread.
   Bool      vg_thread_exists;
   /// Indicates whether there is an associated POSIX thread ID.
   Bool      posix_thread_exists;
   /// If true, indicates that there is a corresponding POSIX thread ID and
   /// a corresponding OS thread that is detached.
   Bool      detached_posix_thread;
} ThreadInfo;


// Local functions.

static void thread_append_segment(const DrdThreadId tid,
                                  Segment* const sg);
static void thread_update_danger_set(const DrdThreadId tid);


// Local variables.

static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_update_danger_set_count;
static ULong s_danger_set_bitmap_creation_count;
static ULong s_danger_set_bitmap2_creation_count;
static ThreadId    s_vg_running_tid  = VG_INVALID_THREADID;
static DrdThreadId s_drd_running_tid = DRD_INVALID_THREADID;
static ThreadInfo s_threadinfo[DRD_N_THREADS];
static struct bitmap* s_danger_set;
static Bool s_trace_context_switches = False;
static Bool s_trace_danger_set = False;


// Function definitions.

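/** Enable or disable tracing of context switches. */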
void thread_trace_context_switches(const Bool t)
{
   s_trace_context_switches = t;
}

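/** Enable or disable tracing of danger set updates. */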
void thread_trace_danger_set(const Bool t)
{
   s_trace_danger_set = t;
}

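/** Returns True if the DRD thread ID tid is valid and refers to a thread
 *  slot that is currently in use.
 */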
__inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
{
   return (0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
           && ! (s_threadinfo[tid].vg_thread_exists == False
                 && s_threadinfo[tid].posix_thread_exists == False
                 && s_threadinfo[tid].detached_posix_thread == False));
}

/**
 * Convert a Valgrind ThreadId into a DrdThreadId. Returns
 * DRD_INVALID_THREADID if the Valgrind ThreadId is not yet known to drd.
 */
DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
{
   int i;

   if (tid == VG_INVALID_THREADID)
      return DRD_INVALID_THREADID;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (s_threadinfo[i].vg_thread_exists == True
          && s_threadinfo[i].vg_threadid == tid)
      {
         return i;
      }
   }

   return DRD_INVALID_THREADID;
}

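/** Allocate a new DRD thread ID for the Valgrind thread ID tid and
 *  initialize the corresponding s_threadinfo[] entry. Asserts if tid has
 *  already been assigned a DRD thread ID or if no free slot is available.
 */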
static
DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
{
   int i;

   tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (s_threadinfo[i].vg_thread_exists == False
          && s_threadinfo[i].posix_thread_exists == False
          && s_threadinfo[i].detached_posix_thread == False)
      {
         s_threadinfo[i].vg_thread_exists = True;
         s_threadinfo[i].vg_threadid   = tid;
         s_threadinfo[i].pt_threadid   = INVALID_POSIX_THREADID;
         s_threadinfo[i].stack_min_min = 0;
         s_threadinfo[i].stack_min     = 0;
         s_threadinfo[i].stack_startup = 0;
         s_threadinfo[i].stack_max     = 0;
         VG_(snprintf)(s_threadinfo[i].name, sizeof(s_threadinfo[i].name),
                       "thread %d", tid);
         s_threadinfo[i].name[sizeof(s_threadinfo[i].name) - 1] = 0;
         if (s_threadinfo[i].first != 0)
            VG_(printf)("drd thread id = %d\n", i);
         tl_assert(s_threadinfo[i].first == 0);
         tl_assert(s_threadinfo[i].last == 0);
         return i;
      }
   }

   tl_assert(False);

   return DRD_INVALID_THREADID;
}

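/** Convert a POSIX thread ID into a DRD thread ID. Returns
 *  DRD_INVALID_THREADID if the POSIX thread ID is not known to drd.
 */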
DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
{
   int i;

   tl_assert(tid != INVALID_POSIX_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (s_threadinfo[i].posix_thread_exists
          && s_threadinfo[i].pt_threadid == tid)
      {
         return i;
      }
   }
   return DRD_INVALID_THREADID;
}

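/** Convert a DRD thread ID into a Valgrind thread ID. Returns
 *  VG_INVALID_THREADID if the Valgrind core no longer knows about the
 *  thread.
 */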
ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   return (s_threadinfo[tid].vg_thread_exists
           ? s_threadinfo[tid].vg_threadid
           : VG_INVALID_THREADID);
}

/** Sanity check of the doubly linked list of segments referenced by a
 *  ThreadInfo struct.
 *  @return True if sane, False if not.
 */
static Bool sane_ThreadInfo(const ThreadInfo* const ti)
{
   Segment* p;
   for (p = ti->first; p; p = p->next) {
      if (p->next && p->next->prev != p)
         return False;
      if (p->next == 0 && p != ti->last)
         return False;
   }
   for (p = ti->last; p; p = p->prev) {
      if (p->prev && p->prev->next != p)
         return False;
      if (p->prev == 0 && p != ti->first)
         return False;
   }
   return True;
}

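/** Allocate a DRD thread ID for the thread created by thread 'creator', and
 *  append a first segment to the segment list of the created thread.
 */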
DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
   created = VgThreadIdToNewDrdThreadId(vg_created);
   tl_assert(0 <= created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   tl_assert(s_threadinfo[created].first == 0);
   tl_assert(s_threadinfo[created].last == 0);
   thread_append_segment(created, sg_new(creator, created));

   return created;
}

/** Initialize the stack bounds of a newly created thread. Call this just
 *  after pthread_create().
 */
DrdThreadId thread_post_create(const ThreadId vg_created)
{
   const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);

   tl_assert(0 <= created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   s_threadinfo[created].stack_max     = VG_(thread_get_stack_max)(vg_created);
   s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
   s_threadinfo[created].stack_min     = s_threadinfo[created].stack_max;
   s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
   tl_assert(s_threadinfo[created].stack_max != 0);

   return created;
}

/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,     */
/* and accesses this data structure from multiple threads without locking. */
/* Any conflicting accesses in the range stack_startup..stack_max will be  */
/* ignored.                                                                */
void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
{
#if 0
   VG_(message)(Vg_DebugMsg, "thread_set_stack_startup: thread %d (%d)"
                " stack 0x%x .. 0x%lx (size %d)",
                s_threadinfo[tid].vg_threadid, tid,
                stack_startup,
                s_threadinfo[tid].stack_max,
                s_threadinfo[tid].stack_max - stack_startup);
#endif
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
   tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
   s_threadinfo[tid].stack_startup = stack_startup;
}

Addr thread_get_stack_min(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return s_threadinfo[tid].stack_min;
}

void thread_set_stack_min(const DrdThreadId tid, const Addr stack_min)
{
#if 0
   VG_(message)(Vg_DebugMsg, "thread %d (%d) stack_min = 0x%x"
                " (size %d, max %d, delta %d)",
                s_threadinfo[tid].vg_threadid, tid,
                stack_min,
                s_threadinfo[tid].stack_max - stack_min,
                s_threadinfo[tid].stack_max - s_threadinfo[tid].stack_min_min,
                s_threadinfo[tid].stack_min - stack_min);
#endif
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   if (s_threadinfo[tid].stack_max)
   {
      s_threadinfo[tid].stack_min = stack_min;
      if (stack_min < s_threadinfo[tid].stack_min_min)
      {
         s_threadinfo[tid].stack_min_min = stack_min;
      }
      tl_assert(s_threadinfo[tid].stack_min_min
                <= s_threadinfo[tid].stack_min);
      tl_assert(s_threadinfo[tid].stack_min < s_threadinfo[tid].stack_max);
   }
}

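/** Look up the thread whose stack contains the address a. Upon success the
 *  stack boundaries are returned through stack_min and stack_max and the
 *  DRD thread ID is returned; DRD_INVALID_THREADID is returned otherwise.
 */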
DrdThreadId thread_lookup_stackaddr(const Addr a,
                                    Addr* const stack_min,
                                    Addr* const stack_max)
{
   unsigned i;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (s_threadinfo[i].stack_min <= a && a <= s_threadinfo[i].stack_max)
      {
         *stack_min = s_threadinfo[i].stack_min;
         *stack_max = s_threadinfo[i].stack_max;
         return i;
      }
   }
   return DRD_INVALID_THREADID;
}

/**
 * Clean up thread-specific data structures. Call this just after
 * pthread_join().
 */
void thread_delete(const DrdThreadId tid)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
   {
      sg_prev = sg->prev;
      sg_delete(sg);
   }
   s_threadinfo[tid].vg_thread_exists = False;
   s_threadinfo[tid].posix_thread_exists = False;
   tl_assert(s_threadinfo[tid].detached_posix_thread == False);
   s_threadinfo[tid].first = 0;
   s_threadinfo[tid].last = 0;
}

/* Called after a thread performed its last memory access and before   */
/* thread_delete() is called. Note: thread_delete() is only called for */
/* joinable threads, not for detached threads.                         */
void thread_finished(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   thread_stop_using_mem(s_threadinfo[tid].stack_min,
                         s_threadinfo[tid].stack_max);

   s_threadinfo[tid].vg_thread_exists = False;

   if (s_threadinfo[tid].detached_posix_thread)
   {
      /* Once a detached thread has finished, its stack is deallocated and */
      /* should no longer be taken into account when computing the danger  */
      /* set.                                                              */
      s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;

      /* For a detached thread, calling pthread_exit() invalidates the     */
      /* POSIX thread ID associated with the detached thread. For joinable */
      /* POSIX threads however, the POSIX thread ID remains live after the */
      /* pthread_exit() call until pthread_join() is called.               */
      s_threadinfo[tid].posix_thread_exists = False;
   }
}

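/** Store the POSIX thread ID for the specified thread. May be called only
 *  once per thread.
 */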
void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
   tl_assert(ptid != INVALID_POSIX_THREADID);
   s_threadinfo[tid].posix_thread_exists = True;
   s_threadinfo[tid].pt_threadid         = ptid;
}

Bool thread_get_joinable(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return ! s_threadinfo[tid].detached_posix_thread;
}

void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(!! joinable == joinable);
   tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
#if 0
   VG_(message)(Vg_DebugMsg,
                "thread_set_joinable(%d/%d, %s)",
                tid,
                s_threadinfo[tid].vg_threadid,
                joinable ? "joinable" : "detached");
#endif
   s_threadinfo[tid].detached_posix_thread = ! joinable;
}

const char* thread_get_name(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return s_threadinfo[tid].name;
}

void thread_set_name(const DrdThreadId tid, const char* const name)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   VG_(strncpy)(s_threadinfo[tid].name, name,
                sizeof(s_threadinfo[tid].name));
   s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
}

void thread_set_name_fmt(const DrdThreadId tid, const char* const fmt,
                         const UWord arg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   VG_(snprintf)(s_threadinfo[tid].name, sizeof(s_threadinfo[tid].name),
                 fmt, arg);
   s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
}

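/** Return the DRD thread ID of the currently running thread. */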
DrdThreadId thread_get_running_tid(void)
{
   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
   return s_drd_running_tid;
}

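/** Tell drd that the Valgrind thread vg_tid is now running, and perform a
 *  context switch if it differs from the previously running thread.
 */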
void thread_set_vg_running_tid(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

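/** Update the administration of the running thread after a context switch
 *  and recompute the danger set for the thread that is now running.
 */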
void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      if (s_trace_context_switches
          && s_drd_running_tid != DRD_INVALID_THREADID)
      {
         VG_(message)(Vg_DebugMsg,
                      "Context switch from thread %d to thread %d",
                      s_drd_running_tid, drd_tid);
      }
      s_vg_running_tid = vg_tid;
      s_drd_running_tid = drd_tid;
      thread_update_danger_set(drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

/**
 * Return a pointer to the latest segment for the specified thread.
 */
Segment* thread_get_segment(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   if (s_threadinfo[tid].last == 0)
   {
      VG_(message)(Vg_DebugMsg, "threadid = %d", tid);
      thread_print_all();
   }
   tl_assert(s_threadinfo[tid].last);
   return s_threadinfo[tid].last;
}

/** Append a new segment at the end of the segment list.
 */
static void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
   sg->prev = s_threadinfo[tid].last;
   sg->next = 0;
   if (s_threadinfo[tid].last)
      s_threadinfo[tid].last->next = sg;
   s_threadinfo[tid].last = sg;
   if (s_threadinfo[tid].first == 0)
      s_threadinfo[tid].first = sg;
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

/** Remove a segment from the segment list of thread threadid, and free the
 *  associated memory.
 */
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));

   if (sg->prev)
      sg->prev->next = sg->next;
   if (sg->next)
      sg->next->prev = sg->prev;
   if (sg == s_threadinfo[tid].first)
      s_threadinfo[tid].first = sg->next;
   if (sg == s_threadinfo[tid].last)
      s_threadinfo[tid].last = sg->prev;
   sg_delete(sg);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

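/** Return a pointer to the vector clock of the most recent segment of
 *  thread tid.
 */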
VectorClock* thread_get_vc(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].last);
   return &s_threadinfo[tid].last->vc;
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void thread_compute_minimum_vc(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      latest_sg = s_threadinfo[i].last;
      if (latest_sg)
      {
         if (first)
            vc_assign(vc, &latest_sg->vc);
         else
            vc_min(vc, &latest_sg->vc);
         first = False;
      }
   }
}

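/**
 * Compute the maximum of all latest vector clocks of all threads.
 * @param vc pointer to a vectorclock, holds result upon return.
 */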
static void thread_compute_maximum_vc(VectorClock* vc)
{
   unsigned i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      latest_sg = s_threadinfo[i].last;
      if (latest_sg)
      {
         if (first)
            vc_assign(vc, &latest_sg->vc);
         else
            vc_combine(vc, &latest_sg->vc);
         first = False;
      }
   }
}

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of every thread -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
   unsigned i;
   VectorClock thread_vc_min;

   s_discard_ordered_segments_count++;

   vc_init(&thread_vc_min, 0, 0);
   thread_compute_minimum_vc(&thread_vc_min);
   if (sg_get_trace())
   {
      char msg[256];
      VectorClock thread_vc_max;

      vc_init(&thread_vc_max, 0, 0);
      thread_compute_maximum_vc(&thread_vc_max);
      VG_(snprintf)(msg, sizeof(msg),
                    "Discarding ordered segments -- min vc is ");
      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                 &thread_vc_min);
      VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                    ", max vc is ");
      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                 &thread_vc_max);
      VG_(message)(Vg_DebugMsg, "%s", msg);
      vc_cleanup(&thread_vc_max);
   }

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      Segment* sg;
      Segment* sg_next;
      for (sg = s_threadinfo[i].first;
           sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
         thread_discard_segment(i, sg);
      }
   }
   vc_cleanup(&thread_vc_min);
}

/**
 * Create a new segment for the specified thread, and report all data races
 * of the most recent thread segment with other threads.
 */
void thread_new_segment(const DrdThreadId tid)
{
   Segment* sg;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   sg = sg_new(tid, tid);
   thread_append_segment(tid, sg);

   thread_discard_ordered_segments();

   if (tid == s_drd_running_tid)
   {
      /* Every change in the vector clock of the current thread may cause */
      /* segments that were previously ordered to this thread to become   */
      /* unordered. Hence, recalculate the danger set if the vector clock */
      /* of the current thread is updated.                                */
      thread_update_danger_set(tid);
   }
}

/** Call this function after thread 'joiner' joined thread 'joinee'. */
void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[joiner].last);
   tl_assert(s_threadinfo[joinee].last);
   vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
   thread_discard_ordered_segments();

   if (joiner == s_drd_running_tid)
   {
      thread_update_danger_set(joiner);
   }
}

/** Call this function after thread 'tid' had to wait because of thread
 *  synchronization until the memory accesses in the segment with vector
 *  clock 'vc' finished.
 */
void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].last);
   tl_assert(vc);
   vc_combine(&s_threadinfo[tid].last->vc, vc);
   thread_discard_ordered_segments();
}

/** Call this function whenever a thread is no longer using the memory
 *  [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 *  increase.
 */
void thread_stop_using_mem(const Addr a1, const Addr a2)
{
   DrdThreadId other_user = DRD_INVALID_THREADID;

   /* For all threads, mark the range [ a1, a2 [ as no longer in use. */

   unsigned i;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      Segment* p;
      for (p = s_threadinfo[i].first; p; p = p->next)
      {
         if (other_user == DRD_INVALID_THREADID
             && i != s_drd_running_tid
             && bm_has_any_access(p->bm, a1, a2))
         {
            other_user = i;
         }
         bm_clear(p->bm, a1, a2);
      }
   }

   /* If any other thread had accessed memory in [ a1, a2 [, update the */
   /* danger set.                                                       */
   if (other_user != DRD_INVALID_THREADID
       && bm_has_any_access(s_danger_set, a1, a2))
   {
      thread_update_danger_set(thread_get_running_tid());
   }
}

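/** Print per-thread information and the segment list of every thread
 *  (debugging aid).
 */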
void thread_print_all(void)
{
   unsigned i;
   Segment* p;

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (s_threadinfo[i].first)
      {
         VG_(printf)("**************\n"
                     "* thread %3d (%d/%d/%d/0x%x/%d/%s) *\n"
                     "**************\n",
                     i,
                     s_threadinfo[i].vg_thread_exists,
                     s_threadinfo[i].vg_threadid,
                     s_threadinfo[i].posix_thread_exists,
                     s_threadinfo[i].pt_threadid,
                     s_threadinfo[i].detached_posix_thread,
                     s_threadinfo[i].name);
         for (p = s_threadinfo[i].first; p; p = p->next)
         {
            sg_print(p);
         }
      }
   }
}

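/** Print the message msg followed by the call stack callstack of thread
 *  tid, or by the current call stack of that thread if callstack is null.
 */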
static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
   const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);

   VG_(message)(Vg_UserMsg,
                "%s (%s)",
                msg,
                thread_get_name(tid));

   if (vg_tid != VG_INVALID_THREADID)
   {
      if (callstack)
      {
         VG_(pp_ExeContext)(callstack);
      }
      else
      {
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
      }
   }
   else
   {
      VG_(message)(Vg_UserMsg,
                   "   (thread finished, call stack no longer available)");
   }
}

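/** Print the call stacks of all segments of threads other than tid that are
 *  unordered to segment p and that contain an access conflicting with the
 *  access to the address range [ addr, addr + size [.
 */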
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (i != tid)
      {
         Segment* q;
         for (q = s_threadinfo[i].last; q; q = q->prev)
         {
            // Since q iterates over the segments of thread i in order of
            // decreasing vector clocks, if q->vc <= p->vc, then
            // q->next->vc <= p->vc will also hold. Hence, break out of the
            // loop once this condition is met.
            if (vc_lte(&q->vc, &p->vc))
               break;
            if (! vc_lte(&p->vc, &q->vc))
            {
               if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
               {
                  tl_assert(q->stacktrace);
                  show_call_stack(i, "Other segment start",
                                  q->stacktrace);
                  show_call_stack(i, "Other segment end",
                                  q->next ? q->next->stacktrace : 0);
               }
            }
         }
      }
   }
}

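/** Report all segments of other threads that conflict with the access of
 *  thread tid to the address range [ addr, addr + size [.
 */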
void thread_report_conflicting_segments(const DrdThreadId tid,
                                        const Addr addr,
                                        const SizeT size,
                                        const BmAccessTypeT access_type)
{
   Segment* p;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   for (p = s_threadinfo[tid].first; p; p = p->next)
   {
      if (bm_has(p->bm, addr, addr + size, access_type))
      {
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
      }
   }
}

/** Compute a bitmap that represents the union of all memory accesses of all
 *  segments that are unordered to the current segment of the thread tid.
 */
static void thread_update_danger_set(const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   tl_assert(tid == s_drd_running_tid);

   s_update_danger_set_count++;
   s_danger_set_bitmap_creation_count  -= bm_get_bitmap_creation_count();
   s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();

   if (s_danger_set)
   {
      bm_clear_all(s_danger_set);
   }
   else
   {
      s_danger_set = bm_new();
   }

   if (s_trace_danger_set)
   {
      char msg[256];

      VG_(snprintf)(msg, sizeof(msg),
                    "computing danger set for thread %d with vc ",
                    tid);
      vc_snprint(msg + VG_(strlen)(msg),
                 sizeof(msg) - VG_(strlen)(msg),
                 &s_threadinfo[tid].last->vc);
      VG_(message)(Vg_DebugMsg, "%s", msg);
   }

   p = s_threadinfo[tid].last;
   {
      unsigned j;

      if (s_trace_danger_set)
      {
         char msg[256];

         VG_(snprintf)(msg, sizeof(msg),
                       "danger set: thread [%d] at vc ",
                       tid);
         vc_snprint(msg + VG_(strlen)(msg),
                    sizeof(msg) - VG_(strlen)(msg),
                    &p->vc);
         VG_(message)(Vg_DebugMsg, "%s", msg);
      }

      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
      {
         if (IsValidDrdThreadId(j))
         {
            const Segment* q;
            for (q = s_threadinfo[j].last; q; q = q->prev)
               if (j != tid && q != 0
                   && ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
               {
                  if (s_trace_danger_set)
                  {
                     char msg[256];
                     VG_(snprintf)(msg, sizeof(msg),
                                   "danger set: [%d] merging segment ", j);
                     vc_snprint(msg + VG_(strlen)(msg),
                                sizeof(msg) - VG_(strlen)(msg),
                                &q->vc);
                     VG_(message)(Vg_DebugMsg, "%s", msg);
                  }
                  bm_merge2(s_danger_set, q->bm);
               }
               else
               {
                  if (s_trace_danger_set)
                  {
                     char msg[256];
                     VG_(snprintf)(msg, sizeof(msg),
                                   "danger set: [%d] ignoring segment ", j);
                     vc_snprint(msg + VG_(strlen)(msg),
                                sizeof(msg) - VG_(strlen)(msg),
                                &q->vc);
                     VG_(message)(Vg_DebugMsg, "%s", msg);
                  }
               }
         }
      }

      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
      {
         if (IsValidDrdThreadId(j))
         {
            // NPTL hack: don't report data races on sizeof(struct pthread)
            // bytes at the top of the stack, since the NPTL functions access
            // this data without locking.
            if (s_threadinfo[j].stack_min != 0)
            {
               tl_assert(s_threadinfo[j].stack_startup != 0);
               if (s_threadinfo[j].stack_min < s_threadinfo[j].stack_startup)
               {
                  bm_clear(s_danger_set,
                           s_threadinfo[j].stack_min,
                           s_threadinfo[j].stack_startup);
               }
            }
         }
      }
   }

   s_danger_set_bitmap_creation_count  += bm_get_bitmap_creation_count();
   s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();

   if (0 && s_trace_danger_set)
   {
      VG_(message)(Vg_DebugMsg, "[%d] new danger set:", tid);
      bm_print(s_danger_set);
      VG_(message)(Vg_DebugMsg, "[%d] end of new danger set.", tid);
   }
}

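/** Returns True if the access to the range [ a, a + size [ conflicts with
 *  the danger set and has not been suppressed.
 */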
Bool thread_conflicting_access(const Addr a,
                               const SizeT size,
                               const BmAccessTypeT access_type)
{
   tl_assert(s_danger_set);
   return (bm_has_conflict_with(s_danger_set, a, a + size, access_type)
           && ! drd_is_suppressed(a, a + size));
}

ULong thread_get_context_switch_count(void)
{
   return s_context_switch_count;
}

ULong thread_get_discard_ordered_segments_count(void)
{
   return s_discard_ordered_segments_count;
}

ULong thread_get_update_danger_set_count(void)
{
   return s_update_danger_set_count;
}

ULong thread_get_danger_set_bitmap_creation_count(void)
{
   return s_danger_set_bitmap_creation_count;
}

ULong thread_get_danger_set_bitmap2_creation_count(void)
{
   return s_danger_set_bitmap2_creation_count;
}

/*
 * Local variables:
 * c-basic-offset: 3
 * End:
 */