/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2007 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_segment.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_core_options.h"      // VG_(clo_backtrace_size)
#include "pub_tool_basics.h"       // Addr, SizeT
#include "pub_tool_errormgr.h"     // VG_(unique_error)()
#include "pub_tool_libcassert.h"   // tl_assert()
#include "pub_tool_libcbase.h"     // VG_(strlen)()
#include "pub_tool_libcprint.h"    // VG_(printf)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"   // VG_(malloc)(), VG_(free)()
#include "pub_tool_threadstate.h"  // VG_(get_pthread_id)()


// Defines.

#define DRD_N_THREADS VG_N_THREADS


// Type definitions.

typedef struct
{
   Segment*  first;
   Segment*  last;
   ThreadId  vg_threadid;
   PThreadId pt_threadid;
   Addr      stack_min_min;
   Addr      stack_min;
   Addr      stack_startup;
   Addr      stack_max;
   char      name[32];
   /// Indicates whether the Valgrind core knows about this thread.
   Bool      vg_thread_exists;
   /// Indicates whether there is an associated POSIX thread ID.
   Bool      posix_thread_exists;
   /// If true, indicates that there is a corresponding POSIX thread ID and
   /// a corresponding OS thread that is detached.
   Bool      detached_posix_thread;
   Bool      is_recording;
} ThreadInfo;
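
// The Segment list hanging off 'first'/'last' is doubly linked and kept in
// order of increasing vector clocks: thread_append_segment() adds at the
// tail and thread_discard_ordered_segments() trims from the head. The stack
// fields describe a stack that grows towards lower addresses; the assertions
// in thread_set_stack_startup() and thread_set_stack_min() maintain
//
//    stack_min_min <= stack_min <= stack_startup <= stack_max
//
// where stack_min is the current top of the stack and stack_min_min the
// lowest value stack_min has ever reached.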


// Local functions.

static void thread_append_segment(const DrdThreadId tid,
                                  Segment* const sg);
static void thread_update_danger_set(const DrdThreadId tid);


// Local variables.

static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
#ifdef OLD_RACE_DETECTION_ALGORITHM
static ULong s_report_races_count;
#endif
static ULong s_update_danger_set_count;
static ULong s_danger_set_bitmap_creation_count;
static ULong s_danger_set_bitmap2_creation_count;
static DrdThreadId s_running_tid = DRD_INVALID_THREADID;
static ThreadInfo s_threadinfo[DRD_N_THREADS];
static struct bitmap* s_danger_set;


// Function definitions.

__inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
{
   return (0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
           && ! (s_threadinfo[tid].vg_thread_exists == False
                 && s_threadinfo[tid].posix_thread_exists == False
                 && s_threadinfo[tid].detached_posix_thread == False));
}

/**
 * Convert Valgrind's ThreadId into a DrdThreadId. Returns
 * DRD_INVALID_THREADID if Valgrind's ThreadId does not yet exist.
 */
DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
{
   int i;

   if (tid == VG_INVALID_THREADID)
      return DRD_INVALID_THREADID;

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (s_threadinfo[i].vg_thread_exists == True
          && s_threadinfo[i].vg_threadid == tid)
      {
         return i;
      }
   }

   return DRD_INVALID_THREADID;
}

static
DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
{
   int i;

   tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (s_threadinfo[i].vg_thread_exists == False
          && s_threadinfo[i].posix_thread_exists == False
          && s_threadinfo[i].detached_posix_thread == False)
      {
         s_threadinfo[i].vg_thread_exists = True;
         s_threadinfo[i].vg_threadid = tid;
         s_threadinfo[i].pt_threadid = INVALID_POSIX_THREADID;
         s_threadinfo[i].stack_min_min = 0;
         s_threadinfo[i].stack_min = 0;
         s_threadinfo[i].stack_startup = 0;
         s_threadinfo[i].stack_max = 0;
         VG_(snprintf)(s_threadinfo[i].name, sizeof(s_threadinfo[i].name),
                       "thread %d", tid);
         s_threadinfo[i].name[sizeof(s_threadinfo[i].name) - 1] = 0;
         s_threadinfo[i].is_recording = True;
         if (s_threadinfo[i].first != 0)
            VG_(printf)("drd thread id = %d\n", i);
         tl_assert(s_threadinfo[i].first == 0);
         tl_assert(s_threadinfo[i].last == 0);
         return i;
      }
   }

   tl_assert(False);

   return DRD_INVALID_THREADID;
}

DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
{
   int i;

   tl_assert(tid != INVALID_POSIX_THREADID);

   for (i = 1; i < DRD_N_THREADS; i++)
   {
      if (s_threadinfo[i].posix_thread_exists
          && s_threadinfo[i].pt_threadid == tid)
      {
         return i;
      }
   }
   return DRD_INVALID_THREADID;
}

ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   return (s_threadinfo[tid].vg_thread_exists
           ? s_threadinfo[tid].vg_threadid
           : VG_INVALID_THREADID);
}

/**
 * Sanity check of the doubly linked list of segments referenced by a
 * ThreadInfo struct.
 * @return True if sane, False if not.
 */
static Bool sane_ThreadInfo(const ThreadInfo* const ti)
{
   Segment* p;
   for (p = ti->first; p; p = p->next) {
      if (p->next && p->next->prev != p)
         return False;
      if (p->next == 0 && p != ti->last)
         return False;
   }
   for (p = ti->last; p; p = p->prev) {
      if (p->prev && p->prev->next != p)
         return False;
      if (p->prev == 0 && p != ti->first)
         return False;
   }
   return True;
}

DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
   created = VgThreadIdToNewDrdThreadId(vg_created);
   tl_assert(0 <= created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   tl_assert(s_threadinfo[created].first == 0);
   tl_assert(s_threadinfo[created].last == 0);
   thread_append_segment(created, sg_new(creator, created));

   return created;
}

/**
 * Initialize the stack bookkeeping for a newly created thread. Call this
 * just after pthread_create(). Note: the first segment was already
 * allocated by thread_pre_create().
 */
DrdThreadId thread_post_create(const ThreadId vg_created)
{
   const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);

   tl_assert(0 <= created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   s_threadinfo[created].stack_max = VG_(thread_get_stack_max)(vg_created);
   s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
   s_threadinfo[created].stack_min = s_threadinfo[created].stack_max;
   s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
   tl_assert(s_threadinfo[created].stack_max != 0);

   return created;
}
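
// Typical call sequence for a newly created thread (a sketch -- the actual
// call sites are drd's thread event handlers, not this file):
//
//    created = thread_pre_create(creator, vg_created); // first segment
//    thread_post_create(vg_created);                   // stack bounds
//    thread_set_stack_startup(created, stack_ptr);     // see NPTL hack below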

/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack,     */
/* and accesses this data structure from multiple threads without locking. */
/* Any conflicting accesses in the range stack_startup..stack_max will be  */
/* ignored.                                                                */
void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
{
#if 0
   VG_(message)(Vg_DebugMsg, "thread_set_stack_startup: thread %d (%d)"
                " stack 0x%x .. 0x%lx (size %d)",
                s_threadinfo[tid].vg_threadid, tid,
                stack_startup,
                s_threadinfo[tid].stack_max,
                s_threadinfo[tid].stack_max - stack_startup);
#endif
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
   tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
   s_threadinfo[tid].stack_startup = stack_startup;
}

Addr thread_get_stack_min(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return s_threadinfo[tid].stack_min;
}

void thread_set_stack_min(const DrdThreadId tid, const Addr stack_min)
{
#if 0
   VG_(message)(Vg_DebugMsg, "thread %d (%d) stack_min = 0x%x"
                " (size %d, max %d, delta %d)",
                s_threadinfo[tid].vg_threadid, tid,
                stack_min,
                s_threadinfo[tid].stack_max - stack_min,
                s_threadinfo[tid].stack_max - s_threadinfo[tid].stack_min_min,
                s_threadinfo[tid].stack_min - stack_min);
#endif
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   if (s_threadinfo[tid].stack_max)
   {
      s_threadinfo[tid].stack_min = stack_min;
      if (stack_min < s_threadinfo[tid].stack_min_min)
      {
         s_threadinfo[tid].stack_min_min = stack_min;
      }
      tl_assert(s_threadinfo[tid].stack_min_min
                <= s_threadinfo[tid].stack_min);
      tl_assert(s_threadinfo[tid].stack_min < s_threadinfo[tid].stack_max);
   }
}

DrdThreadId thread_lookup_stackaddr(const Addr a,
                                    Addr* const stack_min,
                                    Addr* const stack_max)
{
   unsigned i;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (s_threadinfo[i].stack_min <= a && a <= s_threadinfo[i].stack_max)
      {
         *stack_min = s_threadinfo[i].stack_min;
         *stack_max = s_threadinfo[i].stack_max;
         return i;
      }
   }
   return DRD_INVALID_THREADID;
}

/**
 * Clean up thread-specific data structures. Call this just after
 * pthread_join().
 */
void thread_delete(const DrdThreadId tid)
{
   Segment* sg;
   Segment* sg_prev;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
   {
      sg_prev = sg->prev;
      sg_delete(sg);
   }
   s_threadinfo[tid].vg_thread_exists = False;
   s_threadinfo[tid].posix_thread_exists = False;
   tl_assert(s_threadinfo[tid].detached_posix_thread == False);
   s_threadinfo[tid].first = 0;
   s_threadinfo[tid].last = 0;
}

/* Called after a thread performed its last memory access and before   */
/* thread_delete() is called. Note: thread_delete() is only called for */
/* joinable threads, not for detached threads.                         */
void thread_finished(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   thread_stop_using_mem(s_threadinfo[tid].stack_min,
                         s_threadinfo[tid].stack_max);

   s_threadinfo[tid].vg_thread_exists = False;

   if (s_threadinfo[tid].detached_posix_thread)
   {
      /* Once a detached thread has finished, its stack is deallocated and */
      /* should no longer be taken into account when computing the danger  */
      /* set.                                                              */
      s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;

      /* For a detached thread, calling pthread_exit() invalidates the     */
      /* POSIX thread ID associated with the detached thread. For joinable */
      /* POSIX threads however, the POSIX thread ID remains live after the */
      /* pthread_exit() call until pthread_join() is called.               */
      s_threadinfo[tid].posix_thread_exists = False;
   }
}

void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
   tl_assert(ptid != INVALID_POSIX_THREADID);
   s_threadinfo[tid].posix_thread_exists = True;
   s_threadinfo[tid].pt_threadid = ptid;
}

Bool thread_get_joinable(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return ! s_threadinfo[tid].detached_posix_thread;
}

void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(!! joinable == joinable);
   tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
#if 0
   VG_(message)(Vg_DebugMsg,
                "thread_set_joinable(%d/%d, %s)",
                tid,
                s_threadinfo[tid].vg_threadid,
                joinable ? "joinable" : "detached");
#endif
   s_threadinfo[tid].detached_posix_thread = ! joinable;
}

const char* thread_get_name(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   return s_threadinfo[tid].name;
}

void thread_set_name(const DrdThreadId tid, const char* const name)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   VG_(strncpy)(s_threadinfo[tid].name, name,
                sizeof(s_threadinfo[tid].name));
   s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
}

void thread_set_name_fmt(const DrdThreadId tid, const char* const fmt,
                         const UWord arg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   VG_(snprintf)(s_threadinfo[tid].name, sizeof(s_threadinfo[tid].name),
                 fmt, arg);
   s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
}

DrdThreadId thread_get_running_tid(void)
{
   tl_assert(s_running_tid != DRD_INVALID_THREADID);
   return s_running_tid;
}

void thread_set_running_tid(const DrdThreadId tid)
{
   s_running_tid = tid;
   thread_update_danger_set(tid);
   s_context_switch_count++;
}

/**
 * Return a pointer to the latest segment for the specified thread.
 */
Segment* thread_get_segment(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   if (s_threadinfo[tid].last == 0)
   {
      VG_(message)(Vg_DebugMsg, "threadid = %d", tid);
      thread_print_all();
   }
   tl_assert(s_threadinfo[tid].last);
   return s_threadinfo[tid].last;
}

/**
 * Insert a new segment at the end of the segment list.
 */
static void thread_append_segment(const DrdThreadId tid,
                                  Segment* const sg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
   sg->prev = s_threadinfo[tid].last;
   sg->next = 0;
   if (s_threadinfo[tid].last)
      s_threadinfo[tid].last->next = sg;
   s_threadinfo[tid].last = sg;
   if (s_threadinfo[tid].first == 0)
      s_threadinfo[tid].first = sg;
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

/**
 * Remove a segment from the segment list of thread tid, and free the
 * associated memory.
 */
static void thread_discard_segment(const DrdThreadId tid,
                                   Segment* const sg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
   if (sg->prev)
      sg->prev->next = sg->next;
   if (sg->next)
      sg->next->prev = sg->prev;
   if (sg == s_threadinfo[tid].first)
      s_threadinfo[tid].first = sg->next;
   if (sg == s_threadinfo[tid].last)
      s_threadinfo[tid].last = sg->prev;
   sg_delete(sg);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

VectorClock* thread_get_vc(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].last);
   return &s_threadinfo[tid].last->vc;
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void thread_compute_minimum_vc(VectorClock* vc)
{
   int i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      latest_sg = s_threadinfo[i].last;
      if (latest_sg)
      {
         if (first)
         {
            vc_cleanup(vc);
            vc_copy(vc, &latest_sg->vc);
         }
         else
            vc_min(vc, &latest_sg->vc);
         first = False;
      }
   }
}
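
// Example (clock values assumed for illustration): if the latest segments of
// two threads carry the vector clocks [ 1: 4, 2: 1 ] and [ 1: 2, 2: 3 ], the
// element-wise minimum computed above is [ 1: 2, 2: 1 ]. Every segment whose
// vector clock is ordered before this minimum is ordered before the current
// segment of every thread.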

static void thread_compute_maximum_vc(VectorClock* vc)
{
   int i;
   Bool first;
   Segment* latest_sg;

   first = True;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      latest_sg = s_threadinfo[i].last;
      if (latest_sg)
      {
         if (first)
         {
            vc_cleanup(vc);
            vc_copy(vc, &latest_sg->vc);
         }
         else
            vc_combine(vc, &latest_sg->vc);
         first = False;
      }
   }
}

/**
 * Discard all segments that have a defined ordering against the latest
 * vector clock of every thread -- these segments can no longer be involved
 * in a data race.
 */
static void thread_discard_ordered_segments(void)
{
   VectorClock thread_vc_min;
   int i;

   s_discard_ordered_segments_count++;

   vc_init(&thread_vc_min, 0, 0);
   thread_compute_minimum_vc(&thread_vc_min);
   if (sg_get_trace())
   {
      char msg[256];
      VectorClock thread_vc_max;

      vc_init(&thread_vc_max, 0, 0);
      thread_compute_maximum_vc(&thread_vc_max);
      VG_(snprintf)(msg, sizeof(msg),
                    "Discarding ordered segments -- min vc is ");
      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                 &thread_vc_min);
      VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                    ", max vc is ");
      vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                 &thread_vc_max);
      VG_(message)(Vg_DebugMsg, "%s", msg);
      vc_cleanup(&thread_vc_max);
   }

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      Segment* sg;
      Segment* sg_next;
      for (sg = s_threadinfo[i].first;
           sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
           sg = sg_next)
      {
#if 0
         VG_(printf)("Discarding a segment of thread %d: ", i);
         vc_print(&sg->vc);
         VG_(printf)("\n");
#endif
         thread_discard_segment(i, sg);
      }
   }
   vc_cleanup(&thread_vc_min);
}
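
// Continuing the example above: a segment with vector clock [ 1: 1, 2: 1 ]
// satisfies vc_lte() against the minimum [ 1: 2, 2: 1 ], so it happened
// before the current segment of every thread and is discarded -- no access
// recorded in it can still race with any future access.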

/**
 * Create a new segment for the specified thread, and report all data races
 * of the most recent thread segment with other threads.
 */
void thread_new_segment(const DrdThreadId tid)
{
   //static int s_calls_since_last_discard = 0;
   Segment* sg;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef OLD_RACE_DETECTION_ALGORITHM
   if (s_threadinfo[tid].last)
   {
      thread_report_races_segment(tid, s_threadinfo[tid].last);
   }
#endif

   sg = sg_new(tid, tid);
   thread_append_segment(tid, sg);

   thread_discard_ordered_segments();
}

void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[joiner].last);
   tl_assert(s_threadinfo[joinee].last);
   vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
   thread_discard_ordered_segments();

   if (joiner == s_running_tid)
   {
      thread_update_danger_set(joiner);
   }
}
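
// Example (clock values assumed for illustration): if at pthread_join() time
// the joiner's vector clock is [ 1: 3, 2: 1 ] and the joinee's is
// [ 1: 2, 2: 5 ], vc_combine() leaves the joiner at the element-wise maximum
// [ 1: 3, 2: 5 ], which orders everything the joinee did before whatever the
// joiner does next.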

void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].last);
   tl_assert(vc);
   vc_combine(&s_threadinfo[tid].last->vc, vc);
   thread_discard_ordered_segments();
}

void thread_stop_using_mem(const Addr a1, const Addr a2)
{
   DrdThreadId other_user = DRD_INVALID_THREADID;

   /* For all threads, mark the address range [a1,a2[ as no longer in use. */

   unsigned i;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      Segment* p;
      for (p = s_threadinfo[i].first; p; p = p->next)
      {
         if (other_user == DRD_INVALID_THREADID
             && i != s_running_tid
             && bm_has_any_access(p->bm, a1, a2))
         {
            other_user = i;
         }
         bm_clear(p->bm, a1, a2);
      }
   }

   /* If any other thread had accessed memory in [a1,a2[, update the */
   /* danger set.                                                    */
   if (other_user != DRD_INVALID_THREADID
       && bm_has_any_access(s_danger_set, a1, a2))
   {
#if 0
      VG_(message)(Vg_DebugMsg,
                   "recalculating danger set because thread %d / %d stopped"
                   " using memory at 0x%x sz %d",
                   other_user,
                   s_threadinfo[other_user].vg_threadid,
                   a1,
                   a2 - a1);
#endif
      thread_update_danger_set(thread_get_running_tid());
   }
}

void thread_start_recording(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   tl_assert(! s_threadinfo[tid].is_recording);
   s_threadinfo[tid].is_recording = True;
}

void thread_stop_recording(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[tid].is_recording);
   s_threadinfo[tid].is_recording = False;
}

Bool thread_is_recording(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   return s_threadinfo[tid].is_recording;
}

void thread_print_all(void)
{
   unsigned i;
   Segment* p;

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (s_threadinfo[i].first)
      {
         VG_(printf)("**************\n"
                     "* thread %3d (%d/%d/%d/0x%x/%d/%s) *\n"
                     "**************\n",
                     i,
                     s_threadinfo[i].vg_thread_exists,
                     s_threadinfo[i].vg_threadid,
                     s_threadinfo[i].posix_thread_exists,
                     s_threadinfo[i].pt_threadid,
                     s_threadinfo[i].detached_posix_thread,
                     s_threadinfo[i].name);
         for (p = s_threadinfo[i].first; p; p = p->next)
         {
            sg_print(p);
         }
      }
   }
}

static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
   const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);

   VG_(message)(Vg_UserMsg,
                "%s (%s)",
                msg,
                thread_get_name(tid));

   if (vg_tid != VG_INVALID_THREADID)
   {
      if (callstack)
      {
         VG_(pp_ExeContext)(callstack);
      }
      else
      {
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
      }
   }
   else
   {
      VG_(message)(Vg_UserMsg,
                   "   (thread finished, call stack no longer available)");
   }
}

#ifdef OLD_RACE_DETECTION_ALGORITHM
void thread_report_races(const DrdThreadId threadid)
{
   Segment* p;

   s_report_races_count++;

   tl_assert(0 <= threadid && threadid < DRD_N_THREADS
             && threadid != DRD_INVALID_THREADID);

   for (p = s_threadinfo[threadid].first; p; p = p->next)
   {
      thread_report_races_segment(threadid, p);
   }
}

/**
 * Report all data races for segment p of thread threadid against other
 * threads.
 */
void thread_report_races_segment(const DrdThreadId threadid,
                                 Segment* const p)
{
   unsigned i;

   tl_assert(0 <= threadid && threadid < DRD_N_THREADS
             && threadid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (i != threadid)
      {
         Segment* q;
         for (q = s_threadinfo[i].last; q; q = q->prev)
         {
#if 0
            char msg[256];
            VG_(snprintf)(msg, sizeof(msg),
                          "Examining thread %d (vc ", threadid);
            vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                       &p->vc);
            VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                          ") versus thread %d (vc ", i);
            vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                       &q->vc);
            VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                          ") %d %d",
                          vc_lte(&p->vc, &q->vc), vc_lte(&q->vc, &p->vc));
            VG_(message)(Vg_DebugMsg, "%s", msg);
#endif
            // Since q iterates over the segments of thread i in order of
            // decreasing vector clocks, if q->vc <= p->vc, then
            // q->prev->vc <= p->vc will also hold. Hence, break out of the
            // loop once this condition is met.
            if (vc_lte(&q->vc, &p->vc))
               break;
            if (! vc_lte(&p->vc, &q->vc))
            {
               if (bm_has_races(p->bm, q->bm))
               {
                  VG_(message)(Vg_UserMsg, "----------------------------------------------------------------------");
                  tl_assert(p->stacktrace);
                  show_call_stack(threadid, "1st segment start",
                                  p->stacktrace);
                  show_call_stack(threadid, "1st segment end",
                                  p->next ? p->next->stacktrace : 0);
                  tl_assert(q->stacktrace);
                  show_call_stack(i, "2nd segment start",
                                  q->stacktrace);
                  show_call_stack(i, "2nd segment end",
                                  q->next ? q->next->stacktrace : 0);
                  bm_report_races(threadid, i, p->bm, q->bm);
               }
            }
         }
      }
   }
}

/**
 * Report all detected data races for all threads.
 */
void thread_report_all_races(void)
{
   unsigned i;

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (s_threadinfo[i].last)
      {
         thread_report_races(i);
      }
   }
}
#else
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (i != tid)
      {
         Segment* q;
         for (q = s_threadinfo[i].last; q; q = q->prev)
         {
            // Since q iterates over the segments of thread i in order of
            // decreasing vector clocks, if q->vc <= p->vc, then
            // q->prev->vc <= p->vc will also hold. Hence, break out of the
            // loop once this condition is met.
            if (vc_lte(&q->vc, &p->vc))
               break;
            if (! vc_lte(&p->vc, &q->vc))
            {
               if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
               {
                  tl_assert(q->stacktrace);
                  show_call_stack(i, "Other segment start",
                                  q->stacktrace);
                  show_call_stack(i, "Other segment end",
                                  q->next ? q->next->stacktrace : 0);
               }
            }
         }
      }
   }
}

void thread_report_conflicting_segments(const DrdThreadId tid,
                                        const Addr addr,
                                        const SizeT size,
                                        const BmAccessTypeT access_type)
{
   Segment* p;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   for (p = s_threadinfo[tid].first; p; p = p->next)
   {
      if (bm_has(p->bm, addr, addr + size, access_type))
      {
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
      }
   }
}
#endif

/**
 * Compute a bitmap that represents the union of all memory accesses of all
 * segments that are unordered to the current segment of the thread tid.
 */
static void thread_update_danger_set(const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(tid == s_running_tid);

   s_update_danger_set_count++;
   s_danger_set_bitmap_creation_count -= bm_get_bitmap_creation_count();
   s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();

#if 0
   if (s_danger_set)
   {
      bm_delete(s_danger_set);
      s_danger_set = 0;
   }
   s_danger_set = bm_new();
#else
   // Marginally faster than the above code.
   if (s_danger_set)
   {
      bm_clear_all(s_danger_set);
   }
   else
   {
      s_danger_set = bm_new();
   }
#endif

   for (p = s_threadinfo[tid].first; p; p = p->next)
   {
      unsigned j;

      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
      {
         if (IsValidDrdThreadId(j))
         {
            const Segment* const q = s_threadinfo[j].last;
            if (j != tid && q != 0
                && ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
            {
               bm_merge2(s_danger_set, q->bm);
            }
         }
      }

      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
      {
         if (IsValidDrdThreadId(j))
         {
            // NPTL hack: don't report data races on sizeof(struct pthread)
            // bytes at the top of the stack, since the NPTL functions access
            // this data without locking.
            if (s_threadinfo[j].stack_min != 0)
            {
               tl_assert(s_threadinfo[j].stack_startup != 0);
               if (s_threadinfo[j].stack_min < s_threadinfo[j].stack_startup)
               {
                  bm_clear(s_danger_set,
                           s_threadinfo[j].stack_min,
                           s_threadinfo[j].stack_startup);
               }
            }
         }
      }
   }

   s_danger_set_bitmap_creation_count += bm_get_bitmap_creation_count();
   s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();

#if 0
   VG_(message)(Vg_DebugMsg, "[%d] new danger set:", tid);
   bm_print(s_danger_set);
   VG_(message)(Vg_DebugMsg, "[%d] end of new danger set.", tid);
#endif
}
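
// After the update, s_danger_set holds every address that another thread
// accessed in a segment which is neither ordered before nor after the
// running thread's segments. Any subsequent access by the running thread
// that hits this bitmap is a candidate data race, as checked by
// thread_conflicting_access() below.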

Bool thread_conflicting_access(const Addr a,
                               const SizeT size,
                               const BmAccessTypeT access_type)
{
   tl_assert(s_danger_set);
   return (bm_has_conflict_with(s_danger_set, a, a + size, access_type)
           && ! drd_is_suppressed(a, a + size));
}

ULong thread_get_context_switch_count(void)
{
   return s_context_switch_count;
}

#ifdef OLD_RACE_DETECTION_ALGORITHM
ULong thread_get_report_races_count(void)
{
   return s_report_races_count;
}
#endif

ULong thread_get_discard_ordered_segments_count(void)
{
   return s_discard_ordered_segments_count;
}

ULong thread_get_update_danger_set_count(void)
{
   return s_update_danger_set_count;
}

ULong thread_get_danger_set_bitmap_creation_count(void)
{
   return s_danger_set_bitmap_creation_count;
}

ULong thread_get_danger_set_bitmap2_creation_count(void)
{
   return s_danger_set_bitmap2_creation_count;
}

/*
 * Local variables:
 * c-basic-offset: 3
 * End:
 */