blob: 4da1f024f3c4dc6dd23c8180bff8f67fddccf987 [file] [log] [blame]
sewardjaf44c822007-11-25 14:01:38 +00001/*
2 This file is part of drd, a data race detector.
3
sewardj85642922008-01-14 11:54:56 +00004 Copyright (C) 2006-2008 Bart Van Assche
sewardjaf44c822007-11-25 14:01:38 +00005 bart.vanassche@gmail.com
6
7 This program is free software; you can redistribute it and/or
8 modify it under the terms of the GNU General Public License as
9 published by the Free Software Foundation; either version 2 of the
10 License, or (at your option) any later version.
11
12 This program is distributed in the hope that it will be useful, but
13 WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
20 02111-1307, USA.
21
22 The GNU General Public License is contained in the file COPYING.
23*/
24
25
26#include "drd_error.h"
27#include "drd_segment.h"
28#include "drd_suppression.h"
29#include "drd_thread.h"
sewardjaf44c822007-11-25 14:01:38 +000030#include "pub_tool_basics.h" // Addr, SizeT
31#include "pub_tool_errormgr.h" // VG_(unique_error)()
32#include "pub_tool_libcassert.h" // tl_assert()
33#include "pub_tool_libcbase.h" // VG_(strlen)()
34#include "pub_tool_libcprint.h" // VG_(printf)()
35#include "pub_tool_machine.h"
36#include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
sewardj85642922008-01-14 11:54:56 +000037#include "pub_tool_options.h" // VG_(clo_backtrace_size)
sewardjaf44c822007-11-25 14:01:38 +000038#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
39
40
41// Defines.
42
43#define DRD_N_THREADS VG_N_THREADS
44
45
46// Type definitions.
47
48typedef struct
49{
50 Segment* first;
51 Segment* last;
52 ThreadId vg_threadid;
53 PThreadId pt_threadid;
54 Addr stack_min_min;
55 Addr stack_min;
56 Addr stack_startup;
57 Addr stack_max;
58 char name[32];
59 /// Indicates whether the Valgrind core knows about this thread.
60 Bool vg_thread_exists;
61 /// Indicates whether there is an associated POSIX thread ID.
62 Bool posix_thread_exists;
63 /// If true, indicates that there is a corresponding POSIX thread ID and
64 /// a corresponding OS thread that is detached.
65 Bool detached_posix_thread;
66 Bool is_recording;
67} ThreadInfo;
68
69
70// Local functions.
71
72static void thread_append_segment(const DrdThreadId tid,
73 Segment* const sg);
74static void thread_update_danger_set(const DrdThreadId tid);
75
76
77// Local variables.
78
79static ULong s_context_switch_count;
80static ULong s_discard_ordered_segments_count;
81#ifdef OLD_RACE_DETECTION_ALGORITHM
82static ULong s_report_races_count;
83#endif
84static ULong s_update_danger_set_count;
85static ULong s_danger_set_bitmap_creation_count;
86static ULong s_danger_set_bitmap2_creation_count;
sewardj8b09d4f2007-12-04 21:27:18 +000087static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
88static DrdThreadId s_drd_running_tid = DRD_INVALID_THREADID;
sewardjaf44c822007-11-25 14:01:38 +000089static ThreadInfo s_threadinfo[DRD_N_THREADS];
90static struct bitmap* s_danger_set;
91
92
93// Function definitions.
94
95__inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
96{
97 return (0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
98 && ! (s_threadinfo[tid].vg_thread_exists == False
99 && s_threadinfo[tid].posix_thread_exists == False
100 && s_threadinfo[tid].detached_posix_thread == False));
101}
102
103/**
104 * Convert Valgrind's ThreadId into a DrdThreadId. Report failure if
105 * Valgrind's ThreadId does not yet exist.
106 **/
107DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
108{
109 int i;
110
111 if (tid == VG_INVALID_THREADID)
112 return DRD_INVALID_THREADID;
113
114 for (i = 1; i < DRD_N_THREADS; i++)
115 {
116 if (s_threadinfo[i].vg_thread_exists == True
117 && s_threadinfo[i].vg_threadid == tid)
118 {
119 return i;
120 }
121 }
122
123 return DRD_INVALID_THREADID;
124}
125
126static
127DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
128{
129 int i;
130
131 tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);
132
133 for (i = 1; i < DRD_N_THREADS; i++)
134 {
135 if (s_threadinfo[i].vg_thread_exists == False
136 && s_threadinfo[i].posix_thread_exists == False
137 && s_threadinfo[i].detached_posix_thread == False)
138 {
139 s_threadinfo[i].vg_thread_exists = True;
140 s_threadinfo[i].vg_threadid = tid;
141 s_threadinfo[i].pt_threadid = INVALID_POSIX_THREADID;
142 s_threadinfo[i].stack_min_min = 0;
143 s_threadinfo[i].stack_min = 0;
144 s_threadinfo[i].stack_startup = 0;
145 s_threadinfo[i].stack_max = 0;
146 VG_(snprintf)(s_threadinfo[i].name, sizeof(s_threadinfo[i].name),
147 "thread %d", tid);
148 s_threadinfo[i].name[sizeof(s_threadinfo[i].name) - 1] = 0;
149 s_threadinfo[i].is_recording = True;
150 if (s_threadinfo[i].first != 0)
151 VG_(printf)("drd thread id = %d\n", i);
152 tl_assert(s_threadinfo[i].first == 0);
153 tl_assert(s_threadinfo[i].last == 0);
154 return i;
155 }
156 }
157
158 tl_assert(False);
159
160 return DRD_INVALID_THREADID;
161}
162
163DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
164{
165 int i;
166
167 tl_assert(tid != INVALID_POSIX_THREADID);
168
169 for (i = 1; i < DRD_N_THREADS; i++)
170 {
171 if (s_threadinfo[i].posix_thread_exists
172 && s_threadinfo[i].pt_threadid == tid)
173 {
174 return i;
175 }
176 }
177 return DRD_INVALID_THREADID;
178}
179
180ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
181{
182 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
183 return (s_threadinfo[tid].vg_thread_exists
184 ? s_threadinfo[tid].vg_threadid
185 : VG_INVALID_THREADID);
186}
187
188/**
189 * Sanity check of the doubly linked list of segments referenced by a ThreadInfo struct.
190 * @return True if sane, False if not.
191 */
192static Bool sane_ThreadInfo(const ThreadInfo* const ti)
193{
194 Segment* p;
195 for (p = ti->first; p; p = p->next) {
196 if (p->next && p->next->prev != p)
197 return False;
198 if (p->next == 0 && p != ti->last)
199 return False;
200 }
201 for (p = ti->last; p; p = p->prev) {
202 if (p->prev && p->prev->next != p)
203 return False;
204 if (p->prev == 0 && p != ti->first)
205 return False;
206 }
207 return True;
208}
209
/**
 * Assign a new DRD thread ID to Valgrind thread vg_created and give it its
 * first segment. Called before the created thread starts running.
 *
 * @param creator    DRD thread ID of the creating thread.
 * @param vg_created Valgrind thread ID of the thread being created; must not
 *                   have a DRD thread ID yet.
 * @return the newly assigned DRD thread ID.
 */
DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
   DrdThreadId created;

   tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
   created = VgThreadIdToNewDrdThreadId(vg_created);
   tl_assert(0 <= created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   /* A freshly allocated slot must not own any segments yet. */
   tl_assert(s_threadinfo[created].first == 0);
   tl_assert(s_threadinfo[created].last == 0);
   /* sg_new(creator, created) presumably orders the new thread's first
      segment after its creator -- see drd_segment.h. */
   thread_append_segment(created, sg_new(creator, created));

   return created;
}
226
227/**
228 * Allocate the first segment for a thread. Call this just after
229 * pthread_create().
230 */
/**
 * Allocate the first segment for a thread. Call this just after
 * pthread_create().
 *
 * @param vg_created Valgrind thread ID of the created thread; must already
 *                   have a DRD thread ID.
 * @return the DRD thread ID of the created thread.
 */
DrdThreadId thread_post_create(const ThreadId vg_created)
{
   const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);

   tl_assert(0 <= created && created < DRD_N_THREADS
             && created != DRD_INVALID_THREADID);

   /* Stacks grow downward: initialize all stack bounds to stack_max;
      stack_min / stack_min_min are lowered later via thread_set_stack_min(). */
   s_threadinfo[created].stack_max = VG_(thread_get_stack_max)(vg_created);
   s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
   s_threadinfo[created].stack_min = s_threadinfo[created].stack_max;
   s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
   tl_assert(s_threadinfo[created].stack_max != 0);

   return created;
}
246
247/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack, */
248/* and accesses this data structure from multiple threads without locking. */
249/* Any conflicting accesses in the range stack_startup..stack_max will be */
250/* ignored. */
/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack, */
/* and accesses this data structure from multiple threads without locking. */
/* Any conflicting accesses in the range stack_startup..stack_max will be */
/* ignored. */
void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
{
#if 0
   VG_(message)(Vg_DebugMsg, "thread_set_stack_startup: thread %d (%d)"
                " stack 0x%x .. 0x%lx (size %d)",
                s_threadinfo[tid].vg_threadid, tid,
                stack_startup,
                s_threadinfo[tid].stack_max,
                s_threadinfo[tid].stack_max - stack_startup);
#endif
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   /* The startup stack pointer must lie inside the thread's stack range. */
   tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
   tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
   s_threadinfo[tid].stack_startup = stack_startup;
}
266
267Addr thread_get_stack_min(const DrdThreadId tid)
268{
269 tl_assert(0 <= tid && tid < DRD_N_THREADS
270 && tid != DRD_INVALID_THREADID);
271 return s_threadinfo[tid].stack_min;
272}
273
/**
 * Record the current lowest stack address of thread tid, and track the
 * all-time minimum in stack_min_min. No-op until stack_max has been set
 * by thread_post_create().
 */
void thread_set_stack_min(const DrdThreadId tid, const Addr stack_min)
{
#if 0
   VG_(message)(Vg_DebugMsg, "thread %d (%d) stack_min = 0x%x"
                " (size %d, max %d, delta %d)",
                s_threadinfo[tid].vg_threadid, tid,
                stack_min,
                s_threadinfo[tid].stack_max - stack_min,
                s_threadinfo[tid].stack_max - s_threadinfo[tid].stack_min_min,
                s_threadinfo[tid].stack_min - stack_min);
#endif
   tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
   if (s_threadinfo[tid].stack_max)
   {
      s_threadinfo[tid].stack_min = stack_min;
      /* Keep the historical low-water mark up to date. */
      if (stack_min < s_threadinfo[tid].stack_min_min)
      {
         s_threadinfo[tid].stack_min_min = stack_min;
      }
      tl_assert(s_threadinfo[tid].stack_min_min
                <= s_threadinfo[tid].stack_min);
      tl_assert(s_threadinfo[tid].stack_min < s_threadinfo[tid].stack_max);
   }
}
298
299DrdThreadId thread_lookup_stackaddr(const Addr a,
300 Addr* const stack_min,
301 Addr* const stack_max)
302{
303 unsigned i;
304 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
305 {
306 if (s_threadinfo[i].stack_min <= a && a <= s_threadinfo[i].stack_max)
307 {
308 *stack_min = s_threadinfo[i].stack_min;
309 *stack_max = s_threadinfo[i].stack_max;
310 return i;
311 }
312 }
313 return DRD_INVALID_THREADID;
314}
315
316/**
317 * Clean up thread-specific data structures. Call this just after
318 * pthread_join().
319 */
320void thread_delete(const DrdThreadId tid)
321{
322 Segment* sg;
323 Segment* sg_prev;
324
325 tl_assert(0 <= tid && tid < DRD_N_THREADS
326 && tid != DRD_INVALID_THREADID);
327 for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
328 {
329 sg_prev = sg->prev;
330 sg_delete(sg);
331 }
332 s_threadinfo[tid].vg_thread_exists = False;
333 s_threadinfo[tid].posix_thread_exists = False;
334 tl_assert(s_threadinfo[tid].detached_posix_thread == False);
335 s_threadinfo[tid].first = 0;
336 s_threadinfo[tid].last = 0;
337}
338
339/* Called after a thread performed its last memory access and before */
340/* thread_delete() is called. Note: thread_delete() is only called for */
341/* joinable threads, not for detached threads. */
/* Called after a thread performed its last memory access and before */
/* thread_delete() is called. Note: thread_delete() is only called for */
/* joinable threads, not for detached threads. */
void thread_finished(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   /* The thread's stack will not be accessed by it any more. */
   thread_stop_using_mem(s_threadinfo[tid].stack_min,
                         s_threadinfo[tid].stack_max);

   s_threadinfo[tid].vg_thread_exists = False;

   if (s_threadinfo[tid].detached_posix_thread)
   {
      /* Once a detached thread has finished, its stack is deallocated and */
      /* should no longer be taken into account when computing the danger set*/
      s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;

      /* For a detached thread, calling pthread_exit() invalidates the */
      /* POSIX thread ID associated with the detached thread. For joinable */
      /* POSIX threads however, the POSIX thread ID remains live after the */
      /* pthread_exit() call until pthread_join() is called. */
      s_threadinfo[tid].posix_thread_exists = False;
   }
}
365
366void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
367{
368 tl_assert(0 <= tid && tid < DRD_N_THREADS
369 && tid != DRD_INVALID_THREADID);
370 tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
371 tl_assert(ptid != INVALID_POSIX_THREADID);
372 s_threadinfo[tid].posix_thread_exists = True;
373 s_threadinfo[tid].pt_threadid = ptid;
374}
375
376Bool thread_get_joinable(const DrdThreadId tid)
377{
378 tl_assert(0 <= tid && tid < DRD_N_THREADS
379 && tid != DRD_INVALID_THREADID);
380 return ! s_threadinfo[tid].detached_posix_thread;
381}
382
/**
 * Mark thread tid as joinable or detached. Requires that a POSIX thread ID
 * has already been assigned via thread_set_pthreadid().
 */
void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* joinable must be a normalized Bool (0 or 1). */
   tl_assert(!! joinable == joinable);
   tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
#if 0
   VG_(message)(Vg_DebugMsg,
                "thread_set_joinable(%d/%d, %s)",
                tid,
                s_threadinfo[tid].vg_threadid,
                joinable ? "joinable" : "detached");
#endif
   s_threadinfo[tid].detached_posix_thread = ! joinable;
}
398
399const char* thread_get_name(const DrdThreadId tid)
400{
401 tl_assert(0 <= tid && tid < DRD_N_THREADS
402 && tid != DRD_INVALID_THREADID);
403 return s_threadinfo[tid].name;
404}
405
406void thread_set_name(const DrdThreadId tid, const char* const name)
407{
408 tl_assert(0 <= tid && tid < DRD_N_THREADS
409 && tid != DRD_INVALID_THREADID);
410 VG_(strncpy)(s_threadinfo[tid].name, name,
411 sizeof(s_threadinfo[tid].name));
412 s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
413}
414
/**
 * Set the display name of thread tid from printf-style format fmt and a
 * single word-sized argument arg.
 */
void thread_set_name_fmt(const DrdThreadId tid, const char* const fmt,
                         const UWord arg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   VG_(snprintf)(s_threadinfo[tid].name, sizeof(s_threadinfo[tid].name),
                 fmt, arg);
   /* Force NUL termination in case the output was truncated. */
   s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
}
sewardj8b09d4f2007-12-04 21:27:18 +0000424
/** Return the DRD thread ID of the currently scheduled thread. */
DrdThreadId thread_get_running_tid(void)
{
   /* The cached running thread must agree with Valgrind's scheduler. */
   tl_assert(VG_(get_running_tid)() == s_vg_running_tid);
   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
   return s_drd_running_tid;
}
431
/**
 * Note that Valgrind scheduled thread vg_tid; performs a DRD context switch
 * if it differs from the previously running thread.
 */
void thread_set_vg_running_tid(const ThreadId vg_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}
444
/**
 * Record (vg_tid, drd_tid) as the running thread. On an actual context
 * switch, recompute the danger set for the new thread and count the switch.
 */
void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
{
   tl_assert(vg_tid != VG_INVALID_THREADID);
   tl_assert(drd_tid != DRD_INVALID_THREADID);

   if (vg_tid != s_vg_running_tid)
   {
      s_vg_running_tid = vg_tid;
      s_drd_running_tid = drd_tid;
      /* The danger set is only valid for the currently running thread. */
      thread_update_danger_set(drd_tid);
      s_context_switch_count++;
   }

   tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
   tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}
461
462/**
463 * Return a pointer to the latest segment for the specified thread.
464 */
/**
 * Return a pointer to the latest segment for the specified thread.
 */
Segment* thread_get_segment(const DrdThreadId tid)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* Dump diagnostic state before the assertion below fires. */
   if (s_threadinfo[tid].last == 0)
   {
      VG_(message)(Vg_DebugMsg, "threadid = %d", tid);
      thread_print_all();
   }
   tl_assert(s_threadinfo[tid].last);
   return s_threadinfo[tid].last;
}
477
478/**
479 * Insert a new segment at the end of the segment list.
480 */
481static void thread_append_segment(const DrdThreadId tid,
482 Segment* const sg)
483{
484 tl_assert(0 <= tid && tid < DRD_N_THREADS
485 && tid != DRD_INVALID_THREADID);
486 tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
487 sg->prev = s_threadinfo[tid].last;
488 sg->next = 0;
489 if (s_threadinfo[tid].last)
490 s_threadinfo[tid].last->next = sg;
491 s_threadinfo[tid].last = sg;
492 if (s_threadinfo[tid].first == 0)
493 s_threadinfo[tid].first = sg;
494 tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
495}
496
497/**
498 * Remove a segment from the segment list of thread threadid, and free the
499 * associated memory.
500 */
/**
 * Remove a segment from the segment list of thread threadid, and free the
 * associated memory.
 */
static void thread_discard_segment(const DrdThreadId tid,
                                   Segment* const sg)
{
   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
   /* Unlink sg from the doubly linked list, updating the head/tail
      pointers when sg is the first and/or last element. */
   if (sg->prev)
      sg->prev->next = sg->next;
   if (sg->next)
      sg->next->prev = sg->prev;
   if (sg == s_threadinfo[tid].first)
      s_threadinfo[tid].first = sg->next;
   if (sg == s_threadinfo[tid].last)
      s_threadinfo[tid].last = sg->prev;
   sg_delete(sg);
   tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}
518
519VectorClock* thread_get_vc(const DrdThreadId tid)
520{
521 tl_assert(0 <= tid && tid < DRD_N_THREADS
522 && tid != DRD_INVALID_THREADID);
523 tl_assert(s_threadinfo[tid].last);
524 return &s_threadinfo[tid].last->vc;
525}
526
527/**
528 * Compute the minimum of all latest vector clocks of all threads
529 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
530 * @param vc pointer to a vectorclock, holds result upon return.
531 */
532static void thread_compute_minimum_vc(VectorClock* vc)
533{
534 int i;
535 Bool first;
536 Segment* latest_sg;
537
538 first = True;
539 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
540 {
541 latest_sg = s_threadinfo[i].last;
542 if (latest_sg)
543 {
544 if (first)
545 {
546 vc_cleanup(vc);
547 vc_copy(vc, &latest_sg->vc);
548 }
549 else
550 vc_min(vc, &latest_sg->vc);
551 first = False;
552 }
553 }
554}
555
556static void thread_compute_maximum_vc(VectorClock* vc)
557{
558 int i;
559 Bool first;
560 Segment* latest_sg;
561
562 first = True;
563 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
564 {
565 latest_sg = s_threadinfo[i].last;
566 if (latest_sg)
567 {
568 if (first)
569 {
570 vc_cleanup(vc);
571 vc_copy(vc, &latest_sg->vc);
572 }
573 else
574 vc_combine(vc, &latest_sg->vc);
575 first = False;
576 }
577 }
578}
579
580/**
581 * Discard all segments that have a defined ordered against the latest vector
582 * clock of every thread -- these segments can no longer be involved in a
583 * data race.
584 */
585static void thread_discard_ordered_segments(void)
586{
587 VectorClock thread_vc_min;
588 int i;
589
590 s_discard_ordered_segments_count++;
591
592 vc_init(&thread_vc_min, 0, 0);
593 thread_compute_minimum_vc(&thread_vc_min);
594 if (sg_get_trace())
595 {
596 char msg[256];
597 VectorClock thread_vc_max;
598
599 vc_init(&thread_vc_max, 0, 0);
600 thread_compute_maximum_vc(&thread_vc_max);
601 VG_(snprintf)(msg, sizeof(msg),
602 "Discarding ordered segments -- min vc is ");
603 vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
604 &thread_vc_min);
605 VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
606 ", max vc is ");
607 vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
608 &thread_vc_max);
609 VG_(message)(Vg_DebugMsg, "%s", msg);
610 vc_cleanup(&thread_vc_max);
611 }
612
613 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
614 {
615 Segment* sg;
616 Segment* sg_next;
617 for (sg = s_threadinfo[i].first;
618 sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
619 sg = sg_next)
620 {
621#if 0
622 VG_(printf)("Discarding a segment of thread %d: ", i);
623 vc_print(&sg->vc);
624 VG_(printf)("\n");
625#endif
626 thread_discard_segment(i, sg);
627 }
628 }
629 vc_cleanup(&thread_vc_min);
630}
631
632/**
633 * Create a new segment for the specified thread, and report all data races
634 * of the most recent thread segment with other threads.
635 */
/**
 * Create a new segment for the specified thread, and report all data races
 * of the most recent thread segment with other threads.
 */
void thread_new_segment(const DrdThreadId tid)
{
   //static int s_calls_since_last_discard = 0;
   Segment* sg;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

#ifdef OLD_RACE_DETECTION_ALGORITHM
   /* Old algorithm reports races eagerly when a segment is closed. */
   if (s_threadinfo[tid].last)
   {
      thread_report_races_segment(tid, s_threadinfo[tid].last);
   }
#endif

   sg = sg_new(tid, tid);
   thread_append_segment(tid, sg);

   /* Reclaim segments made redundant by the new vector clock. */
   thread_discard_ordered_segments();
}
656
/**
 * Combine the vector clock of the joinee's last segment into the joiner's
 * last segment (pthread_join() semantics), then discard segments that have
 * become ordered. Refreshes the danger set if the joiner is running.
 */
void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
{
   tl_assert(joiner != joinee);
   tl_assert(0 <= joiner && joiner < DRD_N_THREADS
             && joiner != DRD_INVALID_THREADID);
   tl_assert(0 <= joinee && joinee < DRD_N_THREADS
             && joinee != DRD_INVALID_THREADID);
   tl_assert(s_threadinfo[joiner].last);
   tl_assert(s_threadinfo[joinee].last);
   vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
   thread_discard_ordered_segments();

   /* The joiner's danger set depends on its vector clock, which changed. */
   if (joiner == s_drd_running_tid)
   {
      thread_update_danger_set(joiner);
   }
}
674
675void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
676{
677 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
678 tl_assert(s_threadinfo[tid].last);
679 tl_assert(vc);
680 vc_combine(&s_threadinfo[tid].last->vc, vc);
681 thread_discard_ordered_segments();
682}
683
/**
 * Clear all recorded accesses in the address range [a1,a2[ for every
 * thread's segments, e.g. because the memory was freed or a stack shrank.
 * Recomputes the danger set if another thread had accessed that range.
 */
void thread_stop_using_mem(const Addr a1, const Addr a2)
{
   DrdThreadId other_user = DRD_INVALID_THREADID;

   /* For all threads, mark the range [a,a+size[ as no longer in use. */

   unsigned i;
   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      Segment* p;
      for (p = s_threadinfo[i].first; p; p = p->next)
      {
         /* Remember the first other thread that touched this range, before
            the access information is cleared below. */
         if (other_user == DRD_INVALID_THREADID
             && i != s_drd_running_tid
             && bm_has_any_access(p->bm, a1, a2))
         {
            other_user = i;
         }
         bm_clear(p->bm, a1, a2);
      }
   }

   /* If any other thread had accessed memory in [a,a+size[, update the */
   /* danger set. */
   if (other_user != DRD_INVALID_THREADID
       && bm_has_any_access(s_danger_set, a1, a2))
   {
#if 0
      VG_(message)(Vg_DebugMsg,
                   "recalculating danger set because thread %d / %d stopped"
                   " using memory at 0x%x sz %d",
                   other_user,
                   s_threadinfo[other_user].vg_threadid,
                   a1,
                   a2 - a1);
#endif
      thread_update_danger_set(thread_get_running_tid());
   }
}
723
724void thread_start_recording(const DrdThreadId tid)
725{
726 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
727 tl_assert(! s_threadinfo[tid].is_recording);
728 s_threadinfo[tid].is_recording = True;
729}
730
731void thread_stop_recording(const DrdThreadId tid)
732{
733 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
734 tl_assert(s_threadinfo[tid].is_recording);
735 s_threadinfo[tid].is_recording = False;
736}
737
738Bool thread_is_recording(const DrdThreadId tid)
739{
740 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
741 return s_threadinfo[tid].is_recording;
742}
743
/** Dump the state of every thread that owns at least one segment, together
 *  with all of its segments (debugging aid). */
void thread_print_all(void)
{
   unsigned i;
   Segment* p;

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (s_threadinfo[i].first)
      {
         VG_(printf)("**************\n"
                     "* thread %3d (%d/%d/%d/0x%x/%d/%s) *\n"
                     "**************\n",
                     i,
                     s_threadinfo[i].vg_thread_exists,
                     s_threadinfo[i].vg_threadid,
                     s_threadinfo[i].posix_thread_exists,
                     s_threadinfo[i].pt_threadid,
                     s_threadinfo[i].detached_posix_thread,
                     s_threadinfo[i].name);
         for (p = s_threadinfo[i].first; p; p = p->next)
         {
            sg_print(p);
         }
      }
   }
}
770
/**
 * Print message msg followed by a call stack for thread tid: the saved
 * context callstack if given, otherwise the thread's current stack trace
 * (only possible while the Valgrind thread still exists).
 */
static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
   const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);

   VG_(message)(Vg_UserMsg,
                "%s (%s)",
                msg,
                thread_get_name(tid));

   if (vg_tid != VG_INVALID_THREADID)
   {
      if (callstack)
      {
         VG_(pp_ExeContext)(callstack);
      }
      else
      {
         VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
      }
   }
   else
   {
      VG_(message)(Vg_UserMsg,
                   "   (thread finished, call stack no longer available)");
   }
}
799
800#ifdef OLD_RACE_DETECTION_ALGORITHM
/** Report data races of every segment of thread threadid against all other
 *  threads (old race detection algorithm only). */
void thread_report_races(const DrdThreadId threadid)
{
   Segment* p;

   s_report_races_count++;

   tl_assert(0 <= threadid && threadid < DRD_N_THREADS
             && threadid != DRD_INVALID_THREADID);

   for (p = s_threadinfo[threadid].first; p; p = p->next)
   {
      thread_report_races_segment(threadid, p);
   }
}
815
816/**
817 * Report all data races for segment p of thread threadid against other
818 * threads.
819 */
/**
 * Report all data races for segment p of thread threadid against other
 * threads.
 */
void thread_report_races_segment(const DrdThreadId threadid,
                                 Segment* const p)
{
   unsigned i;

   tl_assert(0 <= threadid && threadid < DRD_N_THREADS
             && threadid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (i != threadid)
      {
         Segment* q;
         for (q = s_threadinfo[i].last; q; q = q->prev)
         {
#if 0
            char msg[256];
            VG_(snprintf)(msg, sizeof(msg), "Examining thread %d (vc ", threadid);
            vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                       &p->vc);
            VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                          ") versus thread %d (vc ", i);
            vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                       &q->vc);
            VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                          ") %d %d",
                          vc_lte(&p->vc, &q->vc), vc_lte(&q->vc, &p->vc));
            VG_(message)(Vg_DebugMsg, "%s", msg);
#endif
            // Since q iterates over the segments of thread i in order of
            // decreasing vector clocks, if q->vc <= p->vc, then
            // q->next->vc <= p->vc will also hold. Hence, break out of the
            // loop once this condition is met.
            if (vc_lte(&q->vc, &p->vc))
               break;
            /* Segments p and q are unordered: candidate for a race. */
            if (! vc_lte(&p->vc, &q->vc))
            {
               if (bm_has_races(p->bm, q->bm))
               {
                  VG_(message)(Vg_UserMsg, "----------------------------------------------------------------------");
                  tl_assert(p->stacktrace);
                  show_call_stack(threadid, "1st segment start",
                                  p->stacktrace);
                  show_call_stack(threadid, "1st segment end",
                                  p->next ? p->next->stacktrace : 0);
                  tl_assert(q->stacktrace);
                  show_call_stack(i, "2nd segment start",
                                  q->stacktrace);
                  show_call_stack(i, "2nd segment end",
                                  q->next ? q->next->stacktrace : 0);
                  bm_report_races(threadid, i, p->bm, q->bm);
               }
            }
         }
      }
   }
}
878
879/**
880 * Report all detected data races for all threads.
881 */
882void thread_report_all_races(void)
883{
884 unsigned i;
885
886 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
887 {
888 if (s_threadinfo[i].last)
889 {
890 thread_report_races(i);
891 }
892 }
893}
894#else
/**
 * For segment p of thread tid, print the call stacks of all segments of
 * other threads that are unordered to p and conflict with the access of
 * access_type to the range [addr,addr+size[.
 */
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
   unsigned i;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   tl_assert(p);

   for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
   {
      if (i != tid)
      {
         Segment* q;
         for (q = s_threadinfo[i].last; q; q = q->prev)
         {
            // Since q iterates over the segments of thread i in order of
            // decreasing vector clocks, if q->vc <= p->vc, then
            // q->next->vc <= p->vc will also hold. Hence, break out of the
            // loop once this condition is met.
            if (vc_lte(&q->vc, &p->vc))
               break;
            /* Unordered segments: report any conflicting access in q. */
            if (! vc_lte(&p->vc, &q->vc))
            {
               if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
               {
                  tl_assert(q->stacktrace);
                  show_call_stack(i, "Other segment start",
                                  q->stacktrace);
                  show_call_stack(i, "Other segment end",
                                  q->next ? q->next->stacktrace : 0);
               }
            }
         }
      }
   }
}
936
/**
 * Report the segments of other threads that conflict with the access of
 * access_type by thread tid to the address range [addr,addr+size[.
 */
void thread_report_conflicting_segments(const DrdThreadId tid,
                                        const Addr addr,
                                        const SizeT size,
                                        const BmAccessTypeT access_type)
{
   Segment* p;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);

   /* Only segments of tid that actually touched the range are relevant. */
   for (p = s_threadinfo[tid].first; p; p = p->next)
   {
      if (bm_has(p->bm, addr, addr + size, access_type))
      {
         thread_report_conflicting_segments_segment(tid, addr, size,
                                                    access_type, p);
      }
   }
}
956#endif
957
958/**
959 * Compute a bitmap that represents the union of all memory accesses of all
960 * segments that are unordered to the current segment of the thread tid.
961 */
/**
 * Compute a bitmap that represents the union of all memory accesses of all
 * segments that are unordered to the current segment of the thread tid.
 */
static void thread_update_danger_set(const DrdThreadId tid)
{
   Segment* p;

   tl_assert(0 <= tid && tid < DRD_N_THREADS
             && tid != DRD_INVALID_THREADID);
   /* The danger set only makes sense for the running thread. */
   tl_assert(tid == s_drd_running_tid);

   /* Statistics: subtract now, add back after the rebuild, so the counters
      accumulate only the bitmaps created by this function. */
   s_update_danger_set_count++;
   s_danger_set_bitmap_creation_count -= bm_get_bitmap_creation_count();
   s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();

#if 0
   if (s_danger_set)
   {
      bm_delete(s_danger_set);
      s_danger_set = 0;
   }
   s_danger_set = bm_new();
#else
   // Marginally faster than the above code.
   if (s_danger_set)
   {
      bm_clear_all(s_danger_set);
   }
   else
   {
      s_danger_set = bm_new();
   }
#endif

   for (p = s_threadinfo[tid].first; p; p = p->next)
   {
      unsigned j;

      /* Merge the accesses of every other thread's latest segment that is
         unordered (in neither direction) to segment p. */
      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
      {
         if (IsValidDrdThreadId(j))
         {
            const Segment* const q = s_threadinfo[j].last;
            if (j != tid && q != 0
                && ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
            {
               bm_merge2(s_danger_set, q->bm);
            }

         }
      }

      for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
      {
         if (IsValidDrdThreadId(j))
         {
            // NPTL hack: don't report data races on sizeof(struct pthread)
            // bytes at the top of the stack, since the NPTL functions access
            // this data without locking.
            if (s_threadinfo[j].stack_min != 0)
            {
               tl_assert(s_threadinfo[j].stack_startup != 0);
               if (s_threadinfo[j].stack_min < s_threadinfo[j].stack_startup)
               {
                  bm_clear(s_danger_set,
                           s_threadinfo[j].stack_min,
                           s_threadinfo[j].stack_startup);
               }
            }
         }
      }
   }

   s_danger_set_bitmap_creation_count += bm_get_bitmap_creation_count();
   s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();

#if 0
   VG_(message)(Vg_DebugMsg, "[%d] new danger set:", tid);
   bm_print(s_danger_set);
   VG_(message)(Vg_DebugMsg, "[%d] end of new danger set.", tid);
#endif
}
1041
1042Bool thread_conflicting_access(const Addr a,
1043 const SizeT size,
1044 const BmAccessTypeT access_type)
1045{
1046 tl_assert(s_danger_set);
1047 return (bm_has_conflict_with(s_danger_set, a, a + size, access_type)
1048 && ! drd_is_suppressed(a, a + size));
1049}
1050
1051ULong thread_get_context_switch_count(void)
1052{
1053 return s_context_switch_count;
1054}
1055
#ifdef OLD_RACE_DETECTION_ALGORITHM
/** Return how often thread_report_races() has been called. */
ULong thread_get_report_races_count(void)
{
   return s_report_races_count;
}
#endif
1062
1063ULong thread_get_discard_ordered_segments_count(void)
1064{
1065 return s_discard_ordered_segments_count;
1066}
1067
1068ULong thread_get_update_danger_set_count(void)
1069{
1070 return s_update_danger_set_count;
1071}
1072
1073ULong thread_get_danger_set_bitmap_creation_count(void)
1074{
1075 return s_danger_set_bitmap_creation_count;
1076}
1077
1078ULong thread_get_danger_set_bitmap2_creation_count(void)
1079{
1080 return s_danger_set_bitmap2_creation_count;
1081}
1082
1083/*
1084 * Local variables:
1085 * c-basic-offset: 3
1086 * End:
1087 */