blob: 19a8fcc41102d7001561f64354676fc7a6471ef6 [file] [log] [blame]
/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2008 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/
24
25
26#include "drd_error.h"
27#include "drd_segment.h"
28#include "drd_suppression.h"
29#include "drd_thread.h"
sewardjaf44c822007-11-25 14:01:38 +000030#include "pub_tool_basics.h" // Addr, SizeT
31#include "pub_tool_errormgr.h" // VG_(unique_error)()
32#include "pub_tool_libcassert.h" // tl_assert()
33#include "pub_tool_libcbase.h" // VG_(strlen)()
34#include "pub_tool_libcprint.h" // VG_(printf)()
35#include "pub_tool_machine.h"
36#include "pub_tool_mallocfree.h" // VG_(malloc)(), VG_(free)()
sewardj85642922008-01-14 11:54:56 +000037#include "pub_tool_options.h" // VG_(clo_backtrace_size)
sewardjaf44c822007-11-25 14:01:38 +000038#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()
39
40
sewardjaf44c822007-11-25 14:01:38 +000041// Local functions.
42
43static void thread_append_segment(const DrdThreadId tid,
44 Segment* const sg);
45static void thread_update_danger_set(const DrdThreadId tid);
46
47
48// Local variables.
49
50static ULong s_context_switch_count;
51static ULong s_discard_ordered_segments_count;
sewardjaf44c822007-11-25 14:01:38 +000052static ULong s_update_danger_set_count;
53static ULong s_danger_set_bitmap_creation_count;
54static ULong s_danger_set_bitmap2_creation_count;
sewardj8b09d4f2007-12-04 21:27:18 +000055static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
bartf00a85b2008-03-13 18:49:23 +000056DrdThreadId s_drd_running_tid = DRD_INVALID_THREADID;
57ThreadInfo s_threadinfo[DRD_N_THREADS];
bart1a473c72008-03-13 19:03:38 +000058struct bitmap* s_danger_set;
bart26f73e12008-02-24 18:37:08 +000059static Bool s_trace_context_switches = False;
60static Bool s_trace_danger_set = False;
sewardjaf44c822007-11-25 14:01:38 +000061
62
63// Function definitions.
64
bart26f73e12008-02-24 18:37:08 +000065void thread_trace_context_switches(const Bool t)
66{
67 s_trace_context_switches = t;
68}
69
70void thread_trace_danger_set(const Bool t)
71{
72 s_trace_danger_set = t;
73}
74
sewardjaf44c822007-11-25 14:01:38 +000075__inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
76{
77 return (0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
78 && ! (s_threadinfo[tid].vg_thread_exists == False
79 && s_threadinfo[tid].posix_thread_exists == False
80 && s_threadinfo[tid].detached_posix_thread == False));
81}
82
83/**
84 * Convert Valgrind's ThreadId into a DrdThreadId. Report failure if
85 * Valgrind's ThreadId does not yet exist.
86 **/
87DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
88{
89 int i;
90
91 if (tid == VG_INVALID_THREADID)
92 return DRD_INVALID_THREADID;
93
94 for (i = 1; i < DRD_N_THREADS; i++)
95 {
96 if (s_threadinfo[i].vg_thread_exists == True
97 && s_threadinfo[i].vg_threadid == tid)
98 {
99 return i;
100 }
101 }
102
103 return DRD_INVALID_THREADID;
104}
105
106static
107DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
108{
109 int i;
110
111 tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);
112
113 for (i = 1; i < DRD_N_THREADS; i++)
114 {
115 if (s_threadinfo[i].vg_thread_exists == False
116 && s_threadinfo[i].posix_thread_exists == False
117 && s_threadinfo[i].detached_posix_thread == False)
118 {
119 s_threadinfo[i].vg_thread_exists = True;
120 s_threadinfo[i].vg_threadid = tid;
121 s_threadinfo[i].pt_threadid = INVALID_POSIX_THREADID;
122 s_threadinfo[i].stack_min_min = 0;
123 s_threadinfo[i].stack_min = 0;
124 s_threadinfo[i].stack_startup = 0;
125 s_threadinfo[i].stack_max = 0;
126 VG_(snprintf)(s_threadinfo[i].name, sizeof(s_threadinfo[i].name),
127 "thread %d", tid);
128 s_threadinfo[i].name[sizeof(s_threadinfo[i].name) - 1] = 0;
bart0268dfa2008-03-11 20:10:21 +0000129 s_threadinfo[i].is_recording = True;
130 s_threadinfo[i].synchr_nesting = 0;
sewardjaf44c822007-11-25 14:01:38 +0000131 if (s_threadinfo[i].first != 0)
132 VG_(printf)("drd thread id = %d\n", i);
133 tl_assert(s_threadinfo[i].first == 0);
134 tl_assert(s_threadinfo[i].last == 0);
135 return i;
136 }
137 }
138
139 tl_assert(False);
140
141 return DRD_INVALID_THREADID;
142}
143
144DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
145{
146 int i;
147
148 tl_assert(tid != INVALID_POSIX_THREADID);
149
150 for (i = 1; i < DRD_N_THREADS; i++)
151 {
152 if (s_threadinfo[i].posix_thread_exists
153 && s_threadinfo[i].pt_threadid == tid)
154 {
155 return i;
156 }
157 }
158 return DRD_INVALID_THREADID;
159}
160
161ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
162{
163 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
164 return (s_threadinfo[tid].vg_thread_exists
165 ? s_threadinfo[tid].vg_threadid
166 : VG_INVALID_THREADID);
167}
168
bart26f73e12008-02-24 18:37:08 +0000169/** Sanity check of the doubly linked list of segments referenced by a
170 * ThreadInfo struct.
171 * @return True if sane, False if not.
sewardjaf44c822007-11-25 14:01:38 +0000172 */
173static Bool sane_ThreadInfo(const ThreadInfo* const ti)
174{
175 Segment* p;
176 for (p = ti->first; p; p = p->next) {
177 if (p->next && p->next->prev != p)
178 return False;
179 if (p->next == 0 && p != ti->last)
180 return False;
181 }
182 for (p = ti->last; p; p = p->prev) {
183 if (p->prev && p->prev->next != p)
184 return False;
185 if (p->prev == 0 && p != ti->first)
186 return False;
187 }
188 return True;
189}
190
191DrdThreadId thread_pre_create(const DrdThreadId creator,
192 const ThreadId vg_created)
193{
194 DrdThreadId created;
195
196 tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
197 created = VgThreadIdToNewDrdThreadId(vg_created);
198 tl_assert(0 <= created && created < DRD_N_THREADS
199 && created != DRD_INVALID_THREADID);
200
201 tl_assert(s_threadinfo[created].first == 0);
202 tl_assert(s_threadinfo[created].last == 0);
203 thread_append_segment(created, sg_new(creator, created));
204
205 return created;
206}
207
bart26f73e12008-02-24 18:37:08 +0000208/** Allocate the first segment for a thread. Call this just after
209 * pthread_create().
sewardjaf44c822007-11-25 14:01:38 +0000210 */
211DrdThreadId thread_post_create(const ThreadId vg_created)
212{
213 const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);
214
215 tl_assert(0 <= created && created < DRD_N_THREADS
216 && created != DRD_INVALID_THREADID);
217
218 s_threadinfo[created].stack_max = VG_(thread_get_stack_max)(vg_created);
219 s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
220 s_threadinfo[created].stack_min = s_threadinfo[created].stack_max;
221 s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
222 tl_assert(s_threadinfo[created].stack_max != 0);
223
224 return created;
225}
226
227/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack, */
228/* and accesses this data structure from multiple threads without locking. */
229/* Any conflicting accesses in the range stack_startup..stack_max will be */
230/* ignored. */
231void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
232{
233#if 0
234 VG_(message)(Vg_DebugMsg, "thread_set_stack_startup: thread %d (%d)"
235 " stack 0x%x .. 0x%lx (size %d)",
236 s_threadinfo[tid].vg_threadid, tid,
237 stack_startup,
238 s_threadinfo[tid].stack_max,
239 s_threadinfo[tid].stack_max - stack_startup);
240#endif
241 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
242 tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
243 tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
244 s_threadinfo[tid].stack_startup = stack_startup;
245}
246
247Addr thread_get_stack_min(const DrdThreadId tid)
248{
249 tl_assert(0 <= tid && tid < DRD_N_THREADS
250 && tid != DRD_INVALID_THREADID);
251 return s_threadinfo[tid].stack_min;
252}
253
254void thread_set_stack_min(const DrdThreadId tid, const Addr stack_min)
255{
256#if 0
257 VG_(message)(Vg_DebugMsg, "thread %d (%d) stack_min = 0x%x"
258 " (size %d, max %d, delta %d)",
259 s_threadinfo[tid].vg_threadid, tid,
260 stack_min,
261 s_threadinfo[tid].stack_max - stack_min,
262 s_threadinfo[tid].stack_max - s_threadinfo[tid].stack_min_min,
263 s_threadinfo[tid].stack_min - stack_min);
264#endif
265 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
266 if (s_threadinfo[tid].stack_max)
267 {
268 s_threadinfo[tid].stack_min = stack_min;
269 if (stack_min < s_threadinfo[tid].stack_min_min)
270 {
271 s_threadinfo[tid].stack_min_min = stack_min;
272 }
273 tl_assert(s_threadinfo[tid].stack_min_min
274 <= s_threadinfo[tid].stack_min);
275 tl_assert(s_threadinfo[tid].stack_min < s_threadinfo[tid].stack_max);
276 }
277}
278
279DrdThreadId thread_lookup_stackaddr(const Addr a,
280 Addr* const stack_min,
281 Addr* const stack_max)
282{
283 unsigned i;
284 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
285 {
286 if (s_threadinfo[i].stack_min <= a && a <= s_threadinfo[i].stack_max)
287 {
288 *stack_min = s_threadinfo[i].stack_min;
289 *stack_max = s_threadinfo[i].stack_max;
290 return i;
291 }
292 }
293 return DRD_INVALID_THREADID;
294}
295
296/**
297 * Clean up thread-specific data structures. Call this just after
298 * pthread_join().
299 */
300void thread_delete(const DrdThreadId tid)
301{
302 Segment* sg;
303 Segment* sg_prev;
304
305 tl_assert(0 <= tid && tid < DRD_N_THREADS
306 && tid != DRD_INVALID_THREADID);
bart0268dfa2008-03-11 20:10:21 +0000307 tl_assert(s_threadinfo[tid].synchr_nesting == 0);
sewardjaf44c822007-11-25 14:01:38 +0000308 for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
309 {
310 sg_prev = sg->prev;
311 sg_delete(sg);
312 }
313 s_threadinfo[tid].vg_thread_exists = False;
314 s_threadinfo[tid].posix_thread_exists = False;
315 tl_assert(s_threadinfo[tid].detached_posix_thread == False);
316 s_threadinfo[tid].first = 0;
317 s_threadinfo[tid].last = 0;
318}
319
320/* Called after a thread performed its last memory access and before */
321/* thread_delete() is called. Note: thread_delete() is only called for */
322/* joinable threads, not for detached threads. */
323void thread_finished(const DrdThreadId tid)
324{
325 tl_assert(0 <= tid && tid < DRD_N_THREADS
326 && tid != DRD_INVALID_THREADID);
327
328 thread_stop_using_mem(s_threadinfo[tid].stack_min,
329 s_threadinfo[tid].stack_max);
330
331 s_threadinfo[tid].vg_thread_exists = False;
332
333 if (s_threadinfo[tid].detached_posix_thread)
334 {
335 /* Once a detached thread has finished, its stack is deallocated and */
336 /* should no longer be taken into account when computing the danger set*/
337 s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;
338
339 /* For a detached thread, calling pthread_exit() invalidates the */
340 /* POSIX thread ID associated with the detached thread. For joinable */
341 /* POSIX threads however, the POSIX thread ID remains live after the */
342 /* pthread_exit() call until pthread_join() is called. */
343 s_threadinfo[tid].posix_thread_exists = False;
344 }
345}
346
347void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
348{
349 tl_assert(0 <= tid && tid < DRD_N_THREADS
350 && tid != DRD_INVALID_THREADID);
351 tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
352 tl_assert(ptid != INVALID_POSIX_THREADID);
353 s_threadinfo[tid].posix_thread_exists = True;
354 s_threadinfo[tid].pt_threadid = ptid;
355}
356
357Bool thread_get_joinable(const DrdThreadId tid)
358{
359 tl_assert(0 <= tid && tid < DRD_N_THREADS
360 && tid != DRD_INVALID_THREADID);
361 return ! s_threadinfo[tid].detached_posix_thread;
362}
363
364void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
365{
366 tl_assert(0 <= tid && tid < DRD_N_THREADS
367 && tid != DRD_INVALID_THREADID);
368 tl_assert(!! joinable == joinable);
369 tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
370#if 0
371 VG_(message)(Vg_DebugMsg,
372 "thread_set_joinable(%d/%d, %s)",
373 tid,
374 s_threadinfo[tid].vg_threadid,
375 joinable ? "joinable" : "detached");
376#endif
377 s_threadinfo[tid].detached_posix_thread = ! joinable;
378}
379
380const char* thread_get_name(const DrdThreadId tid)
381{
382 tl_assert(0 <= tid && tid < DRD_N_THREADS
383 && tid != DRD_INVALID_THREADID);
384 return s_threadinfo[tid].name;
385}
386
387void thread_set_name(const DrdThreadId tid, const char* const name)
388{
389 tl_assert(0 <= tid && tid < DRD_N_THREADS
390 && tid != DRD_INVALID_THREADID);
391 VG_(strncpy)(s_threadinfo[tid].name, name,
392 sizeof(s_threadinfo[tid].name));
393 s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
394}
395
396void thread_set_name_fmt(const DrdThreadId tid, const char* const fmt,
397 const UWord arg)
398{
399 tl_assert(0 <= tid && tid < DRD_N_THREADS
400 && tid != DRD_INVALID_THREADID);
401 VG_(snprintf)(s_threadinfo[tid].name, sizeof(s_threadinfo[tid].name),
402 fmt, arg);
403 s_threadinfo[tid].name[sizeof(s_threadinfo[tid].name) - 1] = 0;
404}
sewardj8b09d4f2007-12-04 21:27:18 +0000405
sewardjaf44c822007-11-25 14:01:38 +0000406DrdThreadId thread_get_running_tid(void)
407{
sewardj8b09d4f2007-12-04 21:27:18 +0000408 tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
409 return s_drd_running_tid;
sewardjaf44c822007-11-25 14:01:38 +0000410}
411
sewardj8b09d4f2007-12-04 21:27:18 +0000412void thread_set_vg_running_tid(const ThreadId vg_tid)
sewardjaf44c822007-11-25 14:01:38 +0000413{
tom7c1a19a2008-01-02 10:13:04 +0000414 tl_assert(vg_tid != VG_INVALID_THREADID);
sewardj8b09d4f2007-12-04 21:27:18 +0000415
416 if (vg_tid != s_vg_running_tid)
417 {
418 thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
419 }
420
421 tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
422 tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
423}
424
425void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
426{
sewardj8b09d4f2007-12-04 21:27:18 +0000427 tl_assert(vg_tid != VG_INVALID_THREADID);
428 tl_assert(drd_tid != DRD_INVALID_THREADID);
429
430 if (vg_tid != s_vg_running_tid)
431 {
bart26f73e12008-02-24 18:37:08 +0000432 if (s_trace_context_switches
433 && s_drd_running_tid != DRD_INVALID_THREADID)
434 {
435 VG_(message)(Vg_DebugMsg,
436 "Context switch from thread %d to thread %d",
437 s_drd_running_tid, drd_tid);
438 }
sewardj8b09d4f2007-12-04 21:27:18 +0000439 s_vg_running_tid = vg_tid;
440 s_drd_running_tid = drd_tid;
441 thread_update_danger_set(drd_tid);
442 s_context_switch_count++;
443 }
444
445 tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
446 tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
sewardjaf44c822007-11-25 14:01:38 +0000447}
448
bart0268dfa2008-03-11 20:10:21 +0000449int thread_enter_synchr(const DrdThreadId tid)
450{
451 tl_assert(IsValidDrdThreadId(tid));
452 return s_threadinfo[tid].synchr_nesting++;
453}
454
455int thread_leave_synchr(const DrdThreadId tid)
456{
457 tl_assert(IsValidDrdThreadId(tid));
458 tl_assert(s_threadinfo[tid].synchr_nesting >= 1);
459 return --s_threadinfo[tid].synchr_nesting;
460}
461
462int thread_get_synchr_nesting_count(const DrdThreadId tid)
463{
464 tl_assert(IsValidDrdThreadId(tid));
465 return s_threadinfo[tid].synchr_nesting;
466}
467
bart1a473c72008-03-13 19:03:38 +0000468/** Append a new segment at the end of the segment list. */
bart26f73e12008-02-24 18:37:08 +0000469static void thread_append_segment(const DrdThreadId tid, Segment* const sg)
sewardjaf44c822007-11-25 14:01:38 +0000470{
471 tl_assert(0 <= tid && tid < DRD_N_THREADS
472 && tid != DRD_INVALID_THREADID);
473 tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
474 sg->prev = s_threadinfo[tid].last;
475 sg->next = 0;
476 if (s_threadinfo[tid].last)
477 s_threadinfo[tid].last->next = sg;
478 s_threadinfo[tid].last = sg;
479 if (s_threadinfo[tid].first == 0)
480 s_threadinfo[tid].first = sg;
481 tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
482}
483
bart26f73e12008-02-24 18:37:08 +0000484/** Remove a segment from the segment list of thread threadid, and free the
485 * associated memory.
sewardjaf44c822007-11-25 14:01:38 +0000486 */
bart26f73e12008-02-24 18:37:08 +0000487static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
sewardjaf44c822007-11-25 14:01:38 +0000488{
489 tl_assert(0 <= tid && tid < DRD_N_THREADS
490 && tid != DRD_INVALID_THREADID);
491 tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
bart26f73e12008-02-24 18:37:08 +0000492
sewardjaf44c822007-11-25 14:01:38 +0000493 if (sg->prev)
494 sg->prev->next = sg->next;
495 if (sg->next)
496 sg->next->prev = sg->prev;
497 if (sg == s_threadinfo[tid].first)
498 s_threadinfo[tid].first = sg->next;
499 if (sg == s_threadinfo[tid].last)
500 s_threadinfo[tid].last = sg->prev;
501 sg_delete(sg);
502 tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
503}
504
505VectorClock* thread_get_vc(const DrdThreadId tid)
506{
507 tl_assert(0 <= tid && tid < DRD_N_THREADS
508 && tid != DRD_INVALID_THREADID);
509 tl_assert(s_threadinfo[tid].last);
510 return &s_threadinfo[tid].last->vc;
511}
512
513/**
514 * Compute the minimum of all latest vector clocks of all threads
515 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
516 * @param vc pointer to a vectorclock, holds result upon return.
517 */
518static void thread_compute_minimum_vc(VectorClock* vc)
519{
bart2cf220a2008-03-01 07:35:52 +0000520 unsigned i;
sewardjaf44c822007-11-25 14:01:38 +0000521 Bool first;
522 Segment* latest_sg;
523
524 first = True;
525 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
526 {
527 latest_sg = s_threadinfo[i].last;
528 if (latest_sg)
529 {
530 if (first)
bart26f73e12008-02-24 18:37:08 +0000531 vc_assign(vc, &latest_sg->vc);
sewardjaf44c822007-11-25 14:01:38 +0000532 else
533 vc_min(vc, &latest_sg->vc);
534 first = False;
535 }
536 }
537}
538
539static void thread_compute_maximum_vc(VectorClock* vc)
540{
bart2cf220a2008-03-01 07:35:52 +0000541 unsigned i;
sewardjaf44c822007-11-25 14:01:38 +0000542 Bool first;
543 Segment* latest_sg;
544
545 first = True;
546 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
547 {
548 latest_sg = s_threadinfo[i].last;
549 if (latest_sg)
550 {
551 if (first)
bart26f73e12008-02-24 18:37:08 +0000552 vc_assign(vc, &latest_sg->vc);
sewardjaf44c822007-11-25 14:01:38 +0000553 else
554 vc_combine(vc, &latest_sg->vc);
555 first = False;
556 }
557 }
558}
559
560/**
bart5bd9f2d2008-03-03 20:31:58 +0000561 * Discard all segments that have a defined order against the latest vector
sewardjaf44c822007-11-25 14:01:38 +0000562 * clock of every thread -- these segments can no longer be involved in a
563 * data race.
564 */
565static void thread_discard_ordered_segments(void)
566{
bart2cf220a2008-03-01 07:35:52 +0000567 unsigned i;
sewardjaf44c822007-11-25 14:01:38 +0000568 VectorClock thread_vc_min;
sewardjaf44c822007-11-25 14:01:38 +0000569
570 s_discard_ordered_segments_count++;
571
572 vc_init(&thread_vc_min, 0, 0);
573 thread_compute_minimum_vc(&thread_vc_min);
574 if (sg_get_trace())
575 {
576 char msg[256];
577 VectorClock thread_vc_max;
578
579 vc_init(&thread_vc_max, 0, 0);
580 thread_compute_maximum_vc(&thread_vc_max);
581 VG_(snprintf)(msg, sizeof(msg),
582 "Discarding ordered segments -- min vc is ");
583 vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
584 &thread_vc_min);
585 VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
586 ", max vc is ");
587 vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
588 &thread_vc_max);
589 VG_(message)(Vg_DebugMsg, "%s", msg);
590 vc_cleanup(&thread_vc_max);
591 }
592
593 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
594 {
595 Segment* sg;
596 Segment* sg_next;
597 for (sg = s_threadinfo[i].first;
598 sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
599 sg = sg_next)
600 {
sewardjaf44c822007-11-25 14:01:38 +0000601 thread_discard_segment(i, sg);
602 }
603 }
604 vc_cleanup(&thread_vc_min);
605}
606
607/**
608 * Create a new segment for the specified thread, and report all data races
609 * of the most recent thread segment with other threads.
610 */
611void thread_new_segment(const DrdThreadId tid)
612{
sewardjaf44c822007-11-25 14:01:38 +0000613 Segment* sg;
614
615 tl_assert(0 <= tid && tid < DRD_N_THREADS
616 && tid != DRD_INVALID_THREADID);
617
sewardjaf44c822007-11-25 14:01:38 +0000618 sg = sg_new(tid, tid);
619 thread_append_segment(tid, sg);
620
621 thread_discard_ordered_segments();
bart26f73e12008-02-24 18:37:08 +0000622
623 if (tid == s_drd_running_tid)
624 {
625 /* Every change in the vector clock of the current thread may cause */
626 /* segments that were previously ordered to this thread to become */
627 /* unordered. Hence, recalculate the danger set if the vector clock */
628 /* of the current thread is updated. */
629 thread_update_danger_set(tid);
630 }
sewardjaf44c822007-11-25 14:01:38 +0000631}
632
bart26f73e12008-02-24 18:37:08 +0000633/** Call this function after thread 'joiner' joined thread 'joinee'. */
sewardjaf44c822007-11-25 14:01:38 +0000634void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
635{
636 tl_assert(joiner != joinee);
637 tl_assert(0 <= joiner && joiner < DRD_N_THREADS
638 && joiner != DRD_INVALID_THREADID);
639 tl_assert(0 <= joinee && joinee < DRD_N_THREADS
640 && joinee != DRD_INVALID_THREADID);
641 tl_assert(s_threadinfo[joiner].last);
642 tl_assert(s_threadinfo[joinee].last);
643 vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
644 thread_discard_ordered_segments();
645
sewardj8b09d4f2007-12-04 21:27:18 +0000646 if (joiner == s_drd_running_tid)
sewardjaf44c822007-11-25 14:01:38 +0000647 {
648 thread_update_danger_set(joiner);
649 }
650}
651
bart26f73e12008-02-24 18:37:08 +0000652/** Call this function after thread 'tid' had to wait because of thread
653 * synchronization until the memory accesses in the segment with vector clock
654 * 'vc' finished.
655 */
sewardjaf44c822007-11-25 14:01:38 +0000656void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
657{
658 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
659 tl_assert(s_threadinfo[tid].last);
660 tl_assert(vc);
661 vc_combine(&s_threadinfo[tid].last->vc, vc);
662 thread_discard_ordered_segments();
663}
664
bart26f73e12008-02-24 18:37:08 +0000665/** Call this function whenever a thread is no longer using the memory
666 * [ a1, a2 [, e.g. because of a call to free() or a stack pointer
667 * increase.
668 */
sewardjaf44c822007-11-25 14:01:38 +0000669void thread_stop_using_mem(const Addr a1, const Addr a2)
670{
671 DrdThreadId other_user = DRD_INVALID_THREADID;
672
bart26f73e12008-02-24 18:37:08 +0000673 /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
sewardjaf44c822007-11-25 14:01:38 +0000674
675 unsigned i;
676 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
677 {
678 Segment* p;
679 for (p = s_threadinfo[i].first; p; p = p->next)
680 {
681 if (other_user == DRD_INVALID_THREADID
sewardj8b09d4f2007-12-04 21:27:18 +0000682 && i != s_drd_running_tid
sewardjaf44c822007-11-25 14:01:38 +0000683 && bm_has_any_access(p->bm, a1, a2))
684 {
685 other_user = i;
686 }
687 bm_clear(p->bm, a1, a2);
688 }
689 }
690
bart26f73e12008-02-24 18:37:08 +0000691 /* If any other thread had accessed memory in [ a1, a2 [, update the */
sewardjaf44c822007-11-25 14:01:38 +0000692 /* danger set. */
693 if (other_user != DRD_INVALID_THREADID
694 && bm_has_any_access(s_danger_set, a1, a2))
695 {
sewardjaf44c822007-11-25 14:01:38 +0000696 thread_update_danger_set(thread_get_running_tid());
697 }
698}
699
bart0268dfa2008-03-11 20:10:21 +0000700void thread_start_recording(const DrdThreadId tid)
701{
702 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
703 tl_assert(! s_threadinfo[tid].is_recording);
704 s_threadinfo[tid].is_recording = True;
705}
706
707void thread_stop_recording(const DrdThreadId tid)
708{
709 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
710 tl_assert(s_threadinfo[tid].is_recording);
711 s_threadinfo[tid].is_recording = False;
712}
713
sewardjaf44c822007-11-25 14:01:38 +0000714void thread_print_all(void)
715{
716 unsigned i;
717 Segment* p;
718
719 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
720 {
721 if (s_threadinfo[i].first)
722 {
723 VG_(printf)("**************\n"
724 "* thread %3d (%d/%d/%d/0x%x/%d/%s) *\n"
725 "**************\n",
726 i,
727 s_threadinfo[i].vg_thread_exists,
728 s_threadinfo[i].vg_threadid,
729 s_threadinfo[i].posix_thread_exists,
730 s_threadinfo[i].pt_threadid,
731 s_threadinfo[i].detached_posix_thread,
732 s_threadinfo[i].name);
733 for (p = s_threadinfo[i].first; p; p = p->next)
734 {
735 sg_print(p);
736 }
737 }
738 }
739}
740
741static void show_call_stack(const DrdThreadId tid,
742 const Char* const msg,
743 ExeContext* const callstack)
744{
745 const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);
746
747 VG_(message)(Vg_UserMsg,
748 "%s (%s)",
749 msg,
750 thread_get_name(tid));
751
752 if (vg_tid != VG_INVALID_THREADID)
753 {
754 if (callstack)
755 {
756 VG_(pp_ExeContext)(callstack);
757 }
758 else
759 {
760 VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
761 }
762 }
763 else
764 {
765 VG_(message)(Vg_UserMsg,
766 " (thread finished, call stack no longer available)");
767 }
768}
769
sewardjaf44c822007-11-25 14:01:38 +0000770static void
771thread_report_conflicting_segments_segment(const DrdThreadId tid,
772 const Addr addr,
773 const SizeT size,
774 const BmAccessTypeT access_type,
775 const Segment* const p)
776{
777 unsigned i;
778
779 tl_assert(0 <= tid && tid < DRD_N_THREADS
780 && tid != DRD_INVALID_THREADID);
781 tl_assert(p);
782
783 for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
784 {
785 if (i != tid)
786 {
787 Segment* q;
788 for (q = s_threadinfo[i].last; q; q = q->prev)
789 {
790 // Since q iterates over the segments of thread i in order of
791 // decreasing vector clocks, if q->vc <= p->vc, then
792 // q->next->vc <= p->vc will also hold. Hence, break out of the
793 // loop once this condition is met.
794 if (vc_lte(&q->vc, &p->vc))
795 break;
796 if (! vc_lte(&p->vc, &q->vc))
797 {
798 if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
799 {
800 tl_assert(q->stacktrace);
801 show_call_stack(i, "Other segment start",
802 q->stacktrace);
803 show_call_stack(i, "Other segment end",
804 q->next ? q->next->stacktrace : 0);
805 }
806 }
807 }
808 }
809 }
810}
811
812void thread_report_conflicting_segments(const DrdThreadId tid,
813 const Addr addr,
814 const SizeT size,
815 const BmAccessTypeT access_type)
816{
817 Segment* p;
818
819 tl_assert(0 <= tid && tid < DRD_N_THREADS
820 && tid != DRD_INVALID_THREADID);
821
822 for (p = s_threadinfo[tid].first; p; p = p->next)
823 {
824 if (bm_has(p->bm, addr, addr + size, access_type))
825 {
826 thread_report_conflicting_segments_segment(tid, addr, size,
827 access_type, p);
828 }
829 }
830}
sewardjaf44c822007-11-25 14:01:38 +0000831
bart26f73e12008-02-24 18:37:08 +0000832/** Compute a bitmap that represents the union of all memory accesses of all
833 * segments that are unordered to the current segment of the thread tid.
sewardjaf44c822007-11-25 14:01:38 +0000834 */
835static void thread_update_danger_set(const DrdThreadId tid)
836{
837 Segment* p;
838
bart26f73e12008-02-24 18:37:08 +0000839 tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
sewardj8b09d4f2007-12-04 21:27:18 +0000840 tl_assert(tid == s_drd_running_tid);
sewardjaf44c822007-11-25 14:01:38 +0000841
842 s_update_danger_set_count++;
843 s_danger_set_bitmap_creation_count -= bm_get_bitmap_creation_count();
844 s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();
845
sewardjaf44c822007-11-25 14:01:38 +0000846 if (s_danger_set)
847 {
848 bm_clear_all(s_danger_set);
849 }
850 else
851 {
852 s_danger_set = bm_new();
853 }
bart26f73e12008-02-24 18:37:08 +0000854
855 if (s_trace_danger_set)
856 {
857 char msg[256];
858
859 VG_(snprintf)(msg, sizeof(msg),
860 "computing danger set for thread %d with vc ",
861 tid);
862 vc_snprint(msg + VG_(strlen)(msg),
863 sizeof(msg) - VG_(strlen)(msg),
864 &s_threadinfo[tid].last->vc);
865 VG_(message)(Vg_DebugMsg, "%s", msg);
866 }
sewardjaf44c822007-11-25 14:01:38 +0000867
bart5bd9f2d2008-03-03 20:31:58 +0000868 p = s_threadinfo[tid].last;
sewardjaf44c822007-11-25 14:01:38 +0000869 {
870 unsigned j;
871
bart26f73e12008-02-24 18:37:08 +0000872 if (s_trace_danger_set)
873 {
874 char msg[256];
875
876 VG_(snprintf)(msg, sizeof(msg),
877 "danger set: thread [%d] at vc ",
878 tid);
879 vc_snprint(msg + VG_(strlen)(msg),
880 sizeof(msg) - VG_(strlen)(msg),
881 &p->vc);
882 VG_(message)(Vg_DebugMsg, "%s", msg);
883 }
884
sewardjaf44c822007-11-25 14:01:38 +0000885 for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
886 {
887 if (IsValidDrdThreadId(j))
888 {
bart5bd9f2d2008-03-03 20:31:58 +0000889 const Segment* q;
890 for (q = s_threadinfo[j].last; q; q = q->prev)
sewardjaf44c822007-11-25 14:01:38 +0000891 if (j != tid && q != 0
892 && ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
893 {
bart26f73e12008-02-24 18:37:08 +0000894 if (s_trace_danger_set)
895 {
896 char msg[256];
897 VG_(snprintf)(msg, sizeof(msg),
898 "danger set: [%d] merging segment ", j);
899 vc_snprint(msg + VG_(strlen)(msg),
900 sizeof(msg) - VG_(strlen)(msg),
901 &q->vc);
902 VG_(message)(Vg_DebugMsg, "%s", msg);
903 }
sewardjaf44c822007-11-25 14:01:38 +0000904 bm_merge2(s_danger_set, q->bm);
905 }
bart26f73e12008-02-24 18:37:08 +0000906 else
907 {
908 if (s_trace_danger_set)
909 {
910 char msg[256];
911 VG_(snprintf)(msg, sizeof(msg),
912 "danger set: [%d] ignoring segment ", j);
913 vc_snprint(msg + VG_(strlen)(msg),
914 sizeof(msg) - VG_(strlen)(msg),
915 &q->vc);
916 VG_(message)(Vg_DebugMsg, "%s", msg);
917 }
918 }
sewardjaf44c822007-11-25 14:01:38 +0000919 }
920 }
921
922 for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
923 {
924 if (IsValidDrdThreadId(j))
925 {
926 // NPTL hack: don't report data races on sizeof(struct pthread)
927 // bytes at the top of the stack, since the NPTL functions access
928 // this data without locking.
929 if (s_threadinfo[j].stack_min != 0)
930 {
931 tl_assert(s_threadinfo[j].stack_startup != 0);
932 if (s_threadinfo[j].stack_min < s_threadinfo[j].stack_startup)
933 {
934 bm_clear(s_danger_set,
935 s_threadinfo[j].stack_min,
936 s_threadinfo[j].stack_startup);
937 }
938 }
939 }
940 }
941 }
942
943 s_danger_set_bitmap_creation_count += bm_get_bitmap_creation_count();
944 s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();
945
bart26f73e12008-02-24 18:37:08 +0000946 if (0 && s_trace_danger_set)
947 {
948 VG_(message)(Vg_DebugMsg, "[%d] new danger set:", tid);
949 bm_print(s_danger_set);
950 VG_(message)(Vg_DebugMsg, "[%d] end of new danger set.", tid);
951 }
sewardjaf44c822007-11-25 14:01:38 +0000952}
953
sewardjaf44c822007-11-25 14:01:38 +0000954ULong thread_get_context_switch_count(void)
955{
956 return s_context_switch_count;
957}
958
sewardjaf44c822007-11-25 14:01:38 +0000959ULong thread_get_discard_ordered_segments_count(void)
960{
961 return s_discard_ordered_segments_count;
962}
963
964ULong thread_get_update_danger_set_count(void)
965{
966 return s_update_danger_set_count;
967}
968
969ULong thread_get_danger_set_bitmap_creation_count(void)
970{
971 return s_danger_set_bitmap_creation_count;
972}
973
974ULong thread_get_danger_set_bitmap2_creation_count(void)
975{
976 return s_danger_set_bitmap2_creation_count;
977}
978
979/*
980 * Local variables:
981 * c-basic-offset: 3
982 * End:
983 */