/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2008 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_segment.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_tool_vki.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_errormgr.h"    // VG_(unique_error)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strlen)()
#include "pub_tool_libcprint.h"   // VG_(printf)()
#include "pub_tool_libcproc.h"    // VG_(getenv)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()

/* Include the drd_bitmap.c source file here to allow the compiler to */
/* inline the bitmap manipulation functions called from this source file. */
#include "drd_bitmap.c"


// Local functions.

static void thread_append_segment(const DrdThreadId tid,
                                  Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static Bool thread_danger_set_up_to_date(const DrdThreadId tid);
static void thread_compute_danger_set(struct bitmap** danger_set,
                                      const DrdThreadId tid);


// Local variables.

static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_update_danger_set_count;
static ULong s_danger_set_new_segment_count;
static ULong s_danger_set_combine_vc_count;
static ULong s_danger_set_bitmap_creation_count;
static ULong s_danger_set_bitmap2_creation_count;
static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
DrdThreadId s_drd_running_tid = DRD_INVALID_THREADID;
ThreadInfo s_threadinfo[DRD_N_THREADS];
struct bitmap* s_danger_set;
static Bool s_trace_context_switches = False;
static Bool s_trace_danger_set = False;
static Bool s_segment_merging = True;


// Function definitions.

void thread_trace_context_switches(const Bool t)
{
  s_trace_context_switches = t;
}

void thread_trace_danger_set(const Bool t)
{
  s_trace_danger_set = t;
}

void thread_set_segment_merging(const Bool m)
{
  s_segment_merging = m;
}

__inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
{
  return (0 <= (int)tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
          && ! (s_threadinfo[tid].vg_thread_exists == False
                && s_threadinfo[tid].posix_thread_exists == False
                && s_threadinfo[tid].detached_posix_thread == False));
}

/**
 * Convert Valgrind's ThreadId into a DrdThreadId. Returns DRD_INVALID_THREADID
 * if Valgrind's ThreadId does not yet exist.
 **/
DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
{
  int i;

  if (tid == VG_INVALID_THREADID)
    return DRD_INVALID_THREADID;

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].vg_thread_exists == True
        && s_threadinfo[i].vg_threadid == tid)
    {
      return i;
    }
  }

  return DRD_INVALID_THREADID;
}

static
DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
{
  int i;

  tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].vg_thread_exists == False
        && s_threadinfo[i].posix_thread_exists == False
        && s_threadinfo[i].detached_posix_thread == False)
    {
      s_threadinfo[i].vg_thread_exists = True;
      s_threadinfo[i].vg_threadid = tid;
      s_threadinfo[i].pt_threadid = INVALID_POSIX_THREADID;
      s_threadinfo[i].stack_min = 0;
      s_threadinfo[i].stack_min_min = 0;
      s_threadinfo[i].stack_startup = 0;
      s_threadinfo[i].stack_max = 0;
      s_threadinfo[i].is_recording = True;
      s_threadinfo[i].synchr_nesting = 0;
      if (s_threadinfo[i].first != 0)
        VG_(printf)("drd thread id = %d\n", i);
      tl_assert(s_threadinfo[i].first == 0);
      tl_assert(s_threadinfo[i].last == 0);
      return i;
    }
  }

  tl_assert(False);

  return DRD_INVALID_THREADID;
}

DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
{
  int i;

  tl_assert(tid != INVALID_POSIX_THREADID);

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].posix_thread_exists
        && s_threadinfo[i].pt_threadid == tid)
    {
      return i;
    }
  }
  return DRD_INVALID_THREADID;
}

ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return (s_threadinfo[tid].vg_thread_exists
          ? s_threadinfo[tid].vg_threadid
          : VG_INVALID_THREADID);
}

#if 0
/** Sanity check of the doubly linked list of segments referenced by a
 *  ThreadInfo struct.
 *  @return True if sane, False if not.
 */
static Bool sane_ThreadInfo(const ThreadInfo* const ti)
{
  Segment* p;
  for (p = ti->first; p; p = p->next) {
    if (p->next && p->next->prev != p)
      return False;
    if (p->next == 0 && p != ti->last)
      return False;
  }
  for (p = ti->last; p; p = p->prev) {
    if (p->prev && p->prev->next != p)
      return False;
    if (p->prev == 0 && p != ti->first)
      return False;
  }
  return True;
}
#endif

DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
  DrdThreadId created;

  tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
  created = VgThreadIdToNewDrdThreadId(vg_created);
  tl_assert(0 <= (int)created && created < DRD_N_THREADS
            && created != DRD_INVALID_THREADID);

  tl_assert(s_threadinfo[created].first == 0);
  tl_assert(s_threadinfo[created].last == 0);
  thread_append_segment(created, sg_new(creator, created));

  return created;
}

/** Allocate the first segment for a thread. Call this just after
 *  pthread_create().
 */
DrdThreadId thread_post_create(const ThreadId vg_created)
{
  const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);

  tl_assert(0 <= (int)created && created < DRD_N_THREADS
            && created != DRD_INVALID_THREADID);

  s_threadinfo[created].stack_max = VG_(thread_get_stack_max)(vg_created);
  s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_min = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_size = VG_(thread_get_stack_size)(vg_created);
  tl_assert(s_threadinfo[created].stack_max != 0);

  return created;
}

/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack, */
/* and accesses this data structure from multiple threads without locking. */
/* Any conflicting accesses in the range stack_startup..stack_max will be */
/* ignored. */
void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
  tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
  s_threadinfo[tid].stack_startup = stack_startup;
}
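
/* Illustration (editor's note; the addresses below are made up): if a
 * thread's stack occupies [0xb0000000, 0xb8000000) and NPTL has placed its
 * 'struct pthread' at the top of that range, stack_startup is set to the
 * lowest address of that structure, so the unlocked accesses NPTL performs
 * in the range stack_startup..stack_max are never reported as races.
 */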

Addr thread_get_stack_min(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_min;
}

Addr thread_get_stack_min_min(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_min_min;
}

Addr thread_get_stack_max(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_max;
}

SizeT thread_get_stack_size(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_size;
}

/** Clean up thread-specific data structures. Call this just after
 *  pthread_join().
 */
void thread_delete(const DrdThreadId tid)
{
  Segment* sg;
  Segment* sg_prev;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].synchr_nesting == 0);
  for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
  {
    sg_prev = sg->prev;
    sg->prev = 0;
    sg->next = 0;
    sg_put(sg);
  }
  s_threadinfo[tid].vg_thread_exists = False;
  s_threadinfo[tid].posix_thread_exists = False;
  tl_assert(s_threadinfo[tid].detached_posix_thread == False);
  s_threadinfo[tid].first = 0;
  s_threadinfo[tid].last = 0;
}

/* Called after a thread performed its last memory access and before */
/* thread_delete() is called. Note: thread_delete() is only called for */
/* joinable threads, not for detached threads. */
void thread_finished(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  s_threadinfo[tid].vg_thread_exists = False;

  if (s_threadinfo[tid].detached_posix_thread)
  {
    /* Once a detached thread has finished, its stack is deallocated and */
    /* should no longer be taken into account when computing the danger set. */
    s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;

    /* For a detached thread, calling pthread_exit() invalidates the */
    /* POSIX thread ID associated with the detached thread. For joinable */
    /* POSIX threads however, the POSIX thread ID remains live after the */
    /* pthread_exit() call until pthread_join() is called. */
    s_threadinfo[tid].posix_thread_exists = False;
  }
}

void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
  tl_assert(ptid != INVALID_POSIX_THREADID);
  s_threadinfo[tid].posix_thread_exists = True;
  s_threadinfo[tid].pt_threadid = ptid;
}

Bool thread_get_joinable(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return ! s_threadinfo[tid].detached_posix_thread;
}

void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(!! joinable == joinable);
  tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
#if 0
  VG_(message)(Vg_DebugMsg,
               "thread_set_joinable(%d/%d, %s)",
               tid,
               s_threadinfo[tid].vg_threadid,
               joinable ? "joinable" : "detached");
#endif
  s_threadinfo[tid].detached_posix_thread = ! joinable;
}

void thread_set_vg_running_tid(const ThreadId vg_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);
  tl_assert(drd_tid != DRD_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    if (s_trace_context_switches
        && s_drd_running_tid != DRD_INVALID_THREADID)
    {
      VG_(message)(Vg_DebugMsg,
                   "Context switch from thread %d/%d to thread %d/%d;"
                   " segments: %llu",
                   s_vg_running_tid, s_drd_running_tid,
                   DrdThreadIdToVgThreadId(drd_tid), drd_tid,
                   sg_get_alive_segments_count());
    }
    s_vg_running_tid = vg_tid;
    s_drd_running_tid = drd_tid;
    thread_compute_danger_set(&s_danger_set, drd_tid);
    s_context_switch_count++;
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

int thread_enter_synchr(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  return s_threadinfo[tid].synchr_nesting++;
}

int thread_leave_synchr(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  tl_assert(s_threadinfo[tid].synchr_nesting >= 1);
  return --s_threadinfo[tid].synchr_nesting;
}

int thread_get_synchr_nesting_count(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  return s_threadinfo[tid].synchr_nesting;
}
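
/* Usage sketch (editor's note, not part of the original code): the drd
 * intercepts for synchronization primitives are expected to bracket the
 * accesses they perform on the primitive's internals, e.g.
 *
 *   thread_enter_synchr(drd_tid);
 *   ... touch the pthread_mutex_t / pthread_cond_t implementation data ...
 *   thread_leave_synchr(drd_tid);
 *
 * so that a nonzero thread_get_synchr_nesting_count() can be used to
 * suppress race reports on those internal accesses.
 */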

/** Append a new segment at the end of the segment list. */
static void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  // tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
  sg->prev = s_threadinfo[tid].last;
  sg->next = 0;
  if (s_threadinfo[tid].last)
    s_threadinfo[tid].last->next = sg;
  s_threadinfo[tid].last = sg;
  if (s_threadinfo[tid].first == 0)
    s_threadinfo[tid].first = sg;
  // tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

/** Remove a segment from the segment list of thread 'tid', and free the
 *  associated memory.
 */
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));

  if (sg->prev)
    sg->prev->next = sg->next;
  if (sg->next)
    sg->next->prev = sg->prev;
  if (sg == s_threadinfo[tid].first)
    s_threadinfo[tid].first = sg->next;
  if (sg == s_threadinfo[tid].last)
    s_threadinfo[tid].last = sg->prev;
  sg_put(sg);

  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

VectorClock* thread_get_vc(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);
  return &s_threadinfo[tid].last->vc;
}

/** Return the latest segment of thread 'tid' and increment its reference
 *  count.
 */
void thread_get_latest_segment(Segment** sg, const DrdThreadId tid)
{
  tl_assert(sg);
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);

  sg_put(*sg);
  *sg = sg_get(s_threadinfo[tid].last);
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void thread_compute_minimum_vc(VectorClock* vc)
{
  unsigned i;
  Bool first;
  Segment* latest_sg;

  first = True;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    latest_sg = s_threadinfo[i].last;
    if (latest_sg)
    {
      if (first)
        vc_assign(vc, &latest_sg->vc);
      else
        vc_min(vc, &latest_sg->vc);
      first = False;
    }
  }
}

static void thread_compute_maximum_vc(VectorClock* vc)
{
  unsigned i;
  Bool first;
  Segment* latest_sg;

  first = True;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    latest_sg = s_threadinfo[i].last;
    if (latest_sg)
    {
      if (first)
        vc_assign(vc, &latest_sg->vc);
      else
        vc_combine(vc, &latest_sg->vc);
      first = False;
    }
  }
}
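
/* Background note (editor's addition): vc_lte() implements the partial order
 * on vector clocks that the code above and below relies on. For example, with
 * two threads, [1, 2] <= [2, 2], so a segment with vector clock [1, 2] is
 * ordered before one with vector clock [2, 2]; [1, 3] and [2, 2] are ordered
 * in neither direction, so accesses in the corresponding segments may race.
 */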

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of every thread -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
  unsigned i;
  VectorClock thread_vc_min;

  s_discard_ordered_segments_count++;

  vc_init(&thread_vc_min, 0, 0);
  thread_compute_minimum_vc(&thread_vc_min);
  if (sg_get_trace())
  {
    char msg[256];
    VectorClock thread_vc_max;

    vc_init(&thread_vc_max, 0, 0);
    thread_compute_maximum_vc(&thread_vc_max);
    VG_(snprintf)(msg, sizeof(msg),
                  "Discarding ordered segments -- min vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_min);
    VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                  ", max vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_max);
    VG_(message)(Vg_UserMsg, "%s", msg);
    vc_cleanup(&thread_vc_max);
  }

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;
    Segment* sg_next;
    for (sg = s_threadinfo[i].first;
         sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
         sg = sg_next)
    {
      thread_discard_segment(i, sg);
    }
  }
  vc_cleanup(&thread_vc_min);
}

/** Merge all segments that may be merged without triggering false positives
 *  or discarding real data races. For the theoretical background of segment
 *  merging, see also the following paper:
 *  Mark Christiaens, Michiel Ronsse and Koen De Bosschere.
 *  Bounding the number of segment histories during data race detection.
 *  Parallel Computing archive, Volume 28, Issue 9, pp 1221-1238,
 *  September 2002.
 */
static void thread_merge_segments(void)
{
  unsigned i;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;

    // tl_assert(sane_ThreadInfo(&s_threadinfo[i]));

    for (sg = s_threadinfo[i].first; sg; sg = sg->next)
    {
      if (sg_get_refcnt(sg) == 1
          && sg->next
          && sg_get_refcnt(sg->next) == 1
          && sg->next->next)
      {
        /* Merge sg and sg->next into sg. */
        sg_merge(sg, sg->next);
        thread_discard_segment(i, sg->next);
      }
    }

    // tl_assert(sane_ThreadInfo(&s_threadinfo[i]));
  }
}
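
/* Note (editor's addition): two consecutive segments of a thread are only
 * merged when neither is still referenced from elsewhere (reference count
 * one) and the later of the two is not the thread's last segment, so the
 * most recent segment always stays separate. This is what keeps the
 * per-thread segment list bounded, as described in the paper cited above.
 */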

/** Every change in the vector clock of a thread may cause segments that
 *  were previously ordered to this thread to become unordered. Hence,
 *  it may be necessary to recalculate the danger set if the vector clock
 *  of the current thread is updated. This function checks whether such a
 *  recalculation is necessary.
 *
 *  @param tid    Thread ID of the thread to which a new segment has been
 *                appended.
 *  @param new_sg Pointer to the most recent segment of thread tid.
 */
static Bool danger_set_update_needed(const DrdThreadId tid,
                                     const Segment* const new_sg)
{
#if 0
  unsigned i;
  const Segment* old_sg;

  tl_assert(new_sg);

  /* If a new segment was added to another thread than the running thread, */
  /* just tell the caller to update the danger set. */
  if (tid != s_drd_running_tid)
    return True;

  /* Always let the caller update the danger set after creation of the */
  /* first segment. */
  old_sg = new_sg->prev;
  if (old_sg == 0)
    return True;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* q;

    if (i == s_drd_running_tid)
      continue;

    for (q = s_threadinfo[i].last; q; q = q->prev)
    {
      /* If the expression below evaluates to false, this expression will */
      /* also evaluate to false for all subsequent iterations. So stop */
      /* iterating. */
      if (vc_lte(&q->vc, &old_sg->vc))
        break;
      /* If the vector clock of the second-to-last segment is not ordered */
      /* to the vector clock of segment q, and the last segment is, ask */
      /* the caller to update the danger set. */
      if (! vc_lte(&old_sg->vc, &q->vc))
      {
        return True;
      }
      /* If the vector clock of the last segment is not ordered to the */
      /* vector clock of segment q, ask the caller to update the danger */
      /* set. */
      if (! vc_lte(&q->vc, &new_sg->vc) && ! vc_lte(&new_sg->vc, &q->vc))
      {
        return True;
      }
    }
  }

  return False;
#else
  return True;
#endif
}
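
/* Editor's note: with the '#if 0' above, danger_set_update_needed() currently
 * always returns True, i.e. the danger set is recomputed after every new
 * segment. The disabled code sketches a cheaper test that only requests a
 * recomputation when the new vector clock actually changes the ordering with
 * respect to a segment of another thread.
 */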

/** Create a new segment for the specified thread, and discard any segments
 *  that cannot cause races anymore.
 */
void thread_new_segment(const DrdThreadId tid)
{
  Segment* new_sg;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  new_sg = sg_new(tid, tid);
  thread_append_segment(tid, new_sg);

  if (danger_set_update_needed(tid, new_sg))
  {
    thread_compute_danger_set(&s_danger_set, s_drd_running_tid);
    s_danger_set_new_segment_count++;
  }
  else if (tid == s_drd_running_tid)
  {
    tl_assert(thread_danger_set_up_to_date(s_drd_running_tid));
  }

  thread_discard_ordered_segments();

  if (s_segment_merging)
    thread_merge_segments();
}

/** Call this function after thread 'joiner' joined thread 'joinee'. */
void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
{
  tl_assert(joiner != joinee);
  tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
            && joiner != DRD_INVALID_THREADID);
  tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
            && joinee != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[joiner].last);
  tl_assert(s_threadinfo[joinee].last);
  vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
  thread_discard_ordered_segments();

  if (joiner == s_drd_running_tid)
  {
    thread_compute_danger_set(&s_danger_set, joiner);
  }
}

/** Call this function after thread 'tid' had to wait, because of thread
 *  synchronization, until the memory accesses in the segment with vector
 *  clock 'vc' had finished.
 */
void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);
  tl_assert(vc);
  vc_combine(&s_threadinfo[tid].last->vc, vc);
  thread_compute_danger_set(&s_danger_set, tid);
  thread_discard_ordered_segments();
  s_danger_set_combine_vc_count++;
}

Bool bm_access_load_1_triggers_conflict(const Addr a1)
{
  bm_access_load_1(running_thread_get_segment()->bm, a1);
  return bm_load_1_has_conflict_with(thread_get_danger_set(), a1);
}

Bool bm_access_load_2_triggers_conflict(const Addr a1)
{
  if ((a1 & 1) == 0)
    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 2);
  else
    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 2, eLoad);
  if ((a1 & 1) == 0)
    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 2);
  else
    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 2, eLoad);
}

Bool bm_access_load_4_triggers_conflict(const Addr a1)
{
  if ((a1 & 3) == 0)
    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 4);
  else
    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 4, eLoad);
  if ((a1 & 3) == 0)
    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 4);
  else
    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 4, eLoad);
}

Bool bm_access_load_8_triggers_conflict(const Addr a1)
{
  if ((a1 & 7) == 0)
    bm_access_aligned_load(running_thread_get_segment()->bm, a1, 8);
  else if ((a1 & 3) == 0)
  {
    bm_access_aligned_load(running_thread_get_segment()->bm, a1 + 0, 4);
    bm_access_aligned_load(running_thread_get_segment()->bm, a1 + 4, 4);
  }
  else
    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 8, eLoad);
  if ((a1 & 7) == 0)
    return bm_aligned_load_has_conflict_with(thread_get_danger_set(), a1, 8);
  else
    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eLoad);
}

Bool bm_access_load_triggers_conflict(const Addr a1, const Addr a2)
{
  bm_access_range_load(running_thread_get_segment()->bm, a1, a2);
  return bm_load_has_conflict_with(thread_get_danger_set(), a1, a2);
}

Bool bm_access_store_1_triggers_conflict(const Addr a1)
{
  bm_access_store_1(running_thread_get_segment()->bm, a1);
  return bm_store_1_has_conflict_with(thread_get_danger_set(), a1);
}

Bool bm_access_store_2_triggers_conflict(const Addr a1)
{
  if ((a1 & 1) == 0)
    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 2);
  else
    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 2, eStore);
  if ((a1 & 1) == 0)
    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 2);
  else
    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 2, eStore);
}

Bool bm_access_store_4_triggers_conflict(const Addr a1)
{
  if ((a1 & 3) == 0)
    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 4);
  else
    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 4, eStore);
  if ((a1 & 3) == 0)
    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 4);
  else
    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 4, eStore);
}

Bool bm_access_store_8_triggers_conflict(const Addr a1)
{
  if ((a1 & 7) == 0)
    bm_access_aligned_store(running_thread_get_segment()->bm, a1, 8);
  else if ((a1 & 3) == 0)
  {
    bm_access_aligned_store(running_thread_get_segment()->bm, a1 + 0, 4);
    bm_access_aligned_store(running_thread_get_segment()->bm, a1 + 4, 4);
  }
  else
    bm_access_range(running_thread_get_segment()->bm, a1, a1 + 8, eStore);
  if ((a1 & 7) == 0)
    return bm_aligned_store_has_conflict_with(thread_get_danger_set(), a1, 8);
  else
    return bm_has_conflict_with(thread_get_danger_set(), a1, a1 + 8, eStore);
}

Bool bm_access_store_triggers_conflict(const Addr a1, const Addr a2)
{
  bm_access_range_store(running_thread_get_segment()->bm, a1, a2);
  return bm_store_has_conflict_with(thread_get_danger_set(), a1, a2);
}
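
/* Caller sketch (editor's addition; hypothetical caller): the generated
 * instrumentation is expected to invoke one of the helpers above for every
 * load or store of the client program, roughly as follows:
 *
 *   if (bm_access_store_4_triggers_conflict(addr))
 *   {
 *     ... report a data race on the four bytes starting at addr ...
 *   }
 *
 * Each helper first records the access in the bitmap of the running thread's
 * current segment and then checks it against the danger set, i.e. against the
 * accesses of all segments that are unordered to that segment.
 */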

/** Call this function whenever a thread is no longer using the memory
 *  [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 *  increase.
 */
void thread_stop_using_mem(const Addr a1, const Addr a2)
{
  DrdThreadId other_user;
  unsigned i;

  /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
  other_user = DRD_INVALID_THREADID;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* p;
    for (p = s_threadinfo[i].first; p; p = p->next)
    {
      if (other_user == DRD_INVALID_THREADID
          && i != s_drd_running_tid)
      {
        if (UNLIKELY(bm_test_and_clear(p->bm, a1, a2)))
        {
          other_user = i;
        }
        continue;
      }
      bm_clear(p->bm, a1, a2);
    }
  }

  /* If any other thread had accessed memory in [ a1, a2 [, update the */
  /* danger set. */
  if (other_user != DRD_INVALID_THREADID
      && bm_has_any_access(s_danger_set, a1, a2))
  {
    thread_compute_danger_set(&s_danger_set, thread_get_running_tid());
  }
}

void thread_start_recording(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(! s_threadinfo[tid].is_recording);
  s_threadinfo[tid].is_recording = True;
}

void thread_stop_recording(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].is_recording);
  s_threadinfo[tid].is_recording = False;
}

void thread_print_all(void)
{
  unsigned i;
  Segment* p;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (s_threadinfo[i].first)
    {
      VG_(printf)("**************\n"
                  "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
                  "**************\n",
                  i,
                  s_threadinfo[i].vg_thread_exists,
                  s_threadinfo[i].vg_threadid,
                  s_threadinfo[i].posix_thread_exists,
                  s_threadinfo[i].pt_threadid,
                  s_threadinfo[i].detached_posix_thread);
      for (p = s_threadinfo[i].first; p; p = p->next)
      {
        sg_print(p);
      }
    }
  }
}

static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
  const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);

  VG_(message)(Vg_UserMsg, "%s (thread %d/%d)", msg, vg_tid, tid);

  if (vg_tid != VG_INVALID_THREADID)
  {
    if (callstack)
    {
      VG_(pp_ExeContext)(callstack);
    }
    else
    {
      VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
    }
  }
  else
  {
    VG_(message)(Vg_UserMsg,
                 " (thread finished, call stack no longer available)");
  }
}

static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
  unsigned i;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(p);

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (i != tid)
    {
      Segment* q;
      for (q = s_threadinfo[i].last; q; q = q->prev)
      {
        // Since q iterates over the segments of thread i in order of
        // decreasing vector clocks, if q->vc <= p->vc, the same holds for
        // every segment visited in the subsequent iterations (q->prev,
        // q->prev->prev, ...). Hence, break out of the loop once this
        // condition is met.
        if (vc_lte(&q->vc, &p->vc))
          break;
        if (! vc_lte(&p->vc, &q->vc))
        {
          if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
          {
            tl_assert(q->stacktrace);
            show_call_stack(i, "Other segment start",
                            q->stacktrace);
            show_call_stack(i, "Other segment end",
                            q->next ? q->next->stacktrace : 0);
          }
        }
      }
    }
  }
}

void thread_report_conflicting_segments(const DrdThreadId tid,
                                        const Addr addr,
                                        const SizeT size,
                                        const BmAccessTypeT access_type)
{
  Segment* p;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  for (p = s_threadinfo[tid].first; p; p = p->next)
  {
    if (bm_has(p->bm, addr, addr + size, access_type))
    {
      thread_report_conflicting_segments_segment(tid, addr, size,
                                                 access_type, p);
    }
  }
}

/** Verify whether the danger set for thread tid is up to date. Only perform
 *  the check if the environment variable DRD_VERIFY_DANGER_SET has been set.
 */
static Bool thread_danger_set_up_to_date(const DrdThreadId tid)
{
  static int do_verify_danger_set = -1;
  Bool result;
  struct bitmap* computed_danger_set = 0;

  if (do_verify_danger_set < 0)
  {
    //VG_(message)(Vg_DebugMsg, "%s", VG_(getenv)("DRD_VERIFY_DANGER_SET"));
    do_verify_danger_set = VG_(getenv)("DRD_VERIFY_DANGER_SET") != 0;
  }
  if (do_verify_danger_set == 0)
    return True;

  thread_compute_danger_set(&computed_danger_set, tid);
  result = bm_equal(s_danger_set, computed_danger_set);
  bm_delete(computed_danger_set);
  return result;
}
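
/* Usage sketch (editor's addition): the self-check above is only active when
 * the environment variable is set before Valgrind is started, e.g.
 *
 *   DRD_VERIFY_DANGER_SET=1 valgrind --tool=drd ./your_program
 *
 * It recomputes the danger set from scratch and compares it against the
 * cached s_danger_set, which helps when debugging drd itself at the cost of
 * extra work per check.
 */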

/** Compute a bitmap that represents the union of all memory accesses of all
 *  segments that are unordered to the current segment of the thread tid.
 */
static void thread_compute_danger_set(struct bitmap** danger_set,
                                      const DrdThreadId tid)
{
  Segment* p;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(tid == s_drd_running_tid);

  s_update_danger_set_count++;
  s_danger_set_bitmap_creation_count -= bm_get_bitmap_creation_count();
  s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();

  if (*danger_set)
  {
    bm_delete(*danger_set);
  }
  *danger_set = bm_new();

  if (s_trace_danger_set)
  {
    char msg[256];

    VG_(snprintf)(msg, sizeof(msg),
                  "computing danger set for thread %d/%d with vc ",
                  DrdThreadIdToVgThreadId(tid), tid);
    vc_snprint(msg + VG_(strlen)(msg),
               sizeof(msg) - VG_(strlen)(msg),
               &s_threadinfo[tid].last->vc);
    VG_(message)(Vg_UserMsg, "%s", msg);
  }

  p = s_threadinfo[tid].last;
  {
    unsigned j;

    if (s_trace_danger_set)
    {
      char msg[256];

      VG_(snprintf)(msg, sizeof(msg),
                    "danger set: thread [%d] at vc ",
                    tid);
      vc_snprint(msg + VG_(strlen)(msg),
                 sizeof(msg) - VG_(strlen)(msg),
                 &p->vc);
      VG_(message)(Vg_UserMsg, "%s", msg);
    }

    for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
    {
      if (j != tid && IsValidDrdThreadId(j))
      {
        const Segment* q;
        for (q = s_threadinfo[j].last; q; q = q->prev)
        {
          if (! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
          {
            if (s_trace_danger_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "danger set: [%d] merging segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
            bm_merge2(*danger_set, q->bm);
          }
          else
          {
            if (s_trace_danger_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "danger set: [%d] ignoring segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
          }
        }
      }
    }
  }

  s_danger_set_bitmap_creation_count += bm_get_bitmap_creation_count();
  s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();

  if (0 && s_trace_danger_set)
  {
    VG_(message)(Vg_UserMsg, "[%d] new danger set:", tid);
    bm_print(*danger_set);
    VG_(message)(Vg_UserMsg, "[%d] end of new danger set.", tid);
  }
}

ULong thread_get_context_switch_count(void)
{
  return s_context_switch_count;
}

ULong thread_get_discard_ordered_segments_count(void)
{
  return s_discard_ordered_segments_count;
}

ULong thread_get_update_danger_set_count(ULong* dsnsc, ULong* dscvc)
{
  tl_assert(dsnsc);
  tl_assert(dscvc);
  *dsnsc = s_danger_set_new_segment_count;
  *dscvc = s_danger_set_combine_vc_count;
  return s_update_danger_set_count;
}

ULong thread_get_danger_set_bitmap_creation_count(void)
{
  return s_danger_set_bitmap_creation_count;
}

ULong thread_get_danger_set_bitmap2_creation_count(void)
{
  return s_danger_set_bitmap2_creation_count;
}