/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2008 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_segment.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_tool_vki.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_errormgr.h"    // VG_(unique_error)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strlen)()
#include "pub_tool_libcprint.h"   // VG_(printf)()
#include "pub_tool_libcproc.h"    // VG_(getenv)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()


// Local functions.

static void thread_append_segment(const DrdThreadId tid,
                                  Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static Bool thread_danger_set_up_to_date(const DrdThreadId tid);
static void thread_compute_danger_set(struct bitmap** danger_set,
                                      const DrdThreadId tid);


// Local variables.

static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_update_danger_set_count;
static ULong s_danger_set_new_segment_count;
static ULong s_danger_set_combine_vc_count;
static ULong s_danger_set_bitmap_creation_count;
static ULong s_danger_set_bitmap2_creation_count;
static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
DrdThreadId s_drd_running_tid = DRD_INVALID_THREADID;
ThreadInfo s_threadinfo[DRD_N_THREADS];
struct bitmap* s_danger_set;
static Bool s_trace_context_switches = False;
static Bool s_trace_danger_set = False;
static Bool s_segment_merging = True;


// Function definitions.

void thread_trace_context_switches(const Bool t)
{
  s_trace_context_switches = t;
}

void thread_trace_danger_set(const Bool t)
{
  s_trace_danger_set = t;
}

void thread_set_segment_merging(const Bool m)
{
  s_segment_merging = m;
}

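/** Returns True if tid is a valid DRD thread ID, i.e. if it is in range and
 *  refers to a thread that still exists as a Valgrind thread, as a POSIX
 *  thread or as a detached POSIX thread.
 */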
__inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
{
  return (0 <= (int)tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
          && ! (s_threadinfo[tid].vg_thread_exists == False
                && s_threadinfo[tid].posix_thread_exists == False
                && s_threadinfo[tid].detached_posix_thread == False));
}

/**
 * Convert Valgrind's ThreadId into a DrdThreadId. Returns
 * DRD_INVALID_THREADID if Valgrind's ThreadId does not yet exist.
 **/
DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
{
  int i;

  if (tid == VG_INVALID_THREADID)
    return DRD_INVALID_THREADID;

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].vg_thread_exists == True
        && s_threadinfo[i].vg_threadid == tid)
    {
      return i;
    }
  }

  return DRD_INVALID_THREADID;
}

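/** Allocate a new DRD thread ID for the Valgrind thread ID tid and
 *  initialize the corresponding ThreadInfo entry. The Valgrind thread must
 *  not yet have a DRD thread ID.
 */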
static
DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
{
  int i;

  tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].vg_thread_exists == False
        && s_threadinfo[i].posix_thread_exists == False
        && s_threadinfo[i].detached_posix_thread == False)
    {
      s_threadinfo[i].vg_thread_exists = True;
      s_threadinfo[i].vg_threadid = tid;
      s_threadinfo[i].pt_threadid = INVALID_POSIX_THREADID;
      s_threadinfo[i].stack_min = 0;
      s_threadinfo[i].stack_min_min = 0;
      s_threadinfo[i].stack_startup = 0;
      s_threadinfo[i].stack_max = 0;
      s_threadinfo[i].is_recording = True;
      s_threadinfo[i].synchr_nesting = 0;
      if (s_threadinfo[i].first != 0)
        VG_(printf)("drd thread id = %d\n", i);
      tl_assert(s_threadinfo[i].first == 0);
      tl_assert(s_threadinfo[i].last == 0);
      return i;
    }
  }

  tl_assert(False);

  return DRD_INVALID_THREADID;
}

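/** Convert a POSIX thread ID into a DRD thread ID. Returns
 *  DRD_INVALID_THREADID if the POSIX thread ID is not known to DRD.
 */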
DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
{
  int i;

  tl_assert(tid != INVALID_POSIX_THREADID);

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].posix_thread_exists
        && s_threadinfo[i].pt_threadid == tid)
    {
      return i;
    }
  }
  return DRD_INVALID_THREADID;
}

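/** Convert a DRD thread ID into a Valgrind thread ID. Returns
 *  VG_INVALID_THREADID if the corresponding Valgrind thread no longer
 *  exists.
 */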
ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return (s_threadinfo[tid].vg_thread_exists
          ? s_threadinfo[tid].vg_threadid
          : VG_INVALID_THREADID);
}

#if 0
/** Sanity check of the doubly linked list of segments referenced by a
 *  ThreadInfo struct.
 *  @return True if sane, False if not.
 */
static Bool sane_ThreadInfo(const ThreadInfo* const ti)
{
  Segment* p;
  for (p = ti->first; p; p = p->next) {
    if (p->next && p->next->prev != p)
      return False;
    if (p->next == 0 && p != ti->last)
      return False;
  }
  for (p = ti->last; p; p = p->prev) {
    if (p->prev && p->prev->next != p)
      return False;
    if (p->prev == 0 && p != ti->first)
      return False;
  }
  return True;
}
#endif

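/** Allocate a DRD thread ID for the Valgrind thread vg_created and give the
 *  new thread its first segment, created via sg_new(creator, created).
 */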
DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
  DrdThreadId created;

  tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
  created = VgThreadIdToNewDrdThreadId(vg_created);
  tl_assert(0 <= (int)created && created < DRD_N_THREADS
            && created != DRD_INVALID_THREADID);

  tl_assert(s_threadinfo[created].first == 0);
  tl_assert(s_threadinfo[created].last == 0);
  thread_append_segment(created, sg_new(creator, created));

  return created;
}

/** Allocate the first segment for a thread. Call this just after
 *  pthread_create().
 */
DrdThreadId thread_post_create(const ThreadId vg_created)
{
  const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);

  tl_assert(0 <= (int)created && created < DRD_N_THREADS
            && created != DRD_INVALID_THREADID);

  s_threadinfo[created].stack_max = VG_(thread_get_stack_max)(vg_created);
  s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_min = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_size = VG_(thread_get_stack_size)(vg_created);
  tl_assert(s_threadinfo[created].stack_max != 0);

  return created;
}

/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack, */
/* and accesses this data structure from multiple threads without locking. */
/* Any conflicting accesses in the range stack_startup..stack_max will be */
/* ignored. */
void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
  tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
  s_threadinfo[tid].stack_startup = stack_startup;
}

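/* Accessors for the per-thread stack bounds tracked by DRD. */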
Addr thread_get_stack_min(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_min;
}

Addr thread_get_stack_min_min(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_min_min;
}

Addr thread_get_stack_max(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_max;
}

SizeT thread_get_stack_size(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_size;
}

/** Clean up thread-specific data structures. Call this just after
 *  pthread_join().
 */
void thread_delete(const DrdThreadId tid)
{
  Segment* sg;
  Segment* sg_prev;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].synchr_nesting == 0);
  for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
  {
    sg_prev = sg->prev;
    sg->prev = 0;
    sg->next = 0;
    sg_put(sg);
  }
  s_threadinfo[tid].vg_thread_exists = False;
  s_threadinfo[tid].posix_thread_exists = False;
  tl_assert(s_threadinfo[tid].detached_posix_thread == False);
  s_threadinfo[tid].first = 0;
  s_threadinfo[tid].last = 0;
}

/* Called after a thread performed its last memory access and before */
/* thread_delete() is called. Note: thread_delete() is only called for */
/* joinable threads, not for detached threads. */
void thread_finished(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  s_threadinfo[tid].vg_thread_exists = False;

  if (s_threadinfo[tid].detached_posix_thread)
  {
    /* Once a detached thread has finished, its stack is deallocated and */
    /* should no longer be taken into account when computing the danger set. */
    s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;

    /* For a detached thread, calling pthread_exit() invalidates the */
    /* POSIX thread ID associated with the detached thread. For joinable */
    /* POSIX threads however, the POSIX thread ID remains live after the */
    /* pthread_exit() call until pthread_join() is called. */
    s_threadinfo[tid].posix_thread_exists = False;
  }
}

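/** Store the POSIX thread ID ptid for DRD thread tid. May be called only
 *  once per thread.
 */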
void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
  tl_assert(ptid != INVALID_POSIX_THREADID);
  s_threadinfo[tid].posix_thread_exists = True;
  s_threadinfo[tid].pt_threadid = ptid;
}

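/** Returns True for joinable threads and False for detached threads. */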
Bool thread_get_joinable(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return ! s_threadinfo[tid].detached_posix_thread;
}

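/** Update the detached/joinable state of the specified thread. */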
void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(!! joinable == joinable);
  tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
#if 0
  VG_(message)(Vg_DebugMsg,
               "thread_set_joinable(%d/%d, %s)",
               tid,
               s_threadinfo[tid].vg_threadid,
               joinable ? "joinable" : "detached");
#endif
  s_threadinfo[tid].detached_posix_thread = ! joinable;
}

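/** Tell DRD that the Valgrind thread vg_tid is now running. Triggers a
 *  context switch if vg_tid differs from the previously running thread.
 */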
void thread_set_vg_running_tid(const ThreadId vg_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

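/** Record a context switch to the thread identified by vg_tid/drd_tid and
 *  recompute the danger set for the newly scheduled thread.
 */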
void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);
  tl_assert(drd_tid != DRD_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    if (s_trace_context_switches
        && s_drd_running_tid != DRD_INVALID_THREADID)
    {
      VG_(message)(Vg_DebugMsg,
                   "Context switch from thread %d/%d to thread %d/%d;"
                   " segments: %llu",
                   s_vg_running_tid, s_drd_running_tid,
                   DrdThreadIdToVgThreadId(drd_tid), drd_tid,
                   sg_get_alive_segments_count());
    }
    s_vg_running_tid = vg_tid;
    s_drd_running_tid = drd_tid;
    thread_compute_danger_set(&s_danger_set, drd_tid);
    s_context_switch_count++;
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

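/* thread_enter_synchr() increments the synchronization nesting counter of a
 * thread and returns its previous value; thread_leave_synchr() decrements
 * the counter and returns the new value.
 */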
int thread_enter_synchr(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  return s_threadinfo[tid].synchr_nesting++;
}

int thread_leave_synchr(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  tl_assert(s_threadinfo[tid].synchr_nesting >= 1);
  return --s_threadinfo[tid].synchr_nesting;
}

int thread_get_synchr_nesting_count(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  return s_threadinfo[tid].synchr_nesting;
}

/** Append a new segment at the end of the segment list. */
static void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  // tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
  sg->prev = s_threadinfo[tid].last;
  sg->next = 0;
  if (s_threadinfo[tid].last)
    s_threadinfo[tid].last->next = sg;
  s_threadinfo[tid].last = sg;
  if (s_threadinfo[tid].first == 0)
    s_threadinfo[tid].first = sg;
  // tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

/** Remove a segment from the segment list of thread tid and release the
 *  reference that the list held on it.
 */
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));

  if (sg->prev)
    sg->prev->next = sg->next;
  if (sg->next)
    sg->next->prev = sg->prev;
  if (sg == s_threadinfo[tid].first)
    s_threadinfo[tid].first = sg->next;
  if (sg == s_threadinfo[tid].last)
    s_threadinfo[tid].last = sg->prev;
  sg_put(sg);

  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

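/** Return a pointer to the vector clock of the most recent segment of
 *  thread tid.
 */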
VectorClock* thread_get_vc(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);
  return &s_threadinfo[tid].last->vc;
}

/** Return the latest segment of thread 'tid' and increment its reference
 *  count.
 */
void thread_get_latest_segment(Segment** sg, const DrdThreadId tid)
{
  tl_assert(sg);
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);

  sg_put(*sg);
  *sg = sg_get(s_threadinfo[tid].last);
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void thread_compute_minimum_vc(VectorClock* vc)
{
  unsigned i;
  Bool first;
  Segment* latest_sg;

  first = True;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    latest_sg = s_threadinfo[i].last;
    if (latest_sg)
    {
      if (first)
        vc_assign(vc, &latest_sg->vc);
      else
        vc_min(vc, &latest_sg->vc);
      first = False;
    }
  }
}

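/**
 * Compute the maximum (combination) of all latest vector clocks of all
 * threads.
 * @param vc pointer to a vectorclock, holds result upon return.
 */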
static void thread_compute_maximum_vc(VectorClock* vc)
{
  unsigned i;
  Bool first;
  Segment* latest_sg;

  first = True;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    latest_sg = s_threadinfo[i].last;
    if (latest_sg)
    {
      if (first)
        vc_assign(vc, &latest_sg->vc);
      else
        vc_combine(vc, &latest_sg->vc);
      first = False;
    }
  }
}

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of every thread -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
  unsigned i;
  VectorClock thread_vc_min;

  s_discard_ordered_segments_count++;

  vc_init(&thread_vc_min, 0, 0);
  thread_compute_minimum_vc(&thread_vc_min);
  if (sg_get_trace())
  {
    char msg[256];
    VectorClock thread_vc_max;

    vc_init(&thread_vc_max, 0, 0);
    thread_compute_maximum_vc(&thread_vc_max);
    VG_(snprintf)(msg, sizeof(msg),
                  "Discarding ordered segments -- min vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_min);
    VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                  ", max vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_max);
    VG_(message)(Vg_UserMsg, "%s", msg);
    vc_cleanup(&thread_vc_max);
  }

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;
    Segment* sg_next;
    for (sg = s_threadinfo[i].first;
         sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
         sg = sg_next)
    {
      thread_discard_segment(i, sg);
    }
  }
  vc_cleanup(&thread_vc_min);
}

/** Merge all segments that may be merged without triggering false positives
 *  or discarding real data races. For the theoretical background of segment
 *  merging, see also the following paper:
 *  Mark Christiaens, Michiel Ronsse and Koen De Bosschere.
 *  Bounding the number of segment histories during data race detection.
 *  Parallel Computing archive, Volume 28, Issue 9, pp 1221-1238,
 *  September 2002.
 */
static void thread_merge_segments(void)
{
  unsigned i;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;

    // tl_assert(sane_ThreadInfo(&s_threadinfo[i]));

    for (sg = s_threadinfo[i].first; sg; sg = sg->next)
    {
      if (sg_get_refcnt(sg) == 1
          && sg->next
          && sg_get_refcnt(sg->next) == 1
          && sg->next->next)
      {
        /* Merge sg and sg->next into sg. */
        sg_merge(sg, sg->next);
        thread_discard_segment(i, sg->next);
      }
    }

    // tl_assert(sane_ThreadInfo(&s_threadinfo[i]));
  }
}

/** Every change in the vector clock of a thread may cause segments that
 *  were previously ordered to this thread to become unordered. Hence,
 *  it may be necessary to recalculate the danger set if the vector clock
 *  of the current thread is updated. This function checks whether such a
 *  recalculation is necessary.
 *
 *  @param tid    Thread ID of the thread to which a new segment has been
 *                appended.
 *  @param new_sg Pointer to the most recent segment of thread tid.
 */
static Bool danger_set_update_needed(const DrdThreadId tid,
                                     const Segment* const new_sg)
{
#if 0
  unsigned i;
  const Segment* old_sg;

  tl_assert(new_sg);

  /* If a new segment was added to another thread than the running thread, */
  /* just tell the caller to update the danger set. */
  if (tid != s_drd_running_tid)
    return True;

  /* Always let the caller update the danger set after creation of the */
  /* first segment. */
  old_sg = new_sg->prev;
  if (old_sg == 0)
    return True;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* q;

    if (i == s_drd_running_tid)
      continue;

    for (q = s_threadinfo[i].last; q; q = q->prev)
    {
      /* If the expression below evaluates to false, this expression will */
      /* also evaluate to false for all subsequent iterations. So stop */
      /* iterating. */
      if (vc_lte(&q->vc, &old_sg->vc))
        break;
      /* If the vector clock of the second-to-last segment is not ordered */
      /* to the vector clock of segment q, and the last segment is, ask */
      /* the caller to update the danger set. */
      if (! vc_lte(&old_sg->vc, &q->vc))
      {
        return True;
      }
      /* If the vector clock of the last segment is not ordered to the */
      /* vector clock of segment q, ask the caller to update the danger */
      /* set. */
      if (! vc_lte(&q->vc, &new_sg->vc) && ! vc_lte(&new_sg->vc, &q->vc))
      {
        return True;
      }
    }
  }

  return False;
#else
  return True;
#endif
}

/** Create a new segment for the specified thread, and discard any segments
 *  that cannot cause races anymore.
 */
void thread_new_segment(const DrdThreadId tid)
{
  Segment* new_sg;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  new_sg = sg_new(tid, tid);
  thread_append_segment(tid, new_sg);

  if (danger_set_update_needed(tid, new_sg))
  {
    thread_compute_danger_set(&s_danger_set, s_drd_running_tid);
    s_danger_set_new_segment_count++;
  }
  else if (tid == s_drd_running_tid)
  {
    tl_assert(thread_danger_set_up_to_date(s_drd_running_tid));
  }

  thread_discard_ordered_segments();

  if (s_segment_merging)
    thread_merge_segments();
}

/** Call this function after thread 'joiner' joined thread 'joinee'. */
void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
{
  tl_assert(joiner != joinee);
  tl_assert(0 <= (int)joiner && joiner < DRD_N_THREADS
            && joiner != DRD_INVALID_THREADID);
  tl_assert(0 <= (int)joinee && joinee < DRD_N_THREADS
            && joinee != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[joiner].last);
  tl_assert(s_threadinfo[joinee].last);
  vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
  thread_discard_ordered_segments();

  if (joiner == s_drd_running_tid)
  {
    thread_compute_danger_set(&s_danger_set, joiner);
  }
}

/** Call this function after thread 'tid' had to wait because of thread
 *  synchronization until the memory accesses in the segment with vector clock
 *  'vc' finished.
 */
void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);
  tl_assert(vc);
  vc_combine(&s_threadinfo[tid].last->vc, vc);
  thread_compute_danger_set(&s_danger_set, tid);
  thread_discard_ordered_segments();
  s_danger_set_combine_vc_count++;
}

/** Call this function whenever a thread is no longer using the memory
 *  [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 *  increase.
 */
void thread_stop_using_mem(const Addr a1, const Addr a2)
{
  DrdThreadId other_user;
  unsigned i;

  /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
  other_user = DRD_INVALID_THREADID;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* p;
    for (p = s_threadinfo[i].first; p; p = p->next)
    {
      if (other_user == DRD_INVALID_THREADID
          && i != s_drd_running_tid)
      {
        if (UNLIKELY(bm_test_and_clear(p->bm, a1, a2)))
        {
          other_user = i;
        }
        continue;
      }
      bm_clear(p->bm, a1, a2);
    }
  }

  /* If any other thread had accessed memory in [ a1, a2 [, update the */
  /* danger set. */
  if (other_user != DRD_INVALID_THREADID
      && bm_has_any_access(s_danger_set, a1, a2))
  {
    thread_compute_danger_set(&s_danger_set, thread_get_running_tid());
  }
}

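/* thread_start_recording() and thread_stop_recording() set respectively
 * clear the is_recording flag of the specified thread.
 */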
void thread_start_recording(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(! s_threadinfo[tid].is_recording);
  s_threadinfo[tid].is_recording = True;
}

void thread_stop_recording(const DrdThreadId tid)
{
  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].is_recording);
  s_threadinfo[tid].is_recording = False;
}

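/** Print the segment list of each thread for which at least one segment
 *  exists. Debugging aid.
 */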
void thread_print_all(void)
{
  unsigned i;
  Segment* p;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (s_threadinfo[i].first)
    {
      VG_(printf)("**************\n"
                  "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
                  "**************\n",
                  i,
                  s_threadinfo[i].vg_thread_exists,
                  s_threadinfo[i].vg_threadid,
                  s_threadinfo[i].posix_thread_exists,
                  s_threadinfo[i].pt_threadid,
                  s_threadinfo[i].detached_posix_thread);
      for (p = s_threadinfo[i].first; p; p = p->next)
      {
        sg_print(p);
      }
    }
  }
}

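/** Print the message msg followed by a call stack of thread tid. Uses the
 *  stored ExeContext callstack if one was passed in and the current stack
 *  trace otherwise.
 */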
static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
  const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);

  VG_(message)(Vg_UserMsg, "%s (thread %d/%d)", msg, vg_tid, tid);

  if (vg_tid != VG_INVALID_THREADID)
  {
    if (callstack)
    {
      VG_(pp_ExeContext)(callstack);
    }
    else
    {
      VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
    }
  }
  else
  {
    VG_(message)(Vg_UserMsg,
                 " (thread finished, call stack no longer available)");
  }
}

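/** Report the segments of all other threads that are unordered to segment p
 *  of thread tid and whose accesses conflict with the specified address
 *  range and access type.
 */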
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
  unsigned i;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(p);

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (i != tid)
    {
      Segment* q;
      for (q = s_threadinfo[i].last; q; q = q->prev)
      {
        // Since q iterates over the segments of thread i in order of
        // decreasing vector clocks, if q->vc <= p->vc, then
        // q->next->vc <= p->vc will also hold. Hence, break out of the
        // loop once this condition is met.
        if (vc_lte(&q->vc, &p->vc))
          break;
        if (! vc_lte(&p->vc, &q->vc))
        {
          if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
          {
            tl_assert(q->stacktrace);
            show_call_stack(i, "Other segment start",
                            q->stacktrace);
            show_call_stack(i, "Other segment end",
                            q->next ? q->next->stacktrace : 0);
          }
        }
      }
    }
  }
}

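/** Report all segments that conflict with the access by thread tid to the
 *  address range [ addr, addr + size [ with the specified access type.
 */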
void thread_report_conflicting_segments(const DrdThreadId tid,
                                        const Addr addr,
                                        const SizeT size,
                                        const BmAccessTypeT access_type)
{
  Segment* p;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  for (p = s_threadinfo[tid].first; p; p = p->next)
  {
    if (bm_has(p->bm, addr, addr + size, access_type))
    {
      thread_report_conflicting_segments_segment(tid, addr, size,
                                                 access_type, p);
    }
  }
}

/** Verify whether the danger set for thread tid is up to date. Only perform
 *  the check if the environment variable DRD_VERIFY_DANGER_SET has been set.
 */
static Bool thread_danger_set_up_to_date(const DrdThreadId tid)
{
  static int do_verify_danger_set = -1;
  Bool result;
  struct bitmap* computed_danger_set = 0;

  if (do_verify_danger_set < 0)
  {
    //VG_(message)(Vg_DebugMsg, "%s", VG_(getenv)("DRD_VERIFY_DANGER_SET"));
    do_verify_danger_set = VG_(getenv)("DRD_VERIFY_DANGER_SET") != 0;
  }
  if (do_verify_danger_set == 0)
    return True;

  thread_compute_danger_set(&computed_danger_set, tid);
  result = bm_equal(s_danger_set, computed_danger_set);
  bm_delete(computed_danger_set);
  return result;
}

/** Compute a bitmap that represents the union of all memory accesses of all
 *  segments that are unordered to the current segment of the thread tid.
 */
static void thread_compute_danger_set(struct bitmap** danger_set,
                                      const DrdThreadId tid)
{
  Segment* p;

  tl_assert(0 <= (int)tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(tid == s_drd_running_tid);

  s_update_danger_set_count++;
  s_danger_set_bitmap_creation_count  -= bm_get_bitmap_creation_count();
  s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();

  if (*danger_set)
  {
    bm_delete(*danger_set);
  }
  *danger_set = bm_new();

  if (s_trace_danger_set)
  {
    char msg[256];

    VG_(snprintf)(msg, sizeof(msg),
                  "computing danger set for thread %d/%d with vc ",
                  DrdThreadIdToVgThreadId(tid), tid);
    vc_snprint(msg + VG_(strlen)(msg),
               sizeof(msg) - VG_(strlen)(msg),
               &s_threadinfo[tid].last->vc);
    VG_(message)(Vg_UserMsg, "%s", msg);
  }

  p = s_threadinfo[tid].last;
  {
    unsigned j;

    if (s_trace_danger_set)
    {
      char msg[256];

      VG_(snprintf)(msg, sizeof(msg),
                    "danger set: thread [%d] at vc ",
                    tid);
      vc_snprint(msg + VG_(strlen)(msg),
                 sizeof(msg) - VG_(strlen)(msg),
                 &p->vc);
      VG_(message)(Vg_UserMsg, "%s", msg);
    }

    for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
    {
      if (j != tid && IsValidDrdThreadId(j))
      {
        const Segment* q;
        for (q = s_threadinfo[j].last; q; q = q->prev)
        {
          if (! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
          {
            if (s_trace_danger_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "danger set: [%d] merging segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
            bm_merge2(*danger_set, q->bm);
          }
          else
          {
            if (s_trace_danger_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "danger set: [%d] ignoring segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
          }
        }
      }
    }
  }

  s_danger_set_bitmap_creation_count  += bm_get_bitmap_creation_count();
  s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();

  if (0 && s_trace_danger_set)
  {
    VG_(message)(Vg_UserMsg, "[%d] new danger set:", tid);
    bm_print(*danger_set);
    VG_(message)(Vg_UserMsg, "[%d] end of new danger set.", tid);
  }
}

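/* Functions that return the statistics gathered by this module. */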
ULong thread_get_context_switch_count(void)
{
  return s_context_switch_count;
}

ULong thread_get_discard_ordered_segments_count(void)
{
  return s_discard_ordered_segments_count;
}

ULong thread_get_update_danger_set_count(ULong* dsnsc, ULong* dscvc)
{
  tl_assert(dsnsc);
  tl_assert(dscvc);
  *dsnsc = s_danger_set_new_segment_count;
  *dscvc = s_danger_set_combine_vc_count;
  return s_update_danger_set_count;
}

ULong thread_get_danger_set_bitmap_creation_count(void)
{
  return s_danger_set_bitmap_creation_count;
}

ULong thread_get_danger_set_bitmap2_creation_count(void)
{
  return s_danger_set_bitmap2_creation_count;
}