/*
  This file is part of drd, a data race detector.

  Copyright (C) 2006-2008 Bart Van Assche
  bart.vanassche@gmail.com

  This program is free software; you can redistribute it and/or
  modify it under the terms of the GNU General Public License as
  published by the Free Software Foundation; either version 2 of the
  License, or (at your option) any later version.

  This program is distributed in the hope that it will be useful, but
  WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
  02111-1307, USA.

  The GNU General Public License is contained in the file COPYING.
*/


#include "drd_error.h"
#include "drd_segment.h"
#include "drd_suppression.h"
#include "drd_thread.h"
#include "pub_tool_basics.h"      // Addr, SizeT
#include "pub_tool_errormgr.h"    // VG_(unique_error)()
#include "pub_tool_libcassert.h"  // tl_assert()
#include "pub_tool_libcbase.h"    // VG_(strlen)()
#include "pub_tool_libcprint.h"   // VG_(printf)()
#include "pub_tool_machine.h"
#include "pub_tool_mallocfree.h"  // VG_(malloc)(), VG_(free)()
#include "pub_tool_options.h"     // VG_(clo_backtrace_size)
#include "pub_tool_threadstate.h" // VG_(get_pthread_id)()


// Local functions.

static void thread_append_segment(const DrdThreadId tid,
                                  Segment* const sg);
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg);
static void thread_update_danger_set(const DrdThreadId tid);


// Local variables.

static ULong s_context_switch_count;
static ULong s_discard_ordered_segments_count;
static ULong s_update_danger_set_count;
static ULong s_danger_set_bitmap_creation_count;
static ULong s_danger_set_bitmap2_creation_count;
static ThreadId s_vg_running_tid = VG_INVALID_THREADID;
DrdThreadId s_drd_running_tid = DRD_INVALID_THREADID;
ThreadInfo s_threadinfo[DRD_N_THREADS];
struct bitmap* s_danger_set;
static Bool s_trace_context_switches = False;
static Bool s_trace_danger_set = False;
static Bool s_segment_merging = True;


// Function definitions.

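/** Enable or disable tracing of context switches. */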
void thread_trace_context_switches(const Bool t)
{
  s_trace_context_switches = t;
}

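/** Enable or disable tracing of danger set updates. */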
void thread_trace_danger_set(const Bool t)
{
  s_trace_danger_set = t;
}

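/** Enable or disable segment merging. */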
void thread_set_segment_merging(const Bool m)
{
  s_segment_merging = m;
}

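/** Return True if the DRD thread ID 'tid' is within range and refers to a
 *  thread slot that is still in use.
 */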
__inline__ Bool IsValidDrdThreadId(const DrdThreadId tid)
{
  return (0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID
          && ! (s_threadinfo[tid].vg_thread_exists == False
                && s_threadinfo[tid].posix_thread_exists == False
                && s_threadinfo[tid].detached_posix_thread == False));
}

/** Convert Valgrind's ThreadId into a DrdThreadId. Returns
 *  DRD_INVALID_THREADID if the specified Valgrind ThreadId does not yet
 *  exist.
 */
DrdThreadId VgThreadIdToDrdThreadId(const ThreadId tid)
{
  int i;

  if (tid == VG_INVALID_THREADID)
    return DRD_INVALID_THREADID;

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].vg_thread_exists == True
        && s_threadinfo[i].vg_threadid == tid)
    {
      return i;
    }
  }

  return DRD_INVALID_THREADID;
}

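/** Allocate a new DRD thread ID for the Valgrind thread ID 'tid' and
 *  initialize the corresponding ThreadInfo entry. May only be called for
 *  Valgrind thread IDs that do not yet have a DRD thread ID.
 */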
static
DrdThreadId VgThreadIdToNewDrdThreadId(const ThreadId tid)
{
  int i;

  tl_assert(VgThreadIdToDrdThreadId(tid) == DRD_INVALID_THREADID);

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].vg_thread_exists == False
        && s_threadinfo[i].posix_thread_exists == False
        && s_threadinfo[i].detached_posix_thread == False)
    {
      s_threadinfo[i].vg_thread_exists = True;
      s_threadinfo[i].vg_threadid = tid;
      s_threadinfo[i].pt_threadid = INVALID_POSIX_THREADID;
      s_threadinfo[i].stack_min = 0;
      s_threadinfo[i].stack_min_min = 0;
      s_threadinfo[i].stack_startup = 0;
      s_threadinfo[i].stack_max = 0;
      s_threadinfo[i].is_recording = True;
      s_threadinfo[i].synchr_nesting = 0;
      if (s_threadinfo[i].first != 0)
        VG_(printf)("drd thread id = %d\n", i);
      tl_assert(s_threadinfo[i].first == 0);
      tl_assert(s_threadinfo[i].last == 0);
      return i;
    }
  }

  tl_assert(False);

  return DRD_INVALID_THREADID;
}

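/** Convert a POSIX thread ID into a DRD thread ID. Returns
 *  DRD_INVALID_THREADID if the POSIX thread ID was not found.
 */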
DrdThreadId PtThreadIdToDrdThreadId(const PThreadId tid)
{
  int i;

  tl_assert(tid != INVALID_POSIX_THREADID);

  for (i = 1; i < DRD_N_THREADS; i++)
  {
    if (s_threadinfo[i].posix_thread_exists
        && s_threadinfo[i].pt_threadid == tid)
    {
      return i;
    }
  }
  return DRD_INVALID_THREADID;
}

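/** Convert a DRD thread ID into a Valgrind thread ID. Returns
 *  VG_INVALID_THREADID if the Valgrind thread no longer exists.
 */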
ThreadId DrdThreadIdToVgThreadId(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  return (s_threadinfo[tid].vg_thread_exists
          ? s_threadinfo[tid].vg_threadid
          : VG_INVALID_THREADID);
}

/** Sanity check of the doubly linked list of segments referenced by a
 *  ThreadInfo struct.
 *  @return True if sane, False if not.
 */
static Bool sane_ThreadInfo(const ThreadInfo* const ti)
{
  Segment* p;
  for (p = ti->first; p; p = p->next) {
    if (p->next && p->next->prev != p)
      return False;
    if (p->next == 0 && p != ti->last)
      return False;
  }
  for (p = ti->last; p; p = p->prev) {
    if (p->prev && p->prev->next != p)
      return False;
    if (p->prev == 0 && p != ti->first)
      return False;
  }
  return True;
}

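/** Allocate a DRD thread ID for the Valgrind thread 'vg_created' and create
 *  the first segment for the new thread, with thread 'creator' as its
 *  creator. Call this just before the new thread is created.
 */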
DrdThreadId thread_pre_create(const DrdThreadId creator,
                              const ThreadId vg_created)
{
  DrdThreadId created;

  tl_assert(VgThreadIdToDrdThreadId(vg_created) == DRD_INVALID_THREADID);
  created = VgThreadIdToNewDrdThreadId(vg_created);
  tl_assert(0 <= created && created < DRD_N_THREADS
            && created != DRD_INVALID_THREADID);

  tl_assert(s_threadinfo[created].first == 0);
  tl_assert(s_threadinfo[created].last == 0);
  thread_append_segment(created, sg_new(creator, created));

  return created;
}

/** Initialize the stack information for the newly created thread. Call this
 *  just after pthread_create().
 */
DrdThreadId thread_post_create(const ThreadId vg_created)
{
  const DrdThreadId created = VgThreadIdToDrdThreadId(vg_created);

  tl_assert(0 <= created && created < DRD_N_THREADS
            && created != DRD_INVALID_THREADID);

  s_threadinfo[created].stack_max = VG_(thread_get_stack_max)(vg_created);
  s_threadinfo[created].stack_startup = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_min = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_min_min = s_threadinfo[created].stack_max;
  s_threadinfo[created].stack_size = VG_(thread_get_stack_size)(vg_created);
  tl_assert(s_threadinfo[created].stack_max != 0);

  return created;
}

/* NPTL hack: NPTL allocates the 'struct pthread' on top of the stack, and
   accesses this data structure from multiple threads without locking. Any
   conflicting accesses in the range stack_startup..stack_max will be
   ignored. */
void thread_set_stack_startup(const DrdThreadId tid, const Addr stack_startup)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].stack_min <= stack_startup);
  tl_assert(stack_startup <= s_threadinfo[tid].stack_max);
  s_threadinfo[tid].stack_startup = stack_startup;
}

Addr thread_get_stack_min(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_min;
}

Addr thread_get_stack_min_min(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_min_min;
}

Addr thread_get_stack_max(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_max;
}

SizeT thread_get_stack_size(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return s_threadinfo[tid].stack_size;
}

/** Clean up thread-specific data structures. Call this just after
 *  pthread_join().
 */
void thread_delete(const DrdThreadId tid)
{
  Segment* sg;
  Segment* sg_prev;

  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].synchr_nesting == 0);
  for (sg = s_threadinfo[tid].last; sg; sg = sg_prev)
  {
    sg_prev = sg->prev;
    sg->prev = 0;
    sg->next = 0;
    sg_put(sg);
  }
  s_threadinfo[tid].vg_thread_exists = False;
  s_threadinfo[tid].posix_thread_exists = False;
  tl_assert(s_threadinfo[tid].detached_posix_thread == False);
  s_threadinfo[tid].first = 0;
  s_threadinfo[tid].last = 0;
}

/* Called after a thread performed its last memory access and before
   thread_delete() is called. Note: thread_delete() is only called for
   joinable threads, not for detached threads. */
void thread_finished(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  s_threadinfo[tid].vg_thread_exists = False;

  if (s_threadinfo[tid].detached_posix_thread)
  {
    /* Once a detached thread has finished, its stack is deallocated and
       should no longer be taken into account when computing the danger
       set. */
    s_threadinfo[tid].stack_min = s_threadinfo[tid].stack_max;

    /* For a detached thread, calling pthread_exit() invalidates the
       POSIX thread ID associated with the detached thread. For joinable
       POSIX threads however, the POSIX thread ID remains live after the
       pthread_exit() call until pthread_join() is called. */
    s_threadinfo[tid].posix_thread_exists = False;
  }
}

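/** Store the POSIX thread ID 'ptid' for DRD thread 'tid'. May be called at
 *  most once per thread.
 */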
void thread_set_pthreadid(const DrdThreadId tid, const PThreadId ptid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].pt_threadid == INVALID_POSIX_THREADID);
  tl_assert(ptid != INVALID_POSIX_THREADID);
  s_threadinfo[tid].posix_thread_exists = True;
  s_threadinfo[tid].pt_threadid = ptid;
}

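/** Return True if thread 'tid' is joinable and False if it is detached. */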
Bool thread_get_joinable(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  return ! s_threadinfo[tid].detached_posix_thread;
}

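/** Mark thread 'tid' as either joinable or detached. */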
void thread_set_joinable(const DrdThreadId tid, const Bool joinable)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(!! joinable == joinable);
  tl_assert(s_threadinfo[tid].pt_threadid != INVALID_POSIX_THREADID);
#if 0
  VG_(message)(Vg_DebugMsg,
               "thread_set_joinable(%d/%d, %s)",
               tid,
               s_threadinfo[tid].vg_threadid,
               joinable ? "joinable" : "detached");
#endif
  s_threadinfo[tid].detached_posix_thread = ! joinable;
}

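/** Record that the Valgrind thread 'vg_tid' is now running. If it differs
 *  from the previously running Valgrind thread, this is handled as a
 *  context switch.
 */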
void thread_set_vg_running_tid(const ThreadId vg_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    thread_set_running_tid(vg_tid, VgThreadIdToDrdThreadId(vg_tid));
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

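/** Record that thread vg_tid / drd_tid is now running. On a context switch
 *  the danger set is recomputed and the context switch counter is
 *  incremented.
 */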
void thread_set_running_tid(const ThreadId vg_tid, const DrdThreadId drd_tid)
{
  tl_assert(vg_tid != VG_INVALID_THREADID);
  tl_assert(drd_tid != DRD_INVALID_THREADID);

  if (vg_tid != s_vg_running_tid)
  {
    if (s_trace_context_switches
        && s_drd_running_tid != DRD_INVALID_THREADID)
    {
      VG_(message)(Vg_DebugMsg,
                   "Context switch from thread %d/%d to thread %d/%d;"
                   " segments: %llu",
                   s_vg_running_tid, s_drd_running_tid,
                   DrdThreadIdToVgThreadId(drd_tid), drd_tid,
                   sg_get_alive_segments_count());
    }
    s_vg_running_tid = vg_tid;
    s_drd_running_tid = drd_tid;
    thread_update_danger_set(drd_tid);
    s_context_switch_count++;
  }

  tl_assert(s_vg_running_tid != VG_INVALID_THREADID);
  tl_assert(s_drd_running_tid != DRD_INVALID_THREADID);
}

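/** Increment the synchronization nesting counter of thread 'tid'. Returns
 *  the value of the counter before the increment.
 */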
int thread_enter_synchr(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  return s_threadinfo[tid].synchr_nesting++;
}

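/** Decrement the synchronization nesting counter of thread 'tid'. Returns
 *  the value of the counter after the decrement.
 */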
int thread_leave_synchr(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  tl_assert(s_threadinfo[tid].synchr_nesting >= 1);
  return --s_threadinfo[tid].synchr_nesting;
}

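/** Return the current value of the synchronization nesting counter of
 *  thread 'tid'.
 */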
int thread_get_synchr_nesting_count(const DrdThreadId tid)
{
  tl_assert(IsValidDrdThreadId(tid));
  return s_threadinfo[tid].synchr_nesting;
}

/** Append a new segment at the end of the segment list. */
static void thread_append_segment(const DrdThreadId tid, Segment* const sg)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
  sg->prev = s_threadinfo[tid].last;
  sg->next = 0;
  if (s_threadinfo[tid].last)
    s_threadinfo[tid].last->next = sg;
  s_threadinfo[tid].last = sg;
  if (s_threadinfo[tid].first == 0)
    s_threadinfo[tid].first = sg;
  tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

/** Remove a segment from the segment list of thread 'tid' and release the
 *  reference that this list held on the segment.
 */
static void thread_discard_segment(const DrdThreadId tid, Segment* const sg)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));

  if (sg->prev)
    sg->prev->next = sg->next;
  if (sg->next)
    sg->next->prev = sg->prev;
  if (sg == s_threadinfo[tid].first)
    s_threadinfo[tid].first = sg->next;
  if (sg == s_threadinfo[tid].last)
    s_threadinfo[tid].last = sg->prev;
  sg_put(sg);

  //tl_assert(sane_ThreadInfo(&s_threadinfo[tid]));
}

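/** Return a pointer to the vector clock of the most recent segment of
 *  thread 'tid'.
 */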
VectorClock* thread_get_vc(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);
  return &s_threadinfo[tid].last->vc;
}

/** Return the latest segment of thread 'tid' and increment its reference
 *  count.
 */
void thread_get_latest_segment(Segment** sg, const DrdThreadId tid)
{
  tl_assert(sg);
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);

  sg_put(*sg);
  *sg = sg_get(s_threadinfo[tid].last);
}

/**
 * Compute the minimum of all latest vector clocks of all threads
 * (Michiel Ronsse calls this "clock snooping" in his papers about DIOTA).
 * @param vc pointer to a vectorclock, holds result upon return.
 */
static void thread_compute_minimum_vc(VectorClock* vc)
{
  unsigned i;
  Bool first;
  Segment* latest_sg;

  first = True;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    latest_sg = s_threadinfo[i].last;
    if (latest_sg)
    {
      if (first)
        vc_assign(vc, &latest_sg->vc);
      else
        vc_min(vc, &latest_sg->vc);
      first = False;
    }
  }
}

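/** Compute the combination (component-wise maximum) of the latest vector
 *  clocks of all threads.
 *  @param vc pointer to a vectorclock, holds result upon return.
 */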
static void thread_compute_maximum_vc(VectorClock* vc)
{
  unsigned i;
  Bool first;
  Segment* latest_sg;

  first = True;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    latest_sg = s_threadinfo[i].last;
    if (latest_sg)
    {
      if (first)
        vc_assign(vc, &latest_sg->vc);
      else
        vc_combine(vc, &latest_sg->vc);
      first = False;
    }
  }
}

/**
 * Discard all segments that have a defined order against the latest vector
 * clock of every thread -- these segments can no longer be involved in a
 * data race.
 */
static void thread_discard_ordered_segments(void)
{
  unsigned i;
  VectorClock thread_vc_min;

  s_discard_ordered_segments_count++;

  vc_init(&thread_vc_min, 0, 0);
  thread_compute_minimum_vc(&thread_vc_min);
  if (sg_get_trace())
  {
    char msg[256];
    VectorClock thread_vc_max;

    vc_init(&thread_vc_max, 0, 0);
    thread_compute_maximum_vc(&thread_vc_max);
    VG_(snprintf)(msg, sizeof(msg),
                  "Discarding ordered segments -- min vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_min);
    VG_(snprintf)(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
                  ", max vc is ");
    vc_snprint(msg + VG_(strlen)(msg), sizeof(msg) - VG_(strlen)(msg),
               &thread_vc_max);
    VG_(message)(Vg_UserMsg, "%s", msg);
    vc_cleanup(&thread_vc_max);
  }

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;
    Segment* sg_next;
    for (sg = s_threadinfo[i].first;
         sg && (sg_next = sg->next) && vc_lte(&sg->vc, &thread_vc_min);
         sg = sg_next)
    {
      thread_discard_segment(i, sg);
    }
  }
  vc_cleanup(&thread_vc_min);
}

/** Merge all segments that may be merged without triggering false positives
 *  or discarding real data races. For the theoretical background of segment
 *  merging, see also the following paper:
 *  Mark Christiaens, Michiel Ronsse and Koen De Bosschere.
 *  Bounding the number of segment histories during data race detection.
 *  Parallel Computing archive, Volume 28, Issue 9, pp 1221-1238,
 *  September 2002.
 */
static void thread_merge_segments(void)
{
  unsigned i;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* sg;

    tl_assert(sane_ThreadInfo(&s_threadinfo[i]));

    for (sg = s_threadinfo[i].first; sg; sg = sg->next)
    {
      if (sg_get_refcnt(sg) == 1
          && sg->next
          && sg_get_refcnt(sg->next) == 1
          && sg->next->next)
      {
        /* Merge sg and sg->next into sg. */
        sg_merge(sg, sg->next);
        thread_discard_segment(i, sg->next);
      }
    }

    tl_assert(sane_ThreadInfo(&s_threadinfo[i]));
  }
}

/** Create a new segment for the specified thread, and discard any segments
 *  that cannot cause races anymore.
 */
void thread_new_segment(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);

  thread_append_segment(tid, sg_new(tid, tid));

  thread_discard_ordered_segments();

  if (s_segment_merging)
    thread_merge_segments();

  if (tid == s_drd_running_tid)
  {
    /* Every change in the vector clock of the current thread may cause
       segments that were previously ordered to this thread to become
       unordered. Hence, recalculate the danger set if the vector clock
       of the current thread is updated. */
    thread_update_danger_set(tid);
  }
}

/** Call this function after thread 'joiner' joined thread 'joinee'. */
void thread_combine_vc(DrdThreadId joiner, DrdThreadId joinee)
{
  tl_assert(joiner != joinee);
  tl_assert(0 <= joiner && joiner < DRD_N_THREADS
            && joiner != DRD_INVALID_THREADID);
  tl_assert(0 <= joinee && joinee < DRD_N_THREADS
            && joinee != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[joiner].last);
  tl_assert(s_threadinfo[joinee].last);
  vc_combine(&s_threadinfo[joiner].last->vc, &s_threadinfo[joinee].last->vc);
  thread_discard_ordered_segments();

  if (joiner == s_drd_running_tid)
  {
    thread_update_danger_set(joiner);
  }
}

/** Call this function after thread 'tid' had to wait because of thread
 *  synchronization until the memory accesses in the segment with vector
 *  clock 'vc' finished.
 */
void thread_combine_vc2(DrdThreadId tid, const VectorClock* const vc)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].last);
  tl_assert(vc);
  vc_combine(&s_threadinfo[tid].last->vc, vc);
  thread_discard_ordered_segments();
}

/** Call this function whenever a thread is no longer using the memory
 *  [ a1, a2 [, e.g. because of a call to free() or a stack pointer
 *  increase.
 */
void thread_stop_using_mem(const Addr a1, const Addr a2)
{
  DrdThreadId other_user;
  unsigned i;

  /* For all threads, mark the range [ a1, a2 [ as no longer in use. */
  other_user = DRD_INVALID_THREADID;
  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    Segment* p;
    for (p = s_threadinfo[i].first; p; p = p->next)
    {
      if (other_user == DRD_INVALID_THREADID
          && i != s_drd_running_tid)
      {
        if (UNLIKELY(bm_test_and_clear(p->bm, a1, a2)))
        {
          other_user = i;
        }
        continue;
      }
      bm_clear(p->bm, a1, a2);
    }
  }

  /* If any other thread had accessed memory in [ a1, a2 [, update the
     danger set. */
  if (other_user != DRD_INVALID_THREADID
      && bm_has_any_access(s_danger_set, a1, a2))
  {
    thread_update_danger_set(thread_get_running_tid());
  }
}

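/** Enable recording of memory access information for thread 'tid'. */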
void thread_start_recording(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  tl_assert(! s_threadinfo[tid].is_recording);
  s_threadinfo[tid].is_recording = True;
}

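/** Disable recording of memory access information for thread 'tid'. */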
void thread_stop_recording(const DrdThreadId tid)
{
  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  tl_assert(s_threadinfo[tid].is_recording);
  s_threadinfo[tid].is_recording = False;
}

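/** Print the state and the segments of all threads (debugging aid). */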
void thread_print_all(void)
{
  unsigned i;
  Segment* p;

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (s_threadinfo[i].first)
    {
      VG_(printf)("**************\n"
                  "* thread %3d (%d/%d/%d/0x%lx/%d) *\n"
                  "**************\n",
                  i,
                  s_threadinfo[i].vg_thread_exists,
                  s_threadinfo[i].vg_threadid,
                  s_threadinfo[i].posix_thread_exists,
                  s_threadinfo[i].pt_threadid,
                  s_threadinfo[i].detached_posix_thread);
      for (p = s_threadinfo[i].first; p; p = p->next)
      {
        sg_print(p);
      }
    }
  }
}

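/** Print 'msg' followed by the call stack of thread 'tid'. If a call stack
 *  was passed in, that one is printed; otherwise the current stack trace of
 *  the thread is printed, or a note if the thread already finished.
 */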
static void show_call_stack(const DrdThreadId tid,
                            const Char* const msg,
                            ExeContext* const callstack)
{
  const ThreadId vg_tid = DrdThreadIdToVgThreadId(tid);

  VG_(message)(Vg_UserMsg, "%s (thread %d/%d)", msg, vg_tid, tid);

  if (vg_tid != VG_INVALID_THREADID)
  {
    if (callstack)
    {
      VG_(pp_ExeContext)(callstack);
    }
    else
    {
      VG_(get_and_pp_StackTrace)(vg_tid, VG_(clo_backtrace_size));
    }
  }
  else
  {
    VG_(message)(Vg_UserMsg,
                 "   (thread finished, call stack no longer available)");
  }
}

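/** Report the segments of all other threads that conflict with the access
 *  of [ addr, addr + size [ recorded in segment 'p' of thread 'tid'.
 */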
static void
thread_report_conflicting_segments_segment(const DrdThreadId tid,
                                           const Addr addr,
                                           const SizeT size,
                                           const BmAccessTypeT access_type,
                                           const Segment* const p)
{
  unsigned i;

  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);
  tl_assert(p);

  for (i = 0; i < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); i++)
  {
    if (i != tid)
    {
      Segment* q;
      for (q = s_threadinfo[i].last; q; q = q->prev)
      {
        // Since q iterates over the segments of thread i in order of
        // decreasing vector clocks, if q->vc <= p->vc, then
        // q->prev->vc <= p->vc will also hold. Hence, break out of the
        // loop once this condition is met.
        if (vc_lte(&q->vc, &p->vc))
          break;
        if (! vc_lte(&p->vc, &q->vc))
        {
          if (bm_has_conflict_with(q->bm, addr, addr + size, access_type))
          {
            tl_assert(q->stacktrace);
            show_call_stack(i, "Other segment start",
                            q->stacktrace);
            show_call_stack(i, "Other segment end",
                            q->next ? q->next->stacktrace : 0);
          }
        }
      }
    }
  }
}

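/** Report all segments that conflict with the access of
 *  [ addr, addr + size [ performed by thread 'tid'.
 */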
void thread_report_conflicting_segments(const DrdThreadId tid,
                                        const Addr addr,
                                        const SizeT size,
                                        const BmAccessTypeT access_type)
{
  Segment* p;

  tl_assert(0 <= tid && tid < DRD_N_THREADS
            && tid != DRD_INVALID_THREADID);

  for (p = s_threadinfo[tid].first; p; p = p->next)
  {
    if (bm_has(p->bm, addr, addr + size, access_type))
    {
      thread_report_conflicting_segments_segment(tid, addr, size,
                                                 access_type, p);
    }
  }
}

/** Compute a bitmap that represents the union of all memory accesses of all
 *  segments that are unordered to the current segment of the thread tid.
 */
static void thread_update_danger_set(const DrdThreadId tid)
{
  Segment* p;

  tl_assert(0 <= tid && tid < DRD_N_THREADS && tid != DRD_INVALID_THREADID);
  tl_assert(tid == s_drd_running_tid);

  s_update_danger_set_count++;
  s_danger_set_bitmap_creation_count -= bm_get_bitmap_creation_count();
  s_danger_set_bitmap2_creation_count -= bm_get_bitmap2_creation_count();

  if (s_danger_set)
  {
    bm_delete(s_danger_set);
  }
  s_danger_set = bm_new();

  if (s_trace_danger_set)
  {
    char msg[256];

    VG_(snprintf)(msg, sizeof(msg),
                  "computing danger set for thread %d/%d with vc ",
                  DrdThreadIdToVgThreadId(tid), tid);
    vc_snprint(msg + VG_(strlen)(msg),
               sizeof(msg) - VG_(strlen)(msg),
               &s_threadinfo[tid].last->vc);
    VG_(message)(Vg_UserMsg, "%s", msg);
  }

  p = s_threadinfo[tid].last;
  {
    unsigned j;

    if (s_trace_danger_set)
    {
      char msg[256];

      VG_(snprintf)(msg, sizeof(msg),
                    "danger set: thread [%d] at vc ",
                    tid);
      vc_snprint(msg + VG_(strlen)(msg),
                 sizeof(msg) - VG_(strlen)(msg),
                 &p->vc);
      VG_(message)(Vg_UserMsg, "%s", msg);
    }

    for (j = 0; j < sizeof(s_threadinfo) / sizeof(s_threadinfo[0]); j++)
    {
      if (IsValidDrdThreadId(j))
      {
        const Segment* q;
        for (q = s_threadinfo[j].last; q; q = q->prev)
        {
          if (j != tid && q != 0
              && ! vc_lte(&q->vc, &p->vc) && ! vc_lte(&p->vc, &q->vc))
          {
            if (s_trace_danger_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "danger set: [%d] merging segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
            bm_merge2(s_danger_set, q->bm);
          }
          else
          {
            if (s_trace_danger_set)
            {
              char msg[256];
              VG_(snprintf)(msg, sizeof(msg),
                            "danger set: [%d] ignoring segment ", j);
              vc_snprint(msg + VG_(strlen)(msg),
                         sizeof(msg) - VG_(strlen)(msg),
                         &q->vc);
              VG_(message)(Vg_UserMsg, "%s", msg);
            }
          }
        }
      }
    }
  }

  s_danger_set_bitmap_creation_count += bm_get_bitmap_creation_count();
  s_danger_set_bitmap2_creation_count += bm_get_bitmap2_creation_count();

  if (0 && s_trace_danger_set)
  {
    VG_(message)(Vg_UserMsg, "[%d] new danger set:", tid);
    bm_print(s_danger_set);
    VG_(message)(Vg_UserMsg, "[%d] end of new danger set.", tid);
  }
}

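// Statistics: query functions for the counters maintained in this file.
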
ULong thread_get_context_switch_count(void)
{
  return s_context_switch_count;
}

ULong thread_get_discard_ordered_segments_count(void)
{
  return s_discard_ordered_segments_count;
}

ULong thread_get_update_danger_set_count(void)
{
  return s_update_danger_set_count;
}

ULong thread_get_danger_set_bitmap_creation_count(void)
{
  return s_danger_set_bitmap_creation_count;
}

ULong thread_get_danger_set_bitmap2_creation_count(void)
{
  return s_danger_set_bitmap2_creation_count;
}