[OpenMP] New TSan annotations to remove false positives on reductions and barriers
Added new ThreadSanitizer annotations to remove false positives with OpenMP reductions and barriers.
Removed unused annotations from the TSan annotations header file.
Patch by Simone Atzeni!
Differential Revision: https://reviews.llvm.org/D29202
llvm-svn: 295158
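For context, the ANNOTATE_BARRIER_BEGIN/END macros used below are expected to lower to ThreadSanitizer's happens-before/happens-after dynamic annotations when the runtime is built with TSan support (and to no-ops otherwise). The following is a minimal sketch of the assumed mapping, not the authoritative definitions from the runtime's tsan_annotations.h:

    // Sketch only: assumes the runtime forwards the barrier annotations to
    // TSan's dynamic annotation entry points (provided by compiler-rt).
    extern "C" void AnnotateHappensBefore(const char *file, int line,
                                          const volatile void *addr);
    extern "C" void AnnotateHappensAfter(const char *file, int line,
                                         const volatile void *addr);

    // The releasing thread marks the flag object before signaling it ...
    #define ANNOTATE_BARRIER_BEGIN(obj) \
      AnnotateHappensBefore(__FILE__, __LINE__, (const volatile void *)(obj))
    // ... and the waiting thread marks it after the wait returns, giving TSan
    // a happens-before edge so the reduction/barrier is not reported as a race.
    #define ANNOTATE_BARRIER_END(obj) \
      AnnotateHappensAfter(__FILE__, __LINE__, (const volatile void *)(obj))

In the patch below, each ANNOTATE_BARRIER_BEGIN placed before a flag.release() is paired with an ANNOTATE_BARRIER_END placed after the matching flag.wait() on the same object.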
diff --git a/openmp/runtime/src/kmp_barrier.cpp b/openmp/runtime/src/kmp_barrier.cpp
index 41062453..d08873f 100644
--- a/openmp/runtime/src/kmp_barrier.cpp
+++ b/openmp/runtime/src/kmp_barrier.cpp
@@ -74,6 +74,7 @@
// Mark arrival to master thread
/* After performing this write, a worker thread may not assume that the team is valid
any more - it could be deallocated by the master thread at any time. */
+ ANNOTATE_BARRIER_BEGIN(this_thr);
kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[0]);
flag.release();
} else {
@@ -99,6 +100,7 @@
kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state);
flag.wait(this_thr, FALSE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(other_threads[i]);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Barrier imbalance - write min of the thread time and the other thread time to the thread.
if (__kmp_forkjoin_frames_mode == 2) {
@@ -175,6 +177,7 @@
&other_threads[i]->th.th_bar[bt].bb.b_go,
other_threads[i]->th.th_bar[bt].bb.b_go,
other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP));
+ ANNOTATE_BARRIER_BEGIN(other_threads[i]);
kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_go, other_threads[i]);
flag.release();
}
@@ -185,6 +188,7 @@
kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(this_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
// In a fork barrier; cannot get the object reliably (or ITTNOTIFY is disabled)
@@ -268,6 +272,7 @@
kmp_flag_64 flag(&child_bar->b_arrived, new_state);
flag.wait(this_thr, FALSE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(child_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Barrier imbalance - write min of the thread time and a child time to the thread.
if (__kmp_forkjoin_frames_mode == 2) {
@@ -302,6 +307,7 @@
// Mark arrival to parent thread
/* After performing this write, a worker thread may not assume that the team is valid
any more - it could be deallocated by the master thread at any time. */
+ ANNOTATE_BARRIER_BEGIN(this_thr);
kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[parent_tid]);
flag.release();
} else {
@@ -340,6 +346,7 @@
kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(this_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
// In fork barrier where we could not get the object reliably (or ITTNOTIFY is disabled)
@@ -408,6 +415,7 @@
child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child from barrier
+ ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64 flag(&child_bar->b_go, child_thr);
flag.release();
child++;
@@ -468,6 +476,7 @@
/* After performing this write (in the last iteration of the enclosing for loop),
a worker thread may not assume that the team is valid any more - it could be
deallocated by the master thread at any time. */
+ ANNOTATE_BARRIER_BEGIN(this_thr);
p_flag.set_waiter(other_threads[parent_tid]);
p_flag.release();
break;
@@ -495,6 +504,7 @@
kmp_flag_64 c_flag(&child_bar->b_arrived, new_state);
c_flag.wait(this_thr, FALSE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(child_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
// Barrier imbalance - write min of the thread time and a child time to the thread.
if (__kmp_forkjoin_frames_mode == 2) {
@@ -568,6 +578,7 @@
kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(this_thr);
#if USE_ITT_BUILD && USE_ITT_NOTIFY
if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
// In fork barrier where we could not get the object reliably
@@ -655,6 +666,7 @@
child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child from barrier
+ ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64 flag(&child_bar->b_go, child_thr);
flag.release();
}
@@ -788,6 +800,7 @@
KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
team->t.t_id, child_tid));
+ ANNOTATE_BARRIER_END(other_threads[child_tid]);
(*reduce)(this_thr->th.th_local.reduce_data, other_threads[child_tid]->th.th_local.reduce_data);
}
ANNOTATE_REDUCE_BEFORE(reduce);
@@ -809,6 +822,7 @@
kmp_flag_64 flag(&child_bar->b_arrived, new_state);
flag.wait(this_thr, FALSE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(child_thr);
if (reduce) {
KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -835,6 +849,7 @@
kmp_flag_64 flag(&child_bar->b_arrived, new_state);
flag.wait(this_thr, FALSE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(child_thr);
if (reduce) {
KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
@@ -859,6 +874,7 @@
the team is valid any more - it could be deallocated by the master thread at any time. */
if (thr_bar->my_level || __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME
|| !thr_bar->use_oncore_barrier) { // Parent is waiting on my b_arrived flag; release it
+ ANNOTATE_BARRIER_BEGIN(this_thr);
kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[thr_bar->parent_tid]);
flag.release();
}
@@ -904,6 +920,7 @@
kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
flag.wait(this_thr, TRUE
USE_ITT_BUILD_ARG(itt_sync_obj) );
+ ANNOTATE_BARRIER_END(this_thr);
TCW_8(thr_bar->b_go, KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
}
else { // Thread barrier data is initialized, this is a leaf, blocktime is infinite, not nested
@@ -1020,6 +1037,7 @@
team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child using child's b_go flag
+ ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64 flag(&child_bar->b_go, child_thr);
flag.release();
}
@@ -1043,6 +1061,7 @@
team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
child_bar->b_go + KMP_BARRIER_STATE_BUMP));
// Release child using child's b_go flag
+ ANNOTATE_BARRIER_BEGIN(child_thr);
kmp_flag_64 flag(&child_bar->b_go, child_thr);
flag.release();
}
@@ -1082,7 +1101,7 @@
KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n",
gtid, __kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid)));
- ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+ ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
#if OMPT_SUPPORT
if (ompt_enabled) {
#if OMPT_BLAME
@@ -1325,7 +1344,7 @@
this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
}
#endif
- ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+ ANNOTATE_BARRIER_END(&team->t.t_bar);
return status;
}
@@ -1340,7 +1359,7 @@
kmp_info_t *this_thr = __kmp_threads[gtid];
kmp_team_t *team = this_thr->th.th_team;
- ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+ ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
if (!team->t.t_serialized) {
if (KMP_MASTER_GTID(gtid)) {
switch (__kmp_barrier_release_pattern[bt]) {
@@ -1371,7 +1390,7 @@
} // if
}
}
- ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+ ANNOTATE_BARRIER_END(&team->t.t_bar);
}
@@ -1422,7 +1441,7 @@
KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]);
KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n", gtid, team_id, tid));
- ANNOTATE_NEW_BARRIER_BEGIN(&team->t.t_bar);
+ ANNOTATE_BARRIER_BEGIN(&team->t.t_bar);
#if OMPT_SUPPORT
#if OMPT_TRACE
if (ompt_enabled &&
@@ -1587,7 +1606,7 @@
this_thr->th.ompt_thread_info.state = ompt_state_overhead;
}
#endif
- ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+ ANNOTATE_BARRIER_END(&team->t.t_bar);
}
@@ -1603,7 +1622,7 @@
void * itt_sync_obj = NULL;
#endif /* USE_ITT_BUILD */
if (team)
- ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+ ANNOTATE_BARRIER_END(&team->t.t_bar);
KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) has arrived\n",
gtid, (team != NULL) ? team->t.t_id : -1, tid));
@@ -1758,7 +1777,7 @@
} // (prepare called inside barrier_release)
}
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
- ANNOTATE_NEW_BARRIER_END(&team->t.t_bar);
+ ANNOTATE_BARRIER_END(&team->t.t_bar);
KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) is leaving\n", gtid, team->t.t_id, tid));
}