/*
 * kmp_barrier.cpp
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#include "kmp.h"
#include "kmp_wait_release.h"
#include "kmp_stats.h"
#include "kmp_itt.h"
#include "kmp_os.h"


#if KMP_MIC
#include <immintrin.h>
#define USE_NGO_STORES 1
#endif // KMP_MIC

#if KMP_MIC && USE_NGO_STORES
// ICV copying
#define ngo_load(src)            __m512d Vt = _mm512_load_pd((void *)(src))
#define ngo_store_icvs(dst, src) _mm512_storenrngo_pd((void *)(dst), Vt)
#define ngo_store_go(dst, src)   _mm512_storenrngo_pd((void *)(dst), Vt)
#define ngo_sync()               __asm__ volatile ("lock; addl $0,0(%%rsp)" ::: "memory")
#else
#define ngo_load(src)            ((void)0)
#define ngo_store_icvs(dst, src) copy_icvs((dst), (src))
#define ngo_store_go(dst, src)   KMP_MEMCPY((dst), (src), CACHE_LINE)
#define ngo_sync()               ((void)0)
#endif /* KMP_MIC && USE_NGO_STORES */
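/* Note on the NGO macros (descriptive, inferred from the definitions above): on KMP_MIC,
   ngo_load/ngo_store_* move a full cache line of ICVs through a 512-bit register using
   non-globally-ordered streaming stores (_mm512_storenrngo_pd), so ngo_sync() issues a
   locked instruction that acts as a store fence before the copied data may be assumed
   visible to other threads.  On all other targets the macros degenerate to plain copies
   and no-ops. */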

void __kmp_print_structure(void); // Forward declaration

// ---------------------------- Barrier Algorithms ----------------------------

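/* Common structure of the algorithms below (descriptive note): each thread keeps
   per-barrier-type arrival (b_arrived) and release (b_go) flags in its kmp_bstate_t,
   and every barrier instance advances these counters by KMP_BARRIER_STATE_BUMP.
   Roughly speaking, a kmp_flag_64 constructed with a target value is used to spin
   (wait) until the flag reaches that value, while one constructed with a waiter
   thread is used to bump the flag and wake the waiter if it is sleeping (release). */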
// Linear Barrier
static void
__kmp_linear_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
                            void (*reduce)(void *, void *)
                            USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_linear_gather);
    register kmp_team_t *team = this_thr->th.th_team;
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_info_t **other_threads = team->t.t_threads;

    KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
    KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
    // Barrier imbalance - save arrive time to the thread
    if(__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time = __itt_get_timestamp();
    }
#endif
    // We now perform a linear reduction to signal that all of the threads have arrived.
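    /* Workers simply bump their own b_arrived flag and are done; the master (tid 0)
       then spins on each worker's b_arrived in turn and applies the reduction callback
       serially, so the gather is O(nproc) work on the master. */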
    if (!KMP_MASTER_TID(tid)) {
        KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d)"
                      "arrived(%p): %llu => %llu\n", gtid, team->t.t_id, tid,
                      __kmp_gtid_from_tid(0, team), team->t.t_id, 0, &thr_bar->b_arrived,
                      thr_bar->b_arrived, thr_bar->b_arrived + KMP_BARRIER_STATE_BUMP));
        // Mark arrival to master thread
        /* After performing this write, a worker thread may not assume that the team is valid
           any more - it could be deallocated by the master thread at any time. */
        kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[0]);
        flag.release();
    } else {
        register kmp_balign_team_t *team_bar = &team->t.t_bar[bt];
        register int nproc = this_thr->th.th_team_nproc;
        register int i;
        // Don't have to worry about sleep bit here or atomic since team setting
        register kmp_uint64 new_state = team_bar->b_arrived + KMP_BARRIER_STATE_BUMP;

        // Collect all the worker team member threads.
        for (i=1; i<nproc; ++i) {
#if KMP_CACHE_MANAGE
            // Prefetch next thread's arrived count
            if (i+1 < nproc)
                KMP_CACHE_PREFETCH(&other_threads[i+1]->th.th_bar[bt].bb.b_arrived);
#endif /* KMP_CACHE_MANAGE */
            KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%d) "
                          "arrived(%p) == %llu\n", gtid, team->t.t_id, tid,
                          __kmp_gtid_from_tid(i, team), team->t.t_id, i,
                          &other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state));

            // Wait for worker thread to arrive
            kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_arrived, new_state);
            flag.wait(this_thr, FALSE
                      USE_ITT_BUILD_ARG(itt_sync_obj) );
#if USE_ITT_BUILD && USE_ITT_NOTIFY
            // Barrier imbalance - write min of the thread time and the other thread time to the thread.
            if (__kmp_forkjoin_frames_mode == 2) {
                this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
                                                       other_threads[i]->th.th_bar_min_time);
            }
#endif
            if (reduce) {
                KA_TRACE(100, ("__kmp_linear_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n", gtid,
                               team->t.t_id, tid, __kmp_gtid_from_tid(i, team), team->t.t_id, i));
                (*reduce)(this_thr->th.th_local.reduce_data,
                          other_threads[i]->th.th_local.reduce_data);
            }
        }
        // Don't have to worry about sleep bit here or atomic since team setting
        team_bar->b_arrived = new_state;
        KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) set team %d arrived(%p) = %llu\n",
                      gtid, team->t.t_id, tid, team->t.t_id, &team_bar->b_arrived, new_state));
    }
    KA_TRACE(20, ("__kmp_linear_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}

static void
__kmp_linear_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
                             int propagate_icvs
                             USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_linear_release);
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_team_t *team;

    if (KMP_MASTER_TID(tid)) {
        register unsigned int i;
        register kmp_uint32 nproc = this_thr->th.th_team_nproc;
        register kmp_info_t **other_threads;

        team = __kmp_threads[gtid]->th.th_team;
        KMP_DEBUG_ASSERT(team != NULL);
        other_threads = team->t.t_threads;

        KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d(%d:%d) master enter for barrier type %d\n",
                      gtid, team->t.t_id, tid, bt));

        if (nproc > 1) {
#if KMP_BARRIER_ICV_PUSH
            {
                KMP_TIME_DEVELOPER_BLOCK(USER_icv_copy);
                if (propagate_icvs) {
                    ngo_load(&team->t.t_implicit_task_taskdata[0].td_icvs);
                    for (i=1; i<nproc; ++i) {
                        __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[i], team, i, FALSE);
                        ngo_store_icvs(&team->t.t_implicit_task_taskdata[i].td_icvs,
                                       &team->t.t_implicit_task_taskdata[0].td_icvs);
                    }
                    ngo_sync();
                }
            }
#endif // KMP_BARRIER_ICV_PUSH

            // Now, release all of the worker threads
            for (i=1; i<nproc; ++i) {
#if KMP_CACHE_MANAGE
                // Prefetch next thread's go flag
                if (i+1 < nproc)
                    KMP_CACHE_PREFETCH(&other_threads[i+1]->th.th_bar[bt].bb.b_go);
#endif /* KMP_CACHE_MANAGE */
                KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%d) "
                              "go(%p): %u => %u\n", gtid, team->t.t_id, tid,
                              other_threads[i]->th.th_info.ds.ds_gtid, team->t.t_id, i,
                              &other_threads[i]->th.th_bar[bt].bb.b_go,
                              other_threads[i]->th.th_bar[bt].bb.b_go,
                              other_threads[i]->th.th_bar[bt].bb.b_go + KMP_BARRIER_STATE_BUMP));
                kmp_flag_64 flag(&other_threads[i]->th.th_bar[bt].bb.b_go, other_threads[i]);
                flag.release();
            }
        }
    } else { // Wait for the MASTER thread to release us
        KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d wait go(%p) == %u\n",
                      gtid, &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
        kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
        flag.wait(this_thr, TRUE
                  USE_ITT_BUILD_ARG(itt_sync_obj) );
#if USE_ITT_BUILD && USE_ITT_NOTIFY
        if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
            // In a fork barrier; cannot get the object reliably (or ITTNOTIFY is disabled)
            itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier, 0, -1);
            // Cancel wait on previous parallel region...
            __kmp_itt_task_starting(itt_sync_obj);

            if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
                return;

            itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
            if (itt_sync_obj != NULL)
                // Call prepare as early as possible for "new" barrier
                __kmp_itt_task_finished(itt_sync_obj);
        } else
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
            // Early exit for reaping threads releasing forkjoin barrier
            if ( bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done) )
                return;
        // The worker thread may now assume that the team is valid.
#ifdef KMP_DEBUG
        tid = __kmp_tid_from_gtid(gtid);
        team = __kmp_threads[gtid]->th.th_team;
#endif
        KMP_DEBUG_ASSERT(team != NULL);
        TCW_4(thr_bar->b_go, KMP_INIT_BARRIER_STATE);
        KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
                      gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
        KMP_MB(); // Flush all pending memory write invalidates.
    }
    KA_TRACE(20, ("__kmp_linear_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}

// Tree barrier
static void
__kmp_tree_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
                          void (*reduce)(void *, void *)
                          USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_tree_gather);
    register kmp_team_t *team = this_thr->th.th_team;
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_info_t **other_threads = team->t.t_threads;
    register kmp_uint32 nproc = this_thr->th.th_team_nproc;
    register kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
    register kmp_uint32 branch_factor = 1 << branch_bits;
    register kmp_uint32 child;
    register kmp_uint32 child_tid;
    register kmp_uint64 new_state;

    KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
    KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
    // Barrier imbalance - save arrive time to the thread
    if(__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time = __itt_get_timestamp();
    }
#endif
    // Perform tree gather to wait until all threads have arrived; reduce any required data as we go
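    /* Indexing example (illustrative): with branch_bits==2 (branch_factor==4), thread
       tid has children 4*tid+1 .. 4*tid+4 (bounded by nproc), and a worker's parent is
       (tid-1)>>branch_bits.  Leaves have no children in range and skip the wait loop. */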
    child_tid = (tid << branch_bits) + 1;
    if (child_tid < nproc) {
        // Parent threads wait for all their children to arrive
        new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
        child = 1;
        do {
            register kmp_info_t *child_thr = other_threads[child_tid];
            register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
            // Prefetch next thread's arrived count
            if (child+1 <= branch_factor && child_tid+1 < nproc)
                KMP_CACHE_PREFETCH(&other_threads[child_tid+1]->th.th_bar[bt].bb.b_arrived);
#endif /* KMP_CACHE_MANAGE */
            KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%u) "
                          "arrived(%p) == %llu\n", gtid, team->t.t_id, tid,
                          __kmp_gtid_from_tid(child_tid, team), team->t.t_id, child_tid,
                          &child_bar->b_arrived, new_state));
            // Wait for child to arrive
            kmp_flag_64 flag(&child_bar->b_arrived, new_state);
            flag.wait(this_thr, FALSE
                      USE_ITT_BUILD_ARG(itt_sync_obj) );
#if USE_ITT_BUILD && USE_ITT_NOTIFY
            // Barrier imbalance - write min of the thread time and a child time to the thread.
            if (__kmp_forkjoin_frames_mode == 2) {
                this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
                                                       child_thr->th.th_bar_min_time);
            }
#endif
            if (reduce) {
                KA_TRACE(100, ("__kmp_tree_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
                               gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                               team->t.t_id, child_tid));
                (*reduce)(this_thr->th.th_local.reduce_data, child_thr->th.th_local.reduce_data);
            }
            child++;
            child_tid++;
        }
        while (child <= branch_factor && child_tid < nproc);
    }

    if (!KMP_MASTER_TID(tid)) { // Worker threads
        register kmp_int32 parent_tid = (tid - 1) >> branch_bits;

        KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
                      "arrived(%p): %llu => %llu\n", gtid, team->t.t_id, tid,
                      __kmp_gtid_from_tid(parent_tid, team), team->t.t_id, parent_tid,
                      &thr_bar->b_arrived, thr_bar->b_arrived,
                      thr_bar->b_arrived + KMP_BARRIER_STATE_BUMP));

        // Mark arrival to parent thread
        /* After performing this write, a worker thread may not assume that the team is valid
           any more - it could be deallocated by the master thread at any time. */
        kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[parent_tid]);
        flag.release();
    } else {
        // Need to update the team arrived pointer if we are the master thread
        if (nproc > 1) // New value was already computed above
            team->t.t_bar[bt].b_arrived = new_state;
        else
            team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
        KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) set team %d arrived(%p) = %llu\n",
                      gtid, team->t.t_id, tid, team->t.t_id,
                      &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
    }
    KA_TRACE(20, ("__kmp_tree_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}

static void
__kmp_tree_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
                           int propagate_icvs
                           USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_tree_release);
    register kmp_team_t *team;
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_uint32 nproc;
    register kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
    register kmp_uint32 branch_factor = 1 << branch_bits;
    register kmp_uint32 child;
    register kmp_uint32 child_tid;

    // Perform a tree release for all of the threads that have been gathered
    if (!KMP_MASTER_TID(tid)) { // Handle fork barrier workers who aren't part of a team yet
        KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d wait go(%p) == %u\n",
                      gtid, &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
        // Wait for parent thread to release us
        kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
        flag.wait(this_thr, TRUE
                  USE_ITT_BUILD_ARG(itt_sync_obj) );
#if USE_ITT_BUILD && USE_ITT_NOTIFY
        if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
            // In fork barrier where we could not get the object reliably (or ITTNOTIFY is disabled)
            itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier, 0, -1);
            // Cancel wait on previous parallel region...
            __kmp_itt_task_starting(itt_sync_obj);

            if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
                return;

            itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
            if (itt_sync_obj != NULL)
                // Call prepare as early as possible for "new" barrier
                __kmp_itt_task_finished(itt_sync_obj);
        } else
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
            // Early exit for reaping threads releasing forkjoin barrier
            if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
                return;

        // The worker thread may now assume that the team is valid.
        team = __kmp_threads[gtid]->th.th_team;
        KMP_DEBUG_ASSERT(team != NULL);
        tid = __kmp_tid_from_gtid(gtid);

        TCW_4(thr_bar->b_go, KMP_INIT_BARRIER_STATE);
        KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
                      gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
        KMP_MB(); // Flush all pending memory write invalidates.
    } else {
        team = __kmp_threads[gtid]->th.th_team;
        KMP_DEBUG_ASSERT(team != NULL);
        KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d(%d:%d) master enter for barrier type %d\n",
                      gtid, team->t.t_id, tid, bt));
    }
    nproc = this_thr->th.th_team_nproc;
    child_tid = (tid << branch_bits) + 1;

    if (child_tid < nproc) {
        register kmp_info_t **other_threads = team->t.t_threads;
        child = 1;
        // Parent threads release all their children
        do {
            register kmp_info_t *child_thr = other_threads[child_tid];
            register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
            // Prefetch next thread's go count
            if (child+1 <= branch_factor && child_tid+1 < nproc)
                KMP_CACHE_PREFETCH(&other_threads[child_tid+1]->th.th_bar[bt].bb.b_go);
#endif /* KMP_CACHE_MANAGE */

#if KMP_BARRIER_ICV_PUSH
            {
                KMP_TIME_DEVELOPER_BLOCK(USER_icv_copy);
                if (propagate_icvs) {
                    __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[child_tid],
                                             team, child_tid, FALSE);
                    copy_icvs(&team->t.t_implicit_task_taskdata[child_tid].td_icvs,
                              &team->t.t_implicit_task_taskdata[0].td_icvs);
                }
            }
#endif // KMP_BARRIER_ICV_PUSH
            KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%u)"
                          "go(%p): %u => %u\n", gtid, team->t.t_id, tid,
                          __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
                          child_tid, &child_bar->b_go, child_bar->b_go,
                          child_bar->b_go + KMP_BARRIER_STATE_BUMP));
            // Release child from barrier
            kmp_flag_64 flag(&child_bar->b_go, child_thr);
            flag.release();
            child++;
            child_tid++;
        }
        while (child <= branch_factor && child_tid < nproc);
    }
    KA_TRACE(20, ("__kmp_tree_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}


// Hyper Barrier
static void
__kmp_hyper_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
                           void (*reduce)(void *, void *)
                           USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_hyper_gather);
    register kmp_team_t *team = this_thr->th.th_team;
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_info_t **other_threads = team->t.t_threads;
    register kmp_uint64 new_state = KMP_BARRIER_UNUSED_STATE;
    register kmp_uint32 num_threads = this_thr->th.th_team_nproc;
    register kmp_uint32 branch_bits = __kmp_barrier_gather_branch_bits[bt];
    register kmp_uint32 branch_factor = 1 << branch_bits;
    register kmp_uint32 offset;
    register kmp_uint32 level;

    KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));

    KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
    // Barrier imbalance - save arrive time to the thread
    if(__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_arrive_time = this_thr->th.th_bar_min_time = __itt_get_timestamp();
    }
#endif
    /* Perform a hypercube-embedded tree gather to wait until all of the threads have
       arrived, and reduce any required data as we go. */
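    /* Dimension-by-dimension example (illustrative), branch_bits==1: at level 0 every
       odd tid signals tid-1 and drops out of the loop; at level 1 every tid == 2 (mod 4)
       signals tid & ~3; and so on, until only tid 0 remains.  In general the parent at a
       given level is tid with its low (level + branch_bits) bits cleared. */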
    kmp_flag_64 p_flag(&thr_bar->b_arrived);
    for (level=0, offset=1; offset<num_threads; level+=branch_bits, offset<<=branch_bits)
    {
        register kmp_uint32 child;
        register kmp_uint32 child_tid;

        if (((tid >> level) & (branch_factor - 1)) != 0) {
            register kmp_int32 parent_tid = tid & ~((1 << (level + branch_bits)) -1);

            KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
                          "arrived(%p): %llu => %llu\n", gtid, team->t.t_id, tid,
                          __kmp_gtid_from_tid(parent_tid, team), team->t.t_id, parent_tid,
                          &thr_bar->b_arrived, thr_bar->b_arrived,
                          thr_bar->b_arrived + KMP_BARRIER_STATE_BUMP));
            // Mark arrival to parent thread
            /* After performing this write (in the last iteration of the enclosing for loop),
               a worker thread may not assume that the team is valid any more - it could be
               deallocated by the master thread at any time. */
            p_flag.set_waiter(other_threads[parent_tid]);
            p_flag.release();
            break;
        }

        // Parent threads wait for children to arrive
        if (new_state == KMP_BARRIER_UNUSED_STATE)
            new_state = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
        for (child=1, child_tid=tid+(1 << level); child<branch_factor && child_tid<num_threads;
             child++, child_tid+=(1 << level))
        {
            register kmp_info_t *child_thr = other_threads[child_tid];
            register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
            register kmp_uint32 next_child_tid = child_tid + (1 << level);
            // Prefetch next thread's arrived count
            if (child+1 < branch_factor && next_child_tid < num_threads)
                KMP_CACHE_PREFETCH(&other_threads[next_child_tid]->th.th_bar[bt].bb.b_arrived);
#endif /* KMP_CACHE_MANAGE */
            KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%u) "
                          "arrived(%p) == %llu\n", gtid, team->t.t_id, tid,
                          __kmp_gtid_from_tid(child_tid, team), team->t.t_id, child_tid,
                          &child_bar->b_arrived, new_state));
            // Wait for child to arrive
            kmp_flag_64 c_flag(&child_bar->b_arrived, new_state);
            c_flag.wait(this_thr, FALSE
                        USE_ITT_BUILD_ARG(itt_sync_obj) );
#if USE_ITT_BUILD && USE_ITT_NOTIFY
            // Barrier imbalance - write min of the thread time and a child time to the thread.
            if (__kmp_forkjoin_frames_mode == 2) {
                this_thr->th.th_bar_min_time = KMP_MIN(this_thr->th.th_bar_min_time,
                                                       child_thr->th.th_bar_min_time);
            }
#endif
            if (reduce) {
                KA_TRACE(100, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) += T#%d(%d:%u)\n",
                               gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                               team->t.t_id, child_tid));
                (*reduce)(this_thr->th.th_local.reduce_data, child_thr->th.th_local.reduce_data);
            }
        }
    }

    if (KMP_MASTER_TID(tid)) {
        // Need to update the team arrived pointer if we are the master thread
        if (new_state == KMP_BARRIER_UNUSED_STATE)
            team->t.t_bar[bt].b_arrived += KMP_BARRIER_STATE_BUMP;
        else
            team->t.t_bar[bt].b_arrived = new_state;
        KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) set team %d arrived(%p) = %llu\n",
                      gtid, team->t.t_id, tid, team->t.t_id,
                      &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
    }
    KA_TRACE(20, ("__kmp_hyper_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}

// The reverse versions seem to beat the forward versions overall
#define KMP_REVERSE_HYPER_BAR
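/* With KMP_REVERSE_HYPER_BAR defined, the release loop below walks the hypercube from
   the highest level down and visits children from the highest tid to the lowest, i.e.
   in the reverse of the order in which they were gathered above. */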
static void
__kmp_hyper_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
                            int propagate_icvs
                            USE_ITT_BUILD_ARG(void *itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_hyper_release);
    register kmp_team_t *team;
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_info_t **other_threads;
    register kmp_uint32 num_threads;
    register kmp_uint32 branch_bits = __kmp_barrier_release_branch_bits[bt];
    register kmp_uint32 branch_factor = 1 << branch_bits;
    register kmp_uint32 child;
    register kmp_uint32 child_tid;
    register kmp_uint32 offset;
    register kmp_uint32 level;

    /* Perform a hypercube-embedded tree release for all of the threads that have been gathered.
       If KMP_REVERSE_HYPER_BAR is defined (default) the threads are released in the reverse
       order of the corresponding gather, otherwise threads are released in the same order. */
    if (KMP_MASTER_TID(tid)) { // master
        team = __kmp_threads[gtid]->th.th_team;
        KMP_DEBUG_ASSERT(team != NULL);
        KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d(%d:%d) master enter for barrier type %d\n",
                      gtid, team->t.t_id, tid, bt));
#if KMP_BARRIER_ICV_PUSH
        if (propagate_icvs) { // master already has ICVs in final destination; copy
            copy_icvs(&thr_bar->th_fixed_icvs, &team->t.t_implicit_task_taskdata[tid].td_icvs);
        }
#endif
    }
    else { // Handle fork barrier workers who aren't part of a team yet
        KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d wait go(%p) == %u\n",
                      gtid, &thr_bar->b_go, KMP_BARRIER_STATE_BUMP));
        // Wait for parent thread to release us
        kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
        flag.wait(this_thr, TRUE
                  USE_ITT_BUILD_ARG(itt_sync_obj) );
#if USE_ITT_BUILD && USE_ITT_NOTIFY
        if ((__itt_sync_create_ptr && itt_sync_obj == NULL) || KMP_ITT_DEBUG) {
            // In fork barrier where we could not get the object reliably
            itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier, 0, -1);
            // Cancel wait on previous parallel region...
            __kmp_itt_task_starting(itt_sync_obj);

            if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
                return;

            itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
            if (itt_sync_obj != NULL)
                // Call prepare as early as possible for "new" barrier
                __kmp_itt_task_finished(itt_sync_obj);
        } else
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
            // Early exit for reaping threads releasing forkjoin barrier
            if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
                return;

        // The worker thread may now assume that the team is valid.
        team = __kmp_threads[gtid]->th.th_team;
        KMP_DEBUG_ASSERT(team != NULL);
        tid = __kmp_tid_from_gtid(gtid);

        TCW_4(thr_bar->b_go, KMP_INIT_BARRIER_STATE);
        KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
                      gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
        KMP_MB(); // Flush all pending memory write invalidates.
    }
    num_threads = this_thr->th.th_team_nproc;
    other_threads = team->t.t_threads;

#ifdef KMP_REVERSE_HYPER_BAR
    // Count up to correct level for parent
    for (level=0, offset=1; offset<num_threads && (((tid>>level) & (branch_factor-1)) == 0);
         level+=branch_bits, offset<<=branch_bits);

    // Now go down from there
    for (level-=branch_bits, offset>>=branch_bits; offset != 0;
         level-=branch_bits, offset>>=branch_bits)
#else
    // Go down the tree, level by level
    for (level=0, offset=1; offset<num_threads; level+=branch_bits, offset<<=branch_bits)
#endif // KMP_REVERSE_HYPER_BAR
    {
#ifdef KMP_REVERSE_HYPER_BAR
        /* Now go in reverse order through the children, highest to lowest.
           Initial setting of child is conservative here. */
        child = num_threads >> ((level==0)?level:level-1);
        for (child=(child<branch_factor-1) ? child : branch_factor-1, child_tid=tid+(child<<level);
             child>=1; child--, child_tid-=(1<<level))
#else
        if (((tid >> level) & (branch_factor - 1)) != 0)
            // No need to go lower than this, since this is the level parent would be notified
            break;
        // Iterate through children on this level of the tree
        for (child=1, child_tid=tid+(1<<level); child<branch_factor && child_tid<num_threads;
             child++, child_tid+=(1<<level))
#endif // KMP_REVERSE_HYPER_BAR
        {
            if (child_tid >= num_threads) continue; // Child doesn't exist so keep going
            else {
                register kmp_info_t *child_thr = other_threads[child_tid];
                register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
#if KMP_CACHE_MANAGE
                register kmp_uint32 next_child_tid = child_tid - (1 << level);
                // Prefetch next thread's go count
# ifdef KMP_REVERSE_HYPER_BAR
                if (child-1 >= 1 && next_child_tid < num_threads)
# else
                if (child+1 < branch_factor && next_child_tid < num_threads)
# endif // KMP_REVERSE_HYPER_BAR
                    KMP_CACHE_PREFETCH(&other_threads[next_child_tid]->th.th_bar[bt].bb.b_go);
#endif /* KMP_CACHE_MANAGE */

#if KMP_BARRIER_ICV_PUSH
                if (propagate_icvs) // push my fixed ICVs to my child
                    copy_icvs(&child_bar->th_fixed_icvs, &thr_bar->th_fixed_icvs);
#endif // KMP_BARRIER_ICV_PUSH

                KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%u)"
                              "go(%p): %u => %u\n", gtid, team->t.t_id, tid,
                              __kmp_gtid_from_tid(child_tid, team), team->t.t_id,
                              child_tid, &child_bar->b_go, child_bar->b_go,
                              child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                // Release child from barrier
                kmp_flag_64 flag(&child_bar->b_go, child_thr);
                flag.release();
            }
        }
    }
#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs && !KMP_MASTER_TID(tid)) { // copy ICVs locally to final dest
        __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid, FALSE);
        copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, &thr_bar->th_fixed_icvs);
    }
#endif
    KA_TRACE(20, ("__kmp_hyper_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}

// Hierarchical Barrier

// Initialize thread barrier data
/* Initializes/re-initializes the hierarchical barrier data stored on a thread. Performs the
   minimum amount of initialization required based on how the team has changed. Returns true if
   leaf children will require both on-core and traditional wake-up mechanisms. For example, if
   the team size increases, threads already in the team will respond to on-core wakeup on their
   parent thread, but threads newly added to the team will only be listening on their local
   b_go. */
static bool
__kmp_init_hierarchical_barrier_thread(enum barrier_type bt, kmp_bstate_t *thr_bar, kmp_uint32 nproc,
                                       int gtid, int tid, kmp_team_t *team)
{
    // Checks to determine if (re-)initialization is needed
    bool uninitialized = thr_bar->team == NULL;
    bool team_changed = team != thr_bar->team;
    bool team_sz_changed = nproc != thr_bar->nproc;
    bool tid_changed = tid != thr_bar->old_tid;
    bool retval = false;

    if (uninitialized || team_sz_changed) {
        __kmp_get_hierarchy(nproc, thr_bar);
    }

    if (uninitialized || team_sz_changed || tid_changed) {
        thr_bar->my_level = thr_bar->depth-1; // default for master
        thr_bar->parent_tid = -1; // default for master
        if (!KMP_MASTER_TID(tid)) { // if not master, find parent thread in hierarchy
            kmp_uint32 d=0;
            while (d<thr_bar->depth) { // find parent based on level of thread in hierarchy, and note level
                kmp_uint32 rem;
                if (d == thr_bar->depth-2) { // reached level right below the master
                    thr_bar->parent_tid = 0;
                    thr_bar->my_level = d;
                    break;
                }
                else if ((rem = tid%thr_bar->skip_per_level[d+1]) != 0) { // TODO: can we make this op faster?
                    // thread is not a subtree root at next level, so this is max
                    thr_bar->parent_tid = tid - rem;
                    thr_bar->my_level = d;
                    break;
                }
                ++d;
            }
        }
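        /* Each of a parent's (at most 8) on-core children is mapped to one byte of the
           parent's 64-bit flag word: child k (k = tid - parent_tid - 1) uses byte index
           7-k, and leaf_state is the mask with those bytes set to 1.  This lets leaves
           check in on, and be released from, a single 64-bit flag on their parent. */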
        thr_bar->offset = 7-(tid-thr_bar->parent_tid-1);
        thr_bar->old_tid = tid;
        thr_bar->wait_flag = KMP_BARRIER_NOT_WAITING;
        thr_bar->team = team;
        thr_bar->parent_bar = &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
    }
    if (uninitialized || team_changed || tid_changed) {
        thr_bar->team = team;
        thr_bar->parent_bar = &team->t.t_threads[thr_bar->parent_tid]->th.th_bar[bt].bb;
        retval = true;
    }
    if (uninitialized || team_sz_changed || tid_changed) {
        thr_bar->nproc = nproc;
        thr_bar->leaf_kids = thr_bar->base_leaf_kids;
        if (thr_bar->my_level == 0) thr_bar->leaf_kids=0;
        if (thr_bar->leaf_kids && (kmp_uint32)tid+thr_bar->leaf_kids+1 > nproc)
            thr_bar->leaf_kids = nproc - tid - 1;
        thr_bar->leaf_state = 0;
        for (int i=0; i<thr_bar->leaf_kids; ++i) ((char *)&(thr_bar->leaf_state))[7-i] = 1;
    }
    return retval;
}

static void
__kmp_hierarchical_barrier_gather(enum barrier_type bt, kmp_info_t *this_thr,
                                  int gtid, int tid, void (*reduce) (void *, void *)
                                  USE_ITT_BUILD_ARG(void * itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_hier_gather);
    register kmp_team_t *team = this_thr->th.th_team;
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_uint32 nproc = this_thr->th.th_team_nproc;
    register kmp_info_t **other_threads = team->t.t_threads;
    register kmp_uint64 new_state;

    int level = team->t.t_level;
#if OMP_40_ENABLED
    if (other_threads[0]->th.th_teams_microtask) // are we inside the teams construct?
        if (this_thr->th.th_teams_size.nteams > 1)
            ++level; // level was not increased in teams construct for team_of_masters
#endif
    if (level == 1) thr_bar->use_oncore_barrier = 1;
    else thr_bar->use_oncore_barrier = 0; // Do not use oncore barrier when nested

    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) enter for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
    KMP_DEBUG_ASSERT(this_thr == other_threads[this_thr->th.th_info.ds.ds_tid]);

#if USE_ITT_BUILD && USE_ITT_NOTIFY
    // Barrier imbalance - save arrive time to the thread
    if(__kmp_forkjoin_frames_mode == 3 || __kmp_forkjoin_frames_mode == 2) {
        this_thr->th.th_bar_arrive_time = __itt_get_timestamp();
    }
#endif

    (void)__kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc, gtid, tid, team);

    if (thr_bar->my_level) { // not a leaf (my_level==0 means leaf)
        register kmp_int32 child_tid;
        new_state = (kmp_uint64)team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
        if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME && thr_bar->use_oncore_barrier) {
            if (thr_bar->leaf_kids) { // First, wait for leaf children to check-in on my b_arrived flag
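                /* leaf_state has one byte set per on-core leaf child; waiting until
                   b_arrived reaches (current arrived value | leaf_state) means every
                   leaf has flipped its byte.  The atomic AND further below then clears
                   those bytes again for the next barrier. */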
                kmp_uint64 leaf_state = KMP_MASTER_TID(tid) ? thr_bar->b_arrived | thr_bar->leaf_state : team->t.t_bar[bt].b_arrived | thr_bar->leaf_state;
                kmp_flag_64 flag(&thr_bar->b_arrived, leaf_state);
                flag.wait(this_thr, FALSE
                          USE_ITT_BUILD_ARG(itt_sync_obj) );
                if (reduce) {
                    for (child_tid=tid+1; child_tid<=tid+thr_bar->leaf_kids; ++child_tid) {
                        KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                       gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                       team->t.t_id, child_tid));
                        (*reduce)(this_thr->th.th_local.reduce_data, other_threads[child_tid]->th.th_local.reduce_data);
                    }
                }
                (void) KMP_TEST_THEN_AND64((volatile kmp_int64 *)&thr_bar->b_arrived, ~(thr_bar->leaf_state)); // clear leaf_state bits
            }
            // Next, wait for higher level children on each child's b_arrived flag
            for (kmp_uint32 d=1; d<thr_bar->my_level; ++d) { // gather lowest level threads first, but skip 0
                kmp_uint32 last = tid+thr_bar->skip_per_level[d+1], skip = thr_bar->skip_per_level[d];
                if (last > nproc) last = nproc;
                for (child_tid=tid+skip; child_tid<(int)last; child_tid+=skip) {
                    register kmp_info_t *child_thr = other_threads[child_tid];
                    register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
                    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%d) "
                                  "arrived(%p) == %llu\n",
                                  gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                  team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
                    kmp_flag_64 flag(&child_bar->b_arrived, new_state);
                    flag.wait(this_thr, FALSE
                              USE_ITT_BUILD_ARG(itt_sync_obj) );
                    if (reduce) {
                        KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                       gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                       team->t.t_id, child_tid));
                        (*reduce)(this_thr->th.th_local.reduce_data, child_thr->th.th_local.reduce_data);
                    }
                }
            }
        }
        else { // Blocktime is not infinite
            for (kmp_uint32 d=0; d<thr_bar->my_level; ++d) { // Gather lowest level threads first
                kmp_uint32 last = tid+thr_bar->skip_per_level[d+1], skip = thr_bar->skip_per_level[d];
                if (last > nproc) last = nproc;
                for (child_tid=tid+skip; child_tid<(int)last; child_tid+=skip) {
                    register kmp_info_t *child_thr = other_threads[child_tid];
                    register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
                    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) wait T#%d(%d:%d) "
                                  "arrived(%p) == %llu\n",
                                  gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                  team->t.t_id, child_tid, &child_bar->b_arrived, new_state));
                    kmp_flag_64 flag(&child_bar->b_arrived, new_state);
                    flag.wait(this_thr, FALSE
                              USE_ITT_BUILD_ARG(itt_sync_obj) );
                    if (reduce) {
                        KA_TRACE(100, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) += T#%d(%d:%d)\n",
                                       gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                       team->t.t_id, child_tid));
                        (*reduce)(this_thr->th.th_local.reduce_data, child_thr->th.th_local.reduce_data);
                    }
                }
            }
        }
    }
    // All subordinates are gathered; now release parent if not master thread

    if (!KMP_MASTER_TID(tid)) { // worker threads release parent in hierarchy
        KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) releasing T#%d(%d:%d) "
                      "arrived(%p): %llu => %llu\n", gtid, team->t.t_id, tid,
                      __kmp_gtid_from_tid(thr_bar->parent_tid, team), team->t.t_id, thr_bar->parent_tid,
                      &thr_bar->b_arrived, thr_bar->b_arrived, thr_bar->b_arrived+KMP_BARRIER_STATE_BUMP));
        /* Mark arrival to parent: After performing this write, a worker thread may not assume that
           the team is valid any more - it could be deallocated by the master thread at any time. */
        if (thr_bar->my_level || __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME
            || !thr_bar->use_oncore_barrier) { // Parent is waiting on my b_arrived flag; release it
            kmp_flag_64 flag(&thr_bar->b_arrived, other_threads[thr_bar->parent_tid]);
            flag.release();
        }
        else { // Leaf does special release on the "offset" bits of parent's b_arrived flag
            thr_bar->b_arrived = team->t.t_bar[bt].b_arrived + KMP_BARRIER_STATE_BUMP;
            kmp_flag_oncore flag(&thr_bar->parent_bar->b_arrived, thr_bar->offset);
            flag.set_waiter(other_threads[thr_bar->parent_tid]);
            flag.release();
        }
    } else { // Master thread needs to update the team's b_arrived value
        team->t.t_bar[bt].b_arrived = new_state;
        KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) set team %d arrived(%p) = %llu\n",
                      gtid, team->t.t_id, tid, team->t.t_id, &team->t.t_bar[bt].b_arrived, team->t.t_bar[bt].b_arrived));
    }
    // Is the team access below unsafe or just technically invalid?
    KA_TRACE(20, ("__kmp_hierarchical_barrier_gather: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}

static void
__kmp_hierarchical_barrier_release(enum barrier_type bt, kmp_info_t *this_thr, int gtid, int tid,
                                   int propagate_icvs
                                   USE_ITT_BUILD_ARG(void * itt_sync_obj) )
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_hier_release);
    register kmp_team_t *team;
    register kmp_bstate_t *thr_bar = &this_thr->th.th_bar[bt].bb;
    register kmp_uint32 nproc;
    bool team_change = false; // indicates on-core barrier shouldn't be used

    if (KMP_MASTER_TID(tid)) {
        team = __kmp_threads[gtid]->th.th_team;
        KMP_DEBUG_ASSERT(team != NULL);
        KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) master entered barrier type %d\n",
                      gtid, team->t.t_id, tid, bt));
    }
    else { // Worker threads
        // Wait for parent thread to release me
        if (!thr_bar->use_oncore_barrier || __kmp_dflt_blocktime != KMP_MAX_BLOCKTIME
            || thr_bar->my_level != 0 || thr_bar->team == NULL) {
            // Use traditional method of waiting on my own b_go flag
            thr_bar->wait_flag = KMP_BARRIER_OWN_FLAG;
            kmp_flag_64 flag(&thr_bar->b_go, KMP_BARRIER_STATE_BUMP);
            flag.wait(this_thr, TRUE
                      USE_ITT_BUILD_ARG(itt_sync_obj) );
            TCW_8(thr_bar->b_go, KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
        }
        else { // Thread barrier data is initialized, this is a leaf, blocktime is infinite, not nested
            // Wait on my "offset" bits on parent's b_go flag
            thr_bar->wait_flag = KMP_BARRIER_PARENT_FLAG;
            kmp_flag_oncore flag(&thr_bar->parent_bar->b_go, KMP_BARRIER_STATE_BUMP, thr_bar->offset,
                                 bt, this_thr
                                 USE_ITT_BUILD_ARG(itt_sync_obj) );
            flag.wait(this_thr, TRUE);
            if (thr_bar->wait_flag == KMP_BARRIER_SWITCHING) { // Thread was switched to own b_go
                TCW_8(thr_bar->b_go, KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
            }
            else { // Reset my bits on parent's b_go flag
                ((char*)&(thr_bar->parent_bar->b_go))[thr_bar->offset] = 0;
            }
        }
        thr_bar->wait_flag = KMP_BARRIER_NOT_WAITING;
        // Early exit for reaping threads releasing forkjoin barrier
        if (bt == bs_forkjoin_barrier && TCR_4(__kmp_global.g.g_done))
            return;
        // The worker thread may now assume that the team is valid.
        team = __kmp_threads[gtid]->th.th_team;
        KMP_DEBUG_ASSERT(team != NULL);
        tid = __kmp_tid_from_gtid(gtid);

        KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) set go(%p) = %u\n",
                      gtid, team->t.t_id, tid, &thr_bar->b_go, KMP_INIT_BARRIER_STATE));
        KMP_MB(); // Flush all pending memory write invalidates.
    }

    nproc = this_thr->th.th_team_nproc;
    int level = team->t.t_level;
#if OMP_40_ENABLED
    if (team->t.t_threads[0]->th.th_teams_microtask) { // are we inside the teams construct?
        if (team->t.t_pkfn != (microtask_t)__kmp_teams_master && this_thr->th.th_teams_level == level)
            ++level; // level was not increased in teams construct for team_of_workers
        if (this_thr->th.th_teams_size.nteams > 1)
            ++level; // level was not increased in teams construct for team_of_masters
    }
#endif
    if (level == 1) thr_bar->use_oncore_barrier = 1;
    else thr_bar->use_oncore_barrier = 0; // Do not use oncore barrier when nested

    // If the team size has increased, we still communicate with old leaves via oncore barrier.
    unsigned short int old_leaf_kids = thr_bar->leaf_kids;
    kmp_uint64 old_leaf_state = thr_bar->leaf_state;
    team_change = __kmp_init_hierarchical_barrier_thread(bt, thr_bar, nproc, gtid, tid, team);
    // But if the entire team changes, we won't use oncore barrier at all
    if (team_change) old_leaf_kids = 0;

#if KMP_BARRIER_ICV_PUSH
    if (propagate_icvs) {
        __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid, FALSE);
        if (KMP_MASTER_TID(tid)) { // master already has copy in final destination; copy
            copy_icvs(&thr_bar->th_fixed_icvs, &team->t.t_implicit_task_taskdata[tid].td_icvs);
        }
        else if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME && thr_bar->use_oncore_barrier) { // optimization for inf blocktime
            if (!thr_bar->my_level) // I'm a leaf in the hierarchy (my_level==0)
                // leaves (on-core children) pull parent's fixed ICVs directly to local ICV store
                copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                          &thr_bar->parent_bar->th_fixed_icvs);
            // non-leaves will get ICVs piggybacked with b_go via NGO store
        }
        else { // blocktime is not infinite; pull ICVs from parent's fixed ICVs
            if (thr_bar->my_level) // not a leaf; copy ICVs to my fixed ICVs child can access
                copy_icvs(&thr_bar->th_fixed_icvs, &thr_bar->parent_bar->th_fixed_icvs);
            else // leaves copy parent's fixed ICVs directly to local ICV store
                copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
                          &thr_bar->parent_bar->th_fixed_icvs);
        }
    }
#endif // KMP_BARRIER_ICV_PUSH

    // Now, release my children
    if (thr_bar->my_level) { // not a leaf
        register kmp_int32 child_tid;
        kmp_uint32 last;
        if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME && thr_bar->use_oncore_barrier) {
            if (KMP_MASTER_TID(tid)) { // do a flat release
                // Set local b_go to bump children via NGO store of the cache line containing ICVs and b_go.
                thr_bar->b_go = KMP_BARRIER_STATE_BUMP;
                // Use ngo stores if available; b_go piggybacks in the last 8 bytes of the cache line
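                /* That is, th_fixed_icvs and b_go share a cache line in kmp_bstate_t, so
                   the single cache-line NGO store below both pushes the ICVs and
                   overwrites the child's b_go with KMP_BARRIER_STATE_BUMP, releasing it. */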
                ngo_load(&thr_bar->th_fixed_icvs);
                // This loops over all the threads skipping only the leaf nodes in the hierarchy
                for (child_tid=thr_bar->skip_per_level[1]; child_tid<(int)nproc; child_tid+=thr_bar->skip_per_level[1]) {
                    register kmp_bstate_t *child_bar = &team->t.t_threads[child_tid]->th.th_bar[bt].bb;
                    KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%d)"
                                  " go(%p): %u => %u\n",
                                  gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                  team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                                  child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                    // Use ngo store (if available) to both store ICVs and release child via child's b_go
                    ngo_store_go(&child_bar->th_fixed_icvs, &thr_bar->th_fixed_icvs);
                }
                ngo_sync();
            }
            TCW_8(thr_bar->b_go, KMP_INIT_BARRIER_STATE); // Reset my b_go flag for next time
            // Now, release leaf children
            if (thr_bar->leaf_kids) { // if there are any
                // We test team_change on the off-chance that the level 1 team changed.
                if (team_change || old_leaf_kids < thr_bar->leaf_kids) { // some old leaf_kids, some new
                    if (old_leaf_kids) { // release old leaf kids
                        thr_bar->b_go |= old_leaf_state;
                    }
                    // Release new leaf kids
                    last = tid+thr_bar->skip_per_level[1];
                    if (last > nproc) last = nproc;
                    for (child_tid=tid+1+old_leaf_kids; child_tid<(int)last; ++child_tid) { // skip_per_level[0]=1
                        register kmp_info_t *child_thr = team->t.t_threads[child_tid];
                        register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
                        KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing"
                                      " T#%d(%d:%d) go(%p): %u => %u\n",
                                      gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                      team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                                      child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                        // Release child using child's b_go flag
                        kmp_flag_64 flag(&child_bar->b_go, child_thr);
                        flag.release();
                    }
                }
                else { // Release all children at once with leaf_state bits on my own b_go flag
                    thr_bar->b_go |= thr_bar->leaf_state;
                }
            }
        }
        else { // Blocktime is not infinite; do a simple hierarchical release
            for (int d=thr_bar->my_level-1; d>=0; --d) { // Release highest level threads first
                last = tid+thr_bar->skip_per_level[d+1];
                kmp_uint32 skip = thr_bar->skip_per_level[d];
                if (last > nproc) last = nproc;
                for (child_tid=tid+skip; child_tid<(int)last; child_tid+=skip) {
                    register kmp_info_t *child_thr = team->t.t_threads[child_tid];
                    register kmp_bstate_t *child_bar = &child_thr->th.th_bar[bt].bb;
                    KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) releasing T#%d(%d:%d)"
                                  " go(%p): %u => %u\n",
                                  gtid, team->t.t_id, tid, __kmp_gtid_from_tid(child_tid, team),
                                  team->t.t_id, child_tid, &child_bar->b_go, child_bar->b_go,
                                  child_bar->b_go + KMP_BARRIER_STATE_BUMP));
                    // Release child using child's b_go flag
                    kmp_flag_64 flag(&child_bar->b_go, child_thr);
                    flag.release();
                }
            }
        }
#if KMP_BARRIER_ICV_PUSH
        if (propagate_icvs && !KMP_MASTER_TID(tid)) // non-leaves copy ICVs from fixed ICVs to local dest
            copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs, &thr_bar->th_fixed_icvs);
#endif // KMP_BARRIER_ICV_PUSH
    }
    KA_TRACE(20, ("__kmp_hierarchical_barrier_release: T#%d(%d:%d) exit for barrier type %d\n",
                  gtid, team->t.t_id, tid, bt));
}

// ---------------------------- End of Barrier Algorithms ----------------------------

// Internal function to do a barrier.
/* If is_split is true, do a split barrier, otherwise, do a plain barrier
   If reduce is non-NULL, do a split reduction barrier, otherwise, do a split barrier
   Returns 0 if master thread, 1 if worker thread. */
int
__kmp_barrier(enum barrier_type bt, int gtid, int is_split, size_t reduce_size,
              void *reduce_data, void (*reduce)(void *, void *))
{
    KMP_TIME_DEVELOPER_BLOCK(KMP_barrier);
    register int tid = __kmp_tid_from_gtid(gtid);
    register kmp_info_t *this_thr = __kmp_threads[gtid];
    register kmp_team_t *team = this_thr->th.th_team;
    register int status = 0;
    ident_t *loc = __kmp_threads[gtid]->th.th_ident;
#if OMPT_SUPPORT
    ompt_task_id_t my_task_id;
    ompt_parallel_id_t my_parallel_id;
#endif

    KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) has arrived\n",
                  gtid, __kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid)));

#if OMPT_SUPPORT
    if (ompt_enabled) {
#if OMPT_BLAME
        my_task_id = team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id;
        my_parallel_id = team->t.ompt_team_info.parallel_id;

#if OMPT_TRACE
        if (this_thr->th.ompt_thread_info.state == ompt_state_wait_single) {
            if (ompt_callbacks.ompt_callback(ompt_event_single_others_end)) {
                ompt_callbacks.ompt_callback(ompt_event_single_others_end)(
                    my_parallel_id, my_task_id);
            }
        }
#endif
        if (ompt_callbacks.ompt_callback(ompt_event_barrier_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_barrier_begin)(
                my_parallel_id, my_task_id);
        }
#endif
        // It is OK to report the barrier state after the barrier begin callback.
        // According to the OMPT specification, a compliant implementation may
        // even delay reporting this state until the barrier begins to wait.
        this_thr->th.ompt_thread_info.state = ompt_state_wait_barrier;
    }
#endif

    if (! team->t.t_serialized) {
#if USE_ITT_BUILD
        // This value will be used in itt notify events below.
        void *itt_sync_obj = NULL;
# if USE_ITT_NOTIFY
        if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
            itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
# endif
#endif /* USE_ITT_BUILD */
        if (__kmp_tasking_mode == tskm_extra_barrier) {
            __kmp_tasking_barrier(team, this_thr, gtid);
            KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) past tasking barrier\n",
                          gtid, __kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid)));
        }

        /* Copy the blocktime info to the thread, where __kmp_wait_template() can access it when
           the team struct is not guaranteed to exist. */
        // See note about the corresponding code in __kmp_join_barrier() being performance-critical.
        if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
            this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
            this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
        }

#if USE_ITT_BUILD
        if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
            __kmp_itt_barrier_starting(gtid, itt_sync_obj);
#endif /* USE_ITT_BUILD */
#if USE_DEBUGGER
        // Let the debugger know: the thread arrived to the barrier and waiting.
        if (KMP_MASTER_TID(tid)) { // Master counter is stored in team structure.
            team->t.t_bar[bt].b_master_arrived += 1;
        } else {
            this_thr->th.th_bar[bt].bb.b_worker_arrived += 1;
        } // if
#endif /* USE_DEBUGGER */
        if (reduce != NULL) {
            //KMP_DEBUG_ASSERT( is_split == TRUE ); // #C69956
            this_thr->th.th_local.reduce_data = reduce_data;
        }

        if (KMP_MASTER_TID(tid) && __kmp_tasking_mode != tskm_immediate_exec)
            __kmp_task_team_setup(this_thr, team, 0); // use 0 to only setup the current team if nthreads > 1

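        /* The gather (and, further below, release) algorithm is selected per barrier
           type from the __kmp_barrier_*_pattern tables filled in at library startup
           (user-tunable through the KMP_*_BARRIER_PATTERN environment variables); the
           default case falls back to the linear barrier. */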
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001131 switch (__kmp_barrier_gather_pattern[bt]) {
1132 case bp_hyper_bar: {
1133 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]); // don't set branch bits to 0; use linear
1134 __kmp_hyper_barrier_gather(bt, this_thr, gtid, tid, reduce
1135 USE_ITT_BUILD_ARG(itt_sync_obj) );
1136 break;
1137 }
1138 case bp_hierarchical_bar: {
1139 __kmp_hierarchical_barrier_gather(bt, this_thr, gtid, tid, reduce
1140 USE_ITT_BUILD_ARG(itt_sync_obj));
1141 break;
1142 }
1143 case bp_tree_bar: {
1144 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bt]); // don't set branch bits to 0; use linear
1145 __kmp_tree_barrier_gather(bt, this_thr, gtid, tid, reduce
1146 USE_ITT_BUILD_ARG(itt_sync_obj) );
1147 break;
1148 }
1149 default: {
1150 __kmp_linear_barrier_gather(bt, this_thr, gtid, tid, reduce
1151 USE_ITT_BUILD_ARG(itt_sync_obj) );
1152 }
1153 }
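        /* Illustrative note, assumed from the runtime's documented settings rather than new
           logic: the gather/release pattern dispatched above is normally selected per barrier
           type via the KMP_*_BARRIER_PATTERN environment variables, so forcing the tree gather
           for plain barriers in a test run might look like

               $ KMP_PLAIN_BARRIER_PATTERN=tree,tree ./my_omp_app

           with "linear", "tree", "hyper" and "hierarchical" as the recognized pattern names. */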
1154
1155 KMP_MB();
1156
1157 if (KMP_MASTER_TID(tid)) {
1158 status = 0;
1159 if (__kmp_tasking_mode != tskm_immediate_exec) {
1160 __kmp_task_team_wait(this_thr, team
1161 USE_ITT_BUILD_ARG(itt_sync_obj) );
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001162 }
Jonathan Peyton8fbb49a2015-07-09 18:16:58 +00001163#if USE_DEBUGGER
   1164            // Let the debugger know: all threads have arrived and are starting to leave the barrier.
1165 team->t.t_bar[bt].b_team_arrived += 1;
1166#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001167
1168#if USE_ITT_BUILD
   1169            /* TODO: In the case of a split reduction barrier, the master thread may send the
   1170               acquired event early, before the final summation into the shared variable is done
   1171               (the final summation can be a long operation for array reductions). */
1172 if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
1173 __kmp_itt_barrier_middle(gtid, itt_sync_obj);
1174#endif /* USE_ITT_BUILD */
1175#if USE_ITT_BUILD && USE_ITT_NOTIFY
Andrey Churbanov51aecb82015-05-06 19:22:36 +00001176 // Barrier - report frame end (only if active_level == 1)
1177 if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) && __kmp_forkjoin_frames_mode &&
1178#if OMP_40_ENABLED
1179 this_thr->th.th_teams_microtask == NULL &&
1180#endif
1181 team->t.t_active_level == 1)
1182 {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001183 kmp_uint64 cur_time = __itt_get_timestamp();
Andrey Churbanov51aecb82015-05-06 19:22:36 +00001184 kmp_info_t **other_threads = team->t.t_threads;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001185 int nproc = this_thr->th.th_team_nproc;
1186 int i;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001187 switch(__kmp_forkjoin_frames_mode) {
1188 case 1:
1189 __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0, loc, nproc);
1190 this_thr->th.th_frame_time = cur_time;
1191 break;
Andrey Churbanov51aecb82015-05-06 19:22:36 +00001192 case 2: // AC 2015-01-19: currently does not work for hierarchical (to be fixed)
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001193 __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time, 1, loc, nproc);
1194 break;
1195 case 3:
1196 if( __itt_metadata_add_ptr ) {
Andrey Churbanov51aecb82015-05-06 19:22:36 +00001197 // Initialize with master's wait time
1198 kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001199 for (i=1; i<nproc; ++i) {
1200 delta += ( cur_time - other_threads[i]->th.th_bar_arrive_time );
1201 }
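                        // At this point delta holds the team's summed wait time for this barrier
                        // (cur_time minus each thread's arrival timestamp); it is reported below
                        // as a simple imbalance metric.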
1202 __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time, cur_time, delta, (kmp_uint64)( reduce != NULL));
1203 }
1204 __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0, loc, nproc);
1205 this_thr->th.th_frame_time = cur_time;
1206 break;
1207 }
1208 }
1209#endif /* USE_ITT_BUILD */
1210 } else {
1211 status = 1;
1212#if USE_ITT_BUILD
1213 if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
1214 __kmp_itt_barrier_middle(gtid, itt_sync_obj);
1215#endif /* USE_ITT_BUILD */
1216 }
1217 if (status == 1 || ! is_split) {
1218 switch (__kmp_barrier_release_pattern[bt]) {
1219 case bp_hyper_bar: {
1220 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
1221 __kmp_hyper_barrier_release(bt, this_thr, gtid, tid, FALSE
1222 USE_ITT_BUILD_ARG(itt_sync_obj) );
1223 break;
1224 }
1225 case bp_hierarchical_bar: {
1226 __kmp_hierarchical_barrier_release(bt, this_thr, gtid, tid, FALSE
1227 USE_ITT_BUILD_ARG(itt_sync_obj) );
1228 break;
1229 }
1230 case bp_tree_bar: {
1231 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
1232 __kmp_tree_barrier_release(bt, this_thr, gtid, tid, FALSE
1233 USE_ITT_BUILD_ARG(itt_sync_obj) );
1234 break;
1235 }
1236 default: {
1237 __kmp_linear_barrier_release(bt, this_thr, gtid, tid, FALSE
1238 USE_ITT_BUILD_ARG(itt_sync_obj) );
1239 }
1240 }
1241 if (__kmp_tasking_mode != tskm_immediate_exec) {
1242 __kmp_task_team_sync(this_thr, team);
1243 }
1244 }
1245
1246#if USE_ITT_BUILD
   1247        /* GEH: TODO: Move this under the if-condition above and also include it in
   1248           __kmp_end_split_barrier(). This will more accurately represent the actual release
   1249           time of the threads for split barriers. */
1250 if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
1251 __kmp_itt_barrier_finished(gtid, itt_sync_obj);
1252#endif /* USE_ITT_BUILD */
1253 } else { // Team is serialized.
1254 status = 0;
1255 if (__kmp_tasking_mode != tskm_immediate_exec) {
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001256#if OMP_41_ENABLED
1257 if ( this_thr->th.th_task_team != NULL ) {
1258 void *itt_sync_obj = NULL;
1259#if USE_ITT_NOTIFY
1260 if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
1261 itt_sync_obj = __kmp_itt_barrier_object(gtid, bt, 1);
1262 __kmp_itt_barrier_starting(gtid, itt_sync_obj);
1263 }
1264#endif
1265
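                // Descriptive note: a serialized team keeps its task team alive only when proxy
                // tasks (tasks whose completion can be signalled from outside the team) may still
                // be pending, which is what the assertion below checks before draining them.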
Jonathan Peytonfe9a1d72015-08-26 19:58:48 +00001266 KMP_DEBUG_ASSERT(this_thr->th.th_task_team->tt.tt_found_proxy_tasks == TRUE);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001267 __kmp_task_team_wait(this_thr, team
1268 USE_ITT_BUILD_ARG(itt_sync_obj));
Jonathan Peyton54127982015-11-04 21:37:48 +00001269 __kmp_task_team_setup(this_thr, team, 0);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001270
1271#if USE_ITT_BUILD
1272 if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
1273 __kmp_itt_barrier_finished(gtid, itt_sync_obj);
1274#endif /* USE_ITT_BUILD */
1275 }
1276#else
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001277 // The task team should be NULL for serialized code (tasks will be executed immediately)
Andrey Churbanov6d224db2015-02-10 18:37:43 +00001278 KMP_DEBUG_ASSERT(team->t.t_task_team[this_thr->th.th_task_state] == NULL);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001279 KMP_DEBUG_ASSERT(this_thr->th.th_task_team == NULL);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001280#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001281 }
1282 }
1283 KA_TRACE(15, ("__kmp_barrier: T#%d(%d:%d) is leaving with return value %d\n",
1284 gtid, __kmp_team_from_gtid(gtid)->t.t_id, __kmp_tid_from_gtid(gtid), status));
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001285
1286#if OMPT_SUPPORT
Jonathan Peytonb68a85d2015-09-21 18:11:22 +00001287 if (ompt_enabled) {
Jonathan Peyton117a94f2015-06-29 17:28:57 +00001288#if OMPT_BLAME
Jonathan Peytonb68a85d2015-09-21 18:11:22 +00001289 if (ompt_callbacks.ompt_callback(ompt_event_barrier_end)) {
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001290 ompt_callbacks.ompt_callback(ompt_event_barrier_end)(
1291 my_parallel_id, my_task_id);
1292 }
1293#endif
1294 this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
1295 }
1296#endif
1297
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001298 return status;
1299}
1300
1301
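// Usage sketch (illustrative only, loosely following how the compiler-support layer drives a
// split reduction barrier; the exact call sites live elsewhere in the runtime):
//
//     int status = __kmp_barrier(bs_reduction_barrier, gtid, TRUE /* is_split */,
//                                reduce_size, reduce_data, reduce_func);
//     if (status == 0) {
//         // The master returns from the gather phase without releasing the workers:
//         // finish the reduction here, then let everyone go.
//         __kmp_end_split_barrier(bs_reduction_barrier, gtid);
//     }
//
// Workers (status == 1) only return once that release has happened.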
1302void
1303__kmp_end_split_barrier(enum barrier_type bt, int gtid)
1304{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001305 KMP_TIME_DEVELOPER_BLOCK(KMP_end_split_barrier);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001306 int tid = __kmp_tid_from_gtid(gtid);
1307 kmp_info_t *this_thr = __kmp_threads[gtid];
1308 kmp_team_t *team = this_thr->th.th_team;
1309
1310 if (!team->t.t_serialized) {
1311 if (KMP_MASTER_GTID(gtid)) {
1312 switch (__kmp_barrier_release_pattern[bt]) {
1313 case bp_hyper_bar: {
1314 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
1315 __kmp_hyper_barrier_release(bt, this_thr, gtid, tid, FALSE
1316 USE_ITT_BUILD_ARG(NULL) );
1317 break;
1318 }
1319 case bp_hierarchical_bar: {
1320 __kmp_hierarchical_barrier_release(bt, this_thr, gtid, tid, FALSE
1321 USE_ITT_BUILD_ARG(NULL));
1322 break;
1323 }
1324 case bp_tree_bar: {
1325 KMP_ASSERT(__kmp_barrier_release_branch_bits[bt]);
1326 __kmp_tree_barrier_release(bt, this_thr, gtid, tid, FALSE
1327 USE_ITT_BUILD_ARG(NULL) );
1328 break;
1329 }
1330 default: {
1331 __kmp_linear_barrier_release(bt, this_thr, gtid, tid, FALSE
1332 USE_ITT_BUILD_ARG(NULL) );
1333 }
1334 }
1335 if (__kmp_tasking_mode != tskm_immediate_exec) {
1336 __kmp_task_team_sync(this_thr, team);
1337 } // if
1338 }
1339 }
1340}
1341
1342
1343void
1344__kmp_join_barrier(int gtid)
1345{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001346 KMP_TIME_DEVELOPER_BLOCK(KMP_join_barrier);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001347 register kmp_info_t *this_thr = __kmp_threads[gtid];
1348 register kmp_team_t *team;
1349 register kmp_uint nproc;
1350 kmp_info_t *master_thread;
1351 int tid;
1352#ifdef KMP_DEBUG
1353 int team_id;
1354#endif /* KMP_DEBUG */
1355#if USE_ITT_BUILD
1356 void *itt_sync_obj = NULL;
1357# if USE_ITT_NOTIFY
   1358    if (__itt_sync_create_ptr || KMP_ITT_DEBUG) // Don't call the routine unless needed
1359 // Get object created at fork_barrier
1360 itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
1361# endif
1362#endif /* USE_ITT_BUILD */
1363 KMP_MB();
1364
1365 // Get current info
1366 team = this_thr->th.th_team;
1367 nproc = this_thr->th.th_team_nproc;
1368 KMP_DEBUG_ASSERT((int)nproc == team->t.t_nproc);
1369 tid = __kmp_tid_from_gtid(gtid);
1370#ifdef KMP_DEBUG
1371 team_id = team->t.t_id;
1372#endif /* KMP_DEBUG */
1373 master_thread = this_thr->th.th_team_master;
1374#ifdef KMP_DEBUG
1375 if (master_thread != team->t.t_threads[0]) {
1376 __kmp_print_structure();
1377 }
1378#endif /* KMP_DEBUG */
1379 KMP_DEBUG_ASSERT(master_thread == team->t.t_threads[0]);
1380 KMP_MB();
1381
1382 // Verify state
1383 KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
1384 KMP_DEBUG_ASSERT(TCR_PTR(this_thr->th.th_team));
1385 KMP_DEBUG_ASSERT(TCR_PTR(this_thr->th.th_root));
1386 KMP_DEBUG_ASSERT(this_thr == team->t.t_threads[tid]);
1387 KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) arrived at join barrier\n", gtid, team_id, tid));
1388
Jonathan Peyton117a94f2015-06-29 17:28:57 +00001389#if OMPT_SUPPORT
1390#if OMPT_TRACE
Jonathan Peytonb68a85d2015-09-21 18:11:22 +00001391 if (ompt_enabled &&
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001392 ompt_callbacks.ompt_callback(ompt_event_barrier_begin)) {
1393 ompt_callbacks.ompt_callback(ompt_event_barrier_begin)(
1394 team->t.ompt_team_info.parallel_id,
1395 team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
1396 }
Jonathan Peyton117a94f2015-06-29 17:28:57 +00001397#endif
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001398 this_thr->th.ompt_thread_info.state = ompt_state_wait_barrier;
1399#endif
1400
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001401 if (__kmp_tasking_mode == tskm_extra_barrier) {
1402 __kmp_tasking_barrier(team, this_thr, gtid);
   1403        KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) past tasking barrier\n", gtid, team_id, tid));
1404 }
1405# ifdef KMP_DEBUG
1406 if (__kmp_tasking_mode != tskm_immediate_exec) {
1407 KA_TRACE(20, ( "__kmp_join_barrier: T#%d, old team = %d, old task_team = %p, th_task_team = %p\n",
Andrey Churbanov6d224db2015-02-10 18:37:43 +00001408 __kmp_gtid_from_thread(this_thr), team_id, team->t.t_task_team[this_thr->th.th_task_state],
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001409 this_thr->th.th_task_team));
Andrey Churbanov6d224db2015-02-10 18:37:43 +00001410 KMP_DEBUG_ASSERT(this_thr->th.th_task_team == team->t.t_task_team[this_thr->th.th_task_state]);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001411 }
1412# endif /* KMP_DEBUG */
1413
1414 /* Copy the blocktime info to the thread, where __kmp_wait_template() can access it when the
   1415       team struct is not guaranteed to exist. Doing these loads causes a cache miss that slows
1416 down EPCC parallel by 2x. As a workaround, we do not perform the copy if blocktime=infinite,
1417 since the values are not used by __kmp_wait_template() in that case. */
1418 if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
1419 this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
1420 this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
1421 }
1422
1423#if USE_ITT_BUILD
1424 if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
1425 __kmp_itt_barrier_starting(gtid, itt_sync_obj);
1426#endif /* USE_ITT_BUILD */
1427
1428 switch (__kmp_barrier_gather_pattern[bs_forkjoin_barrier]) {
1429 case bp_hyper_bar: {
1430 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bs_forkjoin_barrier]);
1431 __kmp_hyper_barrier_gather(bs_forkjoin_barrier, this_thr, gtid, tid, NULL
1432 USE_ITT_BUILD_ARG(itt_sync_obj) );
1433 break;
1434 }
1435 case bp_hierarchical_bar: {
1436 __kmp_hierarchical_barrier_gather(bs_forkjoin_barrier, this_thr, gtid, tid, NULL
1437 USE_ITT_BUILD_ARG(itt_sync_obj) );
1438 break;
1439 }
1440 case bp_tree_bar: {
1441 KMP_ASSERT(__kmp_barrier_gather_branch_bits[bs_forkjoin_barrier]);
1442 __kmp_tree_barrier_gather(bs_forkjoin_barrier, this_thr, gtid, tid, NULL
1443 USE_ITT_BUILD_ARG(itt_sync_obj) );
1444 break;
1445 }
1446 default: {
1447 __kmp_linear_barrier_gather(bs_forkjoin_barrier, this_thr, gtid, tid, NULL
1448 USE_ITT_BUILD_ARG(itt_sync_obj) );
1449 }
1450 }
1451
1452 /* From this point on, the team data structure may be deallocated at any time by the
1453 master thread - it is unsafe to reference it in any of the worker threads. Any per-team
1454 data items that need to be referenced before the end of the barrier should be moved to
1455 the kmp_task_team_t structs. */
1456 if (KMP_MASTER_TID(tid)) {
1457 if (__kmp_tasking_mode != tskm_immediate_exec) {
1458 // Master shouldn't call decrease_load(). // TODO: enable master threads.
1459 // Master should have th_may_decrease_load == 0. // TODO: enable master threads.
1460 __kmp_task_team_wait(this_thr, team
1461 USE_ITT_BUILD_ARG(itt_sync_obj) );
1462 }
1463#if USE_ITT_BUILD
1464 if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
1465 __kmp_itt_barrier_middle(gtid, itt_sync_obj);
1466#endif /* USE_ITT_BUILD */
1467
1468# if USE_ITT_BUILD && USE_ITT_NOTIFY
1469 // Join barrier - report frame end
Andrey Churbanov51aecb82015-05-06 19:22:36 +00001470 if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) && __kmp_forkjoin_frames_mode &&
1471#if OMP_40_ENABLED
1472 this_thr->th.th_teams_microtask == NULL &&
1473#endif
1474 team->t.t_active_level == 1)
1475 {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001476 kmp_uint64 cur_time = __itt_get_timestamp();
1477 ident_t * loc = team->t.t_ident;
Andrey Churbanov51aecb82015-05-06 19:22:36 +00001478 kmp_info_t **other_threads = team->t.t_threads;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001479 int nproc = this_thr->th.th_team_nproc;
1480 int i;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001481 switch(__kmp_forkjoin_frames_mode) {
1482 case 1:
1483 __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0, loc, nproc);
1484 break;
1485 case 2:
1486 __kmp_itt_frame_submit(gtid, this_thr->th.th_bar_min_time, cur_time, 1, loc, nproc);
1487 break;
1488 case 3:
1489 if( __itt_metadata_add_ptr ) {
Andrey Churbanov51aecb82015-05-06 19:22:36 +00001490 // Initialize with master's wait time
1491 kmp_uint64 delta = cur_time - this_thr->th.th_bar_arrive_time;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001492 for (i=1; i<nproc; ++i) {
1493 delta += ( cur_time - other_threads[i]->th.th_bar_arrive_time );
1494 }
1495 __kmp_itt_metadata_imbalance(gtid, this_thr->th.th_frame_time, cur_time, delta, 0);
1496 }
1497 __kmp_itt_frame_submit(gtid, this_thr->th.th_frame_time, cur_time, 0, loc, nproc);
1498 this_thr->th.th_frame_time = cur_time;
1499 break;
1500 }
1501 }
1502# endif /* USE_ITT_BUILD */
1503 }
1504#if USE_ITT_BUILD
1505 else {
1506 if (__itt_sync_create_ptr || KMP_ITT_DEBUG)
1507 __kmp_itt_barrier_middle(gtid, itt_sync_obj);
1508 }
1509#endif /* USE_ITT_BUILD */
1510
1511#if KMP_DEBUG
1512 if (KMP_MASTER_TID(tid)) {
1513 KA_TRACE(15, ("__kmp_join_barrier: T#%d(%d:%d) says all %d team threads arrived\n",
1514 gtid, team_id, tid, nproc));
1515 }
1516#endif /* KMP_DEBUG */
1517
1518 // TODO now, mark worker threads as done so they may be disbanded
1519 KMP_MB(); // Flush all pending memory write invalidates.
1520 KA_TRACE(10, ("__kmp_join_barrier: T#%d(%d:%d) leaving\n", gtid, team_id, tid));
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001521
1522#if OMPT_SUPPORT
Jonathan Peytonb68a85d2015-09-21 18:11:22 +00001523 if (ompt_enabled) {
Jonathan Peytoncab67cc2015-09-18 16:24:46 +00001524#if OMPT_BLAME
Jonathan Peytonb68a85d2015-09-21 18:11:22 +00001525 if (ompt_callbacks.ompt_callback(ompt_event_barrier_end)) {
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001526 ompt_callbacks.ompt_callback(ompt_event_barrier_end)(
1527 team->t.ompt_team_info.parallel_id,
1528 team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id);
Jonathan Peytonb68a85d2015-09-21 18:11:22 +00001529 }
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001530#endif
1531
1532 // return to default state
1533 this_thr->th.ompt_thread_info.state = ompt_state_overhead;
1534 }
1535#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001536}
1537
1538
1539// TODO release worker threads' fork barriers as we are ready instead of all at once
1540void
1541__kmp_fork_barrier(int gtid, int tid)
1542{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001543 KMP_TIME_DEVELOPER_BLOCK(KMP_fork_barrier);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001544 kmp_info_t *this_thr = __kmp_threads[gtid];
1545 kmp_team_t *team = (tid == 0) ? this_thr->th.th_team : NULL;
1546#if USE_ITT_BUILD
1547 void * itt_sync_obj = NULL;
1548#endif /* USE_ITT_BUILD */
1549
1550 KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) has arrived\n",
1551 gtid, (team != NULL) ? team->t.t_id : -1, tid));
1552
1553 // th_team pointer only valid for master thread here
1554 if (KMP_MASTER_TID(tid)) {
1555#if USE_ITT_BUILD && USE_ITT_NOTIFY
1556 if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
1557 // Create itt barrier object
1558 itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier, 1);
1559 __kmp_itt_barrier_middle(gtid, itt_sync_obj); // Call acquired/releasing
1560 }
1561#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
1562
1563#ifdef KMP_DEBUG
1564 register kmp_info_t **other_threads = team->t.t_threads;
1565 register int i;
1566
1567 // Verify state
1568 KMP_MB();
1569
1570 for(i=1; i<team->t.t_nproc; ++i) {
1571 KA_TRACE(500, ("__kmp_fork_barrier: T#%d(%d:0) checking T#%d(%d:%d) fork go == %u.\n",
1572 gtid, team->t.t_id, other_threads[i]->th.th_info.ds.ds_gtid,
1573 team->t.t_id, other_threads[i]->th.th_info.ds.ds_tid,
1574 other_threads[i]->th.th_bar[bs_forkjoin_barrier].bb.b_go));
1575 KMP_DEBUG_ASSERT((TCR_4(other_threads[i]->th.th_bar[bs_forkjoin_barrier].bb.b_go)
1576 & ~(KMP_BARRIER_SLEEP_STATE))
1577 == KMP_INIT_BARRIER_STATE);
1578 KMP_DEBUG_ASSERT(other_threads[i]->th.th_team == team);
1579 }
1580#endif
1581
1582 if (__kmp_tasking_mode != tskm_immediate_exec) {
Jonathan Peyton54127982015-11-04 21:37:48 +00001583 __kmp_task_team_setup(this_thr, team, 0); // 0 indicates setup current task team if nthreads > 1
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001584 }
1585
1586 /* The master thread may have changed its blocktime between the join barrier and the
1587 fork barrier. Copy the blocktime info to the thread, where __kmp_wait_template() can
1588 access it when the team struct is not guaranteed to exist. */
1589 // See note about the corresponding code in __kmp_join_barrier() being performance-critical
1590 if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
1591 this_thr->th.th_team_bt_intervals = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_intervals;
1592 this_thr->th.th_team_bt_set = team->t.t_implicit_task_taskdata[tid].td_icvs.bt_set;
1593 }
1594 } // master
1595
1596 switch (__kmp_barrier_release_pattern[bs_forkjoin_barrier]) {
1597 case bp_hyper_bar: {
1598 KMP_ASSERT(__kmp_barrier_release_branch_bits[bs_forkjoin_barrier]);
1599 __kmp_hyper_barrier_release(bs_forkjoin_barrier, this_thr, gtid, tid, TRUE
1600 USE_ITT_BUILD_ARG(itt_sync_obj) );
1601 break;
1602 }
1603 case bp_hierarchical_bar: {
1604 __kmp_hierarchical_barrier_release(bs_forkjoin_barrier, this_thr, gtid, tid, TRUE
1605 USE_ITT_BUILD_ARG(itt_sync_obj) );
1606 break;
1607 }
1608 case bp_tree_bar: {
1609 KMP_ASSERT(__kmp_barrier_release_branch_bits[bs_forkjoin_barrier]);
1610 __kmp_tree_barrier_release(bs_forkjoin_barrier, this_thr, gtid, tid, TRUE
1611 USE_ITT_BUILD_ARG(itt_sync_obj) );
1612 break;
1613 }
1614 default: {
1615 __kmp_linear_barrier_release(bs_forkjoin_barrier, this_thr, gtid, tid, TRUE
1616 USE_ITT_BUILD_ARG(itt_sync_obj) );
1617 }
1618 }
1619
1620 // Early exit for reaping threads releasing forkjoin barrier
1621 if (TCR_4(__kmp_global.g.g_done)) {
Jonathan Peyton54127982015-11-04 21:37:48 +00001622 this_thr->th.th_task_team = NULL;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001623
1624#if USE_ITT_BUILD && USE_ITT_NOTIFY
1625 if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
1626 if (!KMP_MASTER_TID(tid)) {
1627 itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
1628 if (itt_sync_obj)
1629 __kmp_itt_barrier_finished(gtid, itt_sync_obj);
1630 }
1631 }
1632#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
1633 KA_TRACE(10, ("__kmp_fork_barrier: T#%d is leaving early\n", gtid));
1634 return;
1635 }
1636
1637 /* We can now assume that a valid team structure has been allocated by the master and
1638 propagated to all worker threads. The current thread, however, may not be part of the
1639 team, so we can't blindly assume that the team pointer is non-null. */
1640 team = (kmp_team_t *)TCR_PTR(this_thr->th.th_team);
1641 KMP_DEBUG_ASSERT(team != NULL);
1642 tid = __kmp_tid_from_gtid(gtid);
1643
1644
1645#if KMP_BARRIER_ICV_PULL
1646 /* Master thread's copy of the ICVs was set up on the implicit taskdata in
1647 __kmp_reinitialize_team. __kmp_fork_call() assumes the master thread's implicit task has
1648 this data before this function is called. We cannot modify __kmp_fork_call() to look at
1649 the fixed ICVs in the master's thread struct, because it is not always the case that the
1650 threads arrays have been allocated when __kmp_fork_call() is executed. */
Jonathan Peyton45be4502015-08-11 21:36:41 +00001651 {
1652 KMP_TIME_DEVELOPER_BLOCK(USER_icv_copy);
1653 if (!KMP_MASTER_TID(tid)) { // master thread already has ICVs
1654 // Copy the initial ICVs from the master's thread struct to the implicit task for this tid.
1655 KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d) is PULLing ICVs\n", gtid, tid));
1656 __kmp_init_implicit_task(team->t.t_ident, team->t.t_threads[tid], team, tid, FALSE);
1657 copy_icvs(&team->t.t_implicit_task_taskdata[tid].td_icvs,
1658 &team->t.t_threads[0]->th.th_bar[bs_forkjoin_barrier].bb.th_fixed_icvs);
1659 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001660 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001661#endif // KMP_BARRIER_ICV_PULL
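    /* Descriptive aside, no new logic: with KMP_BARRIER_ICV_PULL each released worker performs
       the copy_icvs() above for itself, pulling from the master's th_fixed_icvs; with
       KMP_BARRIER_ICV_PUSH the ICVs are propagated as part of the fork-barrier release instead,
       and in the plain configuration they are copied linearly in __kmp_setup_icv_copy() below. */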
1662
1663 if (__kmp_tasking_mode != tskm_immediate_exec) {
1664 __kmp_task_team_sync(this_thr, team);
1665 }
1666
1667#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
1668 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
1669 if (proc_bind == proc_bind_intel) {
1670#endif
Andrey Churbanovf28f6132015-01-13 14:54:00 +00001671#if KMP_AFFINITY_SUPPORTED
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001672 // Call dynamic affinity settings
1673 if(__kmp_affinity_type == affinity_balanced && team->t.t_size_changed) {
1674 __kmp_balanced_affinity(tid, team->t.t_nproc);
1675 }
Andrey Churbanovf28f6132015-01-13 14:54:00 +00001676#endif // KMP_AFFINITY_SUPPORTED
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001677#if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
1678 }
Andrey Churbanov94e569e2015-03-10 09:19:47 +00001679 else if (proc_bind != proc_bind_false) {
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001680 if (this_thr->th.th_new_place == this_thr->th.th_current_place) {
1681 KA_TRACE(100, ("__kmp_fork_barrier: T#%d already in correct place %d\n",
1682 __kmp_gtid_from_thread(this_thr), this_thr->th.th_current_place));
1683 }
1684 else {
1685 __kmp_affinity_set_place(gtid);
1686 }
1687 }
1688#endif
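    // Illustrative only: the place migration handled above is what a run such as
    //     OMP_PLACES=cores OMP_PROC_BIND=spread ./my_omp_app
    // relies on -- a thread whose assigned place has changed moves to it here, the first time
    // it passes through a fork barrier with the new binding.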
1689
1690#if USE_ITT_BUILD && USE_ITT_NOTIFY
1691 if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
1692 if (!KMP_MASTER_TID(tid)) {
1693 // Get correct barrier object
1694 itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
1695 __kmp_itt_barrier_finished(gtid, itt_sync_obj); // Workers call acquired
1696 } // (prepare called inside barrier_release)
1697 }
1698#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
1699 KA_TRACE(10, ("__kmp_fork_barrier: T#%d(%d:%d) is leaving\n", gtid, team->t.t_id, tid));
1700}
1701
1702
1703void
1704__kmp_setup_icv_copy(kmp_team_t *team, int new_nproc, kmp_internal_control_t *new_icvs, ident_t *loc )
1705{
Jonathan Peyton45be4502015-08-11 21:36:41 +00001706 KMP_TIME_DEVELOPER_BLOCK(KMP_setup_icv_copy);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001707
1708 KMP_DEBUG_ASSERT(team && new_nproc && new_icvs);
1709 KMP_DEBUG_ASSERT((!TCR_4(__kmp_init_parallel)) || new_icvs->nproc);
1710
1711 /* Master thread's copy of the ICVs was set up on the implicit taskdata in
1712 __kmp_reinitialize_team. __kmp_fork_call() assumes the master thread's implicit task has
1713 this data before this function is called. */
1714#if KMP_BARRIER_ICV_PULL
1715 /* Copy ICVs to master's thread structure into th_fixed_icvs (which remains untouched), where
1716 all of the worker threads can access them and make their own copies after the barrier. */
1717 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be allocated at this point
1718 copy_icvs(&team->t.t_threads[0]->th.th_bar[bs_forkjoin_barrier].bb.th_fixed_icvs, new_icvs);
1719 KF_TRACE(10, ("__kmp_setup_icv_copy: PULL: T#%d this_thread=%p team=%p\n",
1720 0, team->t.t_threads[0], team));
1721#elif KMP_BARRIER_ICV_PUSH
1722 // The ICVs will be propagated in the fork barrier, so nothing needs to be done here.
1723 KF_TRACE(10, ("__kmp_setup_icv_copy: PUSH: T#%d this_thread=%p team=%p\n",
1724 0, team->t.t_threads[0], team));
1725#else
1726 // Copy the ICVs to each of the non-master threads. This takes O(nthreads) time.
1727 ngo_load(new_icvs);
1728 KMP_DEBUG_ASSERT(team->t.t_threads[0]); // The threads arrays should be allocated at this point
Jonathan Peyton91b78702015-06-08 19:39:07 +00001729 for (int f=1; f<new_nproc; ++f) { // Skip the master thread
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001730 // TODO: GEH - pass in better source location info since usually NULL here
1731 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
1732 f, team->t.t_threads[f], team));
1733 __kmp_init_implicit_task(loc, team->t.t_threads[f], team, f, FALSE);
1734 ngo_store_icvs(&team->t.t_implicit_task_taskdata[f].td_icvs, new_icvs);
1735 KF_TRACE(10, ("__kmp_setup_icv_copy: LINEAR: T#%d this_thread=%p team=%p\n",
1736 f, team->t.t_threads[f], team));
1737 }
1738 ngo_sync();
1739#endif // KMP_BARRIER_ICV_PULL
1740}