/*
 * kmp_csupport.cpp -- kfront linkage support for OpenMP.
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#define __KMP_IMP
#include "omp.h" /* extern "C" declarations of user-visible routines */
#include "kmp.h"
#include "kmp_error.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#define MAX_MESSAGE 512

// flags will be used in future, e.g. to implement openmp_strict library
// restrictions

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc   in   source location information
 * @param flags in   for future use (currently ignored)
 *
 * Initialize the runtime library. This call is optional; if it is not made then
 * it will be implicitly called by attempts to use other library functions.
 */
void __kmpc_begin(ident_t *loc, kmp_int32 flags) {
  // By default __kmpc_begin() is a no-op.
  char *env;
  if ((env = getenv("KMP_INITIAL_THREAD_BIND")) != NULL &&
      __kmp_str_match_true(env)) {
    __kmp_middle_initialize();
    KC_TRACE(10, ("__kmpc_begin: middle initialization called\n"));
  } else if (__kmp_ignore_mppbeg() == FALSE) {
    // By default __kmp_ignore_mppbeg() returns TRUE.
    __kmp_internal_begin();
    KC_TRACE(10, ("__kmpc_begin: called\n"));
  }
}

/*!
 * @ingroup STARTUP_SHUTDOWN
 * @param loc source location information
 *
 * Shut down the runtime library. This is also optional, and even if called will
 * not do anything unless the `KMP_IGNORE_MPPEND` environment variable is set to
 * zero.
 */
void __kmpc_end(ident_t *loc) {
  // By default, __kmp_ignore_mppend() returns TRUE which makes __kmpc_end()
  // call no-op. However, this can be overridden with KMP_IGNORE_MPPEND
  // environment variable. If KMP_IGNORE_MPPEND is 0, __kmp_ignore_mppend()
  // returns FALSE and __kmpc_end() will unregister this root (it can cause
  // library shut down).
  if (__kmp_ignore_mppend() == FALSE) {
    KC_TRACE(10, ("__kmpc_end: called\n"));
    KA_TRACE(30, ("__kmpc_end\n"));

    __kmp_internal_end_thread(-1);
  }
#if KMP_OS_WINDOWS && OMPT_SUPPORT
  // Normal exit process on Windows does not allow worker threads of the final
  // parallel region to finish reporting their events, so shutting down the
  // library here fixes the issue at least for the cases where __kmpc_end() is
  // placed properly.
  if (ompt_enabled.enabled)
    __kmp_internal_end_library(__kmp_gtid_get_specific());
#endif
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The global thread index of the active thread.

This function can be called in any context.

If the runtime has only been entered at the outermost level from a
single (necessarily non-OpenMP<sup>*</sup>) thread, then the thread number is
that which would be returned by omp_get_thread_num() in the outermost
active parallel construct. (Or zero if there is no active parallel
construct, since the master thread is necessarily thread zero).

If multiple non-OpenMP threads all enter an OpenMP construct then this
will be a unique thread identifier among all the threads created by
the OpenMP runtime (but the value cannot be defined in terms of
OpenMP thread ids returned by omp_get_thread_num()).
*/
kmp_int32 __kmpc_global_thread_num(ident_t *loc) {
  kmp_int32 gtid = __kmp_entry_gtid();

  KC_TRACE(10, ("__kmpc_global_thread_num: T#%d\n", gtid));

  return gtid;
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads under control of the OpenMP<sup>*</sup> runtime

This function can be called in any context.
It returns the total number of threads under the control of the OpenMP runtime.
That is not a number that can be determined by any OpenMP standard calls, since
the library may be called from more than one non-OpenMP thread, and this
reflects the total over all such calls. Similarly the runtime maintains
underlying threads even when they are not active (since the cost of creating
and destroying OS threads is high); this call counts all such threads even if
they are not waiting for work.
*/
kmp_int32 __kmpc_global_num_threads(ident_t *loc) {
  KC_TRACE(10,
           ("__kmpc_global_num_threads: num_threads = %d\n", __kmp_all_nth));

  return TCR_4(__kmp_all_nth);
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The thread number of the calling thread in the innermost active parallel
construct.
*/
kmp_int32 __kmpc_bound_thread_num(ident_t *loc) {
  KC_TRACE(10, ("__kmpc_bound_thread_num: called\n"));
  return __kmp_tid_from_gtid(__kmp_entry_gtid());
}

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return The number of threads in the innermost active parallel construct.
*/
kmp_int32 __kmpc_bound_num_threads(ident_t *loc) {
  KC_TRACE(10, ("__kmpc_bound_num_threads: called\n"));

  return __kmp_entry_thread()->th.th_team->t.t_nproc;
}

/*!
 * @ingroup DEPRECATED
 * @param loc location description
 *
 * This function need not be called. It always returns TRUE.
 */
kmp_int32 __kmpc_ok_to_fork(ident_t *loc) {
#ifndef KMP_DEBUG

  return TRUE;

#else

  const char *semi2;
  const char *semi3;
  int line_no;

  if (__kmp_par_range == 0) {
    return TRUE;
  }
  semi2 = loc->psource;
  if (semi2 == NULL) {
    return TRUE;
  }
  semi2 = strchr(semi2, ';');
  if (semi2 == NULL) {
    return TRUE;
  }
  semi2 = strchr(semi2 + 1, ';');
  if (semi2 == NULL) {
    return TRUE;
  }
  if (__kmp_par_range_filename[0]) {
    const char *name = semi2 - 1;
    while ((name > loc->psource) && (*name != '/') && (*name != ';')) {
      name--;
    }
    if ((*name == '/') || (*name == ';')) {
      name++;
    }
    if (strncmp(__kmp_par_range_filename, name, semi2 - name)) {
      return __kmp_par_range < 0;
    }
  }
  semi3 = strchr(semi2 + 1, ';');
  if (__kmp_par_range_routine[0]) {
    if ((semi3 != NULL) && (semi3 > semi2) &&
        (strncmp(__kmp_par_range_routine, semi2 + 1, semi3 - semi2 - 1))) {
      return __kmp_par_range < 0;
    }
  }
  if (KMP_SSCANF(semi3 + 1, "%d", &line_no) == 1) {
    if ((line_no >= __kmp_par_range_lb) && (line_no <= __kmp_par_range_ub)) {
      return __kmp_par_range > 0;
    }
    return __kmp_par_range < 0;
  }
  return TRUE;

#endif /* KMP_DEBUG */
}
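
/* Illustrative note, not an authoritative spec: the parsing above assumes the
   ident_t psource string is a semicolon-separated record along the lines of
       ";file;routine;line_lb;line_ub;;"
   e.g. ";foo.c;bar;12;18;;". semi2 is left at the ';' that follows the file
   name, semi3 at the ';' that follows the routine name, and KMP_SSCANF then
   reads the first line number, which is what the KMP_PAR_RANGE debug filter
   compares against. */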

/*!
@ingroup THREAD_STATES
@param loc Source location information.
@return 1 if this thread is executing inside an active parallel region, zero if
not.
*/
kmp_int32 __kmpc_in_parallel(ident_t *loc) {
  return __kmp_entry_thread()->th.th_root->r.r_active;
}

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_threads number of threads requested for this parallel construct

Set the number of threads to be used by the next fork spawned by this thread.
This call is only required if the parallel construct has a `num_threads` clause.
*/
void __kmpc_push_num_threads(ident_t *loc, kmp_int32 global_tid,
                             kmp_int32 num_threads) {
  KA_TRACE(20, ("__kmpc_push_num_threads: enter T#%d num_threads=%d\n",
                global_tid, num_threads));

  __kmp_push_num_threads(loc, global_tid, num_threads);
}
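
/* A minimal sketch (an assumption about typical compiler output, not code
   emitted by this library; "outlined_body" is a hypothetical name): for
       #pragma omp parallel num_threads(4)
   a compiler would usually generate
       kmp_int32 gtid = __kmpc_global_thread_num(&loc);
       __kmpc_push_num_threads(&loc, gtid, 4);
       __kmpc_fork_call(&loc, 0, (kmpc_micro)outlined_body);
   The pushed value only applies to the next fork, which is why
   __kmpc_pop_num_threads() below is effectively a no-op. */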

void __kmpc_pop_num_threads(ident_t *loc, kmp_int32 global_tid) {
  KA_TRACE(20, ("__kmpc_pop_num_threads: enter\n"));

  /* the num_threads are automatically popped */
}

#if OMP_40_ENABLED

void __kmpc_push_proc_bind(ident_t *loc, kmp_int32 global_tid,
                           kmp_int32 proc_bind) {
  KA_TRACE(20, ("__kmpc_push_proc_bind: enter T#%d proc_bind=%d\n", global_tid,
                proc_bind));

  __kmp_push_proc_bind(loc, global_tid, (kmp_proc_bind_t)proc_bind);
}

#endif /* OMP_40_ENABLED */

/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined parallel
construct
@param ... pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void __kmpc_fork_call(ident_t *loc, kmp_int32 argc, kmpc_micro microtask, ...) {
  int gtid = __kmp_entry_gtid();

#if (KMP_STATS_ENABLED)
  // If we were in a serial region, then stop the serial timer, record
  // the event, and start parallel region timer
  stats_state_e previous_state = KMP_GET_THREAD_STATE();
  if (previous_state == stats_state_e::SERIAL_REGION) {
    KMP_EXCHANGE_PARTITIONED_TIMER(OMP_parallel_overhead);
  } else {
    KMP_PUSH_PARTITIONED_TIMER(OMP_parallel_overhead);
  }
  int inParallel = __kmpc_in_parallel(loc);
  if (inParallel) {
    KMP_COUNT_BLOCK(OMP_NESTED_PARALLEL);
  } else {
    KMP_COUNT_BLOCK(OMP_PARALLEL);
  }
#endif

  // maybe saving thr_state is enough here
  {
    va_list ap;
    va_start(ap, microtask);

#if OMPT_SUPPORT
    omp_frame_t *ompt_frame;
    if (ompt_enabled.enabled) {
      kmp_info_t *master_th = __kmp_threads[gtid];
      kmp_team_t *parent_team = master_th->th.th_team;
      ompt_lw_taskteam_t *lwt = parent_team->t.ompt_serialized_team_info;
      if (lwt)
        ompt_frame = &(lwt->ompt_task_info.frame);
      else {
        int tid = __kmp_tid_from_gtid(gtid);
        ompt_frame = &(
            parent_team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame);
      }
      ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
      OMPT_STORE_RETURN_ADDRESS(gtid);
    }
#endif

#if INCLUDE_SSC_MARKS
    SSC_MARK_FORKING();
#endif
    __kmp_fork_call(loc, gtid, fork_context_intel, argc,
                    VOLATILE_CAST(microtask_t) microtask, // "wrapped" task
                    VOLATILE_CAST(launch_t) __kmp_invoke_task_func,
/* TODO: revert workaround for Intel(R) 64 tracker #96 */
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                    &ap
#else
                    ap
#endif
                    );
#if INCLUDE_SSC_MARKS
    SSC_MARK_JOINING();
#endif
    __kmp_join_call(loc, gtid
#if OMPT_SUPPORT
                    ,
                    fork_context_intel
#endif
                    );

    va_end(ap);
  }

#if KMP_STATS_ENABLED
  if (previous_state == stats_state_e::SERIAL_REGION) {
    KMP_EXCHANGE_PARTITIONED_TIMER(OMP_serial);
  } else {
    KMP_POP_PARTITIONED_TIMER();
  }
#endif // KMP_STATS_ENABLED
}
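
/* A minimal sketch of the calling convention this entry point expects
   (hedged: "outlined_body", "do_work" and "x" are invented for illustration).
   The microtask receives the global and bound thread ids by reference,
   followed by one pointer per shared variable passed through the ellipsis:

       void outlined_body(kmp_int32 *gtid, kmp_int32 *btid, int *x) {
         do_work(*gtid, x); // hypothetical per-thread work
       }
       // at the #pragma omp parallel site:
       __kmpc_fork_call(&loc, 1, (kmpc_micro)outlined_body, &x);
*/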

#if OMP_40_ENABLED
/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number
@param num_teams number of teams requested for the teams construct
@param num_threads number of threads per team requested for the teams construct

Set the number of teams to be used by the teams construct.
This call is only required if the teams construct has a `num_teams` clause
or a `thread_limit` clause (or both).
*/
void __kmpc_push_num_teams(ident_t *loc, kmp_int32 global_tid,
                           kmp_int32 num_teams, kmp_int32 num_threads) {
  KA_TRACE(20,
           ("__kmpc_push_num_teams: enter T#%d num_teams=%d num_threads=%d\n",
            global_tid, num_teams, num_threads));

  __kmp_push_num_teams(loc, global_tid, num_teams, num_threads);
}

/*!
@ingroup PARALLEL
@param loc source location information
@param argc total number of arguments in the ellipsis
@param microtask pointer to callback routine consisting of outlined teams
construct
@param ... pointers to shared variables that aren't global

Do the actual fork and call the microtask in the relevant number of threads.
*/
void __kmpc_fork_teams(ident_t *loc, kmp_int32 argc, kmpc_micro microtask,
                       ...) {
  int gtid = __kmp_entry_gtid();
  kmp_info_t *this_thr = __kmp_threads[gtid];
  va_list ap;
  va_start(ap, microtask);

  KMP_COUNT_BLOCK(OMP_TEAMS);

  // remember teams entry point and nesting level
  this_thr->th.th_teams_microtask = microtask;
  this_thr->th.th_teams_level =
      this_thr->th.th_team->t.t_level; // AC: can be >0 on host

#if OMPT_SUPPORT
  kmp_team_t *parent_team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(gtid);
  if (ompt_enabled.enabled) {
    parent_team->t.t_implicit_task_taskdata[tid]
        .ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
  }
  OMPT_STORE_RETURN_ADDRESS(gtid);
#endif

  // check if __kmpc_push_num_teams called, set default number of teams
  // otherwise
  if (this_thr->th.th_teams_size.nteams == 0) {
    __kmp_push_num_teams(loc, gtid, 0, 0);
  }
  KMP_DEBUG_ASSERT(this_thr->th.th_set_nproc >= 1);
  KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nteams >= 1);
  KMP_DEBUG_ASSERT(this_thr->th.th_teams_size.nth >= 1);

  __kmp_fork_call(loc, gtid, fork_context_intel, argc,
                  VOLATILE_CAST(microtask_t)
                      __kmp_teams_master, // "wrapped" task
                  VOLATILE_CAST(launch_t) __kmp_invoke_teams_master,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
                  &ap
#else
                  ap
#endif
                  );
  __kmp_join_call(loc, gtid
#if OMPT_SUPPORT
                  ,
                  fork_context_intel
#endif
                  );

  this_thr->th.th_teams_microtask = NULL;
  this_thr->th.th_teams_level = 0;
  *(kmp_int64 *)(&this_thr->th.th_teams_size) = 0L;
  va_end(ap);
}
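
/* A minimal sketch (hypothetical names; the exact shape depends on the
   compiler) of how
       #pragma omp teams num_teams(2) thread_limit(8)
   is typically lowered against the entry points above:

       kmp_int32 gtid = __kmpc_global_thread_num(&loc);
       __kmpc_push_num_teams(&loc, gtid, 2, 8);
       __kmpc_fork_teams(&loc, 1, (kmpc_micro)outlined_teams_body, &shared);

   If the push is omitted, __kmpc_fork_teams() installs defaults through
   __kmp_push_num_teams(loc, gtid, 0, 0) as shown above. */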
#endif /* OMP_40_ENABLED */

// I don't think this function should ever have been exported.
// The __kmpc_ prefix was misapplied. I'm fairly certain that no generated
// openmp code ever called it, but it's been exported from the RTL for so
// long that I'm afraid to remove the definition.
int __kmpc_invoke_task_func(int gtid) { return __kmp_invoke_task_func(gtid); }

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number

Enter a serialized parallel construct. This interface is used to handle a
conditional parallel region, like this,
@code
#pragma omp parallel if (condition)
@endcode
when the condition is false.
*/
void __kmpc_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
// The implementation is now in kmp_runtime.cpp so that it can share static
// functions with kmp_fork_call since the tasks to be done are similar in
// each case.
#if OMPT_SUPPORT
  OMPT_STORE_RETURN_ADDRESS(global_tid);
#endif
  __kmp_serialized_parallel(loc, global_tid);
}
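
/* A minimal sketch of the conditional lowering this pair of entry points
   exists for (hedged; "outlined_body" and "x" are illustrative):

       if (condition) {
         __kmpc_fork_call(&loc, 1, (kmpc_micro)outlined_body, &x);
       } else {
         kmp_int32 gtid = __kmpc_global_thread_num(&loc);
         __kmpc_serialized_parallel(&loc, gtid);
         kmp_int32 btid = 0;
         outlined_body(&gtid, &btid, &x); // run the region on this thread
         __kmpc_end_serialized_parallel(&loc, gtid);
       }
*/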

/*!
@ingroup PARALLEL
@param loc source location information
@param global_tid global thread number

Leave a serialized parallel construct.
*/
void __kmpc_end_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
  kmp_internal_control_t *top;
  kmp_info_t *this_thr;
  kmp_team_t *serial_team;

  KC_TRACE(10,
           ("__kmpc_end_serialized_parallel: called by T#%d\n", global_tid));

  /* skip all this code for autopar serialized loops since it results in
     unacceptable overhead */
  if (loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR))
    return;

  // Not autopar code
  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  this_thr = __kmp_threads[global_tid];
  serial_team = this_thr->th.th_serial_team;

#if OMP_45_ENABLED
  kmp_task_team_t *task_team = this_thr->th.th_task_team;

  // we need to wait for the proxy tasks before finishing the thread
  if (task_team != NULL && task_team->tt.tt_found_proxy_tasks)
    __kmp_task_team_wait(this_thr, serial_team USE_ITT_BUILD_ARG(NULL));
#endif

  KMP_MB();
  KMP_DEBUG_ASSERT(serial_team);
  KMP_ASSERT(serial_team->t.t_serialized);
  KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team);
  KMP_DEBUG_ASSERT(serial_team != this_thr->th.th_root->r.r_root_team);
  KMP_DEBUG_ASSERT(serial_team->t.t_threads);
  KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);

#if OMPT_SUPPORT
  if (ompt_enabled.enabled &&
      this_thr->th.ompt_thread_info.state != omp_state_overhead) {
    OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame = NULL;
    if (ompt_enabled.ompt_callback_implicit_task) {
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_end, NULL, OMPT_CUR_TASK_DATA(this_thr), 1,
          OMPT_CUR_TASK_INFO(this_thr)->thread_num);
    }

    // reset and clear the task id only after unlinking the task
    ompt_data_t *parent_task_data;
    __ompt_get_task_info_internal(1, NULL, &parent_task_data, NULL, NULL, NULL);

    if (ompt_enabled.ompt_callback_parallel_end) {
      ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
          &(serial_team->t.ompt_team_info.parallel_data), parent_task_data,
          ompt_parallel_invoker_program, OMPT_LOAD_RETURN_ADDRESS(global_tid));
    }
    __ompt_lw_taskteam_unlink(this_thr);
    this_thr->th.ompt_thread_info.state = omp_state_overhead;
  }
#endif

  /* If necessary, pop the internal control stack values and replace the team
   * values */
  top = serial_team->t.t_control_stack_top;
  if (top && top->serial_nesting_level == serial_team->t.t_serialized) {
    copy_icvs(&serial_team->t.t_threads[0]->th.th_current_task->td_icvs, top);
    serial_team->t.t_control_stack_top = top->next;
    __kmp_free(top);
  }

  // if( serial_team -> t.t_serialized > 1 )
  serial_team->t.t_level--;

  /* pop dispatch buffers stack */
  KMP_DEBUG_ASSERT(serial_team->t.t_dispatch->th_disp_buffer);
  {
    dispatch_private_info_t *disp_buffer =
        serial_team->t.t_dispatch->th_disp_buffer;
    serial_team->t.t_dispatch->th_disp_buffer =
        serial_team->t.t_dispatch->th_disp_buffer->next;
    __kmp_free(disp_buffer);
  }
#if OMP_50_ENABLED
  this_thr->th.th_def_allocator = serial_team->t.t_def_allocator; // restore
#endif

  --serial_team->t.t_serialized;
  if (serial_team->t.t_serialized == 0) {

/* return to the parallel section */

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    if (__kmp_inherit_fp_control && serial_team->t.t_fp_control_saved) {
      __kmp_clear_x87_fpu_status_word();
      __kmp_load_x87_fpu_control_word(&serial_team->t.t_x87_fpu_control_word);
      __kmp_load_mxcsr(&serial_team->t.t_mxcsr);
    }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

    this_thr->th.th_team = serial_team->t.t_parent;
    this_thr->th.th_info.ds.ds_tid = serial_team->t.t_master_tid;

    /* restore values cached in the thread */
    this_thr->th.th_team_nproc = serial_team->t.t_parent->t.t_nproc; /* JPH */
    this_thr->th.th_team_master =
        serial_team->t.t_parent->t.t_threads[0]; /* JPH */
    this_thr->th.th_team_serialized = this_thr->th.th_team->t.t_serialized;

    /* TODO the below shouldn't need to be adjusted for serialized teams */
    this_thr->th.th_dispatch =
        &this_thr->th.th_team->t.t_dispatch[serial_team->t.t_master_tid];

    __kmp_pop_current_task_from_thread(this_thr);

    KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 0);
    this_thr->th.th_current_task->td_flags.executing = 1;

    if (__kmp_tasking_mode != tskm_immediate_exec) {
      // Copy the task team from the new child / old parent team to the thread.
      this_thr->th.th_task_team =
          this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state];
      KA_TRACE(20,
               ("__kmpc_end_serialized_parallel: T#%d restoring task_team %p / "
                "team %p\n",
                global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
    }
  } else {
    if (__kmp_tasking_mode != tskm_immediate_exec) {
      KA_TRACE(20, ("__kmpc_end_serialized_parallel: T#%d decreasing nesting "
                    "depth of serial team %p to %d\n",
                    global_tid, serial_team, serial_team->t.t_serialized));
    }
  }

  if (__kmp_env_consistency_check)
    __kmp_pop_parallel(global_tid, NULL);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled)
    this_thr->th.ompt_thread_info.state =
        ((this_thr->th.th_team_serialized) ? omp_state_work_serial
                                           : omp_state_work_parallel);
#endif
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information.

Execute <tt>flush</tt>. This is implemented as a full memory fence. (Though
depending on the memory ordering convention obeyed by the compiler
even that may not be necessary).
*/
void __kmpc_flush(ident_t *loc) {
  KC_TRACE(10, ("__kmpc_flush: called\n"));

  /* need explicit __mf() here since we use volatile instead in the library */
  KMP_MB(); /* Flush all pending memory write invalidates. */

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
#if KMP_MIC
// fence-style instructions do not exist, but lock; xaddl $0,(%rsp) can be used.
// We shouldn't need it, though, since the ABI rules require that
// * If the compiler generates NGO stores it also generates the fence
// * If users hand-code NGO stores they should insert the fence
// therefore no incomplete unordered stores should be visible.
#else
  // C74404
  // This is to address non-temporal store instructions (sfence needed).
  // The clflush instruction needs to be addressed as well (mfence needed).
  // Probably the non-temporal load movntdqa instruction should also be
  // addressed.
  // mfence is a SSE2 instruction. Do not execute it if CPU is not SSE2.
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
  if (!__kmp_cpuinfo.sse2) {
    // CPU cannot execute SSE2 instructions.
  } else {
#if KMP_COMPILER_ICC
    _mm_mfence();
#elif KMP_COMPILER_MSVC
    MemoryBarrier();
#else
    __sync_synchronize();
#endif // KMP_COMPILER_ICC
  }
#endif // KMP_MIC
#elif (KMP_ARCH_ARM || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS || KMP_ARCH_MIPS64)
// Nothing to see here; move along
#elif KMP_ARCH_PPC64
// Nothing needed here (we have a real MB above).
#if KMP_OS_CNK
  // The flushing thread needs to yield here; this prevents a
  // busy-waiting thread from saturating the pipeline. flush is
  // often used in loops like this:
  //   while (!flag) {
  //     #pragma omp flush(flag)
  //   }
  // and adding the yield here is good for at least a 10x speedup
  // when running >2 threads per core (on the NAS LU benchmark).
  __kmp_yield(TRUE);
#endif
#else
#error Unknown or unsupported architecture
#endif

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_flush) {
    ompt_callbacks.ompt_callback(ompt_callback_flush)(
        __ompt_get_thread_data_internal(), OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
}
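
/* For reference (typical, not guaranteed, lowering): a bare
       #pragma omp flush
   becomes a single call, __kmpc_flush(&loc); flush directives that name a
   list of variables generally funnel through this same full-fence entry
   point, the list serving only as a hint. */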

/* -------------------------------------------------------------------------- */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Execute a barrier.
*/
void __kmpc_barrier(ident_t *loc, kmp_int32 global_tid) {
  KMP_COUNT_BLOCK(OMP_BARRIER);
  KC_TRACE(10, ("__kmpc_barrier: called T#%d\n", global_tid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  if (__kmp_env_consistency_check) {
    if (loc == 0) {
      KMP_WARNING(ConstructIdentInvalid); // ??? What does it mean for the user?
    }

    __kmp_check_barrier(global_tid, ct_barrier, loc);
  }

#if OMPT_SUPPORT
  omp_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    if (ompt_frame->enter_frame == NULL)
      ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(global_tid);
  }
#endif
  __kmp_threads[global_tid]->th.th_ident = loc;
  // TODO: explicit barrier_wait_id:
  // this function is called when 'barrier' directive is present or
  // implicit barrier at the end of a worksharing construct.
  // 1) better to add a per-thread barrier counter to a thread data structure
  // 2) set to 0 when a new team is created
  // 4) no sync is required

  __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif
}

/* The BARRIER for a MASTER section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@return 1 if this thread should execute the <tt>master</tt> block, 0 otherwise.
*/
kmp_int32 __kmpc_master(ident_t *loc, kmp_int32 global_tid) {
  int status = 0;

  KC_TRACE(10, ("__kmpc_master: called T#%d\n", global_tid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  if (KMP_MASTER_GTID(global_tid)) {
    KMP_COUNT_BLOCK(OMP_MASTER);
    KMP_PUSH_PARTITIONED_TIMER(OMP_master);
    status = 1;
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (status) {
    if (ompt_enabled.ompt_callback_master) {
      kmp_info_t *this_thr = __kmp_threads[global_tid];
      kmp_team_t *team = this_thr->th.th_team;

      int tid = __kmp_tid_from_gtid(global_tid);
      ompt_callbacks.ompt_callback(ompt_callback_master)(
          ompt_scope_begin, &(team->t.ompt_team_info.parallel_data),
          &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
          OMPT_GET_RETURN_ADDRESS(0));
    }
  }
#endif

  if (__kmp_env_consistency_check) {
#if KMP_USE_DYNAMIC_LOCK
    if (status)
      __kmp_push_sync(global_tid, ct_master, loc, NULL, 0);
    else
      __kmp_check_sync(global_tid, ct_master, loc, NULL, 0);
#else
    if (status)
      __kmp_push_sync(global_tid, ct_master, loc, NULL);
    else
      __kmp_check_sync(global_tid, ct_master, loc, NULL);
#endif
  }

  return status;
}

/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.

Mark the end of a <tt>master</tt> region. This should only be called by the
thread that executes the <tt>master</tt> region.
*/
void __kmpc_end_master(ident_t *loc, kmp_int32 global_tid) {
  KC_TRACE(10, ("__kmpc_end_master: called T#%d\n", global_tid));

  KMP_DEBUG_ASSERT(KMP_MASTER_GTID(global_tid));
  KMP_POP_PARTITIONED_TIMER();

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[global_tid];
  kmp_team_t *team = this_thr->th.th_team;
  if (ompt_enabled.ompt_callback_master) {
    int tid = __kmp_tid_from_gtid(global_tid);
    ompt_callbacks.ompt_callback(ompt_callback_master)(
        ompt_scope_end, &(team->t.ompt_team_info.parallel_data),
        &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
        OMPT_GET_RETURN_ADDRESS(0));
  }
#endif

  if (__kmp_env_consistency_check) {
    if (global_tid < 0)
      KMP_WARNING(ThreadIdentInvalid);

    if (KMP_MASTER_GTID(global_tid))
      __kmp_pop_sync(global_tid, ct_master, loc);
  }
}
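
/* A minimal sketch (an assumption about typical compiler output) of how the
   pair above brackets a master construct; only the thread for which
   __kmpc_master() returns 1 runs the body:

       if (__kmpc_master(&loc, gtid)) {
         body(); // hypothetical master-only work
         __kmpc_end_master(&loc, gtid);
       }

   There is no implied barrier; the other threads simply skip the block. */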

/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.

Start execution of an <tt>ordered</tt> construct.
*/
void __kmpc_ordered(ident_t *loc, kmp_int32 gtid) {
  int cid = 0;
  kmp_info_t *th;
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  KC_TRACE(10, ("__kmpc_ordered: called T#%d\n", gtid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

#if USE_ITT_BUILD
  __kmp_itt_ordered_prep(gtid);
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

  th = __kmp_threads[gtid];

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_team_t *team;
  omp_wait_id_t lck;
  void *codeptr_ra;
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    team = __kmp_team_from_gtid(gtid);
    lck = (omp_wait_id_t)&team->t.t_ordered.dt.t_value;
    /* OMPT state update */
    th->th.ompt_thread_info.wait_id = lck;
    th->th.ompt_thread_info.state = omp_state_wait_ordered;

    /* OMPT event callback */
    codeptr_ra = OMPT_LOAD_RETURN_ADDRESS(gtid);
    if (ompt_enabled.ompt_callback_mutex_acquire) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
          ompt_mutex_ordered, omp_lock_hint_none, kmp_mutex_impl_spin,
          (omp_wait_id_t)lck, codeptr_ra);
    }
  }
#endif

  if (th->th.th_dispatch->th_deo_fcn != 0)
    (*th->th.th_dispatch->th_deo_fcn)(&gtid, &cid, loc);
  else
    __kmp_parallel_deo(&gtid, &cid, loc);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    /* OMPT state update */
    th->th.ompt_thread_info.state = omp_state_work_parallel;
    th->th.ompt_thread_info.wait_id = 0;

    /* OMPT event callback */
    if (ompt_enabled.ompt_callback_mutex_acquired) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
          ompt_mutex_ordered, (omp_wait_id_t)lck, codeptr_ra);
    }
  }
#endif

#if USE_ITT_BUILD
  __kmp_itt_ordered_start(gtid);
#endif /* USE_ITT_BUILD */
}

/*!
@ingroup WORK_SHARING
@param loc source location information.
@param gtid global thread number.

End execution of an <tt>ordered</tt> construct.
*/
void __kmpc_end_ordered(ident_t *loc, kmp_int32 gtid) {
  int cid = 0;
  kmp_info_t *th;

  KC_TRACE(10, ("__kmpc_end_ordered: called T#%d\n", gtid));

#if USE_ITT_BUILD
  __kmp_itt_ordered_end(gtid);
// TODO: ordered_wait_id
#endif /* USE_ITT_BUILD */

  th = __kmp_threads[gtid];

  if (th->th.th_dispatch->th_dxo_fcn != 0)
    (*th->th.th_dispatch->th_dxo_fcn)(&gtid, &cid, loc);
  else
    __kmp_parallel_dxo(&gtid, &cid, loc);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(gtid);
  if (ompt_enabled.ompt_callback_mutex_released) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
        ompt_mutex_ordered,
        (omp_wait_id_t)&__kmp_team_from_gtid(gtid)->t.t_ordered.dt.t_value,
        OMPT_LOAD_RETURN_ADDRESS(gtid));
  }
#endif
}
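
/* A minimal sketch (hedged; names are illustrative) of an ordered clause
   inside a worksharing loop, bracketing the ordered body with the pair above:

       for (...) { // iterations handed out by the loop dispatcher
         __kmpc_ordered(&loc, gtid);
         ordered_body(); // executed in sequential iteration order
         __kmpc_end_ordered(&loc, gtid);
       }

   The th_deo_fcn/th_dxo_fcn hooks above let the dispatcher substitute a
   schedule-specific enter/exit protocol for the default one. */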

#if KMP_USE_DYNAMIC_LOCK

static __forceinline void
__kmp_init_indirect_csptr(kmp_critical_name *crit, ident_t const *loc,
                          kmp_int32 gtid, kmp_indirect_locktag_t tag) {
  // Pointer to the allocated indirect lock is written to crit, while indexing
  // is ignored.
  void *idx;
  kmp_indirect_lock_t **lck;
  lck = (kmp_indirect_lock_t **)crit;
  kmp_indirect_lock_t *ilk = __kmp_allocate_indirect_lock(&idx, gtid, tag);
  KMP_I_LOCK_FUNC(ilk, init)(ilk->lock);
  KMP_SET_I_LOCK_LOCATION(ilk, loc);
  KMP_SET_I_LOCK_FLAGS(ilk, kmp_lf_critical_section);
  KA_TRACE(20,
           ("__kmp_init_indirect_csptr: initialized indirect lock #%d\n", tag));
#if USE_ITT_BUILD
  __kmp_itt_critical_creating(ilk->lock, loc);
#endif
  int status = KMP_COMPARE_AND_STORE_PTR(lck, nullptr, ilk);
  if (status == 0) {
#if USE_ITT_BUILD
    __kmp_itt_critical_destroyed(ilk->lock);
#endif
    // We don't really need to destroy the unclaimed lock here since it will be
    // cleaned up at program exit.
    // KMP_D_LOCK_FUNC(&idx, destroy)((kmp_dyna_lock_t *)&idx);
  }
  KMP_DEBUG_ASSERT(*lck != NULL);
}

// Fast-path acquire tas lock
#define KMP_ACQUIRE_TAS_LOCK(lock, gtid)                                       \
  {                                                                            \
    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                \
    kmp_int32 tas_free = KMP_LOCK_FREE(tas);                                   \
    kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);                         \
    if (KMP_ATOMIC_LD_RLX(&l->lk.poll) != tas_free ||                          \
        !__kmp_atomic_compare_store_acq(&l->lk.poll, tas_free, tas_busy)) {    \
      kmp_uint32 spins;                                                        \
      KMP_FSYNC_PREPARE(l);                                                    \
      KMP_INIT_YIELD(spins);                                                   \
      if (TCR_4(__kmp_nth) >                                                   \
          (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {               \
        KMP_YIELD(TRUE);                                                       \
      } else {                                                                 \
        KMP_YIELD_SPIN(spins);                                                 \
      }                                                                        \
      kmp_backoff_t backoff = __kmp_spin_backoff_params;                       \
      while (                                                                  \
          KMP_ATOMIC_LD_RLX(&l->lk.poll) != tas_free ||                        \
          !__kmp_atomic_compare_store_acq(&l->lk.poll, tas_free, tas_busy)) {  \
        __kmp_spin_backoff(&backoff);                                          \
        if (TCR_4(__kmp_nth) >                                                 \
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {             \
          KMP_YIELD(TRUE);                                                     \
        } else {                                                               \
          KMP_YIELD_SPIN(spins);                                               \
        }                                                                      \
      }                                                                        \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(l);                                                     \
  }

// Fast-path test tas lock
#define KMP_TEST_TAS_LOCK(lock, gtid, rc)                                      \
  {                                                                            \
    kmp_tas_lock_t *l = (kmp_tas_lock_t *)lock;                                \
    kmp_int32 tas_free = KMP_LOCK_FREE(tas);                                   \
    kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);                         \
    rc = KMP_ATOMIC_LD_RLX(&l->lk.poll) == tas_free &&                         \
         __kmp_atomic_compare_store_acq(&l->lk.poll, tas_free, tas_busy);      \
  }

// Fast-path release tas lock
#define KMP_RELEASE_TAS_LOCK(lock, gtid)                                       \
  { KMP_ATOMIC_ST_REL(&((kmp_tas_lock_t *)lock)->lk.poll, KMP_LOCK_FREE(tas)); }
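
// Note on the encoding used by the fast paths above: lk.poll holds
// KMP_LOCK_FREE(tas) while the lock is free and KMP_LOCK_BUSY(gtid + 1, tas)
// while it is held; the "+ 1" keeps the owner tag of thread 0 distinct from
// the free value.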

#if KMP_USE_FUTEX

#include <sys/syscall.h>
#include <unistd.h>
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif

// Fast-path acquire futex lock
#define KMP_ACQUIRE_FUTEX_LOCK(lock, gtid)                                     \
  {                                                                            \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                          \
    kmp_int32 gtid_code = (gtid + 1) << 1;                                     \
    KMP_MB();                                                                  \
    KMP_FSYNC_PREPARE(ftx);                                                    \
    kmp_int32 poll_val;                                                        \
    while ((poll_val = KMP_COMPARE_AND_STORE_RET32(                            \
                &(ftx->lk.poll), KMP_LOCK_FREE(futex),                         \
                KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {   \
      kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;                           \
      if (!cond) {                                                             \
        if (!KMP_COMPARE_AND_STORE_RET32(&(ftx->lk.poll), poll_val,            \
                                         poll_val |                            \
                                             KMP_LOCK_BUSY(1, futex))) {       \
          continue;                                                            \
        }                                                                      \
        poll_val |= KMP_LOCK_BUSY(1, futex);                                   \
      }                                                                        \
      kmp_int32 rc;                                                            \
      if ((rc = syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAIT, poll_val,     \
                        NULL, NULL, 0)) != 0) {                                \
        continue;                                                              \
      }                                                                        \
      gtid_code |= 1;                                                          \
    }                                                                          \
    KMP_FSYNC_ACQUIRED(ftx);                                                   \
  }

// Fast-path test futex lock
#define KMP_TEST_FUTEX_LOCK(lock, gtid, rc)                                    \
  {                                                                            \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                          \
    if (KMP_COMPARE_AND_STORE_ACQ32(&(ftx->lk.poll), KMP_LOCK_FREE(futex),     \
                                    KMP_LOCK_BUSY(gtid + 1 << 1, futex))) {    \
      KMP_FSYNC_ACQUIRED(ftx);                                                 \
      rc = TRUE;                                                               \
    } else {                                                                   \
      rc = FALSE;                                                              \
    }                                                                          \
  }

// Fast-path release futex lock
#define KMP_RELEASE_FUTEX_LOCK(lock, gtid)                                     \
  {                                                                            \
    kmp_futex_lock_t *ftx = (kmp_futex_lock_t *)lock;                          \
    KMP_MB();                                                                  \
    KMP_FSYNC_RELEASING(ftx);                                                  \
    kmp_int32 poll_val =                                                       \
        KMP_XCHG_FIXED32(&(ftx->lk.poll), KMP_LOCK_FREE(futex));               \
    if (KMP_LOCK_STRIP(poll_val) & 1) {                                        \
      syscall(__NR_futex, &(ftx->lk.poll), FUTEX_WAKE,                         \
              KMP_LOCK_BUSY(1, futex), NULL, NULL, 0);                         \
    }                                                                          \
    KMP_MB();                                                                  \
    KMP_YIELD(TCR_4(__kmp_nth) >                                               \
              (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));            \
  }

#endif // KMP_USE_FUTEX
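
// Note on the futex fast paths above: the owner is encoded as
// (gtid + 1) << 1 so that bit 0 can mean "contended". A would-be owner sets
// bit 0 before sleeping in FUTEX_WAIT, and the release path issues FUTEX_WAKE
// only if the value it swapped out has that bit set.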

#else // KMP_USE_DYNAMIC_LOCK

static kmp_user_lock_p __kmp_get_critical_section_ptr(kmp_critical_name *crit,
                                                      ident_t const *loc,
                                                      kmp_int32 gtid) {
  kmp_user_lock_p *lck_pp = (kmp_user_lock_p *)crit;

  // Because of the double-check, the following load doesn't need to be volatile
  kmp_user_lock_p lck = (kmp_user_lock_p)TCR_PTR(*lck_pp);

  if (lck == NULL) {
    void *idx;

    // Allocate & initialize the lock.
    // Remember alloc'ed locks in table in order to free them in __kmp_cleanup()
    lck = __kmp_user_lock_allocate(&idx, gtid, kmp_lf_critical_section);
    __kmp_init_user_lock_with_checks(lck);
    __kmp_set_user_lock_location(lck, loc);
#if USE_ITT_BUILD
    __kmp_itt_critical_creating(lck);
// __kmp_itt_critical_creating() should be called *before* the first usage
// of underlying lock. It is the only place where we can guarantee it. There
// are chances the lock will be destroyed with no usage, but it is not a
// problem, because this is not a real event seen by user but rather setting
// name for object (lock). See more details in kmp_itt.h.
#endif /* USE_ITT_BUILD */

    // Use a cmpxchg instruction to slam the start of the critical section with
    // the lock pointer. If another thread beat us to it, deallocate the lock,
    // and use the lock that the other thread allocated.
    int status = KMP_COMPARE_AND_STORE_PTR(lck_pp, 0, lck);

    if (status == 0) {
// Deallocate the lock and reload the value.
#if USE_ITT_BUILD
      __kmp_itt_critical_destroyed(lck);
// Let ITT know the lock is destroyed and the same memory location may be reused
// for another purpose.
#endif /* USE_ITT_BUILD */
      __kmp_destroy_user_lock_with_checks(lck);
      __kmp_user_lock_free(&idx, gtid, lck);
      lck = (kmp_user_lock_p)TCR_PTR(*lck_pp);
      KMP_DEBUG_ASSERT(lck != NULL);
    }
  }
  return lck;
}

#endif // KMP_USE_DYNAMIC_LOCK

/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock
associated with the critical section, or some other suitably unique value.

Enter code protected by a `critical` construct.
This function blocks until the executing thread can enter the critical section.
*/
void __kmpc_critical(ident_t *loc, kmp_int32 global_tid,
                     kmp_critical_name *crit) {
#if KMP_USE_DYNAMIC_LOCK
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(global_tid);
#endif // OMPT_SUPPORT
  __kmpc_critical_with_hint(loc, global_tid, crit, omp_lock_hint_none);
#else
  KMP_COUNT_BLOCK(OMP_CRITICAL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  omp_state_t prev_state = omp_state_undefined;
  ompt_thread_info_t ti;
#endif
  kmp_user_lock_p lck;

  KC_TRACE(10, ("__kmpc_critical: called T#%d\n", global_tid));

  // TODO: add THR_OVHD_STATE

  KMP_PUSH_PARTITIONED_TIMER(OMP_critical_wait);
  KMP_CHECK_USER_LOCK_INIT();

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_CRITICAL_SIZE)) {
    lck = (kmp_user_lock_p)crit;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_CRITICAL_SIZE)) {
    lck = (kmp_user_lock_p)crit;
  }
#endif
  else { // ticket, queuing or drdpa
    lck = __kmp_get_critical_section_ptr(crit, loc, global_tid);
  }

  if (__kmp_env_consistency_check)
    __kmp_push_sync(global_tid, ct_critical, loc, lck);

// since the critical directive binds to all threads, not just the current
// team, we have to check this even if we are in a serialized team.
// also, even if we are the uber thread, we still have to conduct the lock,
// as we have to contend with sibling threads.

#if USE_ITT_BUILD
  __kmp_itt_critical_acquiring(lck);
#endif /* USE_ITT_BUILD */
#if OMPT_SUPPORT && OMPT_OPTIONAL
  OMPT_STORE_RETURN_ADDRESS(global_tid);
  void *codeptr_ra = NULL;
  if (ompt_enabled.enabled) {
    ti = __kmp_threads[global_tid]->th.ompt_thread_info;
    /* OMPT state update */
    prev_state = ti.state;
    ti.wait_id = (omp_wait_id_t)lck;
    ti.state = omp_state_wait_critical;

    /* OMPT event callback */
    codeptr_ra = OMPT_LOAD_RETURN_ADDRESS(global_tid);
    if (ompt_enabled.ompt_callback_mutex_acquire) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
          ompt_mutex_critical, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
          (omp_wait_id_t)crit, codeptr_ra);
    }
1201 }
1202#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001203 // Value of 'crit' should be good for using as a critical_id of the critical
1204 // section directive.
1205 __kmp_acquire_user_lock_with_checks(lck, global_tid);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001206
Jonathan Peytonb87b5812015-12-11 22:04:05 +00001207#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00001208 __kmp_itt_critical_acquired(lck);
Jonathan Peytonb87b5812015-12-11 22:04:05 +00001209#endif /* USE_ITT_BUILD */
Joachim Protze82e94a52017-11-01 10:08:30 +00001210#if OMPT_SUPPORT && OMPT_OPTIONAL
1211 if (ompt_enabled.enabled) {
1212 /* OMPT state update */
1213 ti.state = prev_state;
1214 ti.wait_id = 0;
1215
1216 /* OMPT event callback */
1217 if (ompt_enabled.ompt_callback_mutex_acquired) {
1218 ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
Joachim Protze40636132018-05-28 08:16:08 +00001219 ompt_mutex_critical, (omp_wait_id_t)crit, codeptr_ra);
Joachim Protze82e94a52017-11-01 10:08:30 +00001220 }
1221 }
1222#endif
Jonathan Peytonf0682ac2018-07-30 17:41:08 +00001223 KMP_POP_PARTITIONED_TIMER();
Jonathan Peytonb87b5812015-12-11 22:04:05 +00001224
Jonathan Peytonf0682ac2018-07-30 17:41:08 +00001225 KMP_PUSH_PARTITIONED_TIMER(OMP_critical);
Jonathan Peyton30419822017-05-12 18:01:32 +00001226 KA_TRACE(15, ("__kmpc_critical: done T#%d\n", global_tid));
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00001227#endif // KMP_USE_DYNAMIC_LOCK
Jonathan Peytonb87b5812015-12-11 22:04:05 +00001228}
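
// Illustrative sketch (not the compiler's verbatim output) of how a `critical`
// construct reaches this pair of entry points; the lock-variable name and the
// ident_t setup are invented for the example:
//
//   #pragma omp critical
//   { body(); }
//
// lowers to something like:
//
//   static kmp_critical_name crit_var;  // shared, zero-initialized storage
//   kmp_int32 gtid = __kmpc_global_thread_num(&loc);
//   __kmpc_critical(&loc, gtid, &crit_var);
//   body();
//   __kmpc_end_critical(&loc, gtid, &crit_var);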

#if KMP_USE_DYNAMIC_LOCK

// Converts the given hint to an internal lock implementation
static __forceinline kmp_dyna_lockseq_t __kmp_map_hint_to_lock(uintptr_t hint) {
#if KMP_USE_TSX
#define KMP_TSX_LOCK(seq) lockseq_##seq
#else
#define KMP_TSX_LOCK(seq) __kmp_user_lock_seq
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define KMP_CPUINFO_RTM (__kmp_cpuinfo.rtm)
#else
#define KMP_CPUINFO_RTM 0
#endif

  // Hints that do not require further logic
  if (hint & kmp_lock_hint_hle)
    return KMP_TSX_LOCK(hle);
  if (hint & kmp_lock_hint_rtm)
    return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(rtm) : __kmp_user_lock_seq;
  if (hint & kmp_lock_hint_adaptive)
    return KMP_CPUINFO_RTM ? KMP_TSX_LOCK(adaptive) : __kmp_user_lock_seq;

  // Rule out conflicting hints first by returning the default lock
  if ((hint & omp_lock_hint_contended) && (hint & omp_lock_hint_uncontended))
    return __kmp_user_lock_seq;
  if ((hint & omp_lock_hint_speculative) &&
      (hint & omp_lock_hint_nonspeculative))
    return __kmp_user_lock_seq;

  // Do not even consider speculation when it appears to be contended
  if (hint & omp_lock_hint_contended)
    return lockseq_queuing;

  // Uncontended lock without speculation
  if ((hint & omp_lock_hint_uncontended) && !(hint & omp_lock_hint_speculative))
    return lockseq_tas;

  // HLE lock for speculation
  if (hint & omp_lock_hint_speculative)
    return KMP_TSX_LOCK(hle);

  return __kmp_user_lock_seq;
}
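
// Worked example of the precedence above (a sketch, assuming a TSX-capable
// build where KMP_TSX_LOCK(hle) expands to lockseq_hle):
//
//   hint = omp_lock_hint_uncontended | omp_lock_hint_speculative
//
// is not self-contradictory, is not contended, and fails the "uncontended
// without speculation" test because the speculative bit is set, so it falls
// through to the speculative case and maps to an HLE lock. On a non-TSX
// build the same hint falls back to __kmp_user_lock_seq.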

#if OMPT_SUPPORT && OMPT_OPTIONAL
#if KMP_USE_DYNAMIC_LOCK
static kmp_mutex_impl_t
__ompt_get_mutex_impl_type(void *user_lock, kmp_indirect_lock_t *ilock = 0) {
  if (user_lock) {
    switch (KMP_EXTRACT_D_TAG(user_lock)) {
    case 0:
      break;
#if KMP_USE_FUTEX
    case locktag_futex:
      return kmp_mutex_impl_queuing;
#endif
    case locktag_tas:
      return kmp_mutex_impl_spin;
#if KMP_USE_TSX
    case locktag_hle:
      return kmp_mutex_impl_speculative;
#endif
    default:
      return ompt_mutex_impl_unknown;
    }
    ilock = KMP_LOOKUP_I_LOCK(user_lock);
  }
  KMP_ASSERT(ilock);
  switch (ilock->type) {
#if KMP_USE_TSX
  case locktag_adaptive:
  case locktag_rtm:
    return kmp_mutex_impl_speculative;
#endif
  case locktag_nested_tas:
    return kmp_mutex_impl_spin;
#if KMP_USE_FUTEX
  case locktag_nested_futex:
#endif
  case locktag_ticket:
  case locktag_queuing:
  case locktag_drdpa:
  case locktag_nested_ticket:
  case locktag_nested_queuing:
  case locktag_nested_drdpa:
    return kmp_mutex_impl_queuing;
  default:
    return ompt_mutex_impl_unknown;
  }
}
#else
// For locks without dynamic binding
static kmp_mutex_impl_t __ompt_get_mutex_impl_type() {
  switch (__kmp_user_lock_kind) {
  case lk_tas:
    return kmp_mutex_impl_spin;
#if KMP_USE_FUTEX
  case lk_futex:
#endif
  case lk_ticket:
  case lk_queuing:
  case lk_drdpa:
    return kmp_mutex_impl_queuing;
#if KMP_USE_TSX
  case lk_hle:
  case lk_rtm:
  case lk_adaptive:
    return kmp_mutex_impl_speculative;
#endif
  default:
    return ompt_mutex_impl_unknown;
  }
}
#endif // KMP_USE_DYNAMIC_LOCK
#endif // OMPT_SUPPORT && OMPT_OPTIONAL

/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock
associated with the critical section, or some other suitably unique value.
@param hint the lock hint.

Enter code protected by a `critical` construct with a hint. The hint value is
used to suggest a lock implementation. This function blocks until the executing
thread can enter the critical section unless the hint suggests use of
speculative execution and the hardware supports it.
*/
void __kmpc_critical_with_hint(ident_t *loc, kmp_int32 global_tid,
                               kmp_critical_name *crit, uint32_t hint) {
  KMP_COUNT_BLOCK(OMP_CRITICAL);
  kmp_user_lock_p lck;
#if OMPT_SUPPORT && OMPT_OPTIONAL
  omp_state_t prev_state = omp_state_undefined;
  ompt_thread_info_t ti;
  // This is the case if called from __kmpc_critical:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
#endif

  KC_TRACE(10, ("__kmpc_critical: called T#%d\n", global_tid));

  kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
  // Check if it is initialized.
  KMP_PUSH_PARTITIONED_TIMER(OMP_critical_wait);
  if (*lk == 0) {
    kmp_dyna_lockseq_t lckseq = __kmp_map_hint_to_lock(hint);
    if (KMP_IS_D_LOCK(lckseq)) {
      KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0,
                                  KMP_GET_D_TAG(lckseq));
    } else {
      __kmp_init_indirect_csptr(crit, loc, global_tid, KMP_GET_I_TAG(lckseq));
    }
  }
  // Branch for accessing the actual lock object and set operation. This
  // branching is inevitable since this lock initialization does not follow the
  // normal dispatch path (lock table is not used).
  if (KMP_EXTRACT_D_TAG(lk) != 0) {
    lck = (kmp_user_lock_p)lk;
    if (__kmp_env_consistency_check) {
      __kmp_push_sync(global_tid, ct_critical, loc, lck,
                      __kmp_map_hint_to_lock(hint));
    }
#if USE_ITT_BUILD
    __kmp_itt_critical_acquiring(lck);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.enabled) {
      ti = __kmp_threads[global_tid]->th.ompt_thread_info;
      /* OMPT state update */
      prev_state = ti.state;
      ti.wait_id = (omp_wait_id_t)lck;
      ti.state = omp_state_wait_critical;

      /* OMPT event callback */
      if (ompt_enabled.ompt_callback_mutex_acquire) {
        ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
            ompt_mutex_critical, (unsigned int)hint,
            __ompt_get_mutex_impl_type(crit), (omp_wait_id_t)crit, codeptr);
      }
    }
#endif
#if KMP_USE_INLINED_TAS
    if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
      KMP_ACQUIRE_TAS_LOCK(lck, global_tid);
    } else
#elif KMP_USE_INLINED_FUTEX
    if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
      KMP_ACQUIRE_FUTEX_LOCK(lck, global_tid);
    } else
#endif
    {
      KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
    }
  } else {
    kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
    lck = ilk->lock;
    if (__kmp_env_consistency_check) {
      __kmp_push_sync(global_tid, ct_critical, loc, lck,
                      __kmp_map_hint_to_lock(hint));
    }
#if USE_ITT_BUILD
    __kmp_itt_critical_acquiring(lck);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.enabled) {
      ti = __kmp_threads[global_tid]->th.ompt_thread_info;
      /* OMPT state update */
      prev_state = ti.state;
      ti.wait_id = (omp_wait_id_t)lck;
      ti.state = omp_state_wait_critical;

      /* OMPT event callback */
      if (ompt_enabled.ompt_callback_mutex_acquire) {
        ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
            ompt_mutex_critical, (unsigned int)hint,
            __ompt_get_mutex_impl_type(0, ilk), (omp_wait_id_t)crit, codeptr);
      }
    }
#endif
    KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
  }
  KMP_POP_PARTITIONED_TIMER();

#if USE_ITT_BUILD
  __kmp_itt_critical_acquired(lck);
#endif /* USE_ITT_BUILD */
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    /* OMPT state update */
    ti.state = prev_state;
    ti.wait_id = 0;

    /* OMPT event callback */
    if (ompt_enabled.ompt_callback_mutex_acquired) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
          ompt_mutex_critical, (omp_wait_id_t)crit, codeptr);
    }
  }
#endif

  KMP_PUSH_PARTITIONED_TIMER(OMP_critical);
  KA_TRACE(15, ("__kmpc_critical: done T#%d\n", global_tid));
} // __kmpc_critical_with_hint
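
// A minimal sketch (not verbatim compiler output) of how a hinted critical
// construct could reach this entry point; the lock-variable name is invented
// for the example:
//
//   #pragma omp critical (io) hint(omp_lock_hint_speculative)
//   { write_record(); }
//
// might lower to:
//
//   static kmp_critical_name crit_io;  // shared, zero-initialized storage
//   __kmpc_critical_with_hint(&loc, gtid, &crit_io, omp_lock_hint_speculative);
//   write_record();
//   __kmpc_end_critical(&loc, gtid, &crit_io);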

#endif // KMP_USE_DYNAMIC_LOCK

/*!
@ingroup WORK_SHARING
@param loc source location information.
@param global_tid global thread number.
@param crit identity of the critical section. This could be a pointer to a lock
associated with the critical section, or some other suitably unique value.

Leave a critical section, releasing any lock that was held during its execution.
*/
void __kmpc_end_critical(ident_t *loc, kmp_int32 global_tid,
                         kmp_critical_name *crit) {
  kmp_user_lock_p lck;

  KC_TRACE(10, ("__kmpc_end_critical: called T#%d\n", global_tid));

#if KMP_USE_DYNAMIC_LOCK
  if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
    lck = (kmp_user_lock_p)crit;
    KMP_ASSERT(lck != NULL);
    if (__kmp_env_consistency_check) {
      __kmp_pop_sync(global_tid, ct_critical, loc);
    }
#if USE_ITT_BUILD
    __kmp_itt_critical_releasing(lck);
#endif
#if KMP_USE_INLINED_TAS
    if (__kmp_user_lock_seq == lockseq_tas && !__kmp_env_consistency_check) {
      KMP_RELEASE_TAS_LOCK(lck, global_tid);
    } else
#elif KMP_USE_INLINED_FUTEX
    if (__kmp_user_lock_seq == lockseq_futex && !__kmp_env_consistency_check) {
      KMP_RELEASE_FUTEX_LOCK(lck, global_tid);
    } else
#endif
    {
      KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
    }
  } else {
    kmp_indirect_lock_t *ilk =
        (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
    KMP_ASSERT(ilk != NULL);
    lck = ilk->lock;
    if (__kmp_env_consistency_check) {
      __kmp_pop_sync(global_tid, ct_critical, loc);
    }
#if USE_ITT_BUILD
    __kmp_itt_critical_releasing(lck);
#endif
    KMP_I_LOCK_FUNC(ilk, unset)(lck, global_tid);
  }

#else // KMP_USE_DYNAMIC_LOCK

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_CRITICAL_SIZE)) {
    lck = (kmp_user_lock_p)crit;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_CRITICAL_SIZE)) {
    lck = (kmp_user_lock_p)crit;
  }
#endif
  else { // ticket, queuing or drdpa
    lck = (kmp_user_lock_p)TCR_PTR(*((kmp_user_lock_p *)crit));
  }

  KMP_ASSERT(lck != NULL);

  if (__kmp_env_consistency_check)
    __kmp_pop_sync(global_tid, ct_critical, loc);

#if USE_ITT_BUILD
  __kmp_itt_critical_releasing(lck);
#endif /* USE_ITT_BUILD */
  // The value of 'crit' should be usable as the critical_id of the critical
  // section directive.
  __kmp_release_user_lock_with_checks(lck, global_tid);

#endif // KMP_USE_DYNAMIC_LOCK

#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT release event triggers after lock is released; place here to trigger
   * for all #if branches */
  OMPT_STORE_RETURN_ADDRESS(global_tid);
  if (ompt_enabled.ompt_callback_mutex_released) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
        ompt_mutex_critical, (omp_wait_id_t)crit, OMPT_LOAD_RETURN_ADDRESS(0));
  }
#endif

  KMP_POP_PARTITIONED_TIMER();
  KA_TRACE(15, ("__kmpc_end_critical: done T#%d\n", global_tid));
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master. The barrier is executed inside
this function.
*/
kmp_int32 __kmpc_barrier_master(ident_t *loc, kmp_int32 global_tid) {
  int status;

  KC_TRACE(10, ("__kmpc_barrier_master: called T#%d\n", global_tid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  if (__kmp_env_consistency_check)
    __kmp_check_barrier(global_tid, ct_barrier, loc);

#if OMPT_SUPPORT
  omp_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    if (ompt_frame->enter_frame == NULL)
      ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(global_tid);
  }
#endif
#if USE_ITT_NOTIFY
  __kmp_threads[global_tid]->th.th_ident = loc;
#endif
  status = __kmp_barrier(bs_plain_barrier, global_tid, TRUE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif

  return (status != 0) ? 0 : 1;
}

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.

Complete the execution of a combined barrier and master. This function should
only be called at the completion of the <tt>master</tt> code. Other threads will
still be waiting at the barrier and this call releases them.
*/
void __kmpc_end_barrier_master(ident_t *loc, kmp_int32 global_tid) {
  KC_TRACE(10, ("__kmpc_end_barrier_master: called T#%d\n", global_tid));

  __kmp_end_split_barrier(bs_plain_barrier, global_tid);
}
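
// Sketch of the calling pattern these two entry points imply (illustrative,
// not emitted code):
//
//   if (__kmpc_barrier_master(&loc, gtid)) {
//     master_body();                          // executed by one thread only
//     __kmpc_end_barrier_master(&loc, gtid);  // releases the waiting threads
//   }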

/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid thread id.
@return one if the thread should execute the master block, zero otherwise

Start execution of a combined barrier and master(nowait) construct.
The barrier is executed inside this function.
There is no equivalent "end" function, since the nowait form has no closing
barrier to release; the cleanup normally done by an "end" call is performed
inside this function.
*/
kmp_int32 __kmpc_barrier_master_nowait(ident_t *loc, kmp_int32 global_tid) {
  kmp_int32 ret;

  KC_TRACE(10, ("__kmpc_barrier_master_nowait: called T#%d\n", global_tid));

  if (!TCR_4(__kmp_init_parallel))
    __kmp_parallel_initialize();

  if (__kmp_env_consistency_check) {
    if (loc == 0) {
      KMP_WARNING(ConstructIdentInvalid); // ??? What does it mean for the user?
    }
    __kmp_check_barrier(global_tid, ct_barrier, loc);
  }

#if OMPT_SUPPORT
  omp_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    if (ompt_frame->enter_frame == NULL)
      ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(global_tid);
  }
#endif
#if USE_ITT_NOTIFY
  __kmp_threads[global_tid]->th.th_ident = loc;
#endif
  __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif

  ret = __kmpc_master(loc, global_tid);

  if (__kmp_env_consistency_check) {
    /* there's no __kmpc_end_master called; so the (stats) */
    /* actions of __kmpc_end_master are done here */

    if (global_tid < 0) {
      KMP_WARNING(ThreadIdentInvalid);
    }
    if (ret) {
      /* only one thread should do the pop since only */
      /* one did the push (see __kmpc_master()) */

      __kmp_pop_sync(global_tid, ct_master, loc);
    }
  }

  return (ret);
}
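
// Illustrative usage (a sketch): the nowait variant returns the master test
// directly and needs no matching "end" call:
//
//   if (__kmpc_barrier_master_nowait(&loc, gtid)) {
//     master_body(); // one thread runs this; the others have already moved on
//   }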

/* The BARRIER for a SINGLE process section is always explicit */
/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number
@return One if this thread should execute the single construct, zero otherwise.

Test whether to execute a <tt>single</tt> construct.
There are no implicit barriers in the two "single" calls; rather the compiler
should introduce an explicit barrier if it is required.
*/

kmp_int32 __kmpc_single(ident_t *loc, kmp_int32 global_tid) {
  kmp_int32 rc = __kmp_enter_single(global_tid, loc, TRUE);

  if (rc) {
    // We are going to execute the single statement, so we should count it.
    KMP_COUNT_BLOCK(OMP_SINGLE);
    KMP_PUSH_PARTITIONED_TIMER(OMP_single);
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[global_tid];
  kmp_team_t *team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(global_tid);

  if (ompt_enabled.enabled) {
    if (rc) {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_executor, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    } else {
      if (ompt_enabled.ompt_callback_work) {
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_begin,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
        ompt_callbacks.ompt_callback(ompt_callback_work)(
            ompt_work_single_other, ompt_scope_end,
            &(team->t.ompt_team_info.parallel_data),
            &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data),
            1, OMPT_GET_RETURN_ADDRESS(0));
      }
    }
  }
#endif

  return rc;
}

/*!
@ingroup WORK_SHARING
@param loc source location information
@param global_tid global thread number

Mark the end of a <tt>single</tt> construct. This function should
only be called by the thread that executed the block of code protected
by the `single` construct.
*/
void __kmpc_end_single(ident_t *loc, kmp_int32 global_tid) {
  __kmp_exit_single(global_tid);
  KMP_POP_PARTITIONED_TIMER();

#if OMPT_SUPPORT && OMPT_OPTIONAL
  kmp_info_t *this_thr = __kmp_threads[global_tid];
  kmp_team_t *team = this_thr->th.th_team;
  int tid = __kmp_tid_from_gtid(global_tid);

  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_single_executor, ompt_scope_end,
        &(team->t.ompt_team_info.parallel_data),
        &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), 1,
        OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
}
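
// Sketch of the single-construct lowering these two calls imply; as the doc
// comment above notes, the trailing barrier is the compiler's responsibility:
//
//   if (__kmpc_single(&loc, gtid)) {
//     single_body();
//     __kmpc_end_single(&loc, gtid);
//   }
//   __kmpc_barrier(&loc, gtid); // omitted when the construct has nowait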

/*!
@ingroup WORK_SHARING
@param loc Source location
@param global_tid Global thread id

Mark the end of a statically scheduled loop.
*/
void __kmpc_for_static_fini(ident_t *loc, kmp_int32 global_tid) {
  KMP_POP_PARTITIONED_TIMER();
  KE_TRACE(10, ("__kmpc_for_static_fini called T#%d\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_work_t ompt_work_type = ompt_work_loop;
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
    // Determine workshare type
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        // Use the default set above;
        // a warning about this case is provided in __kmpc_for_static_init.
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_end, &(team_info->parallel_data),
        &(task_info->task_data), 0, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
  if (__kmp_env_consistency_check)
    __kmp_pop_workshare(global_tid, ct_pdo, loc);
}
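
// Rough shape of a statically scheduled loop around this call (a sketch;
// __kmpc_for_static_init_4 is the 32-bit init counterpart and the argument
// values shown are simplified, not exact compiler output):
//
//   kmp_int32 lb = 0, ub = n - 1, st = 1, last = 0;
//   __kmpc_for_static_init_4(&loc, gtid, kmp_sch_static, &last, &lb, &ub, &st,
//                            1, 1);
//   for (kmp_int32 i = lb; i <= ub; i += st)
//     body(i);
//   __kmpc_for_static_fini(&loc, gtid);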

// User routines which take C-style arguments (call by value)
// different from the Fortran equivalent routines

void ompc_set_num_threads(int arg) {
  // !!!!! TODO: check the per-task binding
  __kmp_set_num_threads(arg, __kmp_entry_gtid());
}

void ompc_set_dynamic(int flag) {
  kmp_info_t *thread;

  /* For the thread-private implementation of the internal controls */
  thread = __kmp_entry_thread();

  __kmp_save_internal_controls(thread);

  set__dynamic(thread, flag ? TRUE : FALSE);
}

void ompc_set_nested(int flag) {
  kmp_info_t *thread;

  /* For the thread-private internal controls implementation */
  thread = __kmp_entry_thread();

  __kmp_save_internal_controls(thread);

  set__nested(thread, flag ? TRUE : FALSE);
}

void ompc_set_max_active_levels(int max_active_levels) {
  /* TO DO */
  /* we want per-task implementation of this internal control */

  /* For the per-thread internal controls implementation */
  __kmp_set_max_active_levels(__kmp_entry_gtid(), max_active_levels);
}

void ompc_set_schedule(omp_sched_t kind, int modifier) {
  // !!!!! TODO: check the per-task binding
  __kmp_set_schedule(__kmp_entry_gtid(), (kmp_sched_t)kind, modifier);
}

int ompc_get_ancestor_thread_num(int level) {
  return __kmp_get_ancestor_thread_num(__kmp_entry_gtid(), level);
}

int ompc_get_team_size(int level) {
  return __kmp_get_team_size(__kmp_entry_gtid(), level);
}

void kmpc_set_stacksize(int arg) {
  // __kmp_aux_set_stacksize initializes the library if needed
  __kmp_aux_set_stacksize(arg);
}

void kmpc_set_stacksize_s(size_t arg) {
  // __kmp_aux_set_stacksize initializes the library if needed
  __kmp_aux_set_stacksize(arg);
}

void kmpc_set_blocktime(int arg) {
  int gtid, tid;
  kmp_info_t *thread;

  gtid = __kmp_entry_gtid();
  tid = __kmp_tid_from_gtid(gtid);
  thread = __kmp_thread_from_gtid(gtid);

  __kmp_aux_set_blocktime(arg, thread, tid);
}

void kmpc_set_library(int arg) {
  // __kmp_user_set_library initializes the library if needed
  __kmp_user_set_library((enum library_type)arg);
}

void kmpc_set_defaults(char const *str) {
  // __kmp_aux_set_defaults initializes the library if needed
  __kmp_aux_set_defaults(str, KMP_STRLEN(str));
}

void kmpc_set_disp_num_buffers(int arg) {
  // ignore after initialization because some teams have already
  // allocated dispatch buffers
  if (__kmp_init_serial == 0 && arg > 0)
    __kmp_dispatch_num_buffers = arg;
}

int kmpc_set_affinity_mask_proc(int proc, void **mask) {
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
  return -1;
#else
  if (!TCR_4(__kmp_init_middle)) {
    __kmp_middle_initialize();
  }
  return __kmp_aux_set_affinity_mask_proc(proc, mask);
#endif
}

int kmpc_unset_affinity_mask_proc(int proc, void **mask) {
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
  return -1;
#else
  if (!TCR_4(__kmp_init_middle)) {
    __kmp_middle_initialize();
  }
  return __kmp_aux_unset_affinity_mask_proc(proc, mask);
#endif
}

int kmpc_get_affinity_mask_proc(int proc, void **mask) {
#if defined(KMP_STUB) || !KMP_AFFINITY_SUPPORTED
  return -1;
#else
  if (!TCR_4(__kmp_init_middle)) {
    __kmp_middle_initialize();
  }
  return __kmp_aux_get_affinity_mask_proc(proc, mask);
#endif
}

/* -------------------------------------------------------------------------- */
/*!
@ingroup THREADPRIVATE
@param loc source location information
@param gtid global thread number
@param cpy_size size of the cpy_data buffer
@param cpy_data pointer to data to be copied
@param cpy_func helper function to call for copying data
@param didit flag variable: 1=single thread; 0=not single thread

__kmpc_copyprivate implements the interface for the private data broadcast
needed for the copyprivate clause associated with a single region in an
OpenMP<sup>*</sup> program (both C and Fortran).
All threads participating in the parallel region call this routine.
One of the threads (called the single thread) should have the <tt>didit</tt>
variable set to 1 and all other threads should have that variable set to 0.
All threads pass a pointer to a data buffer (cpy_data) that they have built.

The OpenMP specification forbids the use of nowait on the single region when a
copyprivate clause is present. However, @ref __kmpc_copyprivate implements a
barrier internally to avoid race conditions, so the code generation for the
single region should avoid generating a barrier after the call to @ref
__kmpc_copyprivate.

The <tt>gtid</tt> parameter is the global thread id for the current thread.
The <tt>loc</tt> parameter is a pointer to source location information.

Internal implementation: The single thread will first copy its descriptor
address (cpy_data) to a team-private location, then the other threads will each
call the function pointed to by the parameter cpy_func, which carries out the
copy by copying the data using the cpy_data buffer.

The cpy_func routine used for the copy and the contents of the data area defined
by cpy_data and cpy_size may be built in any fashion that will allow the copy
to be done. For instance, the cpy_data buffer can hold the actual data to be
copied or it may hold a list of pointers to the data. The cpy_func routine must
interpret the cpy_data buffer appropriately.

The interface to cpy_func is as follows:
@code
void cpy_func( void *destination, void *source )
@endcode
where void *destination is the cpy_data pointer for the thread being copied to
and void *source is the cpy_data pointer for the thread being copied from.
*/
void __kmpc_copyprivate(ident_t *loc, kmp_int32 gtid, size_t cpy_size,
                        void *cpy_data, void (*cpy_func)(void *, void *),
                        kmp_int32 didit) {
  void **data_ptr;

  KC_TRACE(10, ("__kmpc_copyprivate: called T#%d\n", gtid));

  KMP_MB();

  data_ptr = &__kmp_team_from_gtid(gtid)->t.t_copypriv_data;

  if (__kmp_env_consistency_check) {
    if (loc == 0) {
      KMP_WARNING(ConstructIdentInvalid);
    }
  }

  // ToDo: Optimize the following two barriers into some kind of split barrier

  if (didit)
    *data_ptr = cpy_data;

#if OMPT_SUPPORT
  omp_frame_t *ompt_frame;
  if (ompt_enabled.enabled) {
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    if (ompt_frame->enter_frame == NULL)
      ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
  /* This barrier is not a barrier region boundary */
#if USE_ITT_NOTIFY
  __kmp_threads[gtid]->th.th_ident = loc;
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

  if (!didit)
    (*cpy_func)(cpy_data, *data_ptr);

  // Consider next barrier a user-visible barrier for barrier region boundaries
  // Nesting checks are already handled by the single construct checks

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
  }
#endif
#if USE_ITT_NOTIFY
  __kmp_threads[gtid]->th.th_ident = loc; // TODO: check if it is needed (e.g.
                                          // tasks can overwrite the location)
#endif
  __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    ompt_frame->enter_frame = NULL;
  }
#endif
}
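
// Hedged sketch of a copyprivate broadcast using the documented cpy_func
// interface; the descriptor layout and helper name are invented for the
// example:
//
//   typedef struct { int *a; double *b; } cpy_desc_t; // per-thread pointers
//
//   static void my_cpy_func(void *dst, void *src) {
//     cpy_desc_t *d = (cpy_desc_t *)dst, *s = (cpy_desc_t *)src;
//     *d->a = *s->a; // copy values out of the single thread's variables
//     *d->b = *s->b;
//   }
//
//   // every thread in the parallel region executes:
//   cpy_desc_t desc = {&my_a, &my_b};
//   kmp_int32 didit = __kmpc_single(&loc, gtid);
//   if (didit) {
//     /* single body assigns my_a and my_b */
//     __kmpc_end_single(&loc, gtid);
//   }
//   // no barrier afterwards: __kmpc_copyprivate synchronizes internally
//   __kmpc_copyprivate(&loc, gtid, sizeof(desc), &desc, my_cpy_func, didit);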

/* -------------------------------------------------------------------------- */

#define INIT_LOCK __kmp_init_user_lock_with_checks
#define INIT_NESTED_LOCK __kmp_init_nested_user_lock_with_checks
#define ACQUIRE_LOCK __kmp_acquire_user_lock_with_checks
#define ACQUIRE_LOCK_TIMED __kmp_acquire_user_lock_with_checks_timed
#define ACQUIRE_NESTED_LOCK __kmp_acquire_nested_user_lock_with_checks
#define ACQUIRE_NESTED_LOCK_TIMED                                              \
  __kmp_acquire_nested_user_lock_with_checks_timed
#define RELEASE_LOCK __kmp_release_user_lock_with_checks
#define RELEASE_NESTED_LOCK __kmp_release_nested_user_lock_with_checks
#define TEST_LOCK __kmp_test_user_lock_with_checks
#define TEST_NESTED_LOCK __kmp_test_nested_user_lock_with_checks
#define DESTROY_LOCK __kmp_destroy_user_lock_with_checks
#define DESTROY_NESTED_LOCK __kmp_destroy_nested_user_lock_with_checks

// TODO: Make check abort messages use location info & pass it into
// with_checks routines

#if KMP_USE_DYNAMIC_LOCK

// internal lock initializer
static __forceinline void __kmp_init_lock_with_hint(ident_t *loc, void **lock,
                                                    kmp_dyna_lockseq_t seq) {
  if (KMP_IS_D_LOCK(seq)) {
    KMP_INIT_D_LOCK(lock, seq);
#if USE_ITT_BUILD
    __kmp_itt_lock_creating((kmp_user_lock_p)lock, NULL);
#endif
  } else {
    KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
    kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
    __kmp_itt_lock_creating(ilk->lock, loc);
#endif
  }
}

// internal nest lock initializer
static __forceinline void
__kmp_init_nest_lock_with_hint(ident_t *loc, void **lock,
                               kmp_dyna_lockseq_t seq) {
#if KMP_USE_TSX
  // Don't have nested lock implementation for speculative locks
  if (seq == lockseq_hle || seq == lockseq_rtm || seq == lockseq_adaptive)
    seq = __kmp_user_lock_seq;
#endif
  switch (seq) {
  case lockseq_tas:
    seq = lockseq_nested_tas;
    break;
#if KMP_USE_FUTEX
  case lockseq_futex:
    seq = lockseq_nested_futex;
    break;
#endif
  case lockseq_ticket:
    seq = lockseq_nested_ticket;
    break;
  case lockseq_queuing:
    seq = lockseq_nested_queuing;
    break;
  case lockseq_drdpa:
    seq = lockseq_nested_drdpa;
    break;
  default:
    seq = lockseq_nested_queuing;
  }
  KMP_INIT_I_LOCK(lock, seq);
#if USE_ITT_BUILD
  kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(lock);
  __kmp_itt_lock_creating(ilk->lock, loc);
#endif
}

/* initialize the lock with a hint */
void __kmpc_init_lock_with_hint(ident_t *loc, kmp_int32 gtid, void **user_lock,
                                uintptr_t hint) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  if (__kmp_env_consistency_check && user_lock == NULL) {
    KMP_FATAL(LockIsUninitialized, "omp_init_lock_with_hint");
  }

  __kmp_init_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_init) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_init)(
        ompt_mutex_lock, (omp_lock_hint_t)hint,
        __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
        codeptr);
  }
#endif
}
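
// Illustrative user-level path into this entry point (a sketch; the runtime
// call is normally produced by the OpenMP translation of
// omp_init_lock_with_hint rather than written by hand, and the exact glue
// between the two is an assumption here):
//
//   omp_lock_t lock;
//   omp_init_lock_with_hint(&lock, omp_lock_hint_speculative);
//   // ... which ends up in __kmpc_init_lock_with_hint(&loc, gtid,
//   //     (void **)&lock, omp_lock_hint_speculative) in this file.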
2144
2145/* initialize the lock with a hint */
Jonathan Peyton30419822017-05-12 18:01:32 +00002146void __kmpc_init_nest_lock_with_hint(ident_t *loc, kmp_int32 gtid,
2147 void **user_lock, uintptr_t hint) {
2148 KMP_DEBUG_ASSERT(__kmp_init_serial);
2149 if (__kmp_env_consistency_check && user_lock == NULL) {
2150 KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock_with_hint");
2151 }
Jonathan Peytonb87b5812015-12-11 22:04:05 +00002152
Jonathan Peyton30419822017-05-12 18:01:32 +00002153 __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_map_hint_to_lock(hint));
Joachim Protze82e94a52017-11-01 10:08:30 +00002154
2155#if OMPT_SUPPORT && OMPT_OPTIONAL
2156 // This is the case, if called from omp_init_lock_with_hint:
2157 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2158 if (!codeptr)
2159 codeptr = OMPT_GET_RETURN_ADDRESS(0);
2160 if (ompt_enabled.ompt_callback_lock_init) {
2161 ompt_callbacks.ompt_callback(ompt_callback_lock_init)(
2162 ompt_mutex_nest_lock, (omp_lock_hint_t)hint,
Joachim Protze40636132018-05-28 08:16:08 +00002163 __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
Joachim Protze82e94a52017-11-01 10:08:30 +00002164 codeptr);
2165 }
2166#endif
Jonathan Peytonb87b5812015-12-11 22:04:05 +00002167}
2168
2169#endif // KMP_USE_DYNAMIC_LOCK
2170
/* initialize the lock */
void __kmpc_init_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK

  KMP_DEBUG_ASSERT(__kmp_init_serial);
  if (__kmp_env_consistency_check && user_lock == NULL) {
    KMP_FATAL(LockIsUninitialized, "omp_init_lock");
  }
  __kmp_init_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_init) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_init)(
        ompt_mutex_lock, omp_lock_hint_none,
        __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
        codeptr);
  }
#endif

#else // KMP_USE_DYNAMIC_LOCK

  static char const *const func = "omp_init_lock";
  kmp_user_lock_p lck;
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  if (__kmp_env_consistency_check) {
    if (user_lock == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
  }

  KMP_CHECK_USER_LOCK_INIT();

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_user_lock_allocate(user_lock, gtid, 0);
  }
  INIT_LOCK(lck);
  __kmp_set_user_lock_location(lck, loc);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_init) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_init)(
        ompt_mutex_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
        (omp_wait_id_t)user_lock, codeptr);
  }
#endif

#if USE_ITT_BUILD
  __kmp_itt_lock_creating(lck);
#endif /* USE_ITT_BUILD */

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_lock

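// Illustrative sketch (not part of the runtime): user code like the
// following reaches __kmpc_init_lock and friends through the omp_*_lock
// entry layer; `my_lock` and `total` are hypothetical names.
//
//   #include <omp.h>
//
//   omp_lock_t my_lock;
//   int total = 0;
//   omp_init_lock(&my_lock); // -> __kmpc_init_lock
//   #pragma omp parallel
//   {
//     omp_set_lock(&my_lock); // -> __kmpc_set_lock
//     total += 1; // protected update
//     omp_unset_lock(&my_lock); // -> __kmpc_unset_lock
//   }
//   omp_destroy_lock(&my_lock); // -> __kmpc_destroy_lock
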
/* initialize the nested lock */
void __kmpc_init_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK

  KMP_DEBUG_ASSERT(__kmp_init_serial);
  if (__kmp_env_consistency_check && user_lock == NULL) {
    KMP_FATAL(LockIsUninitialized, "omp_init_nest_lock");
  }
  __kmp_init_nest_lock_with_hint(loc, user_lock, __kmp_user_lock_seq);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_init) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_init)(
        ompt_mutex_nest_lock, omp_lock_hint_none,
        __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
        codeptr);
  }
#endif

#else // KMP_USE_DYNAMIC_LOCK

  static char const *const func = "omp_init_nest_lock";
  kmp_user_lock_p lck;
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  if (__kmp_env_consistency_check) {
    if (user_lock == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
  }

  KMP_CHECK_USER_LOCK_INIT();

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) + sizeof(lck->tas.lk.depth_locked) <=
       OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) + sizeof(lck->futex.lk.depth_locked) <=
            OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_user_lock_allocate(user_lock, gtid, 0);
  }

  INIT_NESTED_LOCK(lck);
  __kmp_set_user_lock_location(lck, loc);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_init) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_init)(
        ompt_mutex_nest_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
        (omp_wait_id_t)user_lock, codeptr);
  }
#endif

#if USE_ITT_BUILD
  __kmp_itt_lock_creating(lck);
#endif /* USE_ITT_BUILD */

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_init_nest_lock

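// Illustrative sketch (not part of the runtime): a nestable lock may be
// re-acquired by the thread that already owns it, which is what the
// depth_locked field accounted for above; `nl`, `update` and `x` are
// hypothetical names.
//
//   #include <omp.h>
//
//   omp_nest_lock_t nl;
//
//   void update(int *p) {
//     omp_set_nest_lock(&nl); // owner re-acquires: depth becomes 2
//     *p += 1;
//     omp_unset_nest_lock(&nl); // depth back to 1
//   }
//
//   // ... after omp_init_nest_lock(&nl), i.e. __kmpc_init_nest_lock:
//   int x = 0;
//   omp_set_nest_lock(&nl); // depth 1: KMP_LOCK_ACQUIRED_FIRST
//   update(&x);
//   omp_unset_nest_lock(&nl); // depth 0: lock actually released
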
void __kmpc_destroy_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK

#if USE_ITT_BUILD
  kmp_user_lock_p lck;
  if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
    lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
  } else {
    lck = (kmp_user_lock_p)user_lock;
  }
  __kmp_itt_lock_destroyed(lck);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_destroy) {
    kmp_user_lock_p lck;
    if (KMP_EXTRACT_D_TAG(user_lock) == 0) {
      lck = ((kmp_indirect_lock_t *)KMP_LOOKUP_I_LOCK(user_lock))->lock;
    } else {
      lck = (kmp_user_lock_p)user_lock;
    }
    ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
        ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
  }
#endif
  KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);
#else
  kmp_user_lock_p lck;

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_destroy_lock");
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_destroy) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
        ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
  }
#endif

#if USE_ITT_BUILD
  __kmp_itt_lock_destroyed(lck);
#endif /* USE_ITT_BUILD */
  DESTROY_LOCK(lck);

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_LOCK_T_SIZE)) {
    ;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_LOCK_T_SIZE)) {
    ;
  }
#endif
  else {
    __kmp_user_lock_free(user_lock, gtid, lck);
  }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_lock

/* destroy the nested lock */
void __kmpc_destroy_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK

#if USE_ITT_BUILD
  kmp_indirect_lock_t *ilk = KMP_LOOKUP_I_LOCK(user_lock);
  __kmp_itt_lock_destroyed(ilk->lock);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_destroy) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
        ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
  }
#endif
  KMP_D_LOCK_FUNC(user_lock, destroy)((kmp_dyna_lock_t *)user_lock);

#else // KMP_USE_DYNAMIC_LOCK

  kmp_user_lock_p lck;

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) + sizeof(lck->tas.lk.depth_locked) <=
       OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) + sizeof(lck->futex.lk.depth_locked) <=
            OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_destroy_nest_lock");
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_lock_destroy) {
    ompt_callbacks.ompt_callback(ompt_callback_lock_destroy)(
        ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
  }
#endif

#if USE_ITT_BUILD
  __kmp_itt_lock_destroyed(lck);
#endif /* USE_ITT_BUILD */

  DESTROY_NESTED_LOCK(lck);

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) + sizeof(lck->tas.lk.depth_locked) <=
       OMP_NEST_LOCK_T_SIZE)) {
    ;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) + sizeof(lck->futex.lk.depth_locked) <=
            OMP_NEST_LOCK_T_SIZE)) {
    ;
  }
#endif
  else {
    __kmp_user_lock_free(user_lock, gtid, lck);
  }
#endif // KMP_USE_DYNAMIC_LOCK
} // __kmpc_destroy_nest_lock

void __kmpc_set_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
  KMP_COUNT_BLOCK(OMP_set_lock);
#if KMP_USE_DYNAMIC_LOCK
  int tag = KMP_EXTRACT_D_TAG(user_lock);
#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring(
      (kmp_user_lock_p)
          user_lock); // itt function will get to the right lock object.
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_lock, omp_lock_hint_none,
        __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
        codeptr);
  }
#endif
#if KMP_USE_INLINED_TAS
  if (tag == locktag_tas && !__kmp_env_consistency_check) {
    KMP_ACQUIRE_TAS_LOCK(user_lock, gtid);
  } else
#elif KMP_USE_INLINED_FUTEX
  if (tag == locktag_futex && !__kmp_env_consistency_check) {
    KMP_ACQUIRE_FUTEX_LOCK(user_lock, gtid);
  } else
#endif
  {
    __kmp_direct_set[tag]((kmp_dyna_lock_t *)user_lock, gtid);
  }
#if USE_ITT_BUILD
  __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_acquired) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
        ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
  }
#endif

#else // KMP_USE_DYNAMIC_LOCK

  kmp_user_lock_p lck;

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_set_lock");
  }

#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring(lck);
#endif /* USE_ITT_BUILD */
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
        (omp_wait_id_t)lck, codeptr);
  }
#endif

  ACQUIRE_LOCK(lck, gtid);

#if USE_ITT_BUILD
  __kmp_itt_lock_acquired(lck);
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_mutex_acquired) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
        ompt_mutex_lock, (omp_wait_id_t)lck, codeptr);
  }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

void __kmpc_set_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK

#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.enabled) {
    if (ompt_enabled.ompt_callback_mutex_acquire) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
          ompt_mutex_nest_lock, omp_lock_hint_none,
          __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
          codeptr);
    }
  }
#endif
  int acquire_status =
      KMP_D_LOCK_FUNC(user_lock, set)((kmp_dyna_lock_t *)user_lock, gtid);
  (void)acquire_status;
#if USE_ITT_BUILD
  __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    if (acquire_status == KMP_LOCK_ACQUIRED_FIRST) {
      if (ompt_enabled.ompt_callback_mutex_acquired) {
        // lock_first
        ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
            ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
      }
    } else {
      if (ompt_enabled.ompt_callback_nest_lock) {
        // lock_next
        ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
            ompt_scope_begin, (omp_wait_id_t)user_lock, codeptr);
      }
    }
  }
#endif

#else // KMP_USE_DYNAMIC_LOCK
  int acquire_status;
  kmp_user_lock_p lck;

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) + sizeof(lck->tas.lk.depth_locked) <=
       OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) + sizeof(lck->futex.lk.depth_locked) <=
            OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_set_nest_lock");
  }

#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring(lck);
#endif /* USE_ITT_BUILD */
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.enabled) {
    if (ompt_enabled.ompt_callback_mutex_acquire) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
          ompt_mutex_nest_lock, omp_lock_hint_none,
          __ompt_get_mutex_impl_type(), (omp_wait_id_t)lck, codeptr);
    }
  }
#endif

  ACQUIRE_NESTED_LOCK(lck, gtid, &acquire_status);

#if USE_ITT_BUILD
  __kmp_itt_lock_acquired(lck);
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled) {
    if (acquire_status == KMP_LOCK_ACQUIRED_FIRST) {
      if (ompt_enabled.ompt_callback_mutex_acquired) {
        // lock_first
        ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
            ompt_mutex_nest_lock, (omp_wait_id_t)lck, codeptr);
      }
    } else {
      if (ompt_enabled.ompt_callback_nest_lock) {
        // lock_next
        ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
            ompt_scope_begin, (omp_wait_id_t)lck, codeptr);
      }
    }
  }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

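// A sketch of the tool side of the OMPT hooks used above (hedged: a minimal
// example assuming the ompt interface versioned with this runtime;
// `on_mutex_acquired` is a hypothetical name and error handling is omitted):
//
//   #include <ompt.h>
//
//   static void on_mutex_acquired(ompt_mutex_kind_t kind,
//                                 omp_wait_id_t wait_id,
//                                 const void *codeptr_ra) {
//     // wait_id is the user_lock address passed by __kmpc_set_lock /
//     // __kmpc_set_nest_lock; codeptr_ra is the codeptr computed above.
//   }
//
//   // in the tool's ompt_initialize callback:
//   ompt_set_callback(ompt_callback_mutex_acquired,
//                     (ompt_callback_t)on_mutex_acquired);
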
void __kmpc_unset_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK

  int tag = KMP_EXTRACT_D_TAG(user_lock);
#if USE_ITT_BUILD
  __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
#endif
#if KMP_USE_INLINED_TAS
  if (tag == locktag_tas && !__kmp_env_consistency_check) {
    KMP_RELEASE_TAS_LOCK(user_lock, gtid);
  } else
#elif KMP_USE_INLINED_FUTEX
  if (tag == locktag_futex && !__kmp_env_consistency_check) {
    KMP_RELEASE_FUTEX_LOCK(user_lock, gtid);
  } else
#endif
  {
    __kmp_direct_unset[tag]((kmp_dyna_lock_t *)user_lock, gtid);
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_mutex_released) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
        ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
  }
#endif

#else // KMP_USE_DYNAMIC_LOCK

  kmp_user_lock_p lck;

  /* Can't use serial interval since not block structured */
  /* release the lock */

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_LOCK_T_SIZE)) {
#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
// "fast" path implemented to fix customer performance issue
#if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
#endif /* USE_ITT_BUILD */
    TCW_4(((kmp_user_lock_p)user_lock)->tas.lk.poll, 0);
    KMP_MB();

#if OMPT_SUPPORT && OMPT_OPTIONAL
    // This is the case if called from omp_init_lock_with_hint:
    void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
    if (!codeptr)
      codeptr = OMPT_GET_RETURN_ADDRESS(0);
    if (ompt_enabled.ompt_callback_mutex_released) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
          ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
    }
#endif

    return;
#else
    lck = (kmp_user_lock_p)user_lock;
#endif
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_unset_lock");
  }

#if USE_ITT_BUILD
  __kmp_itt_lock_releasing(lck);
#endif /* USE_ITT_BUILD */

  RELEASE_LOCK(lck, gtid);

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_mutex_released) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
        ompt_mutex_lock, (omp_wait_id_t)lck, codeptr);
  }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

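// The "fast" path above releases a TAS lock with a plain write of 0 plus a
// full memory barrier. A standalone sketch of the same idea (assuming C11
// atomics; `tiny_tas_lock_t` is a hypothetical type, and a release store
// stands in for the TCW_4 + KMP_MB() pair):
//
//   #include <stdatomic.h>
//
//   typedef struct {
//     atomic_int poll; // 0 = free, nonzero = held (like tas.lk.poll)
//   } tiny_tas_lock_t;
//
//   static void tiny_tas_unlock(tiny_tas_lock_t *l) {
//     atomic_store_explicit(&l->poll, 0, memory_order_release);
//   }
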
/* release the nested lock */
void __kmpc_unset_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK

#if USE_ITT_BUILD
  __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
#endif
  int release_status =
      KMP_D_LOCK_FUNC(user_lock, unset)((kmp_dyna_lock_t *)user_lock, gtid);
  (void)release_status;

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.enabled) {
    if (release_status == KMP_LOCK_RELEASED) {
      if (ompt_enabled.ompt_callback_mutex_released) {
        // release_lock_last
        ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
            ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
      }
    } else if (ompt_enabled.ompt_callback_nest_lock) {
      // release_lock_prev
      ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
          ompt_scope_end, (omp_wait_id_t)user_lock, codeptr);
    }
  }
#endif

#else // KMP_USE_DYNAMIC_LOCK

  kmp_user_lock_p lck;

  /* Can't use serial interval since not block structured */

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) + sizeof(lck->tas.lk.depth_locked) <=
       OMP_NEST_LOCK_T_SIZE)) {
#if KMP_OS_LINUX &&                                                            \
    (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
    // "fast" path implemented to fix customer performance issue
    kmp_tas_lock_t *tl = (kmp_tas_lock_t *)user_lock;
#if USE_ITT_BUILD
    __kmp_itt_lock_releasing((kmp_user_lock_p)user_lock);
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_OPTIONAL
    int release_status = KMP_LOCK_STILL_HELD;
#endif

    if (--(tl->lk.depth_locked) == 0) {
      TCW_4(tl->lk.poll, 0);
#if OMPT_SUPPORT && OMPT_OPTIONAL
      release_status = KMP_LOCK_RELEASED;
#endif
    }
    KMP_MB();

#if OMPT_SUPPORT && OMPT_OPTIONAL
    // This is the case if called from omp_init_lock_with_hint:
    void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
    if (!codeptr)
      codeptr = OMPT_GET_RETURN_ADDRESS(0);
    if (ompt_enabled.enabled) {
      if (release_status == KMP_LOCK_RELEASED) {
        if (ompt_enabled.ompt_callback_mutex_released) {
          // release_lock_last
          ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
              ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
        }
      } else if (ompt_enabled.ompt_callback_nest_lock) {
        // release_lock_previous
        ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
            ompt_scope_end, (omp_wait_id_t)user_lock, codeptr);
      }
    }
#endif

    return;
#else
    lck = (kmp_user_lock_p)user_lock;
#endif
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) + sizeof(lck->futex.lk.depth_locked) <=
            OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_unset_nest_lock");
  }

#if USE_ITT_BUILD
  __kmp_itt_lock_releasing(lck);
#endif /* USE_ITT_BUILD */

  int release_status;
  release_status = RELEASE_NESTED_LOCK(lck, gtid);
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.enabled) {
    if (release_status == KMP_LOCK_RELEASED) {
      if (ompt_enabled.ompt_callback_mutex_released) {
        // release_lock_last
        ompt_callbacks.ompt_callback(ompt_callback_mutex_released)(
            ompt_mutex_nest_lock, (omp_wait_id_t)lck, codeptr);
      }
    } else if (ompt_enabled.ompt_callback_nest_lock) {
      // release_lock_previous
      ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
          ompt_scope_end, (omp_wait_id_t)lck, codeptr);
    }
  }
#endif

#endif // KMP_USE_DYNAMIC_LOCK
}

/* try to acquire the lock */
int __kmpc_test_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
  KMP_COUNT_BLOCK(OMP_test_lock);

#if KMP_USE_DYNAMIC_LOCK
  int rc;
  int tag = KMP_EXTRACT_D_TAG(user_lock);
#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_lock, omp_lock_hint_none,
        __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
        codeptr);
  }
#endif
#if KMP_USE_INLINED_TAS
  if (tag == locktag_tas && !__kmp_env_consistency_check) {
    KMP_TEST_TAS_LOCK(user_lock, gtid, rc);
  } else
#elif KMP_USE_INLINED_FUTEX
  if (tag == locktag_futex && !__kmp_env_consistency_check) {
    KMP_TEST_FUTEX_LOCK(user_lock, gtid, rc);
  } else
#endif
  {
    rc = __kmp_direct_test[tag]((kmp_dyna_lock_t *)user_lock, gtid);
  }
  if (rc) {
#if USE_ITT_BUILD
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_mutex_acquired) {
      ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
          ompt_mutex_lock, (omp_wait_id_t)user_lock, codeptr);
    }
#endif
    return FTN_TRUE;
  } else {
#if USE_ITT_BUILD
    __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
#endif
    return FTN_FALSE;
  }

#else // KMP_USE_DYNAMIC_LOCK

  kmp_user_lock_p lck;
  int rc;

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) <= OMP_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_test_lock");
  }

#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring(lck);
#endif /* USE_ITT_BUILD */
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_lock, omp_lock_hint_none, __ompt_get_mutex_impl_type(),
        (omp_wait_id_t)lck, codeptr);
  }
#endif

  rc = TEST_LOCK(lck, gtid);
#if USE_ITT_BUILD
  if (rc) {
    __kmp_itt_lock_acquired(lck);
  } else {
    __kmp_itt_lock_cancelled(lck);
  }
#endif /* USE_ITT_BUILD */
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (rc && ompt_enabled.ompt_callback_mutex_acquired) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
        ompt_mutex_lock, (omp_wait_id_t)lck, codeptr);
  }
#endif

  return (rc ? FTN_TRUE : FTN_FALSE);

  /* Can't use serial interval since not block structured */

#endif // KMP_USE_DYNAMIC_LOCK
}

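// Illustrative sketch (not part of the runtime): the FTN_TRUE/FTN_FALSE
// result above backs the usual non-blocking idiom; `l` and do_other_work()
// are hypothetical.
//
//   #include <omp.h>
//
//   while (!omp_test_lock(&l)) { // -> __kmpc_test_lock returning FTN_FALSE
//     do_other_work(); // make progress instead of blocking
//   }
//   // lock held here (__kmpc_test_lock returned FTN_TRUE)
//   omp_unset_lock(&l);
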
/* try to acquire the nested lock */
int __kmpc_test_nest_lock(ident_t *loc, kmp_int32 gtid, void **user_lock) {
#if KMP_USE_DYNAMIC_LOCK
  int rc;
#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring((kmp_user_lock_p)user_lock);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_nest_lock, omp_lock_hint_none,
        __ompt_get_mutex_impl_type(user_lock), (omp_wait_id_t)user_lock,
        codeptr);
  }
#endif
  rc = KMP_D_LOCK_FUNC(user_lock, test)((kmp_dyna_lock_t *)user_lock, gtid);
#if USE_ITT_BUILD
  if (rc) {
    __kmp_itt_lock_acquired((kmp_user_lock_p)user_lock);
  } else {
    __kmp_itt_lock_cancelled((kmp_user_lock_p)user_lock);
  }
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled && rc) {
    if (rc == 1) {
      if (ompt_enabled.ompt_callback_mutex_acquired) {
        // lock_first
        ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
            ompt_mutex_nest_lock, (omp_wait_id_t)user_lock, codeptr);
      }
    } else {
      if (ompt_enabled.ompt_callback_nest_lock) {
        // lock_next
        ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
            ompt_scope_begin, (omp_wait_id_t)user_lock, codeptr);
      }
    }
  }
#endif
  return rc;

#else // KMP_USE_DYNAMIC_LOCK

  kmp_user_lock_p lck;
  int rc;

  if ((__kmp_user_lock_kind == lk_tas) &&
      (sizeof(lck->tas.lk.poll) + sizeof(lck->tas.lk.depth_locked) <=
       OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#if KMP_USE_FUTEX
  else if ((__kmp_user_lock_kind == lk_futex) &&
           (sizeof(lck->futex.lk.poll) + sizeof(lck->futex.lk.depth_locked) <=
            OMP_NEST_LOCK_T_SIZE)) {
    lck = (kmp_user_lock_p)user_lock;
  }
#endif
  else {
    lck = __kmp_lookup_user_lock(user_lock, "omp_test_nest_lock");
  }

#if USE_ITT_BUILD
  __kmp_itt_lock_acquiring(lck);
#endif /* USE_ITT_BUILD */

#if OMPT_SUPPORT && OMPT_OPTIONAL
  // This is the case if called from omp_init_lock_with_hint:
  void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
  if (!codeptr)
    codeptr = OMPT_GET_RETURN_ADDRESS(0);
  if (ompt_enabled.enabled && ompt_enabled.ompt_callback_mutex_acquire) {
    ompt_callbacks.ompt_callback(ompt_callback_mutex_acquire)(
        ompt_mutex_nest_lock, omp_lock_hint_none,
        __ompt_get_mutex_impl_type(), (omp_wait_id_t)lck, codeptr);
  }
#endif

  rc = TEST_NESTED_LOCK(lck, gtid);
#if USE_ITT_BUILD
  if (rc) {
    __kmp_itt_lock_acquired(lck);
  } else {
    __kmp_itt_lock_cancelled(lck);
  }
#endif /* USE_ITT_BUILD */
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.enabled && rc) {
    if (rc == 1) {
      if (ompt_enabled.ompt_callback_mutex_acquired) {
        // lock_first
        ompt_callbacks.ompt_callback(ompt_callback_mutex_acquired)(
            ompt_mutex_nest_lock, (omp_wait_id_t)lck, codeptr);
      }
    } else {
      if (ompt_enabled.ompt_callback_nest_lock) {
        // lock_next
        ompt_callbacks.ompt_callback(ompt_callback_nest_lock)(
            ompt_scope_begin, (omp_wait_id_t)lck, codeptr);
      }
    }
  }
#endif
  return rc;

  /* Can't use serial interval since not block structured */

#endif // KMP_USE_DYNAMIC_LOCK
}

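// Illustrative sketch (not part of the runtime): omp_test_nest_lock()
// returns the new nesting depth on success and 0 on failure, which is why
// the OMPT code above distinguishes rc == 1 (lock_first) from rc > 1
// (lock_next); `nl` is a hypothetical lock.
//
//   #include <omp.h>
//
//   omp_nest_lock_t nl;
//   omp_init_nest_lock(&nl);
//   int d1 = omp_test_nest_lock(&nl); // d1 == 1: mutex_acquired event
//   int d2 = omp_test_nest_lock(&nl); // d2 == 2: nest_lock event
//   omp_unset_nest_lock(&nl);
//   omp_unset_nest_lock(&nl);
//   omp_destroy_nest_lock(&nl);
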
// Interface to fast scalable reduce methods routines

// keep the selected method in a thread local structure for cross-function
// usage: will be used in __kmpc_end_reduce* functions;
// another solution: to re-determine the method one more time in
// __kmpc_end_reduce* functions (new prototype required then)
// AT: which solution is better?
#define __KMP_SET_REDUCTION_METHOD(gtid, rmethod)                              \
  ((__kmp_threads[(gtid)]->th.th_local.packed_reduction_method) = (rmethod))

#define __KMP_GET_REDUCTION_METHOD(gtid)                                       \
  (__kmp_threads[(gtid)]->th.th_local.packed_reduction_method)

// description of the packed_reduction_method variable: look at the macros in
// kmp.h

// used in a critical section reduce block
static __forceinline void
__kmp_enter_critical_section_reduce_block(ident_t *loc, kmp_int32 global_tid,
                                          kmp_critical_name *crit) {

  // this lock was visible to a customer and to the threading profile tool as a
  // serial overhead span (although it's used for an internal purpose only)
  // why was it visible in previous implementation?
  // should we keep it visible in new reduce block?
  kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

  kmp_dyna_lock_t *lk = (kmp_dyna_lock_t *)crit;
  // Check if it is initialized.
  if (*lk == 0) {
    if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
      KMP_COMPARE_AND_STORE_ACQ32((volatile kmp_int32 *)crit, 0,
                                  KMP_GET_D_TAG(__kmp_user_lock_seq));
    } else {
      __kmp_init_indirect_csptr(crit, loc, global_tid,
                                KMP_GET_I_TAG(__kmp_user_lock_seq));
    }
  }
  // Branch for accessing the actual lock object and set operation. This
  // branching is inevitable since this lock initialization does not follow the
  // normal dispatch path (lock table is not used).
  if (KMP_EXTRACT_D_TAG(lk) != 0) {
    lck = (kmp_user_lock_p)lk;
    KMP_DEBUG_ASSERT(lck != NULL);
    if (__kmp_env_consistency_check) {
      __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
    }
    KMP_D_LOCK_FUNC(lk, set)(lk, global_tid);
  } else {
    kmp_indirect_lock_t *ilk = *((kmp_indirect_lock_t **)lk);
    lck = ilk->lock;
    KMP_DEBUG_ASSERT(lck != NULL);
    if (__kmp_env_consistency_check) {
      __kmp_push_sync(global_tid, ct_critical, loc, lck, __kmp_user_lock_seq);
    }
    KMP_I_LOCK_FUNC(ilk, set)(lck, global_tid);
  }

#else // KMP_USE_DYNAMIC_LOCK

  // We know that the fast reduction code is only emitted by Intel compilers
  // with 32 byte critical sections. If there isn't enough space, then we
  // have to use a pointer.
  if (__kmp_base_user_lock_size <= INTEL_CRITICAL_SIZE) {
    lck = (kmp_user_lock_p)crit;
  } else {
    lck = __kmp_get_critical_section_ptr(crit, loc, global_tid);
  }
  KMP_DEBUG_ASSERT(lck != NULL);

  if (__kmp_env_consistency_check)
    __kmp_push_sync(global_tid, ct_critical, loc, lck);

  __kmp_acquire_user_lock_with_checks(lck, global_tid);

#endif // KMP_USE_DYNAMIC_LOCK
}

// used in a critical section reduce block
static __forceinline void
__kmp_end_critical_section_reduce_block(ident_t *loc, kmp_int32 global_tid,
                                        kmp_critical_name *crit) {

  kmp_user_lock_p lck;

#if KMP_USE_DYNAMIC_LOCK

  if (KMP_IS_D_LOCK(__kmp_user_lock_seq)) {
    lck = (kmp_user_lock_p)crit;
    if (__kmp_env_consistency_check)
      __kmp_pop_sync(global_tid, ct_critical, loc);
    KMP_D_LOCK_FUNC(lck, unset)((kmp_dyna_lock_t *)lck, global_tid);
  } else {
    kmp_indirect_lock_t *ilk =
        (kmp_indirect_lock_t *)TCR_PTR(*((kmp_indirect_lock_t **)crit));
    if (__kmp_env_consistency_check)
      __kmp_pop_sync(global_tid, ct_critical, loc);
    KMP_I_LOCK_FUNC(ilk, unset)(ilk->lock, global_tid);
  }

#else // KMP_USE_DYNAMIC_LOCK

  // We know that the fast reduction code is only emitted by Intel compilers
  // with 32 byte critical sections. If there isn't enough space, then we have
  // to use a pointer.
  if (__kmp_base_user_lock_size > 32) {
    lck = *((kmp_user_lock_p *)crit);
    KMP_ASSERT(lck != NULL);
  } else {
    lck = (kmp_user_lock_p)crit;
  }

  if (__kmp_env_consistency_check)
    __kmp_pop_sync(global_tid, ct_critical, loc);

  __kmp_release_user_lock_with_checks(lck, global_tid);

#endif // KMP_USE_DYNAMIC_LOCK
} // __kmp_end_critical_section_reduce_block

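// The two helpers above always bracket the user-visible combine step when
// the critical_reduce_block method is selected: __kmpc_reduce* enters the
// section and returns 1, the compiler-generated combine code runs, and
// __kmpc_end_reduce* leaves it. Roughly (a sketch of the call sequence, not
// literal code):
//
//   __kmp_enter_critical_section_reduce_block(loc, global_tid, crit);
//   // ... compiler-emitted code folds this thread's private copy into
//   // the shared reduction variable, serialized across the team ...
//   __kmp_end_critical_section_reduce_block(loc, global_tid, crit);
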
#if OMP_40_ENABLED
static __forceinline int
__kmp_swap_teams_for_teams_reduction(kmp_info_t *th, kmp_team_t **team_p,
                                     int *task_state) {
  kmp_team_t *team;

  // Check if we are inside the teams construct.
  if (th->th.th_teams_microtask) {
    *team_p = team = th->th.th_team;
    if (team->t.t_level == th->th.th_teams_level) {
      // This is reduction at teams construct.
      KMP_DEBUG_ASSERT(!th->th.th_info.ds.ds_tid); // AC: check that tid == 0
      // Let's swap teams temporarily for the reduction.
      th->th.th_info.ds.ds_tid = team->t.t_master_tid;
      th->th.th_team = team->t.t_parent;
      th->th.th_team_nproc = th->th.th_team->t.t_nproc;
      th->th.th_task_team = th->th.th_team->t.t_task_team[0];
      *task_state = th->th.th_task_state;
      th->th.th_task_state = 0;

      return 1;
    }
  }
  return 0;
}

static __forceinline void
__kmp_restore_swapped_teams(kmp_info_t *th, kmp_team_t *team, int task_state) {
  // Restore thread structure swapped in __kmp_swap_teams_for_teams_reduction.
  th->th.th_info.ds.ds_tid = 0;
  th->th.th_team = team;
  th->th.th_team_nproc = team->t.t_nproc;
  th->th.th_task_team = team->t.t_task_team[task_state];
  th->th.th_task_state = task_state;
}
#endif

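// Before the reduce entry points below, a sketch of the lowering they serve
// (illustrative; the emitted sequence is compiler-specific, and `sum`,
// `priv_sum`, `my_add` and `crit` are hypothetical names). For
//
//   #pragma omp parallel for reduction(+ : sum)
//   for (int i = 0; i < n; ++i)
//     sum += a[i];
//
// each thread conceptually runs code along these lines after the loop:
//
//   switch (__kmpc_reduce_nowait(loc, gtid, 1, sizeof(priv_sum), &priv_sum,
//                                my_add, &crit)) {
//   case 1: // critical/empty method: this thread combines directly
//     sum += priv_sum;
//     __kmpc_end_reduce_nowait(loc, gtid, &crit);
//     break;
//   case 2: // atomic method: every thread combines atomically
//     sum += priv_sum; // emitted as an atomic update
//     break;
//   default: // 0: tree method already combined this thread's contribution
//     break;
//   }
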
/* 2.a.i. Reduce Block without a terminating barrier */
/*!
@ingroup SYNCHRONIZATION
@param loc source location information
@param global_tid global thread number
@param num_vars number of items (variables) to be reduced
@param reduce_size size of data in bytes to be reduced
@param reduce_data pointer to data to be reduced
@param reduce_func callback function providing reduction operation on two
operands and returning result of reduction in lhs_data
@param lck pointer to the unique lock data structure
@result 1 for the master thread, 0 for all other team threads, 2 for all team
threads if atomic reduction needed

The nowait version is used for a reduce clause with the nowait argument.
*/
3291kmp_int32
Jonathan Peyton30419822017-05-12 18:01:32 +00003292__kmpc_reduce_nowait(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars,
3293 size_t reduce_size, void *reduce_data,
3294 void (*reduce_func)(void *lhs_data, void *rhs_data),
3295 kmp_critical_name *lck) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003296
Jonathan Peyton30419822017-05-12 18:01:32 +00003297 KMP_COUNT_BLOCK(REDUCE_nowait);
3298 int retval = 0;
3299 PACKED_REDUCTION_METHOD_T packed_reduction_method;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003300#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00003301 kmp_info_t *th;
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003302 kmp_team_t *team;
Jonathan Peyton30419822017-05-12 18:01:32 +00003303 int teams_swapped = 0, task_state;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003304#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003305 KA_TRACE(10, ("__kmpc_reduce_nowait() enter: called T#%d\n", global_tid));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003306
Jonathan Peyton30419822017-05-12 18:01:32 +00003307 // why do we need this initialization here at all?
3308 // Reduction clause can not be used as a stand-alone directive.
Jim Cownie5e8470a2013-09-27 10:38:44 +00003309
Jonathan Peyton30419822017-05-12 18:01:32 +00003310 // do not call __kmp_serial_initialize(), it will be called by
3311 // __kmp_parallel_initialize() if needed
3312 // possible detection of false-positive race by the threadchecker ???
3313 if (!TCR_4(__kmp_init_parallel))
3314 __kmp_parallel_initialize();
Jim Cownie5e8470a2013-09-27 10:38:44 +00003315
Jonathan Peyton30419822017-05-12 18:01:32 +00003316// check correctness of reduce block nesting
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00003317#if KMP_USE_DYNAMIC_LOCK
Jonathan Peyton30419822017-05-12 18:01:32 +00003318 if (__kmp_env_consistency_check)
3319 __kmp_push_sync(global_tid, ct_reduce, loc, NULL, 0);
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00003320#else
Jonathan Peyton30419822017-05-12 18:01:32 +00003321 if (__kmp_env_consistency_check)
3322 __kmp_push_sync(global_tid, ct_reduce, loc, NULL);
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00003323#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003324
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003325#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00003326 th = __kmp_thread_from_gtid(global_tid);
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003327 teams_swapped = __kmp_swap_teams_for_teams_reduction(th, &team, &task_state);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003328#endif // OMP_40_ENABLED
Jim Cownie5e8470a2013-09-27 10:38:44 +00003329
Jonathan Peyton30419822017-05-12 18:01:32 +00003330 // packed_reduction_method value will be reused by __kmp_end_reduce* function,
3331 // the value should be kept in a variable
3332 // the variable should be either a construct-specific or thread-specific
3333 // property, not a team specific property
3334 // (a thread can reach the next reduce block on the next construct, reduce
3335 // method may differ on the next construct)
3336 // an ident_t "loc" parameter could be used as a construct-specific property
3337 // (what if loc == 0?)
3338 // (if both construct-specific and team-specific variables were shared,
3339 // then unness extra syncs should be needed)
3340 // a thread-specific variable is better regarding two issues above (next
3341 // construct and extra syncs)
3342 // a thread-specific "th_local.reduction_method" variable is used currently
3343 // each thread executes 'determine' and 'set' lines (no need to execute by one
3344 // thread, to avoid unness extra syncs)
Jim Cownie5e8470a2013-09-27 10:38:44 +00003345
Jonathan Peyton30419822017-05-12 18:01:32 +00003346 packed_reduction_method = __kmp_determine_reduction_method(
3347 loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck);
3348 __KMP_SET_REDUCTION_METHOD(global_tid, packed_reduction_method);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003349
Jonathan Peyton30419822017-05-12 18:01:32 +00003350 if (packed_reduction_method == critical_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003351
Jonathan Peyton30419822017-05-12 18:01:32 +00003352 __kmp_enter_critical_section_reduce_block(loc, global_tid, lck);
3353 retval = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003354
Jonathan Peyton30419822017-05-12 18:01:32 +00003355 } else if (packed_reduction_method == empty_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003356
Jonathan Peyton30419822017-05-12 18:01:32 +00003357 // usage: if team size == 1, no synchronization is required (Intel
3358 // platforms only)
3359 retval = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003360
Jonathan Peyton30419822017-05-12 18:01:32 +00003361 } else if (packed_reduction_method == atomic_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003362
Jonathan Peyton30419822017-05-12 18:01:32 +00003363 retval = 2;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003364
Jonathan Peyton30419822017-05-12 18:01:32 +00003365 // all threads should do this pop here (the code gen does not emit a
3366 // call to __kmpc_end_reduce_nowait() in the atomic case)
3367 // (this is not ideal: the 'pop' closes the checking block before the
3368 // atomic operation has actually executed;
3369 // the atomic operation runs slightly later, literally on the next
3370 // instruction)
3371 if (__kmp_env_consistency_check)
3372 __kmp_pop_sync(global_tid, ct_reduce, loc);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003373
Jonathan Peyton30419822017-05-12 18:01:32 +00003374 } else if (TEST_REDUCTION_METHOD(packed_reduction_method,
3375 tree_reduce_block)) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003376
Jonathan Peyton30419822017-05-12 18:01:32 +00003377// AT: performance issue: a real barrier is executed here
3378// AT: (if the master is slow, the other threads are blocked here waiting
3379// for the master to arrive and release them)
3380// AT: (this is not what a customer expects when specifying the NOWAIT
3381// clause: it brings no performance improvement and is confusing)
3382// AT: a *barrier_gather*nowait() implementation (or some other design)
3383// might be faster and closer to the intent of NOWAIT
3384// AT: TODO: run the EPCC benchmark and compare times
Jim Cownie5e8470a2013-09-27 10:38:44 +00003386
Jonathan Peyton30419822017-05-12 18:01:32 +00003387// this barrier should be invisible to a customer and to the threading profile
3388// tool (it's neither a terminating barrier nor customer's code, it's
3389// used for an internal purpose)
Joachim Protze82e94a52017-11-01 10:08:30 +00003390#if OMPT_SUPPORT
3391 // JP: can this barrier potentially lead to task scheduling?
3392 // JP: as long as there is a barrier in the implementation, OMPT should
3393 // and will provide the barrier events,
3394 // so we set up the necessary frame/return addresses.
Joachim Protzec5836064b2018-05-28 08:14:58 +00003395 omp_frame_t *ompt_frame;
Joachim Protze82e94a52017-11-01 10:08:30 +00003396 if (ompt_enabled.enabled) {
3397 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
Joachim Protzec255ca72017-11-05 14:11:10 +00003398 if (ompt_frame->enter_frame == NULL)
3399 ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
Joachim Protze82e94a52017-11-01 10:08:30 +00003400 OMPT_STORE_RETURN_ADDRESS(global_tid);
3401 }
3402#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003403#if USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00003404 __kmp_threads[global_tid]->th.th_ident = loc;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003405#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003406 retval =
3407 __kmp_barrier(UNPACK_REDUCTION_BARRIER(packed_reduction_method),
3408 global_tid, FALSE, reduce_size, reduce_data, reduce_func);
3409 retval = (retval != 0) ? (0) : (1);
Joachim Protze82e94a52017-11-01 10:08:30 +00003410#if OMPT_SUPPORT && OMPT_OPTIONAL
3411 if (ompt_enabled.enabled) {
Joachim Protzec255ca72017-11-05 14:11:10 +00003412 ompt_frame->enter_frame = NULL;
Joachim Protze82e94a52017-11-01 10:08:30 +00003413 }
3414#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003415
Jonathan Peyton30419822017-05-12 18:01:32 +00003416 // all workers except the master should do this pop here
3417 // (none of the workers will get to __kmpc_end_reduce_nowait())
3418 if (__kmp_env_consistency_check) {
3419 if (retval == 0) {
3420 __kmp_pop_sync(global_tid, ct_reduce, loc);
3421 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003422 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003423
3424 } else {
3425
3426 // should never reach this block
3427 KMP_ASSERT(0); // "unexpected method"
3428 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003429#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00003430 if (teams_swapped) {
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003431 __kmp_restore_swapped_teams(th, team, task_state);
Jonathan Peyton30419822017-05-12 18:01:32 +00003432 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003433#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003434 KA_TRACE(
3435 10,
3436 ("__kmpc_reduce_nowait() exit: called T#%d: method %08x, returns %08x\n",
3437 global_tid, packed_reduction_method, retval));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003438
Jonathan Peyton30419822017-05-12 18:01:32 +00003439 return retval;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003440}
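// A minimal sketch (guarded out of the build) of how a compiler typically
// dispatches on the result of __kmpc_reduce_nowait() when lowering
// "#pragma omp for nowait reduction(+:sum)". The names
// lowered_reduction_tail, my_reduce_func and local_sum are hypothetical,
// and __atomic_fetch_add stands in for whatever atomic RMW the compiler
// would actually emit.
#if 0
static void my_reduce_func(void *lhs_data, void *rhs_data) {
  *(int *)lhs_data += *(int *)rhs_data; // fold rhs partial sum into lhs
}

static void lowered_reduction_tail(ident_t *loc, kmp_int32 gtid, int *sum,
                                   int local_sum) {
  static kmp_critical_name crit = {0};
  switch (__kmpc_reduce_nowait(loc, gtid, /*num_vars=*/1, sizeof(int),
                               &local_sum, my_reduce_func, &crit)) {
  case 1: // critical/empty/tree winner: combine, then close the region
    *sum += local_sum;
    __kmpc_end_reduce_nowait(loc, gtid, &crit);
    break;
  case 2: // atomic path: every thread updates atomically; no end call
    __atomic_fetch_add(sum, local_sum, __ATOMIC_RELAXED);
    break;
  default: // 0: this thread's contribution was consumed by the tree reduce
    break;
  }
}
#endif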
3441
3442/*!
3443@ingroup SYNCHRONIZATION
3444@param loc source location information
3445@param global_tid global thread id.
3446@param lck pointer to the unique lock data structure
3447
3448Finish the execution of a reduce nowait.
3449*/
Jonathan Peyton30419822017-05-12 18:01:32 +00003450void __kmpc_end_reduce_nowait(ident_t *loc, kmp_int32 global_tid,
3451 kmp_critical_name *lck) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003452
Jonathan Peyton30419822017-05-12 18:01:32 +00003453 PACKED_REDUCTION_METHOD_T packed_reduction_method;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003454
Jonathan Peyton30419822017-05-12 18:01:32 +00003455 KA_TRACE(10, ("__kmpc_end_reduce_nowait() enter: called T#%d\n", global_tid));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003456
Jonathan Peyton30419822017-05-12 18:01:32 +00003457 packed_reduction_method = __KMP_GET_REDUCTION_METHOD(global_tid);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003458
Jonathan Peyton30419822017-05-12 18:01:32 +00003459 if (packed_reduction_method == critical_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003460
Jonathan Peyton30419822017-05-12 18:01:32 +00003461 __kmp_end_critical_section_reduce_block(loc, global_tid, lck);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003462
Jonathan Peyton30419822017-05-12 18:01:32 +00003463 } else if (packed_reduction_method == empty_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003464
Jonathan Peyton30419822017-05-12 18:01:32 +00003465 // usage: if team size == 1, no synchronization is required (on Intel
3466 // platforms only)
Jim Cownie5e8470a2013-09-27 10:38:44 +00003467
Jonathan Peyton30419822017-05-12 18:01:32 +00003468 } else if (packed_reduction_method == atomic_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003469
Jonathan Peyton30419822017-05-12 18:01:32 +00003470 // neither the master nor the other workers should get here
3471 // (code gen does not generate this call in case 2: atomic reduce block)
3472 // it would be better to remove this else-if entirely; the 'else' branch
3473 // would then catch this value and assert
Jim Cownie5e8470a2013-09-27 10:38:44 +00003474
Jonathan Peyton30419822017-05-12 18:01:32 +00003475 } else if (TEST_REDUCTION_METHOD(packed_reduction_method,
3476 tree_reduce_block)) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003477
Jonathan Peyton30419822017-05-12 18:01:32 +00003478 // only master gets here
Jim Cownie5e8470a2013-09-27 10:38:44 +00003479
Jonathan Peyton30419822017-05-12 18:01:32 +00003480 } else {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003481
Jonathan Peyton30419822017-05-12 18:01:32 +00003482 // should never reach this block
3483 KMP_ASSERT(0); // "unexpected method"
3484 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003485
Jonathan Peyton30419822017-05-12 18:01:32 +00003486 if (__kmp_env_consistency_check)
3487 __kmp_pop_sync(global_tid, ct_reduce, loc);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003488
Jonathan Peyton30419822017-05-12 18:01:32 +00003489 KA_TRACE(10, ("__kmpc_end_reduce_nowait() exit: called T#%d: method %08x\n",
3490 global_tid, packed_reduction_method));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003491
Jonathan Peyton30419822017-05-12 18:01:32 +00003492 return;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003493}
3494
3495/* 2.a.ii. Reduce Block with a terminating barrier */
3496
3497/*!
3498@ingroup SYNCHRONIZATION
3499@param loc source location information
3500@param global_tid global thread number
3501@param num_vars number of items (variables) to be reduced
3502@param reduce_size size of data in bytes to be reduced
3503@param reduce_data pointer to data to be reduced
Jonathan Peyton30419822017-05-12 18:01:32 +00003504@param reduce_func callback function providing reduction operation on two
3505operands and returning result of reduction in lhs_data
Jim Cownie5e8470a2013-09-27 10:38:44 +00003506@param lck pointer to the unique lock data structure
Jonathan Peyton30419822017-05-12 18:01:32 +00003507@result 1 for the master thread, 0 for all other team threads, 2 for all team
3508threads if atomic reduction is needed
Jim Cownie5e8470a2013-09-27 10:38:44 +00003509
3510A blocking reduce that includes an implicit barrier.
3511*/
Jonathan Peyton30419822017-05-12 18:01:32 +00003512kmp_int32 __kmpc_reduce(ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars,
3513 size_t reduce_size, void *reduce_data,
3514 void (*reduce_func)(void *lhs_data, void *rhs_data),
3515 kmp_critical_name *lck) {
3516 KMP_COUNT_BLOCK(REDUCE_wait);
3517 int retval = 0;
3518 PACKED_REDUCTION_METHOD_T packed_reduction_method;
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003519#if OMP_40_ENABLED
3520 kmp_info_t *th;
3521 kmp_team_t *team;
3522 int teams_swapped = 0, task_state;
3523#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003524
Jonathan Peyton30419822017-05-12 18:01:32 +00003525 KA_TRACE(10, ("__kmpc_reduce() enter: called T#%d\n", global_tid));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003526
Jonathan Peyton30419822017-05-12 18:01:32 +00003527 // This initialization should normally be redundant: a reduction clause
3528 // cannot appear on a stand-alone directive, so the runtime is already up.
Jim Cownie5e8470a2013-09-27 10:38:44 +00003529
Jonathan Peyton30419822017-05-12 18:01:32 +00003530 // do not call __kmp_serial_initialize() here; __kmp_parallel_initialize()
3531 // will call it if needed
3532 // (a thread checker may report a false-positive race on this check)
3533 if (!TCR_4(__kmp_init_parallel))
3534 __kmp_parallel_initialize();
Jim Cownie5e8470a2013-09-27 10:38:44 +00003535
Jonathan Peyton30419822017-05-12 18:01:32 +00003536// check correctness of reduce block nesting
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00003537#if KMP_USE_DYNAMIC_LOCK
Jonathan Peyton30419822017-05-12 18:01:32 +00003538 if (__kmp_env_consistency_check)
3539 __kmp_push_sync(global_tid, ct_reduce, loc, NULL, 0);
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00003540#else
Jonathan Peyton30419822017-05-12 18:01:32 +00003541 if (__kmp_env_consistency_check)
3542 __kmp_push_sync(global_tid, ct_reduce, loc, NULL);
Andrey Churbanov5c56fb52015-02-20 18:05:17 +00003543#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003544
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003545#if OMP_40_ENABLED
3546 th = __kmp_thread_from_gtid(global_tid);
3547 teams_swapped = __kmp_swap_teams_for_teams_reduction(th, &team, &task_state);
3548#endif // OMP_40_ENABLED
3549
Jonathan Peyton30419822017-05-12 18:01:32 +00003550 packed_reduction_method = __kmp_determine_reduction_method(
3551 loc, global_tid, num_vars, reduce_size, reduce_data, reduce_func, lck);
3552 __KMP_SET_REDUCTION_METHOD(global_tid, packed_reduction_method);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003553
Jonathan Peyton30419822017-05-12 18:01:32 +00003554 if (packed_reduction_method == critical_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003555
Jonathan Peyton30419822017-05-12 18:01:32 +00003556 __kmp_enter_critical_section_reduce_block(loc, global_tid, lck);
3557 retval = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003558
Jonathan Peyton30419822017-05-12 18:01:32 +00003559 } else if (packed_reduction_method == empty_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003560
Jonathan Peyton30419822017-05-12 18:01:32 +00003561 // usage: if team size == 1, no synchronization is required (Intel
3562 // platforms only)
3563 retval = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003564
Jonathan Peyton30419822017-05-12 18:01:32 +00003565 } else if (packed_reduction_method == atomic_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003566
Jonathan Peyton30419822017-05-12 18:01:32 +00003567 retval = 2;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003568
Jonathan Peyton30419822017-05-12 18:01:32 +00003569 } else if (TEST_REDUCTION_METHOD(packed_reduction_method,
3570 tree_reduce_block)) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003571
Jonathan Peyton30419822017-05-12 18:01:32 +00003572// case tree_reduce_block:
3573// this barrier should be visible to a customer and to the threading profile
3574// tool (it's a terminating barrier on constructs if NOWAIT not specified)
Joachim Protze82e94a52017-11-01 10:08:30 +00003575#if OMPT_SUPPORT
Joachim Protzec5836064b2018-05-28 08:14:58 +00003576 omp_frame_t *ompt_frame;
Joachim Protze82e94a52017-11-01 10:08:30 +00003577 if (ompt_enabled.enabled) {
3578 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
Joachim Protzec255ca72017-11-05 14:11:10 +00003579 if (ompt_frame->enter_frame == NULL)
3580 ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
Joachim Protze82e94a52017-11-01 10:08:30 +00003581 OMPT_STORE_RETURN_ADDRESS(global_tid);
3582 }
3583#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003584#if USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00003585 __kmp_threads[global_tid]->th.th_ident =
3586 loc; // needed for correct notification of frames
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003587#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003588 retval =
3589 __kmp_barrier(UNPACK_REDUCTION_BARRIER(packed_reduction_method),
3590 global_tid, TRUE, reduce_size, reduce_data, reduce_func);
3591 retval = (retval != 0) ? (0) : (1);
Joachim Protze82e94a52017-11-01 10:08:30 +00003592#if OMPT_SUPPORT && OMPT_OPTIONAL
3593 if (ompt_enabled.enabled) {
Joachim Protzec255ca72017-11-05 14:11:10 +00003594 ompt_frame->enter_frame = NULL;
Joachim Protze82e94a52017-11-01 10:08:30 +00003595 }
3596#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003597
Jonathan Peyton30419822017-05-12 18:01:32 +00003598 // all workers except the master should do this pop here
3599 // (no worker other than the master will enter __kmpc_end_reduce())
3600 if (__kmp_env_consistency_check) {
3601 if (retval == 0) { // 0: all other workers; 1: master
3602 __kmp_pop_sync(global_tid, ct_reduce, loc);
3603 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003604 }
3605
Jonathan Peyton30419822017-05-12 18:01:32 +00003606 } else {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003607
Jonathan Peyton30419822017-05-12 18:01:32 +00003608 // should never reach this block
3609 KMP_ASSERT(0); // "unexpected method"
3610 }
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003611#if OMP_40_ENABLED
3612 if (teams_swapped) {
3613 __kmp_restore_swapped_teams(th, team, task_state);
3614 }
3615#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003616
3617 KA_TRACE(10,
3618 ("__kmpc_reduce() exit: called T#%d: method %08x, returns %08x\n",
3619 global_tid, packed_reduction_method, retval));
3620
3621 return retval;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003622}
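// A minimal sketch (guarded out of the build) of the reduce_data/reduce_func
// contract for a multi-variable reduction such as
// "reduction(+:s) reduction(max:m)": reduce_data points at a struct holding
// all privatized copies, and reduce_func folds the rhs copy into the lhs
// copy field by field. The type and field names are hypothetical.
#if 0
typedef struct {
  double s; // partial sum
  int m; // partial max
} red_data_t;

static void red_func(void *lhs_data, void *rhs_data) {
  red_data_t *lhs = (red_data_t *)lhs_data;
  red_data_t *rhs = (red_data_t *)rhs_data;
  lhs->s += rhs->s; // '+' reduction
  if (rhs->m > lhs->m)
    lhs->m = rhs->m; // 'max' reduction
}
// The corresponding call would pass num_vars=2, reduce_size=sizeof(red_data_t),
// reduce_data=&local_copy and reduce_func=red_func.
#endif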
3623
3624/*!
3625@ingroup SYNCHRONIZATION
3626@param loc source location information
3627@param global_tid global thread id.
3628@param lck pointer to the unique lock data structure
3629
3630Finish the execution of a blocking reduce.
Jonathan Peyton30419822017-05-12 18:01:32 +00003631The <tt>lck</tt> pointer must be the same as that used in the corresponding
3632start function.
Jim Cownie5e8470a2013-09-27 10:38:44 +00003633*/
Jonathan Peyton30419822017-05-12 18:01:32 +00003634void __kmpc_end_reduce(ident_t *loc, kmp_int32 global_tid,
3635 kmp_critical_name *lck) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003636
Jonathan Peyton30419822017-05-12 18:01:32 +00003637 PACKED_REDUCTION_METHOD_T packed_reduction_method;
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003638#if OMP_40_ENABLED
3639 kmp_info_t *th;
3640 kmp_team_t *team;
3641 int teams_swapped = 0, task_state;
3642#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003643
Jonathan Peyton30419822017-05-12 18:01:32 +00003644 KA_TRACE(10, ("__kmpc_end_reduce() enter: called T#%d\n", global_tid));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003645
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003646#if OMP_40_ENABLED
3647 th = __kmp_thread_from_gtid(global_tid);
3648 teams_swapped = __kmp_swap_teams_for_teams_reduction(th, &team, &task_state);
3649#endif // OMP_40_ENABLED
3650
Jonathan Peyton30419822017-05-12 18:01:32 +00003651 packed_reduction_method = __KMP_GET_REDUCTION_METHOD(global_tid);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003652
Jonathan Peyton30419822017-05-12 18:01:32 +00003653 // this barrier should be visible to a customer and to the threading profile
3654 // tool (it's a terminating barrier on constructs if NOWAIT not specified)
Jim Cownie5e8470a2013-09-27 10:38:44 +00003655
Jonathan Peyton30419822017-05-12 18:01:32 +00003656 if (packed_reduction_method == critical_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003657
Jonathan Peyton30419822017-05-12 18:01:32 +00003658 __kmp_end_critical_section_reduce_block(loc, global_tid, lck);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003659
Jonathan Peyton30419822017-05-12 18:01:32 +00003660// TODO: implicit barrier: should be exposed
Joachim Protze82e94a52017-11-01 10:08:30 +00003661#if OMPT_SUPPORT
Joachim Protzec5836064b2018-05-28 08:14:58 +00003662 omp_frame_t *ompt_frame;
Joachim Protze82e94a52017-11-01 10:08:30 +00003663 if (ompt_enabled.enabled) {
3664 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
Joachim Protzec255ca72017-11-05 14:11:10 +00003665 if (ompt_frame->enter_frame == NULL)
3666 ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
Joachim Protze82e94a52017-11-01 10:08:30 +00003667 OMPT_STORE_RETURN_ADDRESS(global_tid);
3668 }
3669#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003670#if USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00003671 __kmp_threads[global_tid]->th.th_ident = loc;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003672#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003673 __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL);
Joachim Protze82e94a52017-11-01 10:08:30 +00003674#if OMPT_SUPPORT && OMPT_OPTIONAL
3675 if (ompt_enabled.enabled) {
Joachim Protzec255ca72017-11-05 14:11:10 +00003676 ompt_frame->enter_frame = NULL;
Joachim Protze82e94a52017-11-01 10:08:30 +00003677 }
3678#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003679
Jonathan Peyton30419822017-05-12 18:01:32 +00003680 } else if (packed_reduction_method == empty_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003681
Jonathan Peyton30419822017-05-12 18:01:32 +00003682// usage: if team size==1, no synchronization is required (Intel platforms only)
Jim Cownie5e8470a2013-09-27 10:38:44 +00003683
Jonathan Peyton30419822017-05-12 18:01:32 +00003684// TODO: implicit barrier: should be exposed
Joachim Protze82e94a52017-11-01 10:08:30 +00003685#if OMPT_SUPPORT
Joachim Protzec5836064b2018-05-28 08:14:58 +00003686 omp_frame_t *ompt_frame;
Joachim Protze82e94a52017-11-01 10:08:30 +00003687 if (ompt_enabled.enabled) {
3688 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
Joachim Protzec255ca72017-11-05 14:11:10 +00003689 if (ompt_frame->enter_frame == NULL)
3690 ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
Joachim Protze82e94a52017-11-01 10:08:30 +00003691 OMPT_STORE_RETURN_ADDRESS(global_tid);
3692 }
3693#endif
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003694#if USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00003695 __kmp_threads[global_tid]->th.th_ident = loc;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003696#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003697 __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL);
Joachim Protze82e94a52017-11-01 10:08:30 +00003698#if OMPT_SUPPORT && OMPT_OPTIONAL
3699 if (ompt_enabled.enabled) {
Joachim Protzec255ca72017-11-05 14:11:10 +00003700 ompt_frame->enter_frame = NULL;
Joachim Protze82e94a52017-11-01 10:08:30 +00003701 }
3702#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003703
Jonathan Peyton30419822017-05-12 18:01:32 +00003704 } else if (packed_reduction_method == atomic_reduce_block) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003705
Joachim Protze82e94a52017-11-01 10:08:30 +00003706#if OMPT_SUPPORT
Joachim Protzec5836064b2018-05-28 08:14:58 +00003707 omp_frame_t *ompt_frame;
Joachim Protze82e94a52017-11-01 10:08:30 +00003708 if (ompt_enabled.enabled) {
3709 __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
Joachim Protzec255ca72017-11-05 14:11:10 +00003710 if (ompt_frame->enter_frame == NULL)
3711 ompt_frame->enter_frame = OMPT_GET_FRAME_ADDRESS(1);
Joachim Protze82e94a52017-11-01 10:08:30 +00003712 OMPT_STORE_RETURN_ADDRESS(global_tid);
3713 }
3714#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003715// TODO: implicit barrier: should be exposed
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003716#if USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00003717 __kmp_threads[global_tid]->th.th_ident = loc;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00003718#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003719 __kmp_barrier(bs_plain_barrier, global_tid, FALSE, 0, NULL, NULL);
Joachim Protze82e94a52017-11-01 10:08:30 +00003720#if OMPT_SUPPORT && OMPT_OPTIONAL
3721 if (ompt_enabled.enabled) {
Joachim Protzec255ca72017-11-05 14:11:10 +00003722 ompt_frame->enter_frame = NULL;
Joachim Protze82e94a52017-11-01 10:08:30 +00003723 }
3724#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003725
Jonathan Peyton30419822017-05-12 18:01:32 +00003726 } else if (TEST_REDUCTION_METHOD(packed_reduction_method,
3727 tree_reduce_block)) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003728
Jonathan Peyton30419822017-05-12 18:01:32 +00003729 // only master executes here (master releases all other workers)
3730 __kmp_end_split_barrier(UNPACK_REDUCTION_BARRIER(packed_reduction_method),
3731 global_tid);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003732
Jonathan Peyton30419822017-05-12 18:01:32 +00003733 } else {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003734
Jonathan Peyton30419822017-05-12 18:01:32 +00003735 // should never reach this block
3736 KMP_ASSERT(0); // "unexpected method"
3737 }
Jonas Hahnfelda4ca5252017-12-05 16:51:24 +00003738#if OMP_40_ENABLED
3739 if (teams_swapped) {
3740 __kmp_restore_swapped_teams(th, team, task_state);
3741 }
3742#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00003743
Jonathan Peyton30419822017-05-12 18:01:32 +00003744 if (__kmp_env_consistency_check)
3745 __kmp_pop_sync(global_tid, ct_reduce, loc);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003746
Jonathan Peyton30419822017-05-12 18:01:32 +00003747 KA_TRACE(10, ("__kmpc_end_reduce() exit: called T#%d: method %08x\n",
3748 global_tid, packed_reduction_method));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003749
Jonathan Peyton30419822017-05-12 18:01:32 +00003750 return;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003751}
3752
3753#undef __KMP_GET_REDUCTION_METHOD
3754#undef __KMP_SET_REDUCTION_METHOD
3755
Jonathan Peyton30419822017-05-12 18:01:32 +00003756/* end of interface to fast scalable reduce routines */
Jim Cownie5e8470a2013-09-27 10:38:44 +00003757
Jonathan Peyton30419822017-05-12 18:01:32 +00003758kmp_uint64 __kmpc_get_taskid() {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003759
Jonathan Peyton30419822017-05-12 18:01:32 +00003760 kmp_int32 gtid;
3761 kmp_info_t *thread;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003762
Jonathan Peyton30419822017-05-12 18:01:32 +00003763 gtid = __kmp_get_gtid();
3764 if (gtid < 0) {
3765 return 0;
Jonathan Peytonbd3a7632017-09-27 20:36:27 +00003766 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003767 thread = __kmp_thread_from_gtid(gtid);
3768 return thread->th.th_current_task->td_task_id;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003769
3770} // __kmpc_get_taskid
3771
Jonathan Peyton30419822017-05-12 18:01:32 +00003772kmp_uint64 __kmpc_get_parent_taskid() {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003773
Jonathan Peyton30419822017-05-12 18:01:32 +00003774 kmp_int32 gtid;
3775 kmp_info_t *thread;
3776 kmp_taskdata_t *parent_task;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003777
Jonathan Peyton30419822017-05-12 18:01:32 +00003778 gtid = __kmp_get_gtid();
3779 if (gtid < 0) {
3780 return 0;
Jonathan Peytonbd3a7632017-09-27 20:36:27 +00003781 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003782 thread = __kmp_thread_from_gtid(gtid);
3783 parent_task = thread->th.th_current_task->td_parent;
3784 return (parent_task == NULL ? 0 : parent_task->td_task_id);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003785
3786} // __kmpc_get_parent_taskid
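// A minimal sketch (guarded out of the build) of using the two getters above
// to tag diagnostic output with task ids; trace_current_task is a
// hypothetical name. Both getters return 0 when the calling thread is
// unknown to the runtime.
#if 0
#include <stdio.h>

static void trace_current_task(const char *where) {
  printf("%s: task %llu (parent %llu)\n", where,
         (unsigned long long)__kmpc_get_taskid(),
         (unsigned long long)__kmpc_get_parent_taskid());
}
#endif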
3787
Jonathan Peytondf6818b2016-06-14 17:57:47 +00003788#if OMP_45_ENABLED
Jonathan Peyton71909c52016-03-02 22:42:06 +00003789/*!
3790@ingroup WORK_SHARING
3791@param loc source location information.
3792@param gtid global thread number.
3793@param num_dims number of associated doacross loops.
3794@param dims info on loop bounds.
3795
3796Initialize doacross loop information.
3797The compiler is expected to send inclusive bounds,
3798e.g. for(i=2;i<9;i+=2) gives lo=2, up=8, st=2.
3799*/
Jonathan Peyton30419822017-05-12 18:01:32 +00003800void __kmpc_doacross_init(ident_t *loc, int gtid, int num_dims,
Jonathan Peyton369d72d2018-07-30 17:48:33 +00003801 const struct kmp_dim *dims) {
Jonathan Peyton30419822017-05-12 18:01:32 +00003802 int j, idx;
3803 kmp_int64 last, trace_count;
3804 kmp_info_t *th = __kmp_threads[gtid];
3805 kmp_team_t *team = th->th.th_team;
3806 kmp_uint32 *flags;
3807 kmp_disp_t *pr_buf = th->th.th_dispatch;
3808 dispatch_shared_info_t *sh_buf;
Jonathan Peyton71909c52016-03-02 22:42:06 +00003809
Jonathan Peyton30419822017-05-12 18:01:32 +00003810 KA_TRACE(
3811 20,
3812 ("__kmpc_doacross_init() enter: called T#%d, num dims %d, active %d\n",
3813 gtid, num_dims, !team->t.t_serialized));
3814 KMP_DEBUG_ASSERT(dims != NULL);
3815 KMP_DEBUG_ASSERT(num_dims > 0);
Jonathan Peyton71909c52016-03-02 22:42:06 +00003816
Jonathan Peyton30419822017-05-12 18:01:32 +00003817 if (team->t.t_serialized) {
3818 KA_TRACE(20, ("__kmpc_doacross_init() exit: serialized team\n"));
3819 return; // no dependencies if team is serialized
3820 }
3821 KMP_DEBUG_ASSERT(team->t.t_nproc > 1);
3822 idx = pr_buf->th_doacross_buf_idx++; // Increment index of shared buffer for
3823 // the next loop
3824 sh_buf = &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
Jonathan Peyton71909c52016-03-02 22:42:06 +00003825
Jonathan Peyton30419822017-05-12 18:01:32 +00003826 // Save bounds info into allocated private buffer
3827 KMP_DEBUG_ASSERT(pr_buf->th_doacross_info == NULL);
3828 pr_buf->th_doacross_info = (kmp_int64 *)__kmp_thread_malloc(
3829 th, sizeof(kmp_int64) * (4 * num_dims + 1));
3830 KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
3831 pr_buf->th_doacross_info[0] =
3832 (kmp_int64)num_dims; // first element is number of dimensions
3833 // Also save the address of num_done so it can be accessed later without
3834 // knowing the buffer index
3835 pr_buf->th_doacross_info[1] = (kmp_int64)&sh_buf->doacross_num_done;
3836 pr_buf->th_doacross_info[2] = dims[0].lo;
3837 pr_buf->th_doacross_info[3] = dims[0].up;
3838 pr_buf->th_doacross_info[4] = dims[0].st;
3839 last = 5;
3840 for (j = 1; j < num_dims; ++j) {
3841 kmp_int64
3842 range_length; // To keep ranges of all dimensions but the first dims[0]
3843 if (dims[j].st == 1) { // most common case
3844 // AC: should we care about ranges bigger than LLONG_MAX? (not for now)
3845 range_length = dims[j].up - dims[j].lo + 1;
3846 } else {
3847 if (dims[j].st > 0) {
3848 KMP_DEBUG_ASSERT(dims[j].up > dims[j].lo);
3849 range_length = (kmp_uint64)(dims[j].up - dims[j].lo) / dims[j].st + 1;
3850 } else { // negative increment
3851 KMP_DEBUG_ASSERT(dims[j].lo > dims[j].up);
3852 range_length =
3853 (kmp_uint64)(dims[j].lo - dims[j].up) / (-dims[j].st) + 1;
3854 }
Jonathan Peyton71909c52016-03-02 22:42:06 +00003855 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003856 pr_buf->th_doacross_info[last++] = range_length;
3857 pr_buf->th_doacross_info[last++] = dims[j].lo;
3858 pr_buf->th_doacross_info[last++] = dims[j].up;
3859 pr_buf->th_doacross_info[last++] = dims[j].st;
3860 }
Jonathan Peyton71909c52016-03-02 22:42:06 +00003861
Jonathan Peyton30419822017-05-12 18:01:32 +00003862 // Compute total trip count.
3863 // Start with range of dims[0] which we don't need to keep in the buffer.
3864 if (dims[0].st == 1) { // most common case
3865 trace_count = dims[0].up - dims[0].lo + 1;
3866 } else if (dims[0].st > 0) {
3867 KMP_DEBUG_ASSERT(dims[0].up > dims[0].lo);
3868 trace_count = (kmp_uint64)(dims[0].up - dims[0].lo) / dims[0].st + 1;
3869 } else { // negative increment
3870 KMP_DEBUG_ASSERT(dims[0].lo > dims[0].up);
3871 trace_count = (kmp_uint64)(dims[0].lo - dims[0].up) / (-dims[0].st) + 1;
3872 }
3873 for (j = 1; j < num_dims; ++j) {
3874 trace_count *= pr_buf->th_doacross_info[4 * j + 1]; // use kept ranges
3875 }
3876 KMP_DEBUG_ASSERT(trace_count > 0);
Jonathan Peyton71909c52016-03-02 22:42:06 +00003877
Jonathan Peyton30419822017-05-12 18:01:32 +00003878 // Check that the shared buffer is not still occupied by a previous loop
3879 // (the one with index idx - __kmp_dispatch_num_buffers)
3880 if (idx != sh_buf->doacross_buf_idx) {
3881 // Shared buffer is occupied, wait for it to be free
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00003882 __kmp_wait_yield_4((volatile kmp_uint32 *)&sh_buf->doacross_buf_idx, idx,
3883 __kmp_eq_4, NULL);
Jonathan Peyton30419822017-05-12 18:01:32 +00003884 }
Andrey Churbanov58acafc2017-11-20 16:00:42 +00003885#if KMP_32_BIT_ARCH
Jonathan Peyton30419822017-05-12 18:01:32 +00003886 // Check if we are the first thread: after the CAS the first thread gets 0;
3887 // others get 1 while initialization is in progress, or the allocated pointer.
Andrey Churbanov58acafc2017-11-20 16:00:42 +00003888 // Treat pointer as volatile integer (value 0 or 1) until memory is allocated.
3889 flags = (kmp_uint32 *)KMP_COMPARE_AND_STORE_RET32(
3890 (volatile kmp_int32 *)&sh_buf->doacross_flags, NULL, 1);
3891#else
Jonathan Peyton30419822017-05-12 18:01:32 +00003892 flags = (kmp_uint32 *)KMP_COMPARE_AND_STORE_RET64(
Andrey Churbanov58acafc2017-11-20 16:00:42 +00003893 (volatile kmp_int64 *)&sh_buf->doacross_flags, NULL, 1LL);
3894#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003895 if (flags == NULL) {
3896 // we are the first thread, allocate the array of flags
Andrey Churbanov58acafc2017-11-20 16:00:42 +00003897 size_t size = trace_count / 8 + 8; // in bytes, use single bit per iteration
Jonas Hahnfeld221e7bb2017-11-22 17:15:20 +00003898 flags = (kmp_uint32 *)__kmp_thread_calloc(th, size, 1);
3899 KMP_MB();
3900 sh_buf->doacross_flags = flags;
Andrey Churbanov58acafc2017-11-20 16:00:42 +00003901 } else if (flags == (kmp_uint32 *)1) {
3902#if KMP_32_BIT_ARCH
Jonathan Peyton30419822017-05-12 18:01:32 +00003903 // initialization is still in progress, need to wait
Andrey Churbanov58acafc2017-11-20 16:00:42 +00003904 while (*(volatile kmp_int32 *)&sh_buf->doacross_flags == 1)
3905#else
3906 while (*(volatile kmp_int64 *)&sh_buf->doacross_flags == 1LL)
3907#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003908 KMP_YIELD(TRUE);
Jonas Hahnfeld221e7bb2017-11-22 17:15:20 +00003909 KMP_MB();
3910 } else {
3911 KMP_MB();
Jonathan Peyton30419822017-05-12 18:01:32 +00003912 }
Andrey Churbanov58acafc2017-11-20 16:00:42 +00003913 KMP_DEBUG_ASSERT(sh_buf->doacross_flags > (kmp_uint32 *)1); // check ptr value
Jonathan Peyton30419822017-05-12 18:01:32 +00003914 pr_buf->th_doacross_flags =
3915 sh_buf->doacross_flags; // save private copy in order to not
3916 // touch shared buffer on each iteration
3917 KA_TRACE(20, ("__kmpc_doacross_init() exit: T#%d\n", gtid));
Jonathan Peyton71909c52016-03-02 22:42:06 +00003918}
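// A minimal sketch (guarded out of the build) of how a compiler might fill
// kmp_dim for the example loop in the comment above, for(i=2;i<9;i+=2),
// inside an ordered(1) worksharing loop. Bounds are inclusive, so lo=2,
// up=8, st=2; lowered_doacross_setup is a hypothetical name.
#if 0
static void lowered_doacross_setup(ident_t *loc, int gtid) {
  struct kmp_dim dims[1];
  dims[0].lo = 2; // first iteration value
  dims[0].up = 8; // last iteration value (inclusive)
  dims[0].st = 2; // loop stride
  __kmpc_doacross_init(loc, gtid, /*num_dims=*/1, dims);
}
#endif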
3919
Jonathan Peyton369d72d2018-07-30 17:48:33 +00003920void __kmpc_doacross_wait(ident_t *loc, int gtid, const kmp_int64 *vec) {
Jonathan Peyton30419822017-05-12 18:01:32 +00003921 kmp_int32 shft, num_dims, i;
3922 kmp_uint32 flag;
3923 kmp_int64 iter_number; // iteration number of "collapsed" loop nest
3924 kmp_info_t *th = __kmp_threads[gtid];
3925 kmp_team_t *team = th->th.th_team;
3926 kmp_disp_t *pr_buf;
3927 kmp_int64 lo, up, st;
Jonathan Peyton71909c52016-03-02 22:42:06 +00003928
Jonathan Peyton30419822017-05-12 18:01:32 +00003929 KA_TRACE(20, ("__kmpc_doacross_wait() enter: called T#%d\n", gtid));
3930 if (team->t.t_serialized) {
3931 KA_TRACE(20, ("__kmpc_doacross_wait() exit: serialized team\n"));
3932 return; // no dependencies if team is serialized
3933 }
Jonathan Peyton71909c52016-03-02 22:42:06 +00003934
Jonathan Peyton30419822017-05-12 18:01:32 +00003935 // calculate sequential iteration number and check out-of-bounds condition
3936 pr_buf = th->th.th_dispatch;
3937 KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
3938 num_dims = pr_buf->th_doacross_info[0];
3939 lo = pr_buf->th_doacross_info[2];
3940 up = pr_buf->th_doacross_info[3];
3941 st = pr_buf->th_doacross_info[4];
3942 if (st == 1) { // most common case
3943 if (vec[0] < lo || vec[0] > up) {
3944 KA_TRACE(20, ("__kmpc_doacross_wait() exit: T#%d iter %lld is out of "
3945 "bounds [%lld,%lld]\n",
3946 gtid, vec[0], lo, up));
3947 return;
Jonathan Peyton71909c52016-03-02 22:42:06 +00003948 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003949 iter_number = vec[0] - lo;
3950 } else if (st > 0) {
3951 if (vec[0] < lo || vec[0] > up) {
3952 KA_TRACE(20, ("__kmpc_doacross_wait() exit: T#%d iter %lld is out of "
3953 "bounds [%lld,%lld]\n",
3954 gtid, vec[0], lo, up));
3955 return;
Jonathan Peyton71909c52016-03-02 22:42:06 +00003956 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003957 iter_number = (kmp_uint64)(vec[0] - lo) / st;
3958 } else { // negative increment
3959 if (vec[0] > lo || vec[0] < up) {
3960 KA_TRACE(20, ("__kmpc_doacross_wait() exit: T#%d iter %lld is out of "
3961 "bounds [%lld,%lld]\n",
3962 gtid, vec[0], lo, up));
3963 return;
Jonathan Peyton71909c52016-03-02 22:42:06 +00003964 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003965 iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
3966 }
3967 for (i = 1; i < num_dims; ++i) {
3968 kmp_int64 iter, ln;
3969 kmp_int32 j = i * 4;
3970 ln = pr_buf->th_doacross_info[j + 1];
3971 lo = pr_buf->th_doacross_info[j + 2];
3972 up = pr_buf->th_doacross_info[j + 3];
3973 st = pr_buf->th_doacross_info[j + 4];
3974 if (st == 1) {
3975 if (vec[i] < lo || vec[i] > up) {
3976 KA_TRACE(20, ("__kmpc_doacross_wait() exit: T#%d iter %lld is out of "
3977 "bounds [%lld,%lld]\n",
3978 gtid, vec[i], lo, up));
3979 return;
3980 }
3981 iter = vec[i] - lo;
3982 } else if (st > 0) {
3983 if (vec[i] < lo || vec[i] > up) {
3984 KA_TRACE(20, ("__kmpc_doacross_wait() exit: T#%d iter %lld is out of "
3985 "bounds [%lld,%lld]\n",
3986 gtid, vec[i], lo, up));
3987 return;
3988 }
3989 iter = (kmp_uint64)(vec[i] - lo) / st;
3990 } else { // st < 0
3991 if (vec[i] > lo || vec[i] < up) {
3992 KA_TRACE(20, ("__kmpc_doacross_wait() exit: T#%d iter %lld is out of "
3993 "bounds [%lld,%lld]\n",
3994 gtid, vec[i], lo, up));
3995 return;
3996 }
3997 iter = (kmp_uint64)(lo - vec[i]) / (-st);
3998 }
3999 iter_number = iter + ln * iter_number;
4000 }
4001 shft = iter_number % 32; // use 32-bit granularity
4002 iter_number >>= 5; // divided by 32
4003 flag = 1 << shft;
4004 while ((flag & pr_buf->th_doacross_flags[iter_number]) == 0) {
4005 KMP_YIELD(TRUE);
4006 }
Jonas Hahnfeld221e7bb2017-11-22 17:15:20 +00004007 KMP_MB();
Jonathan Peyton30419822017-05-12 18:01:32 +00004008 KA_TRACE(20,
4009 ("__kmpc_doacross_wait() exit: T#%d wait for iter %lld completed\n",
4010 gtid, (iter_number << 5) + shft));
Jonathan Peyton71909c52016-03-02 22:42:06 +00004011}
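// Worked example of the flattening above (illustrative numbers): for a
// two-dimensional nest with lo=0, st=1 in both dimensions and a kept range
// ln=10 for the second dimension, the vector vec={3,7} flattens to
// iter_number = 7 + 10 * 3 = 37. Then shft = 37 % 32 = 5, iter_number >>= 5
// selects word 1, and flag = 1 << 5 = 0x20, i.e. this wait spins on bit 5 of
// th_doacross_flags[1] until the matching __kmpc_doacross_post() sets it.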
4012
Jonathan Peyton369d72d2018-07-30 17:48:33 +00004013void __kmpc_doacross_post(ident_t *loc, int gtid, const kmp_int64 *vec) {
Jonathan Peyton30419822017-05-12 18:01:32 +00004014 kmp_int32 shft, num_dims, i;
4015 kmp_uint32 flag;
4016 kmp_int64 iter_number; // iteration number of "collapsed" loop nest
4017 kmp_info_t *th = __kmp_threads[gtid];
4018 kmp_team_t *team = th->th.th_team;
4019 kmp_disp_t *pr_buf;
4020 kmp_int64 lo, st;
Jonathan Peyton71909c52016-03-02 22:42:06 +00004021
Jonathan Peyton30419822017-05-12 18:01:32 +00004022 KA_TRACE(20, ("__kmpc_doacross_post() enter: called T#%d\n", gtid));
4023 if (team->t.t_serialized) {
4024 KA_TRACE(20, ("__kmpc_doacross_post() exit: serialized team\n"));
4025 return; // no dependencies if team is serialized
4026 }
Jonathan Peyton71909c52016-03-02 22:42:06 +00004027
Jonathan Peyton30419822017-05-12 18:01:32 +00004028 // calculate sequential iteration number (same as in "wait" but no
4029 // out-of-bounds checks)
4030 pr_buf = th->th.th_dispatch;
4031 KMP_DEBUG_ASSERT(pr_buf->th_doacross_info != NULL);
4032 num_dims = pr_buf->th_doacross_info[0];
4033 lo = pr_buf->th_doacross_info[2];
4034 st = pr_buf->th_doacross_info[4];
4035 if (st == 1) { // most common case
4036 iter_number = vec[0] - lo;
4037 } else if (st > 0) {
4038 iter_number = (kmp_uint64)(vec[0] - lo) / st;
4039 } else { // negative increment
4040 iter_number = (kmp_uint64)(lo - vec[0]) / (-st);
4041 }
4042 for (i = 1; i < num_dims; ++i) {
4043 kmp_int64 iter, ln;
4044 kmp_int32 j = i * 4;
4045 ln = pr_buf->th_doacross_info[j + 1];
4046 lo = pr_buf->th_doacross_info[j + 2];
4047 st = pr_buf->th_doacross_info[j + 4];
4048 if (st == 1) {
4049 iter = vec[i] - lo;
4050 } else if (st > 0) {
4051 iter = (kmp_uint64)(vec[i] - lo) / st;
4052 } else { // st < 0
4053 iter = (kmp_uint64)(lo - vec[i]) / (-st);
Jonathan Peyton71909c52016-03-02 22:42:06 +00004054 }
Jonathan Peyton30419822017-05-12 18:01:32 +00004055 iter_number = iter + ln * iter_number;
4056 }
4057 shft = iter_number % 32; // use 32-bit granularity
4058 iter_number >>= 5; // divided by 32
4059 flag = 1 << shft;
Jonas Hahnfeld221e7bb2017-11-22 17:15:20 +00004060 KMP_MB();
Jonathan Peyton30419822017-05-12 18:01:32 +00004061 if ((flag & pr_buf->th_doacross_flags[iter_number]) == 0)
Andrey Churbanov5ba90c72017-07-17 09:03:14 +00004062 KMP_TEST_THEN_OR32(&pr_buf->th_doacross_flags[iter_number], flag);
Jonathan Peyton30419822017-05-12 18:01:32 +00004063 KA_TRACE(20, ("__kmpc_doacross_post() exit: T#%d iter %lld posted\n", gtid,
4064 (iter_number << 5) + shft));
Jonathan Peyton71909c52016-03-02 22:42:06 +00004065}
4066
Jonathan Peyton30419822017-05-12 18:01:32 +00004067void __kmpc_doacross_fini(ident_t *loc, int gtid) {
Jonas Hahnfeld3ffca792018-01-07 16:54:36 +00004068 kmp_int32 num_done;
Jonathan Peyton30419822017-05-12 18:01:32 +00004069 kmp_info_t *th = __kmp_threads[gtid];
4070 kmp_team_t *team = th->th.th_team;
4071 kmp_disp_t *pr_buf = th->th.th_dispatch;
Jonathan Peyton71909c52016-03-02 22:42:06 +00004072
Jonathan Peyton30419822017-05-12 18:01:32 +00004073 KA_TRACE(20, ("__kmpc_doacross_fini() enter: called T#%d\n", gtid));
4074 if (team->t.t_serialized) {
4075 KA_TRACE(20, ("__kmpc_doacross_fini() exit: serialized team %p\n", team));
4076 return; // nothing to do
4077 }
Jonas Hahnfeld3ffca792018-01-07 16:54:36 +00004078 num_done = KMP_TEST_THEN_INC32((kmp_int32 *)pr_buf->th_doacross_info[1]) + 1;
Jonathan Peyton30419822017-05-12 18:01:32 +00004079 if (num_done == th->th.th_team_nproc) {
4080 // we are the last thread, need to free shared resources
4081 int idx = pr_buf->th_doacross_buf_idx - 1;
4082 dispatch_shared_info_t *sh_buf =
4083 &team->t.t_disp_buffer[idx % __kmp_dispatch_num_buffers];
4084 KMP_DEBUG_ASSERT(pr_buf->th_doacross_info[1] ==
4085 (kmp_int64)&sh_buf->doacross_num_done);
Jonas Hahnfeld3ffca792018-01-07 16:54:36 +00004086 KMP_DEBUG_ASSERT(num_done == sh_buf->doacross_num_done);
Jonathan Peyton30419822017-05-12 18:01:32 +00004087 KMP_DEBUG_ASSERT(idx == sh_buf->doacross_buf_idx);
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00004088 __kmp_thread_free(th, CCAST(kmp_uint32 *, sh_buf->doacross_flags));
Jonathan Peyton30419822017-05-12 18:01:32 +00004089 sh_buf->doacross_flags = NULL;
4090 sh_buf->doacross_num_done = 0;
4091 sh_buf->doacross_buf_idx +=
4092 __kmp_dispatch_num_buffers; // free buffer for future re-use
4093 }
4094 // free private resources (need to keep buffer index forever)
Jonathan Peyton369d72d2018-07-30 17:48:33 +00004095 pr_buf->th_doacross_flags = NULL;
Jonathan Peyton30419822017-05-12 18:01:32 +00004096 __kmp_thread_free(th, (void *)pr_buf->th_doacross_info);
4097 pr_buf->th_doacross_info = NULL;
4098 KA_TRACE(20, ("__kmpc_doacross_fini() exit: T#%d\n", gtid));
Jonathan Peyton71909c52016-03-02 22:42:06 +00004099}
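// A minimal sketch (guarded out of the build) of the full doacross lifecycle
// a compiler emits for "#pragma omp for ordered(1)" with
// "depend(sink: i-1)" / "depend(source)" annotations; the chunking and the
// name lowered_doacross_loop are hypothetical.
#if 0
static void lowered_doacross_loop(ident_t *loc, int gtid, kmp_int64 lb,
                                  kmp_int64 ub) {
  struct kmp_dim dim = {lb, ub, 1}; // inclusive bounds, stride 1
  __kmpc_doacross_init(loc, gtid, /*num_dims=*/1, &dim);
  for (kmp_int64 i = lb; i <= ub; ++i) { // really this thread's chunk only
    kmp_int64 sink = i - 1;
    __kmpc_doacross_wait(loc, gtid, &sink); // no-op if sink is out of bounds
    // ... ordered loop body ...
    __kmpc_doacross_post(loc, gtid, &i); // mark iteration i as completed
  }
  __kmpc_doacross_fini(loc, gtid);
}
#endif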
4100#endif
4101
Jonathan Peyton78f977f2018-03-20 21:18:17 +00004102#if OMP_50_ENABLED
Andrey Churbanov2d91a8a2018-03-22 18:51:51 +00004103int __kmpc_get_target_offload(void) {
4104 if (!__kmp_init_serial) {
4105 __kmp_serial_initialize();
4106 }
4107 return __kmp_target_offload;
4108}
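// Illustrative note: __kmp_target_offload mirrors the OMP_TARGET_OFFLOAD
// environment setting. Assuming the kmp_target_offload_kind enumerators from
// kmp.h (tgt_disabled/tgt_default/tgt_mandatory), a caller could branch on
// the result, e.g.:
#if 0
if (__kmpc_get_target_offload() == tgt_mandatory) {
  // per OpenMP 5.0, a failure to offload must be treated as an error
}
#endif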
Jonathan Peyton78f977f2018-03-20 21:18:17 +00004109#endif // OMP_50_ENABLED
4110
Jim Cownie5e8470a2013-09-27 10:38:44 +00004111// end of file //