/*
 * kmp_gsupport.c
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#if defined(__x86_64) || defined (__powerpc64__) || defined(__aarch64__)
# define KMP_I8
#endif
#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef __cplusplus
    extern "C" {
#endif // __cplusplus

#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };

#include "kmp_ftn_os.h"

void
xexpand(KMP_API_NAME_GOMP_BARRIER)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_barrier");
    KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
    __kmpc_barrier(&loc, gtid);
}


//
// Mutual exclusion
//

//
// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any object that
// references it. We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
//
extern kmp_critical_name *__kmp_unnamed_critical_addr;
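
// Illustrative sketch (an assumption about typical gcc output, not part of
// this file): for an unnamed "#pragma omp critical" the compiler emits a
// call pair such as
//
//     GOMP_critical_start();
//     /* ... guarded statements ... */
//     GOMP_critical_end();
//
// while a named critical ("#pragma omp critical (tag)") passes the address
// of a per-name lock word to GOMP_critical_name_start()/..._end(). The entry
// points below map those calls onto __kmpc_critical()/__kmpc_end_critical(),
// using __kmp_unnamed_critical_addr for the unnamed case.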


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_start");
    KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_end");
    KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_name_start");
    KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_name_end");
    KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


//
// The Gnu codegen tries to use locked operations to perform atomic updates
// inline. If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
//

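// Illustrative sketch (assumption about typical gcc output, not taken from
// this file): when "#pragma omp atomic" cannot be lowered to a hardware
// atomic instruction (e.g. a long double update), gcc falls back to
//
//     GOMP_atomic_start();
//     x = x + expr;        /* plain load/modify/store */
//     GOMP_atomic_end();
//
// so the entry points below simply acquire and release the single global
// __kmp_atomic_lock.
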
void
xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
    __ompt_thread_assign_wait_id(0);
#endif

    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}


int
xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_start");
    KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
    // workshare when USE_CHECKS is defined. We need to avoid the push,
    // as there is no corresponding GOMP_single_end() call.
    //
    return __kmp_enter_single(gtid, &loc, FALSE);
}


void *
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the first thread to enter, return NULL. The generated
    // code will then call GOMP_single_copy_end() for this thread only,
    // with the copyprivate data pointer as an argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the first thread to set the copyprivate data pointer,
    // and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Retrieve the value of the copyprivate data pointer, and wait for all
    // threads to do likewise, then return.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}


void
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Set the copyprivate data pointer for the team, then hit the barrier
    // so that the other threads will continue on and read it. Hit another
    // barrier before continuing, so that they know that the copyprivate
    // data pointer has been propagated to all threads before trying to
    // reuse the t_copypriv_data field.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}

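// Illustrative sketch (assumption about typical gcc output): for
// "#pragma omp single copyprivate(x)" each thread executes roughly
//
//     void *p = GOMP_single_copy_start();
//     if (p == NULL) {                  /* the chosen thread runs the body */
//         body();
//         GOMP_single_copy_end(&x);     /* ... and publishes its copy */
//     } else {
//         x = *(type_of_x *)p;          /* everyone else copies it in */
//     }
//
// which is why GOMP_single_copy_start()/..._end() above bracket the
// broadcast through t_copypriv_data with two plain barriers.
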

void
xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_ordered_start");
    KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
    __kmpc_ordered(&loc, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_ordered_end");
    KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
    __kmpc_end_ordered(&loc, gtid);
}


//
// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).
//

#if KMP_ARCH_X86 || KMP_ARCH_ARM
# define KMP_DISPATCH_INIT          __kmp_aux_dispatch_init_4
# define KMP_DISPATCH_FINI_CHUNK    __kmp_aux_dispatch_fini_chunk_4
# define KMP_DISPATCH_NEXT          __kmpc_dispatch_next_4
#else
# define KMP_DISPATCH_INIT          __kmp_aux_dispatch_init_8
# define KMP_DISPATCH_FINI_CHUNK    __kmp_aux_dispatch_fini_chunk_8
# define KMP_DISPATCH_NEXT          __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

# define KMP_DISPATCH_INIT_ULL          __kmp_aux_dispatch_init_8u
# define KMP_DISPATCH_FINI_CHUNK_ULL    __kmp_aux_dispatch_fini_chunk_8u
# define KMP_DISPATCH_NEXT_ULL          __kmpc_dispatch_next_8u


//
// The parallel construct
//

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
  void *data)
{
#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        // get pointer to thread data structure
        thr = __kmp_threads[*gtid];

        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // restore enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}


#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
  void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
  enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
      schedule != kmp_sch_static);

#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        thr = __kmp_threads[*gtid];
        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    //
    // Now invoke the microtask.
    //
    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // reset enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}


#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), microtask_t wrapper, int argc,...)
{
    int rc;
    kmp_info_t *thr = __kmp_threads[gtid];
    kmp_team_t *team = thr->th.th_team;
    int tid = __kmp_tid_from_gtid(gtid);

    va_list ap;
    va_start(ap, argc);

    rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc,
#if OMPT_SUPPORT
      VOLATILE_CAST(void *) unwrapped_task,
#endif
      wrapper, __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
      &ap
#else
      ap
#endif
      );

    va_end(ap);

    if (rc) {
        __kmp_run_before_invoked_task(gtid, tid, thr, team);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
#if OMPT_TRACE
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);

        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                team_info->parallel_id, task_info->task_id);
        }
#endif
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
    }
#endif
}

static void
__kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *))
{
    __kmp_serialized_parallel(loc, gtid);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        ompt_task_id_t ompt_task_id = __ompt_get_task_id_internal(0);
        ompt_frame_t *ompt_frame = __ompt_get_task_frame_internal(0);
        kmp_info_t *thr = __kmp_threads[gtid];

        ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(gtid);
        ompt_task_id_t my_ompt_task_id = __ompt_task_id_new(gtid);

        ompt_frame->exit_runtime_frame = NULL;

        // parallel region callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
            int team_size = 1;
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
                ompt_task_id, ompt_frame, ompt_parallel_id,
                team_size, (void *) task);
        }

        // set up lightweight task
        ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *)
            __kmp_allocate(sizeof(ompt_lw_taskteam_t));
        __ompt_lw_taskteam_init(lwt, thr, gtid, (void *) task, ompt_parallel_id);
        lwt->ompt_task_info.task_id = my_ompt_task_id;
        lwt->ompt_task_info.frame.exit_runtime_frame = 0;
        __ompt_lw_taskteam_link(lwt, thr);

#if OMPT_TRACE
        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                ompt_parallel_id, my_ompt_task_id);
        }
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
#endif
    }
#endif
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_start");
    KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif
}

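// Illustrative sketch (assumption about typical pre-GOMP_4.0 gcc output):
// a "#pragma omp parallel" region is outlined into a subfunction, and the
// encountering thread executes roughly
//
//     GOMP_parallel_start(subfunction, &shared_data, num_threads);
//     subfunction(&shared_data);   /* the encountering thread runs its share */
//     GOMP_parallel_end();
//
// so GOMP_parallel_start() above only forks the extra workers, and the join
// happens in GOMP_parallel_end() below.
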
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
{
    int gtid = __kmp_get_gtid();
    kmp_info_t *thr;

    thr = __kmp_threads[gtid];

    MKLOC(loc, "GOMP_parallel_end");
    KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));


#if OMPT_SUPPORT
    ompt_parallel_id_t parallel_id;
    ompt_frame_t *ompt_frame = NULL;

    if (ompt_status & ompt_status_track) {
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        parallel_id = team_info->parallel_id;

        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);

#if OMPT_TRACE
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
            ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
                parallel_id, task_info->task_id);
        }
#endif

        // unlink if necessary. no-op if there is not a lightweight task.
        ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr);
        // GOMP allocates/frees lwt since it can't be kept on the stack
        if (lwt) __kmp_free(lwt);
    }
#endif

    if (! __kmp_threads[gtid]->th.th_team->t.t_serialized) {
        kmp_info_t *thr = __kmp_threads[gtid];
        __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);
        __kmp_join_call(&loc, gtid);
    }
    else {
        __kmpc_end_serialized_parallel(&loc, gtid);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            if ((ompt_status == ompt_status_track_callback) &&
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
                ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
                    parallel_id, task_info->task_id);
            }

            thr->th.ompt_thread_info.state =
                (((thr->th.th_team)->t.t_serialized) ?
                ompt_state_work_serial : ompt_state_work_parallel);
        }
#endif

    }
}


//
// Loop worksharing constructs
//

//
// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the "ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value. We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
//

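// Worked example (a sketch, not an exact trace): for a chunked dynamic loop
// "for (i = 0; i < 10; i++)" gcc emits roughly
//
//     if (GOMP_loop_dynamic_start(0, 10, 1, chunk, &lb, &ub))
//         do { for (i = lb; i < ub; i++) body(i); }
//         while (GOMP_loop_dynamic_next(&lb, &ub));
//     GOMP_loop_end();
//
// The start/next entry points below convert gcc's exclusive bound (10) to
// the inclusive bound the dispatcher wants (9) via "ub - 1", and convert the
// dispatcher's inclusive *p_ub back to an exclusive one via "*p_ub += 1".
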
#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \
        long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
          (kmp_int *)p_ub, (kmp_int *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })


void
xexpand(KMP_API_NAME_GOMP_LOOP_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}


//
// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4
//

#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
        unsigned long long str, unsigned long long chunk_sz, \
        unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        long long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
        unsigned long long str, unsigned long long *p_lb, \
        unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        unsigned long long stride; \
        unsigned long long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT((long long)stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
          (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })


//
// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).
//

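// Illustrative sketch (assumption about typical pre-GOMP_4.0 gcc output):
// "#pragma omp parallel for schedule(dynamic, c)" combines the two previous
// protocols, roughly
//
//     GOMP_parallel_loop_dynamic_start(subfn, &data, nthr, lb, ub, str, c);
//     subfn(&data);            /* iterates via GOMP_loop_dynamic_next() */
//     GOMP_parallel_end();
//
// so the *_START entry points below both fork the team and prime the
// dispatcher with KMP_DISPATCH_INIT for the encountering thread.
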
#define PARALLEL_LOOP_START(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
        long lb, long ub, long str, long chunk_sz) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }


PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START), kmp_sch_static)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START), kmp_sch_guided_chunked)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START), kmp_sch_runtime)


//
// Tasking constructs
//

void
xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
  long arg_size, long arg_align, int if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    // The low-order bit is the "tied" flag
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    input_flags->native = 1;
    // __kmp_task_alloc() sets up all other flags

    if (! if_cond) {
        arg_size = 0;
    }

    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
      sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
      (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        if (arg_align > 0) {
            task->shareds = (void *)((((size_t)task->shareds)
              + arg_align - 1) / arg_align * arg_align);
        }
        //else error??

        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            KMP_MEMCPY(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
#if OMPT_SUPPORT
        ompt_thread_info_t oldInfo;
        kmp_info_t *thread;
        kmp_taskdata_t *taskdata;
        if (ompt_status & ompt_status_track) {
            // Store the threads states and restore them after the task
            thread = __kmp_threads[ gtid ];
            taskdata = KMP_TASK_TO_TASKDATA(task);
            oldInfo = thread->th.ompt_thread_info;
            thread->th.ompt_thread_info.wait_id = 0;
            thread->th.ompt_thread_info.state = ompt_state_work_parallel;
            taskdata->ompt_task_info.frame.exit_runtime_frame =
                __builtin_frame_address(0);
        }
#endif

        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            thread->th.ompt_thread_info = oldInfo;
            taskdata->ompt_task_info.frame.exit_runtime_frame = 0;
        }
#endif
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}

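// Illustrative sketch (assumption about typical gcc output): for
// "#pragma omp task firstprivate(x)" gcc packs the captured values into a
// local struct and calls roughly
//
//     GOMP_task(task_fn, &captured, copy_fn_or_NULL,
//               sizeof captured, __alignof__ captured,
//               if_cond, flags /* bit 0 = tied */);
//
// GOMP_task() above copies (or copy-constructs, via copy_func) that block
// into the kmp_task_t's shareds area so a deferred task can outlive the
// caller's stack frame.
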

void
xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
{
    MKLOC(loc, "GOMP_taskwait");
    int gtid = __kmp_entry_gtid();

    KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

    __kmpc_omp_taskwait(&loc, gtid);

    KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}


//
// Sections worksharing constructs
//

//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
//

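// Worked example (a sketch): for a construct with three sections gcc emits
// roughly
//
//     for (id = GOMP_sections_start(3); id != 0; id = GOMP_sections_next())
//         switch (id) { case 1: s1(); break; case 2: s2(); break;
//                       case 3: s3(); break; }
//     GOMP_sections_end();
//
// The entry points below hand out ids 1..3 from a dynamically scheduled
// dispatch over [1, count] and return 0 once the sections are exhausted.
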
unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_sections_start");
    KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}


unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_next");
    KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_sections_start");
    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void
xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void)
{
    KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
    return;
}

#if OMP_40_ENABLED // these are new GOMP_4.0 entry points

void
xexpand(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), void *data, unsigned num_threads, unsigned int flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel");
    KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }
    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
}

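// Note the contrast with the GOMP_1.0 protocol earlier in this file: with
// GOMP_4.0 codegen (gcc 4.9 and later, as an assumption about typical
// output) the compiler emits a single call, roughly
//
//     GOMP_parallel(subfunction, &shared_data, num_threads, flags);
//
// and the runtime itself runs the encountering thread's share (task(data))
// and performs the join via GOMP_parallel_end(), exactly as GOMP_parallel()
// does just above. The flags argument is interpreted here as a proc_bind
// policy and pushed via __kmp_push_proc_bind().
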
void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count, unsigned flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel_sections");
    KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
    KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}

#define PARALLEL_LOOP(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
        long lb, long ub, long str, long chunk_sz, unsigned flags) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            if (flags != 0) { \
                __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        task(data); \
        xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }

PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), kmp_sch_static)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), kmp_sch_dynamic_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), kmp_sch_guided_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), kmp_sch_runtime)


void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_start");
    KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));

    __kmpc_taskgroup(&loc, gtid);

    return;
}

void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_end");
    KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));

    __kmpc_end_taskgroup(&loc, gtid);

    return;
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
    kmp_int32 cncl_kind = 0;
    switch(gomp_kind) {
    case 1:
        cncl_kind = cancel_parallel;
        break;
    case 2:
        cncl_kind = cancel_loop;
        break;
    case 4:
        cncl_kind = cancel_sections;
        break;
    case 8:
        cncl_kind = cancel_taskgroup;
        break;
    }
    return cncl_kind;
}

bool
xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancellation_point");
    KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
}

bool
xexpand(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    KMP_FATAL(NoGompCancellation);
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_barrier_cancel");
    KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    } else {
        return FALSE;
    }

    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancel");
    KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    if(do_cancel == FALSE) {
        return xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
    } else {
        return __kmpc_cancel(&loc, gtid, cncl_kind);
    }
}

bool
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_end_cancel");
    KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_loop_end_cancel");
    KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

// All target functions are empty as of 2014-05-29
void
xexpand(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn) (void *), const void *openmp_target,
  size_t mapnum, void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_DATA)(int device, const void *openmp_target, size_t mapnum,
  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_END_DATA)(void)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_UPDATE)(int device, const void *openmp_target, size_t mapnum,
  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, unsigned int thread_limit)
{
    return;
}
#endif // OMP_40_ENABLED

/*
  The following sections of code create aliases for the GOMP_* functions,
  then create versioned symbols using the assembler directive .symver.
  This is only pertinent for an ELF .so library.
  xaliasify and xversionify are defined in kmp_ftn_os.h
*/

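// Illustrative sketch (an assumption about the general mechanism, not the
// exact macro expansion in kmp_ftn_os.h): binding an alias of an entry point
// to a version node with gas looks roughly like
//
//     __asm__(".symver GOMP_barrier_10, GOMP_barrier@@GOMP_1.0");
//
// so binaries that were linked against libgomp's versioned symbols still
// resolve when this library is substituted for it.
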
#ifdef KMP_USE_VERSION_SYMBOLS

// GOMP_1.0 aliases
xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10);
xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_BARRIER, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10);

// GOMP_2.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASK, 20);
xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20);

// GOMP_3.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30);

// GOMP_4.0 aliases
// The GOMP_parallel* entry points below aren't OpenMP 4.0 related.
#if OMP_40_ENABLED
xaliasify(KMP_API_NAME_GOMP_PARALLEL, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_START, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_END, 40);
xaliasify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40);
xaliasify(KMP_API_NAME_GOMP_TEAMS, 40);
#endif

// GOMP_1.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols
#if OMP_40_ENABLED
xversionify(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
#endif

#endif // KMP_USE_VERSION_SYMBOLS

#ifdef __cplusplus
    } //extern "C"
#endif // __cplusplus