/*
 * kmp_gsupport.c
 */


//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#if defined(__x86_64) || defined (__powerpc64__) || defined(__aarch64__)
# define KMP_I8
#endif
#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef __cplusplus
    extern "C" {
#endif // __cplusplus

#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };

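// A note on the MKLOC helper above: each GOMP_* wrapper in this file uses it
// to declare a file-static ident_t carrying an "unknown" source-location
// string, e.g. MKLOC(loc, "GOMP_barrier"), and then passes &loc to the
// corresponding __kmpc_* / __kmp_* entry point.
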
#include "kmp_ftn_os.h"

void
xexpand(KMP_API_NAME_GOMP_BARRIER)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_barrier");
    KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
    __kmpc_barrier(&loc, gtid);
}

//
// Mutual exclusion
//

//
// The symbol that icc/ifort generates for unnamed critical sections
// - .gomp_critical_user_ - is defined using .comm in any objects that
// reference it. We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
//
extern kmp_critical_name *__kmp_unnamed_critical_addr;


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_start");
    KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_end");
    KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_name_start");
    KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_name_end");
    KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


//
// The Gnu codegen tries to use locked operations to perform atomic updates
// inline. If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
//

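// Illustrative sketch (not generated code verbatim): for an atomic update the
// target cannot perform with an inline locked instruction, e.g. on a long
// double, gcc is expected to emit roughly
//     GOMP_atomic_start();  x += expr;  GOMP_atomic_end();
// so the pair below simply brackets the update with the global atomic lock.
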
void
xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
    __ompt_thread_assign_wait_id(0);
#endif

    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}


int
xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_start");
    KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
    // workshare when USE_CHECKS is defined. We need to avoid the push,
    // as there is no corresponding GOMP_single_end() call.
    //
    return __kmp_enter_single(gtid, &loc, FALSE);
}


void *
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the first thread to enter, return NULL. The generated
    // code will then call GOMP_single_copy_end() for this thread only,
    // with the copyprivate data pointer as an argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the first thread to set the copyprivate data pointer,
    // and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Retrieve the value of the copyprivate data pointer, and wait for all
    // threads to do likewise, then return.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}


void
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Set the copyprivate data pointer for the team, then hit the barrier
    // so that the other threads will continue on and read it. Hit another
    // barrier before continuing, so that they know that the copyprivate
    // data pointer has been propagated to all threads before trying to
    // reuse the t_copypriv_data field.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}


void
xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_ordered_start");
    KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
    __kmpc_ordered(&loc, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_ordered_end");
    KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
    __kmpc_end_ordered(&loc, gtid);
}

//
// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).
//

#if KMP_ARCH_X86 || KMP_ARCH_ARM
# define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
# define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
# define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
#else
# define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
# define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
# define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

# define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
# define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
# define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u

//
// The parallel construct
//

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
  void *data)
{
#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        // get pointer to thread data structure
        thr = __kmp_threads[*gtid];

        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // restore enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
  void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
  enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
      schedule != kmp_sch_static);

#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        thr = __kmp_threads[*gtid];
        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    //
    // Now invoke the microtask.
    //
    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // reset enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), microtask_t wrapper, int argc,...)
{
    int rc;
    kmp_info_t *thr = __kmp_threads[gtid];
    kmp_team_t *team = thr->th.th_team;
    int tid = __kmp_tid_from_gtid(gtid);

    va_list ap;
    va_start(ap, argc);

    rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc,
#if OMPT_SUPPORT
      VOLATILE_CAST(void *) unwrapped_task,
#endif
      wrapper, __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
      &ap
#else
      ap
#endif
      );

    va_end(ap);

    if (rc) {
        __kmp_run_before_invoked_task(gtid, tid, thr, team);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
#if OMPT_TRACE
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);

        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                team_info->parallel_id, task_info->task_id);
        }
#endif
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
    }
#endif
}

static void
__kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *))
{
    __kmp_serialized_parallel(loc, gtid);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        ompt_task_id_t ompt_task_id = __ompt_get_task_id_internal(0);
        ompt_frame_t *ompt_frame = __ompt_get_task_frame_internal(0);
        kmp_info_t *thr = __kmp_threads[gtid];

        ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(gtid);
        ompt_task_id_t my_ompt_task_id = __ompt_task_id_new(gtid);

        ompt_frame->exit_runtime_frame = NULL;

        // parallel region callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
            int team_size = 1;
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
                ompt_task_id, ompt_frame, ompt_parallel_id,
                team_size, (void *) task,
                OMPT_INVOKER(fork_context_gnu));
        }

        // set up lightweight task
        ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *)
            __kmp_allocate(sizeof(ompt_lw_taskteam_t));
        __ompt_lw_taskteam_init(lwt, thr, gtid, (void *) task, ompt_parallel_id);
        lwt->ompt_task_info.task_id = my_ompt_task_id;
        lwt->ompt_task_info.frame.exit_runtime_frame = 0;
        __ompt_lw_taskteam_link(lwt, thr);

#if OMPT_TRACE
        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                ompt_parallel_id, my_ompt_task_id);
        }
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
#endif
    }
#endif
}

void
xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_start");
    KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif
}

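// Illustrative sketch of how gcc's pre-GOMP_4.0 codegen is expected to drive
// this entry point together with GOMP_parallel_end() below (not generated
// code verbatim):
//     GOMP_parallel_start(fn, data, num_threads);
//     fn(data);                 // the encountering thread runs the body itself
//     GOMP_parallel_end();
// which is why GOMP_parallel_start() only forks the team and does not invoke
// task(data) on the calling thread, unlike the GOMP_4.0 GOMP_parallel() entry
// point further down in this file.
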

void
xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
{
    int gtid = __kmp_get_gtid();
    kmp_info_t *thr;

    thr = __kmp_threads[gtid];

    MKLOC(loc, "GOMP_parallel_end");
    KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));


#if OMPT_SUPPORT
    ompt_parallel_id_t parallel_id;
    ompt_frame_t *ompt_frame = NULL;

    if (ompt_status & ompt_status_track) {
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        parallel_id = team_info->parallel_id;

        // Record that we re-entered the runtime system in the implicit
        // task frame representing the parallel region.
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);

#if OMPT_TRACE
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
            ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
                parallel_id, task_info->task_id);
        }
#endif

        // unlink if necessary. no-op if there is not a lightweight task.
        ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr);
        // GOMP allocates/frees lwt since it can't be kept on the stack
        if (lwt) {
            __kmp_free(lwt);

#if OMPT_SUPPORT
            if (ompt_status & ompt_status_track) {
                // Since a lightweight task was destroyed, make sure that the
                // remaining deepest task knows the stack frame where the runtime
                // was reentered.
                ompt_frame = __ompt_get_task_frame_internal(0);
                ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
            }
#endif
        }
    }
#endif

    if (! thr->th.th_team->t.t_serialized) {
        __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
          thr->th.th_team);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            // Set reenter frame in parent task, which will become current task
            // in the midst of join. This is needed before the end_parallel callback.
            ompt_frame = __ompt_get_task_frame_internal(1);
            ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
        }
#endif

        __kmp_join_call(&loc, gtid, fork_context_gnu);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            ompt_frame->reenter_runtime_frame = NULL;
        }
#endif
    }
    else {
        __kmpc_end_serialized_parallel(&loc, gtid);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            // Record that we re-entered the runtime system in the frame that
            // created the parallel region.
            ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);

            if ((ompt_status == ompt_status_track_callback) &&
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
                ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
                    parallel_id, task_info->task_id,
                    OMPT_INVOKER(fork_context_gnu));
            }

            ompt_frame->reenter_runtime_frame = NULL;

            thr->th.ompt_thread_info.state =
                (((thr->th.th_team)->t.t_serialized) ?
                ompt_state_work_serial : ompt_state_work_parallel);
        }
#endif
    }
}

//
// Loop worksharing constructs
//

//
// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value. We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
//

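// Worked example of the bound conversion described above (a sketch): for a
// gcc loop with lb = 0, ub = 10 (exclusive) and str = 1, KMP_DISPATCH_INIT is
// handed the inclusive bound 9 (ub - 1).  If KMP_DISPATCH_NEXT then returns,
// say, *p_lb = 0 and *p_ub = 3 (inclusive), the "+1" adjustment in the macros
// below converts *p_ub back to the exclusive bound 4 that the gcc-generated
// code expects.
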
#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \
      long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
              (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
          (kmp_int *)p_ub, (kmp_int *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })


void
xexpand(KMP_API_NAME_GOMP_LOOP_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}

//
// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4
//

#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long chunk_sz, \
      unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        long long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
              (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
      unsigned long long str, unsigned long long *p_lb, \
      unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        unsigned long long stride; \
        unsigned long long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
          gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
              (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
              (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT((long long)stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
          gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
          (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
          "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })

//
// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).
//

#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
      long lb, long ub, long str, long chunk_sz) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        ompt_pre(); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        \
        ompt_post(); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }


#if OMPT_SUPPORT

#define OMPT_LOOP_PRE() \
    ompt_frame_t *parent_frame; \
    if (ompt_status & ompt_status_track) { \
        parent_frame = __ompt_get_task_frame_internal(0); \
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0); \
    }


#define OMPT_LOOP_POST() \
    if (ompt_status & ompt_status_track) { \
        parent_frame->reenter_runtime_frame = NULL; \
    }

#else

#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif


PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)

//
// Tasking constructs
//

void
xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
  long arg_size, long arg_align, int if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    // The low-order bit is the "tied" flag
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    input_flags->native = 1;
    // __kmp_task_alloc() sets up all other flags

    if (! if_cond) {
        arg_size = 0;
    }

    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
      sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
      (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        if (arg_align > 0) {
            task->shareds = (void *)((((size_t)task->shareds)
              + arg_align - 1) / arg_align * arg_align);
        }
        //else error??

        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            KMP_MEMCPY(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
#if OMPT_SUPPORT
        ompt_thread_info_t oldInfo;
        kmp_info_t *thread;
        kmp_taskdata_t *taskdata;
        if (ompt_status & ompt_status_track) {
            // Store the thread's state and restore it after the task
            thread = __kmp_threads[ gtid ];
            taskdata = KMP_TASK_TO_TASKDATA(task);
            oldInfo = thread->th.ompt_thread_info;
            thread->th.ompt_thread_info.wait_id = 0;
            thread->th.ompt_thread_info.state = ompt_state_work_parallel;
            taskdata->ompt_task_info.frame.exit_runtime_frame =
                __builtin_frame_address(0);
        }
#endif

        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            thread->th.ompt_thread_info = oldInfo;
            taskdata->ompt_task_info.frame.exit_runtime_frame = 0;
        }
#endif
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}


void
xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
{
    MKLOC(loc, "GOMP_taskwait");
    int gtid = __kmp_entry_gtid();

    KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

    __kmpc_omp_taskwait(&loc, gtid);

    KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}

//
// Sections worksharing constructs
//

//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
//

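// For example, for a construct with three sections, GOMP_sections_start(3)
// (and each subsequent GOMP_sections_next() call) hands the calling thread
// one of the section ids 1, 2 or 3, and returns 0 once every section has
// been handed out.
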
unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_sections_start");
    KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}


unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_next");
    KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
      (unsigned)lb));
    return (unsigned)lb;
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_sections_start");
    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void
xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void)
{
    KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
    return;
}

#if OMP_40_ENABLED // these are new GOMP_4.0 entry points

void
xexpand(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), void *data, unsigned num_threads, unsigned int flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel");
    KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }
    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
}

void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task) (void *), void *data,
  unsigned num_threads, unsigned count, unsigned flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel_sections");
    KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
          (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
          num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
          (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
    KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}

#define PARALLEL_LOOP(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
      long lb, long ub, long str, long chunk_sz, unsigned flags) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
          gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            if (flags != 0) { \
                __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
              (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
              task, data, num_threads, &loc, (schedule), lb, \
              (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
          (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
          (schedule) != kmp_sch_static); \
        task(data); \
        xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }

PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), kmp_sch_static)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), kmp_sch_dynamic_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), kmp_sch_guided_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), kmp_sch_runtime)

void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_start");
    KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));

    __kmpc_taskgroup(&loc, gtid);

    return;
}

void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_end");
    KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));

    __kmpc_end_taskgroup(&loc, gtid);

    return;
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
    kmp_int32 cncl_kind = 0;
    switch(gomp_kind) {
      case 1:
        cncl_kind = cancel_parallel;
        break;
      case 2:
        cncl_kind = cancel_loop;
        break;
      case 4:
        cncl_kind = cancel_sections;
        break;
      case 8:
        cncl_kind = cancel_taskgroup;
        break;
    }
    return cncl_kind;
}

bool
xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancellation_point");
    KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
}

bool
xexpand(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    KMP_FATAL(NoGompCancellation);
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_barrier_cancel");
    KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    } else {
        return FALSE;
    }

    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancel");
    KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    if(do_cancel == FALSE) {
        return xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
    } else {
        return __kmpc_cancel(&loc, gtid, cncl_kind);
    }
}

bool
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_end_cancel");
    KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_loop_end_cancel");
    KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

// All target functions are empty as of 2014-05-29
void
xexpand(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn) (void *), const void *openmp_target,
  size_t mapnum, void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_DATA)(int device, const void *openmp_target, size_t mapnum,
  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_END_DATA)(void)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_UPDATE)(int device, const void *openmp_target, size_t mapnum,
  void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, unsigned int thread_limit)
{
    return;
}
#endif // OMP_40_ENABLED

/*
    The following sections of code create aliases for the GOMP_* functions,
    then create versioned symbols using the assembler directive .symver.
    This is only pertinent for an ELF .so library.
    xaliasify and xversionify are defined in kmp_ftn_os.h
*/

#ifdef KMP_USE_VERSION_SYMBOLS

// GOMP_1.0 aliases
xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10);
xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_BARRIER, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10);

// GOMP_2.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASK, 20);
xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20);

// GOMP_3.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30);

// GOMP_4.0 aliases
// The GOMP_parallel* entry points below aren't OpenMP 4.0 related.
#if OMP_40_ENABLED
xaliasify(KMP_API_NAME_GOMP_PARALLEL, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_START, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_END, 40);
xaliasify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40);
xaliasify(KMP_API_NAME_GOMP_TEAMS, 40);
#endif

// GOMP_1.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols
#if OMP_40_ENABLED
xversionify(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
#endif

#endif // KMP_USE_VERSION_SYMBOLS

#ifdef __cplusplus
    } //extern "C"
#endif // __cplusplus
