/*
 * kmp_gsupport.c
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


#if defined(__x86_64) || defined (__powerpc64__) || defined(__aarch64__)
# define KMP_I8
#endif
#include "kmp.h"
#include "kmp_atomic.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef __cplusplus
    extern "C" {
#endif // __cplusplus

#define MKLOC(loc,routine) \
    static ident_t (loc) = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;" };

#include "kmp_ftn_os.h"

void
xexpand(KMP_API_NAME_GOMP_BARRIER)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_barrier");
    KA_TRACE(20, ("GOMP_barrier: T#%d\n", gtid));
    __kmpc_barrier(&loc, gtid);
}


//
// Mutual exclusion
//

//
// The symbol that icc/ifort generates for unnamed critical
// sections - .gomp_critical_user_ - is defined using .comm in any objects
// that reference it. We can't reference it directly here in C code, as the
// symbol contains a ".".
//
// The RTL contains an assembly language definition of .gomp_critical_user_
// with another symbol __kmp_unnamed_critical_addr initialized with its
// address.
//
extern kmp_critical_name *__kmp_unnamed_critical_addr;


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_start");
    KA_TRACE(20, ("GOMP_critical_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_end");
    KA_TRACE(20, ("GOMP_critical_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_critical_name_start");
    KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n", gtid));
    __kmpc_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


void
xexpand(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_critical_name_end");
    KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n", gtid));
    __kmpc_end_critical(&loc, gtid, (kmp_critical_name *)pptr);
}


//
// The Gnu codegen tries to use locked operations to perform atomic updates
// inline. If it can't, then it calls GOMP_atomic_start() before performing
// the update and GOMP_atomic_end() afterward, regardless of the data type.
//

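//
// Illustrative sketch (not part of the runtime): for an atomic update that
// cannot be done with an inline locked instruction, e.g.
// "#pragma omp atomic  x *= 2.5;" on a long double, gcc-generated code
// typically falls back to a call sequence roughly like:
//
//     GOMP_atomic_start();
//     x *= 2.5;
//     GOMP_atomic_end();
//
// which the entry points below map onto the single global __kmp_atomic_lock.
//
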
void
xexpand(KMP_API_NAME_GOMP_ATOMIC_START)(void)
{
    int gtid = __kmp_entry_gtid();
    KA_TRACE(20, ("GOMP_atomic_start: T#%d\n", gtid));

#if OMPT_SUPPORT
    __ompt_thread_assign_wait_id(0);
#endif

    __kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ATOMIC_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_atomic_end: T#%d\n", gtid));
    __kmp_release_atomic_lock(&__kmp_atomic_lock, gtid);
}


int
xexpand(KMP_API_NAME_GOMP_SINGLE_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_start");
    KA_TRACE(20, ("GOMP_single_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // 3rd parameter == FALSE prevents kmp_enter_single from pushing a
    // workshare when USE_CHECKS is defined. We need to avoid the push,
    // as there is no corresponding GOMP_single_end() call.
    //
    return __kmp_enter_single(gtid, &loc, FALSE);
}


void *
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void)
{
    void *retval;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_single_copy_start");
    KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n", gtid));

    if (! TCR_4(__kmp_init_parallel))
        __kmp_parallel_initialize();

    //
    // If this is the first thread to enter, return NULL. The generated
    // code will then call GOMP_single_copy_end() for this thread only,
    // with the copyprivate data pointer as an argument.
    //
    if (__kmp_enter_single(gtid, &loc, FALSE))
        return NULL;

    //
    // Wait for the first thread to set the copyprivate data pointer,
    // and for all other threads to reach this point.
    //
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    //
    // Retrieve the value of the copyprivate data pointer, and wait for all
    // threads to do likewise, then return.
    //
    retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    return retval;
}


void
xexpand(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n", gtid));

    //
    // Set the copyprivate data pointer for the team, then hit the barrier
    // so that the other threads will continue on and read it. Hit another
    // barrier before continuing, so that they know that the copyprivate
    // data pointer has been propagated to all threads before trying to
    // reuse the t_copypriv_data field.
    //
    __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data;
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
}


void
xexpand(KMP_API_NAME_GOMP_ORDERED_START)(void)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_ordered_start");
    KA_TRACE(20, ("GOMP_ordered_start: T#%d\n", gtid));
    __kmpc_ordered(&loc, gtid);
}


void
xexpand(KMP_API_NAME_GOMP_ORDERED_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_ordered_end");
    KA_TRACE(20, ("GOMP_ordered_end: T#%d\n", gtid));
    __kmpc_end_ordered(&loc, gtid);
}


//
// Dispatch macro defs
//
// They come in two flavors: 64-bit unsigned, and either 32-bit signed
// (IA-32 architecture) or 64-bit signed (Intel(R) 64).
//

#if KMP_ARCH_X86 || KMP_ARCH_ARM
# define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4
# define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4
# define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4
#else
# define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8
# define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8
# define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8
#endif /* KMP_ARCH_X86 */

# define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u
# define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u
# define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u


//
// The parallel construct
//

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *),
    void *data)
{
#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        // get pointer to thread data structure
        thr = __kmp_threads[*gtid];

        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // restore enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}


#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr,
    void (*task)(void *), void *data, unsigned num_threads, ident_t *loc,
    enum sched_type schedule, long start, long end, long incr, long chunk_size)
{
    //
    // Initialize the loop worksharing construct.
    //
    KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
        schedule != kmp_sch_static);

#if OMPT_SUPPORT
    kmp_info_t *thr;
    ompt_frame_t *ompt_frame;
    ompt_state_t enclosing_state;

    if (ompt_status & ompt_status_track) {
        thr = __kmp_threads[*gtid];
        // save enclosing task state; set current state for task
        enclosing_state = thr->th.ompt_thread_info.state;
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;

        // set task frame
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->exit_runtime_frame = __builtin_frame_address(0);
    }
#endif

    //
    // Now invoke the microtask.
    //
    task(data);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        // clear task frame
        ompt_frame->exit_runtime_frame = NULL;

        // reset enclosing state
        thr->th.ompt_thread_info.state = enclosing_state;
    }
#endif
}


#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
void
__kmp_GOMP_fork_call(ident_t *loc, int gtid, void (*unwrapped_task)(void *), microtask_t wrapper, int argc,...)
{
    int rc;
    kmp_info_t *thr = __kmp_threads[gtid];
    kmp_team_t *team = thr->th.th_team;
    int tid = __kmp_tid_from_gtid(gtid);

    va_list ap;
    va_start(ap, argc);

    rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc,
#if OMPT_SUPPORT
        VOLATILE_CAST(void *) unwrapped_task,
#endif
        wrapper, __kmp_invoke_task_func,
#if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
        &ap
#else
        ap
#endif
        );

    va_end(ap);

    if (rc) {
        __kmp_run_before_invoked_task(gtid, tid, thr, team);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
#if OMPT_TRACE
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        ompt_task_info_t *task_info = __ompt_get_taskinfo(0);

        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                team_info->parallel_id, task_info->task_id);
        }
#endif
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
    }
#endif
}

static void
__kmp_GOMP_serialized_parallel(ident_t *loc, kmp_int32 gtid, void (*task)(void *))
{
    __kmp_serialized_parallel(loc, gtid);

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        ompt_task_id_t ompt_task_id = __ompt_get_task_id_internal(0);
        ompt_frame_t *ompt_frame = __ompt_get_task_frame_internal(0);
        kmp_info_t *thr = __kmp_threads[gtid];

        ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(gtid);
        ompt_task_id_t my_ompt_task_id = __ompt_task_id_new(gtid);

        ompt_frame->exit_runtime_frame = NULL;

        // parallel region callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
            int team_size = 1;
            ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
                ompt_task_id, ompt_frame, ompt_parallel_id,
                team_size, (void *) task,
                OMPT_INVOKER(fork_context_gnu));
        }

        // set up lightweight task
        ompt_lw_taskteam_t *lwt = (ompt_lw_taskteam_t *)
            __kmp_allocate(sizeof(ompt_lw_taskteam_t));
        __ompt_lw_taskteam_init(lwt, thr, gtid, (void *) task, ompt_parallel_id);
        lwt->ompt_task_info.task_id = my_ompt_task_id;
        lwt->ompt_task_info.frame.exit_runtime_frame = 0;
        __ompt_lw_taskteam_link(lwt, thr);

#if OMPT_TRACE
        // implicit task callback
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
                ompt_parallel_id, my_ompt_task_id);
        }
        thr->th.ompt_thread_info.state = ompt_state_work_parallel;
#endif
    }
#endif
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), void *data, unsigned num_threads)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_start");
    KA_TRACE(20, ("GOMP_parallel_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
            (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(void)
{
    int gtid = __kmp_get_gtid();
    kmp_info_t *thr;

    thr = __kmp_threads[gtid];

    MKLOC(loc, "GOMP_parallel_end");
    KA_TRACE(20, ("GOMP_parallel_end: T#%d\n", gtid));


#if OMPT_SUPPORT
    ompt_parallel_id_t parallel_id;
    ompt_frame_t *ompt_frame = NULL;

    if (ompt_status & ompt_status_track) {
        ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
        parallel_id = team_info->parallel_id;

        // Record that we re-entered the runtime system in the implicit
        // task frame representing the parallel region.
        ompt_frame = __ompt_get_task_frame_internal(0);
        ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);

#if OMPT_TRACE
        if ((ompt_status == ompt_status_track_callback) &&
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
            ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
            ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
                parallel_id, task_info->task_id);
        }
#endif

        // unlink if necessary. no-op if there is not a lightweight task.
        ompt_lw_taskteam_t *lwt = __ompt_lw_taskteam_unlink(thr);
        // GOMP allocates/frees lwt since it can't be kept on the stack
        if (lwt) {
            __kmp_free(lwt);

#if OMPT_SUPPORT
            if (ompt_status & ompt_status_track) {
                // Since a lightweight task was destroyed, make sure that the
                // remaining deepest task knows the stack frame where the runtime
                // was reentered.
                ompt_frame = __ompt_get_task_frame_internal(0);
                ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
            }
#endif
        }
    }
#endif

    if (! __kmp_threads[gtid]->th.th_team->t.t_serialized) {
        kmp_info_t *thr = __kmp_threads[gtid];
        __kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
            thr->th.th_team);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            // Set reenter frame in parent task, which will become current task
            // in the midst of join. This is needed before the end_parallel callback.
            ompt_frame = __ompt_get_task_frame_internal(1);
            ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);
        }
#endif

        __kmp_join_call(&loc, gtid, fork_context_gnu);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            ompt_frame->reenter_runtime_frame = NULL;
        }
#endif
    }
    else {
        __kmpc_end_serialized_parallel(&loc, gtid);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            // Record that we re-entered the runtime system in the frame that
            // created the parallel region.
            ompt_frame->reenter_runtime_frame = __builtin_frame_address(0);

            if ((ompt_status == ompt_status_track_callback) &&
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
                ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
                ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
                    parallel_id, task_info->task_id,
                    OMPT_INVOKER(fork_context_gnu));
            }

            ompt_frame->reenter_runtime_frame = NULL;

            thr->th.ompt_thread_info.state =
                (((thr->th.th_team)->t.t_serialized) ?
                    ompt_state_work_serial : ompt_state_work_parallel);
        }
#endif
    }
}


//
// Loop worksharing constructs
//

//
// The Gnu codegen passes in an exclusive upper bound for the overall range,
// but the libguide dispatch code expects an inclusive upper bound, hence the
// "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th
// argument to __kmp_GOMP_fork_call).
//
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
// but the Gnu codegen expects an exclusive upper bound, so the adjustment
// "*p_ub += stride" compensates for the discrepancy.
//
// Correction: the gnu codegen always adjusts the upper bound by +-1, not the
// stride value. We adjust the dispatch parameters accordingly (by +-1), but
// we still adjust p_ub by the actual stride value.
//
// The "runtime" versions do not take a chunk_sz parameter.
//
// The profile lib cannot support construct checking of unordered loops that
// are predetermined by the compiler to be statically scheduled, as the gcc
// codegen will not always emit calls to GOMP_loop_static_next() to get the
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
//

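//
// Illustrative sketch (not part of the runtime): for a loop such as
// "for (i = 0; i < n; i++)" with dynamic scheduling, gcc-generated code
// roughly does:
//
//     if (GOMP_loop_dynamic_start(0, n, 1, chunk, &lb, &ub)) {
//         do {
//             for (i = lb; i < ub; i++)   // ub is exclusive here
//                 body(i);
//         } while (GOMP_loop_dynamic_next(&lb, &ub));
//     }
//     GOMP_loop_end();
//
// so the entry points below convert the exclusive bound n to the inclusive
// n - 1 for KMP_DISPATCH_INIT, and add the +1/-1 back into *p_ub before
// returning to the caller.
//
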
#define LOOP_START(func,schedule) \
    int func (long lb, long ub, long str, long chunk_sz, long *p_lb, \
        long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
            gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
                (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START(func,schedule) \
    int func (long lb, long ub, long str, long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \
            gtid, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
                (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
                (kmp_int *)p_ub, (kmp_int *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT(func,fini_code) \
    int func(long *p_lb, long *p_ub) \
    { \
        int status; \
        long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \
            (kmp_int *)p_ub, (kmp_int *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \
            "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {})
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {})
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {})

LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })
LOOP_RUNTIME_START(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT(xexpand(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); })


void
xexpand(KMP_API_NAME_GOMP_LOOP_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_loop_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n", __kmp_get_gtid()))
}


//
// Unsigned long long loop worksharing constructs
//
// These are new with gcc 4.4
//

#define LOOP_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
        unsigned long long str, unsigned long long chunk_sz, \
        unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        long long stride; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
            gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \
                (schedule) != kmp_sch_static); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
                (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT(stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_RUNTIME_START_ULL(func,schedule) \
    int func (int up, unsigned long long lb, unsigned long long ub, \
        unsigned long long str, unsigned long long *p_lb, \
        unsigned long long *p_ub) \
    { \
        int status; \
        long long str2 = up ? ((long long)str) : -((long long)str); \
        unsigned long long stride; \
        unsigned long long chunk_sz = 0; \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        \
        KA_TRACE(20, ( #func ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str 0x%llx, chunk_sz 0x%llx\n", \
            gtid, up, lb, ub, str, chunk_sz )); \
        \
        if ((str > 0) ? (lb < ub) : (lb > ub)) { \
            KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \
                (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, TRUE); \
            status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, \
                (kmp_uint64 *)p_lb, (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
            if (status) { \
                KMP_DEBUG_ASSERT((long long)stride == str2); \
                *p_ub += (str > 0) ? 1 : -1; \
            } \
        } \
        else { \
            status = 0; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \
            gtid, *p_lb, *p_ub, status)); \
        return status; \
    }


#define LOOP_NEXT_ULL(func,fini_code) \
    int func(unsigned long long *p_lb, unsigned long long *p_ub) \
    { \
        int status; \
        long long stride; \
        int gtid = __kmp_get_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d\n", gtid)); \
        \
        fini_code \
        status = KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \
            (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \
        if (status) { \
            *p_ub += (stride > 0) ? 1 : -1; \
        } \
        \
        KA_TRACE(20, ( #func " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \
            "returning %d\n", gtid, *p_lb, *p_ub, stride, status)); \
        return status; \
    }


LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), kmp_sch_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), kmp_sch_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {})
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), kmp_sch_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {})
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {})

LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), kmp_ord_static)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), kmp_ord_dynamic_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), kmp_ord_guided_chunked)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })
LOOP_RUNTIME_START_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), kmp_ord_runtime)
LOOP_NEXT_ULL(xexpand(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), \
    { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); })


//
// Combined parallel / loop worksharing constructs
//
// There are no ull versions (yet).
//

#define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
        long lb, long ub, long str, long chunk_sz) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
            gtid, lb, ub, str, chunk_sz )); \
        \
        ompt_pre(); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
                (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
                task, data, num_threads, &loc, (schedule), lb, \
                (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
            (schedule) != kmp_sch_static); \
        \
        ompt_post(); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }



#if OMPT_SUPPORT

#define OMPT_LOOP_PRE() \
    ompt_frame_t *parent_frame; \
    if (ompt_status & ompt_status_track) { \
        parent_frame = __ompt_get_task_frame_internal(0); \
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0); \
    }


#define OMPT_LOOP_POST() \
    if (ompt_status & ompt_status_track) { \
        parent_frame->reenter_runtime_frame = NULL; \
    }

#else

#define OMPT_LOOP_PRE()

#define OMPT_LOOP_POST()

#endif


PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START),
    kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START),
    kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START),
    kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST)
PARALLEL_LOOP_START(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START),
    kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST)


//
// Tasking constructs
//

void
xexpand(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, void (*copy_func)(void *, void *),
    long arg_size, long arg_align, int if_cond, unsigned gomp_flags)
{
    MKLOC(loc, "GOMP_task");
    int gtid = __kmp_entry_gtid();
    kmp_int32 flags = 0;
    kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *) & flags;

    KA_TRACE(20, ("GOMP_task: T#%d\n", gtid));

    // The low-order bit is the "tied" flag
    if (gomp_flags & 1) {
        input_flags->tiedness = 1;
    }
    input_flags->native = 1;
    // __kmp_task_alloc() sets up all other flags

    if (! if_cond) {
        arg_size = 0;
    }

    kmp_task_t *task = __kmp_task_alloc(&loc, gtid, input_flags,
        sizeof(kmp_task_t), arg_size ? arg_size + arg_align - 1 : 0,
        (kmp_routine_entry_t)func);

    if (arg_size > 0) {
        if (arg_align > 0) {
            task->shareds = (void *)((((size_t)task->shareds)
                + arg_align - 1) / arg_align * arg_align);
        }
        //else error??

        if (copy_func) {
            (*copy_func)(task->shareds, data);
        }
        else {
            KMP_MEMCPY(task->shareds, data, arg_size);
        }
    }

    if (if_cond) {
        __kmpc_omp_task(&loc, gtid, task);
    }
    else {
#if OMPT_SUPPORT
        ompt_thread_info_t oldInfo;
        kmp_info_t *thread;
        kmp_taskdata_t *taskdata;
        if (ompt_status & ompt_status_track) {
            // Store the thread's state and restore it after the task
            thread = __kmp_threads[ gtid ];
            taskdata = KMP_TASK_TO_TASKDATA(task);
            oldInfo = thread->th.ompt_thread_info;
            thread->th.ompt_thread_info.wait_id = 0;
            thread->th.ompt_thread_info.state = ompt_state_work_parallel;
            taskdata->ompt_task_info.frame.exit_runtime_frame =
                __builtin_frame_address(0);
        }
#endif

        __kmpc_omp_task_begin_if0(&loc, gtid, task);
        func(data);
        __kmpc_omp_task_complete_if0(&loc, gtid, task);

#if OMPT_SUPPORT
        if (ompt_status & ompt_status_track) {
            thread->th.ompt_thread_info = oldInfo;
            taskdata->ompt_task_info.frame.exit_runtime_frame = 0;
        }
#endif
    }

    KA_TRACE(20, ("GOMP_task exit: T#%d\n", gtid));
}


void
xexpand(KMP_API_NAME_GOMP_TASKWAIT)(void)
{
    MKLOC(loc, "GOMP_taskwait");
    int gtid = __kmp_entry_gtid();

    KA_TRACE(20, ("GOMP_taskwait: T#%d\n", gtid));

    __kmpc_omp_taskwait(&loc, gtid);

    KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n", gtid));
}


//
// Sections worksharing constructs
//

//
// For the sections construct, we initialize a dynamically scheduled loop
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as the section ids.
//
// There are no special entry points for ordered sections, so we always use
// the dynamically scheduled workshare, even if the sections aren't ordered.
//

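//
// Illustrative sketch (not part of the runtime): for a construct with two
// sections, gcc-generated code is roughly:
//
//     for (i = GOMP_sections_start(2); i != 0; i = GOMP_sections_next()) {
//         switch (i) {
//             case 1: section1(); break;
//             case 2: section2(); break;
//         }
//     }
//     GOMP_sections_end();
//
// so the iteration numbers 1..count handed out by the dynamically scheduled
// loop below serve directly as the section ids, with 0 meaning "done".
//
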
unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_sections_start");
    KA_TRACE(20, ("GOMP_sections_start: T#%d\n", gtid));

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n", gtid,
        (unsigned)lb));
    return (unsigned)lb;
}


unsigned
xexpand(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void)
{
    int status;
    kmp_int lb, ub, stride;
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_next");
    KA_TRACE(20, ("GOMP_sections_next: T#%d\n", gtid));

    status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
    if (status) {
        KMP_DEBUG_ASSERT(stride == 1);
        KMP_DEBUG_ASSERT(lb > 0);
        KMP_ASSERT(lb == ub);
    }
    else {
        lb = 0;
    }

    KA_TRACE(20, ("GOMP_sections_next exit: T#%d returning %u\n", gtid,
        (unsigned)lb));
    return (unsigned)lb;
}


void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)(void (*task) (void *), void *data,
    unsigned num_threads, unsigned count)
{
    int gtid = __kmp_entry_gtid();

#if OMPT_SUPPORT
    ompt_frame_t *parent_frame;

    if (ompt_status & ompt_status_track) {
        parent_frame = __ompt_get_task_frame_internal(0);
        parent_frame->reenter_runtime_frame = __builtin_frame_address(0);
    }
#endif

    MKLOC(loc, "GOMP_parallel_sections_start");
    KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
            (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
            num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
            (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

#if OMPT_SUPPORT
    if (ompt_status & ompt_status_track) {
        parent_frame->reenter_runtime_frame = NULL;
    }
#endif

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n", gtid));
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END)(void)
{
    int gtid = __kmp_get_gtid();
    KA_TRACE(20, ("GOMP_sections_end: T#%d\n", gtid))

    __kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);

    KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n", gtid))
}


void
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void)
{
    KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n", __kmp_get_gtid()))
}

// libgomp has an empty function for GOMP_taskyield as of 2013-10-10
void
xexpand(KMP_API_NAME_GOMP_TASKYIELD)(void)
{
    KA_TRACE(20, ("GOMP_taskyield: T#%d\n", __kmp_get_gtid()))
    return;
}

#if OMP_40_ENABLED // these are new GOMP_4.0 entry points

void
xexpand(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), void *data, unsigned num_threads, unsigned int flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel");
    KA_TRACE(20, ("GOMP_parallel: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
            (microtask_t)__kmp_GOMP_microtask_wrapper, 2, task, data);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }
    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
}

void
xexpand(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task) (void *), void *data,
    unsigned num_threads, unsigned count, unsigned flags)
{
    int gtid = __kmp_entry_gtid();
    MKLOC(loc, "GOMP_parallel_sections");
    KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n", gtid));

    if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) {
        if (num_threads != 0) {
            __kmp_push_num_threads(&loc, gtid, num_threads);
        }
        if(flags != 0) {
            __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags);
        }
        __kmp_GOMP_fork_call(&loc, gtid, task,
            (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, task, data,
            num_threads, &loc, kmp_nm_dynamic_chunked, (kmp_int)1,
            (kmp_int)count, (kmp_int)1, (kmp_int)1);
    }
    else {
        __kmp_GOMP_serialized_parallel(&loc, gtid, task);
    }

    KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

    task(data);
    xexpand(KMP_API_NAME_GOMP_PARALLEL_END)();
    KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n", gtid));
}

#define PARALLEL_LOOP(func, schedule) \
    void func (void (*task) (void *), void *data, unsigned num_threads, \
        long lb, long ub, long str, long chunk_sz, unsigned flags) \
    { \
        int gtid = __kmp_entry_gtid(); \
        MKLOC(loc, #func); \
        KA_TRACE(20, ( #func ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \
            gtid, lb, ub, str, chunk_sz )); \
        \
        if (__kmpc_ok_to_fork(&loc) && (num_threads != 1)) { \
            if (num_threads != 0) { \
                __kmp_push_num_threads(&loc, gtid, num_threads); \
            } \
            if (flags != 0) { \
                __kmp_push_proc_bind(&loc, gtid, (kmp_proc_bind_t)flags); \
            } \
            __kmp_GOMP_fork_call(&loc, gtid, task, \
                (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9, \
                task, data, num_threads, &loc, (schedule), lb, \
                (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \
        } \
        else { \
            __kmp_GOMP_serialized_parallel(&loc, gtid, task); \
        } \
        \
        KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \
            (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \
            (schedule) != kmp_sch_static); \
        task(data); \
        xexpand(KMP_API_NAME_GOMP_PARALLEL_END)(); \
        \
        KA_TRACE(20, ( #func " exit: T#%d\n", gtid)); \
    }

PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), kmp_sch_static)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), kmp_sch_dynamic_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), kmp_sch_guided_chunked)
PARALLEL_LOOP(xexpand(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), kmp_sch_runtime)


void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_START)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_start");
    KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n", gtid));

    __kmpc_taskgroup(&loc, gtid);

    return;
}

void
xexpand(KMP_API_NAME_GOMP_TASKGROUP_END)(void)
{
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_taskgroup_end");
    KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n", gtid));

    __kmpc_end_taskgroup(&loc, gtid);

    return;
}

#ifndef KMP_DEBUG
static
#endif /* KMP_DEBUG */
kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) {
    kmp_int32 cncl_kind = 0;
    switch(gomp_kind) {
    case 1:
        cncl_kind = cancel_parallel;
        break;
    case 2:
        cncl_kind = cancel_loop;
        break;
    case 4:
        cncl_kind = cancel_sections;
        break;
    case 8:
        cncl_kind = cancel_taskgroup;
        break;
    }
    return cncl_kind;
}

bool
xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancellation_point");
    KA_TRACE(20, ("GOMP_cancellation_point: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
}

bool
xexpand(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    KMP_FATAL(NoGompCancellation);
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_barrier_cancel");
    KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    } else {
        return FALSE;
    }

    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_cancel");
    KA_TRACE(20, ("GOMP_cancel: T#%d\n", gtid));

    kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);

    if(do_cancel == FALSE) {
        return xexpand(KMP_API_NAME_GOMP_CANCELLATION_POINT)(which);
    } else {
        return __kmpc_cancel(&loc, gtid, cncl_kind);
    }
}

bool
xexpand(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_sections_end_cancel");
    KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

bool
xexpand(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void)
{
    if(__kmp_omp_cancellation) {
        KMP_FATAL(NoGompCancellation);
    }
    int gtid = __kmp_get_gtid();
    MKLOC(loc, "GOMP_loop_end_cancel");
    KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n", gtid));

    return __kmpc_cancel_barrier(&loc, gtid);
}

// All target functions are empty as of 2014-05-29
void
xexpand(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn) (void *), const void *openmp_target,
    size_t mapnum, void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_DATA)(int device, const void *openmp_target, size_t mapnum,
    void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_END_DATA)(void)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TARGET_UPDATE)(int device, const void *openmp_target, size_t mapnum,
    void **hostaddrs, size_t *sizes, unsigned char *kinds)
{
    return;
}

void
xexpand(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, unsigned int thread_limit)
{
    return;
}
#endif // OMP_40_ENABLED


/*
    The following sections of code create aliases for the GOMP_* functions,
    then create versioned symbols using the assembler directive .symver.
    This is only pertinent for ELF .so libraries.
    xaliasify and xversionify are defined in kmp_ftn_os.h
*/

#ifdef KMP_USE_VERSION_SYMBOLS

// GOMP_1.0 aliases
xaliasify(KMP_API_NAME_GOMP_ATOMIC_END, 10);
xaliasify(KMP_API_NAME_GOMP_ATOMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_BARRIER, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10);
xaliasify(KMP_API_NAME_GOMP_CRITICAL_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_END, 10);
xaliasify(KMP_API_NAME_GOMP_ORDERED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_END, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_START, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10);
xaliasify(KMP_API_NAME_GOMP_SINGLE_START, 10);

// GOMP_2.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASK, 20);
xaliasify(KMP_API_NAME_GOMP_TASKWAIT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20);
xaliasify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20);

// GOMP_3.0 aliases
xaliasify(KMP_API_NAME_GOMP_TASKYIELD, 30);

// GOMP_4.0 aliases
// The GOMP_parallel* entry points below aren't OpenMP 4.0 related.
#if OMP_40_ENABLED
xaliasify(KMP_API_NAME_GOMP_PARALLEL, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40);
xaliasify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_START, 40);
xaliasify(KMP_API_NAME_GOMP_TASKGROUP_END, 40);
xaliasify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40);
xaliasify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40);
xaliasify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40);
xaliasify(KMP_API_NAME_GOMP_TEAMS, 40);
#endif

// GOMP_1.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0");
xversionify(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0");

// GOMP_2.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0");
xversionify(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0");

// GOMP_3.0 versioned symbols
xversionify(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0");

// GOMP_4.0 versioned symbols
#if OMP_40_ENABLED
xversionify(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0");
xversionify(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0");
#endif

#endif // KMP_USE_VERSION_SYMBOLS

#ifdef __cplusplus
    } //extern "C"
#endif // __cplusplus
