/*
 * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_itt.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"

#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#include "tsan_annotations.h"

/* forward declaration */
static void __kmp_enable_tasking(kmp_task_team_t *task_team,
                                 kmp_info_t *this_thr);
static void __kmp_alloc_task_deque(kmp_info_t *thread,
                                   kmp_thread_data_t *thread_data);
static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
                                           kmp_task_team_t *task_team);

#ifdef OMP_45_ENABLED
static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
#endif

#ifdef BUILD_TIED_TASK_STACK

// __kmp_trace_task_stack: print the tied tasks from the task stack in order
// from top to bottom
//
// gtid: global thread identifier for thread containing stack
// thread_data: thread data for task team thread containing stack
// threshold: value above which the trace statement triggers
// location: string identifying call site of this function (for trace)
static void __kmp_trace_task_stack(kmp_int32 gtid,
                                   kmp_thread_data_t *thread_data,
                                   int threshold, char *location) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_taskdata_t **stack_top = task_stack->ts_top;
  kmp_int32 entries = task_stack->ts_entries;
  kmp_taskdata_t *tied_task;

  KA_TRACE(
      threshold,
      ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
       "first_block = %p, stack_top = %p\n",
       location, gtid, entries, task_stack->ts_first_block, stack_top));

  KMP_DEBUG_ASSERT(stack_top != NULL);
  KMP_DEBUG_ASSERT(entries > 0);

  while (entries != 0) {
    KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
    // fix up ts_top if we need to pop from previous block
    if ((entries & TASK_STACK_INDEX_MASK) == 0) {
      kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);

      stack_block = stack_block->sb_prev;
      stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
    }

    // finish bookkeeping
    stack_top--;
    entries--;

    tied_task = *stack_top;

    KMP_DEBUG_ASSERT(tied_task != NULL);
    KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);

    KA_TRACE(threshold,
             ("__kmp_trace_task_stack(%s): gtid=%d, entry=%d, "
              "stack_top=%p, tied_task=%p\n",
              location, gtid, entries, stack_top, tied_task));
  }
  KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);

  KA_TRACE(threshold,
           ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
            location, gtid));
}

// __kmp_init_task_stack: initialize the task stack for the first time
// after a thread_data structure is created.
// It should not be necessary to do this again (assuming the stack works).
//
// gtid: global thread identifier of calling thread
// thread_data: thread data for task team thread containing stack
static void __kmp_init_task_stack(kmp_int32 gtid,
                                  kmp_thread_data_t *thread_data) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *first_block;

  // set up the first block of the stack
  first_block = &task_stack->ts_first_block;
  task_stack->ts_top = (kmp_taskdata_t **)first_block;
  memset((void *)first_block, '\0',
         TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));

  // initialize the stack to be empty
  task_stack->ts_entries = TASK_STACK_EMPTY;
  first_block->sb_next = NULL;
  first_block->sb_prev = NULL;
}
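
// Illustrative layout note (added for clarity; see kmp.h for the actual
// definitions): the suspended-tied-task stack is a doubly linked chain of
// fixed-size blocks. The first block is embedded in kmp_task_stack_t itself;
// overflow blocks are heap-allocated on demand by __kmp_push_task_stack.
//
//   kmp_task_stack_t                 allocated on overflow
//   +- ts_first_block ------+        +- second block --------+
//   | sb_block[0..SIZE-1]   | <----> | sb_block[0..SIZE-1]   | <----> ...
//   | sb_prev / sb_next     |        | sb_prev / sb_next     |
//   +-----------------------+        +-----------------------+
//
// ts_top always points at the next free slot, so it can cross block
// boundaries in both directions as entries are pushed and popped.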

// __kmp_free_task_stack: free the task stack when thread_data is destroyed.
//
// gtid: global thread identifier for calling thread
// thread_data: thread info for thread containing stack
static void __kmp_free_task_stack(kmp_int32 gtid,
                                  kmp_thread_data_t *thread_data) {
  kmp_info_t *thread = __kmp_threads[gtid]; // needed to free overflow blocks
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;

  KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
  // free from the second block of the stack
  while (stack_block != NULL) {
    kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;

    stack_block->sb_next = NULL;
    stack_block->sb_prev = NULL;
    if (stack_block != &task_stack->ts_first_block) {
      __kmp_thread_free(thread,
                        stack_block); // free the block, if not the first
    }
    stack_block = next_block;
  }
  // initialize the stack to be empty
  task_stack->ts_entries = 0;
  task_stack->ts_top = NULL;
}

// __kmp_push_task_stack: Push the tied task onto the task stack.
// Grow the stack if necessary by allocating another block.
//
// gtid: global thread identifier for calling thread
// thread: thread info for thread containing stack
// tied_task: the task to push on the stack
static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
                                  kmp_taskdata_t *tied_task) {
  // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;

  if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
    return; // Don't push anything on stack if team or team tasks are serialized
  }

  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);

  KA_TRACE(20,
           ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
            gtid, thread, tied_task));
  // Store entry
  *(task_stack->ts_top) = tied_task;

  // Do bookkeeping for next push
  task_stack->ts_top++;
  task_stack->ts_entries++;

  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
    // Find beginning of this task block
    kmp_stack_block_t *stack_block =
        (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);

    // Check if we already have a block
    if (stack_block->sb_next !=
        NULL) { // reset ts_top to beginning of next block
      task_stack->ts_top = &stack_block->sb_next->sb_block[0];
    } else { // Alloc new block and link it up
      kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
          thread, sizeof(kmp_stack_block_t));

      task_stack->ts_top = &new_block->sb_block[0];
      stack_block->sb_next = new_block;
      new_block->sb_prev = stack_block;
      new_block->sb_next = NULL;

      KA_TRACE(
          30,
          ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
           gtid, tied_task, new_block));
    }
  }
  KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
                tied_task));
}
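
// Worked example for the block-boundary test above (explanatory only; it
// assumes TASK_STACK_BLOCK_SIZE == 32 and TASK_STACK_INDEX_MASK == 31, the
// values implied by TASK_STACK_BLOCK_BITS in kmp.h): after the 32nd push,
// ts_entries == 32 and (32 & 31) == 0, so ts_top has just stepped past the
// end of the current block. It is then redirected to sb_next->sb_block[0],
// allocating and linking that block first if it does not exist yet.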

// __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
// the task, just check to make sure it matches the ending task passed in.
//
// gtid: global thread identifier for the calling thread
// thread: thread info structure containing stack
// tied_task: the task popped off the stack
// ending_task: the task that is ending (should match popped task)
static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
                                 kmp_taskdata_t *ending_task) {
  // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_taskdata_t *tied_task;

  if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
    // Don't pop anything from stack if team or team tasks are serialized
    return;
  }

  KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
  KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);

  KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
                thread));

  // fix up ts_top if we need to pop from previous block
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
    kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);

    stack_block = stack_block->sb_prev;
    task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
  }

  // finish bookkeeping
  task_stack->ts_top--;
  task_stack->ts_entries--;

  tied_task = *(task_stack->ts_top);

  KMP_DEBUG_ASSERT(tied_task != NULL);
  KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
  KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly

  KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
                tied_task));
  return;
}
#endif /* BUILD_TIED_TASK_STACK */

// __kmp_push_task: Add a task to the thread's deque
static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_task_team_t *task_team = thread->th.th_task_team;
  kmp_int32 tid = __kmp_tid_from_gtid(gtid);
  kmp_thread_data_t *thread_data;

  KA_TRACE(20,
           ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));

  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
    // untied task needs to increment counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = 1 + KMP_TEST_THEN_INC32(&taskdata->td_untied_count);
    KA_TRACE(
        20,
        ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
         gtid, counter, taskdata));
  }

  // The first check avoids building task_team thread data if serialized
  if (taskdata->td_flags.task_serial) {
    KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
                  "TASK_NOT_PUSHED for task %p\n",
                  gtid, taskdata));
    return TASK_NOT_PUSHED;
  }

  // Now that serialized tasks have returned, we can assume that we are not in
  // immediate exec mode
  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
  if (!KMP_TASKING_ENABLED(task_team)) {
    __kmp_enable_tasking(task_team, thread);
  }
  KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
  KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);

  // Find tasking deque specific to encountering thread
  thread_data = &task_team->tt.tt_threads_data[tid];

  // No lock needed since only owner can allocate
  if (thread_data->td.td_deque == NULL) {
    __kmp_alloc_task_deque(thread, thread_data);
  }

  // Check if deque is full
  if (TCR_4(thread_data->td.td_deque_ntasks) >=
      TASK_DEQUE_SIZE(thread_data->td)) {
    KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
                  "TASK_NOT_PUSHED for task %p\n",
                  gtid, taskdata));
    return TASK_NOT_PUSHED;
  }

  // Lock the deque for the task push operation
  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);

#if OMP_45_ENABLED
  // Need to recheck as we can get a proxy task from a thread outside of OpenMP
  if (TCR_4(thread_data->td.td_deque_ntasks) >=
      TASK_DEQUE_SIZE(thread_data->td)) {
    __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
    KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; returning "
                  "TASK_NOT_PUSHED for task %p\n",
                  gtid, taskdata));
    return TASK_NOT_PUSHED;
  }
#else
  // Must have room since no thread can add tasks but calling thread
  KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
                   TASK_DEQUE_SIZE(thread_data->td));
#endif

  thread_data->td.td_deque[thread_data->td.td_deque_tail] =
      taskdata; // Push taskdata
  // Wrap index.
  thread_data->td.td_deque_tail =
      (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
  TCW_4(thread_data->td.td_deque_ntasks,
        TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count

  KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
                "task=%p ntasks=%d head=%u tail=%u\n",
                gtid, taskdata, thread_data->td.td_deque_ntasks,
                thread_data->td.td_deque_head, thread_data->td.td_deque_tail));

  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);

  return TASK_SUCCESSFULLY_PUSHED;
}
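
// Explanatory sketch (not used by the runtime): the deque is a power-of-two
// ring buffer, so TASK_DEQUE_MASK wraps an index with a single AND instead of
// a modulo. For example, with a (hypothetical) capacity of 256, mask 0xff:
//   (255 + 1) & 0xff == 0   // tail wraps from the last slot back to slot 0
//   ( 37 + 1) & 0xff == 38  // ordinary advance
// td_deque_ntasks is maintained separately, which keeps "full" and "empty"
// distinguishable even though head == tail in both cases.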

// __kmp_pop_current_task_from_thread: set up current task from called thread
// when team ends
//
// this_thr: thread structure to set current_task in.
void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
                "this_thread=%p, curtask=%p, "
                "curtask_parent=%p\n",
                0, this_thr, this_thr->th.th_current_task,
                this_thr->th.th_current_task->td_parent));

  this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;

  KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
                "this_thread=%p, curtask=%p, "
                "curtask_parent=%p\n",
                0, this_thr, this_thr->th.th_current_task,
                this_thr->th.th_current_task->td_parent));
}

// __kmp_push_current_task_to_thread: set up current task in called thread for a
// new team
//
// this_thr: thread structure to set up
// team: team for implicit task data
// tid: thread within team to set up
void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
                                       int tid) {
  // The current task of this thread is the parent of the implicit tasks
  // just created for the new team
  KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
                "curtask=%p "
                "parent_task=%p\n",
                tid, this_thr, this_thr->th.th_current_task,
                team->t.t_implicit_task_taskdata[tid].td_parent));

  KMP_DEBUG_ASSERT(this_thr != NULL);

  if (tid == 0) {
    if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
      team->t.t_implicit_task_taskdata[0].td_parent =
          this_thr->th.th_current_task;
      this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
    }
  } else {
    team->t.t_implicit_task_taskdata[tid].td_parent =
        team->t.t_implicit_task_taskdata[0].td_parent;
    this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
  }

  KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
                "curtask=%p "
                "parent_task=%p\n",
                tid, this_thr, this_thr->th.th_current_task,
                team->t.t_implicit_task_taskdata[tid].td_parent));
}

// __kmp_task_start: bookkeeping for a task starting execution
//
// GTID: global thread id of calling thread
// task: task starting execution
// current_task: task suspending
static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
                             kmp_taskdata_t *current_task) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_info_t *thread = __kmp_threads[gtid];

  KA_TRACE(10,
           ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
            gtid, taskdata, current_task));

  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

  // mark currently executing task as suspended
  // TODO: GEH - make sure root team implicit task is initialized properly.
  // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
  current_task->td_flags.executing = 0;

// Add task to stack if tied
#ifdef BUILD_TIED_TASK_STACK
  if (taskdata->td_flags.tiedness == TASK_TIED) {
    __kmp_push_task_stack(gtid, thread, taskdata);
  }
#endif /* BUILD_TIED_TASK_STACK */

  // mark starting task as executing and as current task
  thread->th.th_current_task = taskdata;

  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
                   taskdata->td_flags.tiedness == TASK_UNTIED);
  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
                   taskdata->td_flags.tiedness == TASK_UNTIED);
  taskdata->td_flags.started = 1;
  taskdata->td_flags.executing = 1;
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);

  // GEH TODO: shouldn't we pass some sort of location identifier here?
  // APT: yes, we will pass location here.
  // need to store current thread state (in a thread or taskdata structure)
  // before setting work_state, otherwise wrong state is set after end of task

  KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));

  return;
}

#if OMPT_SUPPORT
//------------------------------------------------------------------------------
// __ompt_task_init:
// Initialize OMPT fields maintained by a task. This will only be called after
// ompt_start_tool, so we already know whether ompt is enabled or not.

static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
  // The calls to __ompt_task_init already have the ompt_enabled condition.
  task->ompt_task_info.task_data.value = 0;
  task->ompt_task_info.frame.exit_frame = NULL;
  task->ompt_task_info.frame.enter_frame = NULL;
#if OMP_40_ENABLED
  task->ompt_task_info.ndeps = 0;
  task->ompt_task_info.deps = NULL;
#endif /* OMP_40_ENABLED */
}

// __ompt_task_start:
// Build and trigger task-begin event
static inline void __ompt_task_start(kmp_task_t *task,
                                     kmp_taskdata_t *current_task,
                                     kmp_int32 gtid) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  ompt_task_status_t status = ompt_task_others;
  if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
    status = ompt_task_yield;
    __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
  }
  /* let OMPT know that we're about to run this task */
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        &(current_task->ompt_task_info.task_data), status,
        &(taskdata->ompt_task_info.task_data));
  }
  taskdata->ompt_task_info.scheduling_parent = current_task;
}

// __ompt_task_finish:
// Build and trigger final task-schedule event
static inline void __ompt_task_finish(kmp_task_t *task,
                                      kmp_taskdata_t *resumed_task) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  ompt_task_status_t status = ompt_task_complete;
  if (taskdata->td_flags.tiedness == TASK_UNTIED &&
      KMP_TEST_THEN_ADD32(&(taskdata->td_untied_count), 0) > 1)
    status = ompt_task_others;
  if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
      taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
    status = ompt_task_cancel;
  }

  /* let OMPT know that we're returning to the callee task */
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        &(taskdata->ompt_task_info.task_data), status,
        &((resumed_task ? resumed_task
                        : (taskdata->ompt_task_info.scheduling_parent
                               ? taskdata->ompt_task_info.scheduling_parent
                               : taskdata->td_parent))
              ->ompt_task_info.task_data));
  }
}
#endif

template <bool ompt>
static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
                                               kmp_task_t *task,
                                               void *frame_address,
                                               void *return_address) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;

  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
                "current_task=%p\n",
                gtid, loc_ref, taskdata, current_task));

  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
    // untied task needs to increment counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = 1 + KMP_TEST_THEN_INC32(&taskdata->td_untied_count);
    KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
                  "incremented for task %p\n",
                  gtid, counter, taskdata));
  }

  taskdata->td_flags.task_serial =
      1; // Execute this task immediately, not deferred.
  __kmp_task_start(gtid, task, current_task);

#if OMPT_SUPPORT
  if (ompt) {
    if (current_task->ompt_task_info.frame.enter_frame == NULL) {
      current_task->ompt_task_info.frame.enter_frame =
          taskdata->ompt_task_info.frame.exit_frame = frame_address;
    }
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(parent_info->task_data), &(parent_info->frame),
          &(taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
          return_address);
    }
    __ompt_task_start(task, current_task, gtid);
  }
#endif // OMPT_SUPPORT

  KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
                loc_ref, taskdata));
}

#if OMPT_SUPPORT
OMPT_NOINLINE
static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
                                           kmp_task_t *task,
                                           void *frame_address,
                                           void *return_address) {
  __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
                                           return_address);
}
#endif // OMPT_SUPPORT

// __kmpc_omp_task_begin_if0: report that a given serialized task has started
// execution
//
// loc_ref: source location information; points to beginning of task block.
// gtid: global thread number.
// task: task thunk for the started task.
void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
                               kmp_task_t *task) {
#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled)) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
                                   OMPT_GET_FRAME_ADDRESS(1),
                                   OMPT_LOAD_RETURN_ADDRESS(gtid));
    return;
  }
#endif
  __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
}
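
// A minimal sketch of the expected calling sequence (hypothetical codegen,
// added for orientation only; the real sequence is emitted by the compiler
// for an undeferred "#pragma omp task if(0)"):
//   kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags, sz, szsh, entry);
//   __kmpc_omp_task_begin_if0(loc, gtid, t); // bookkeeping for inline start
//   t->routine(gtid, t);                     // task body runs immediately
//   __kmpc_omp_task_complete_if0(loc, gtid, t);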

#ifdef TASK_UNUSED
// __kmpc_omp_task_begin: report that a given task has started execution
// NEVER GENERATED BY COMPILER, DEPRECATED!!!
void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
  kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;

  KA_TRACE(
      10,
      ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
       gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));

  __kmp_task_start(gtid, task, current_task);

  KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
                loc_ref, KMP_TASK_TO_TASKDATA(task)));
  return;
}
#endif // TASK_UNUSED

// __kmp_free_task: free the current task space and the space for shareds
//
// gtid: Global thread ID of calling thread
// taskdata: task to free
// thread: thread data structure of caller
static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
                            kmp_info_t *thread) {
  KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
                taskdata));

  // Check to make sure all flags and counters have the correct values
  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
  KMP_DEBUG_ASSERT(TCR_4(taskdata->td_allocated_child_tasks) == 0 ||
                   taskdata->td_flags.task_serial == 1);
  KMP_DEBUG_ASSERT(TCR_4(taskdata->td_incomplete_child_tasks) == 0);

  taskdata->td_flags.freed = 1;
  ANNOTATE_HAPPENS_BEFORE(taskdata);
// deallocate the taskdata and shared variable blocks associated with this task
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, taskdata);
#else /* ! USE_FAST_MEMORY */
  __kmp_thread_free(thread, taskdata);
#endif

  KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
}

// __kmp_free_task_and_ancestors: free the current task and ancestors without
// children
//
// gtid: Global thread ID of calling thread
// taskdata: task to free
// thread: thread data structure of caller
static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
                                          kmp_taskdata_t *taskdata,
                                          kmp_info_t *thread) {
#if OMP_45_ENABLED
  // Proxy tasks must always be allowed to free their parents
  // because they can be run in background even in serial mode.
  kmp_int32 team_serial =
      (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
      !taskdata->td_flags.proxy;
#else
  kmp_int32 team_serial =
      taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser;
#endif
  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

  kmp_int32 children =
      KMP_TEST_THEN_DEC32(&taskdata->td_allocated_child_tasks) - 1;
  KMP_DEBUG_ASSERT(children >= 0);

  // Now, go up the ancestor tree to see if any ancestors can now be freed.
  while (children == 0) {
    kmp_taskdata_t *parent_taskdata = taskdata->td_parent;

    KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
                  "and freeing itself\n",
                  gtid, taskdata));

    // --- Deallocate my ancestor task ---
    __kmp_free_task(gtid, taskdata, thread);

    taskdata = parent_taskdata;

    // Stop checking ancestors at implicit task instead of walking up ancestor
    // tree to avoid premature deallocation of ancestors.
    if (team_serial || taskdata->td_flags.tasktype == TASK_IMPLICIT)
      return;

    // Predecrement simulated by "- 1" calculation
    children = KMP_TEST_THEN_DEC32(&taskdata->td_allocated_child_tasks) - 1;
    KMP_DEBUG_ASSERT(children >= 0);
  }

  KA_TRACE(
      20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
           "not freeing it yet\n",
           gtid, taskdata, children));
}
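
// Note on the "- 1" idiom above (added for clarity): KMP_TEST_THEN_DEC32
// returns the value held *before* the atomic decrement, so "old - 1" is the
// count after this reference has been dropped. For example, when
// td_allocated_child_tasks was 1, children becomes 0 and the loop frees the
// task before moving on to its parent.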

// __kmp_task_finish: bookkeeping to do when a task finishes execution
//
// gtid: global thread ID for calling thread
// task: task to be finished
// resumed_task: task to be resumed. (may be NULL if task is serialized)
static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
                              kmp_taskdata_t *resumed_task) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_task_team_t *task_team =
      thread->th.th_task_team; // might be NULL for serial teams...
  kmp_int32 children = 0;

  KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
                "task %p\n",
                gtid, taskdata, resumed_task));

  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);

// Pop task from stack if tied
#ifdef BUILD_TIED_TASK_STACK
  if (taskdata->td_flags.tiedness == TASK_TIED) {
    __kmp_pop_task_stack(gtid, thread, taskdata);
  }
#endif /* BUILD_TIED_TASK_STACK */

  if (taskdata->td_flags.tiedness == TASK_UNTIED) {
    // untied task needs to check the counter so that the task structure is not
    // freed prematurely
    kmp_int32 counter = KMP_TEST_THEN_DEC32(&taskdata->td_untied_count) - 1;
    KA_TRACE(
        20,
        ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
         gtid, counter, taskdata));
    if (counter > 0) {
      // untied task is not done, to be continued possibly by other thread, do
      // not free it now
      if (resumed_task == NULL) {
        KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
        resumed_task = taskdata->td_parent; // In a serialized task, the resumed
        // task is the parent
      }
      thread->th.th_current_task = resumed_task; // restore current_task
      resumed_task->td_flags.executing = 1; // resume previous task
      KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
                    "resuming task %p\n",
                    gtid, taskdata, resumed_task));
      return;
    }
  }

  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
  taskdata->td_flags.complete = 1; // mark the task as completed
  KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);

  // Only need to keep track of count if team parallel and tasking not
  // serialized
  if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
    // Predecrement simulated by "- 1" calculation
    children =
        KMP_TEST_THEN_DEC32(&taskdata->td_parent->td_incomplete_child_tasks) -
        1;
    KMP_DEBUG_ASSERT(children >= 0);
#if OMP_40_ENABLED
    if (taskdata->td_taskgroup)
      KMP_TEST_THEN_DEC32((kmp_int32 *)(&taskdata->td_taskgroup->count));
#if OMP_45_ENABLED
  }
  // if we found proxy tasks there could exist a dependency chain
  // with the proxy task as origin
  if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) ||
      (task_team && task_team->tt.tt_found_proxy_tasks)) {
#endif
    __kmp_release_deps(gtid, taskdata);
#endif
  }

  // td_flags.executing must be marked as 0 after __kmp_release_deps has been
  // called. Otherwise, if a task is executed immediately from the release_deps
  // code, the flag will be reset to 1 again by this same function
  KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
  taskdata->td_flags.executing = 0; // suspend the finishing task

  KA_TRACE(
      20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
           gtid, taskdata, children));

#if OMP_40_ENABLED
  /* If the tasks' destructor thunk flag has been set, we need to invoke the
     destructor thunk that has been generated by the compiler. The code is
     placed here, since at this point other tasks might have been released
     hence overlapping the destructor invocations with some other work in the
     released tasks. The OpenMP spec is not specific on when the destructors
     are invoked, so we should be free to choose. */
  if (taskdata->td_flags.destructors_thunk) {
    kmp_routine_entry_t destr_thunk = task->data1.destructors;
    KMP_ASSERT(destr_thunk);
    destr_thunk(gtid, task);
  }
#endif // OMP_40_ENABLED

  // bookkeeping for resuming task:
  // GEH - note tasking_ser => task_serial
  KMP_DEBUG_ASSERT(
      (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
      taskdata->td_flags.task_serial);
  if (taskdata->td_flags.task_serial) {
    if (resumed_task == NULL) {
      resumed_task = taskdata->td_parent; // In a serialized task, the resumed
      // task is the parent
    }
  } else {
    KMP_DEBUG_ASSERT(resumed_task !=
                     NULL); // verify that resumed task is passed as argument
  }

  // Free this task and then ancestor tasks if they have no children.
  // Restore th_current_task first as suggested by John:
  // johnmc: if an asynchronous inquiry peers into the runtime system
  // it doesn't see the freed task as the current task.
  thread->th.th_current_task = resumed_task;
  __kmp_free_task_and_ancestors(gtid, taskdata, thread);

  // TODO: GEH - make sure root team implicit task is initialized properly.
  // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
  resumed_task->td_flags.executing = 1; // resume previous task

  KA_TRACE(
      10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
           gtid, taskdata, resumed_task));

  return;
}
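
// Explanatory summary of td_untied_count (derived from the code above): each
// push of an untied task (__kmp_push_task) or inline start
// (__kmpc_omp_task_begin_if0) increments the counter, and every pass through
// __kmp_task_finish decrements it. A possible trace: the task is pushed (1),
// re-pushed from a scheduling point while executing (2), finishes its first
// part (counter drops to 1 > 0, so the structure survives), then finishes its
// last part (0) and goes through normal completion and deallocation.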

template <bool ompt>
static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
                                                  kmp_int32 gtid,
                                                  kmp_task_t *task) {
  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
                gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
  // this routine will provide task to resume
  __kmp_task_finish(gtid, task, NULL);

  KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
                gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));

#if OMPT_SUPPORT
  if (ompt) {
    __ompt_task_finish(task, NULL);
    ompt_frame_t *ompt_frame;
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
    ompt_frame->enter_frame = NULL;
  }
#endif

  return;
}

#if OMPT_SUPPORT
OMPT_NOINLINE
void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
                                       kmp_task_t *task) {
  __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
}
#endif // OMPT_SUPPORT

// __kmpc_omp_task_complete_if0: report that a task has completed execution
//
// loc_ref: source location information; points to end of task block.
// gtid: global thread number.
// task: task thunk for the completed task.
void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
                                  kmp_task_t *task) {
#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled)) {
    __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
    return;
  }
#endif
  __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
}

#ifdef TASK_UNUSED
// __kmpc_omp_task_complete: report that a task has completed execution
// NEVER GENERATED BY COMPILER, DEPRECATED!!!
void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
                              kmp_task_t *task) {
  KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, KMP_TASK_TO_TASKDATA(task)));

  __kmp_task_finish(gtid, task, NULL); // Not sure how to find task to resume

  KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
                loc_ref, KMP_TASK_TO_TASKDATA(task)));
  return;
}
#endif // TASK_UNUSED

// __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
// task for a given thread
//
// loc_ref: reference to source location of parallel region
// this_thr: thread data structure corresponding to implicit task
// team: team for this_thr
// tid: thread id of given thread within team
// set_curr_task: TRUE if need to push current task to thread
// NOTE: Routine does not set up the implicit task ICVs. This is assumed to
// have already been done elsewhere.
// TODO: Get better loc_ref. Value passed in may be NULL
void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
                              kmp_team_t *team, int tid, int set_curr_task) {
  kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];

  KF_TRACE(
      10,
      ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
       tid, team, task, set_curr_task ? "TRUE" : "FALSE"));

  task->td_task_id = KMP_GEN_TASK_ID();
  task->td_team = team;
  // task->td_parent = NULL; // fix for CQ230101 (broken parent task info
  // in debugger)
  task->td_ident = loc_ref;
  task->td_taskwait_ident = NULL;
  task->td_taskwait_counter = 0;
  task->td_taskwait_thread = 0;

  task->td_flags.tiedness = TASK_TIED;
  task->td_flags.tasktype = TASK_IMPLICIT;
#if OMP_45_ENABLED
  task->td_flags.proxy = TASK_FULL;
#endif

  // All implicit tasks are executed immediately, not deferred
  task->td_flags.task_serial = 1;
  task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
  task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;

  task->td_flags.started = 1;
  task->td_flags.executing = 1;
  task->td_flags.complete = 0;
  task->td_flags.freed = 0;

#if OMP_40_ENABLED
  task->td_depnode = NULL;
#endif
  task->td_last_tied = task;

  if (set_curr_task) { // only do this init first time thread is created
    task->td_incomplete_child_tasks = 0;
    // Not used: don't need to deallocate implicit task
    task->td_allocated_child_tasks = 0;
#if OMP_40_ENABLED
    task->td_taskgroup = NULL; // An implicit task does not have taskgroup
    task->td_dephash = NULL;
#endif
    __kmp_push_current_task_to_thread(this_thr, team, tid);
  } else {
    KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
    KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
  }

#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled))
    __ompt_task_init(task, tid);
#endif

  KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
                team, task));
}

// __kmp_finish_implicit_task: Release resources associated with implicit tasks
// at the end of parallel regions. Some resources are kept for reuse in the next
// parallel region.
//
// thread: thread data structure corresponding to implicit task
void __kmp_finish_implicit_task(kmp_info_t *thread) {
  kmp_taskdata_t *task = thread->th.th_current_task;
  if (task->td_dephash)
    __kmp_dephash_free_entries(thread, task->td_dephash);
}

// __kmp_free_implicit_task: Release resources associated with implicit tasks
// when these tasks are destroyed
//
// thread: thread data structure corresponding to implicit task
void __kmp_free_implicit_task(kmp_info_t *thread) {
  kmp_taskdata_t *task = thread->th.th_current_task;
  if (task->td_dephash)
    __kmp_dephash_free(thread, task->td_dephash);
  task->td_dephash = NULL;
}

// Round up a size to a power of two specified by val: Used to insert padding
// between structures co-allocated using a single malloc() call
static size_t __kmp_round_up_to_val(size_t size, size_t val) {
  if (size & (val - 1)) {
    size &= ~(val - 1);
    if (size <= KMP_SIZE_T_MAX - val) {
      size += val; // Round up if there is no overflow.
    }
  }
  return size;
} // __kmp_round_up_to_val
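
// Worked example (illustrative): __kmp_round_up_to_val(37, 8)
//   37 & 7 != 0     // not already a multiple of val
//   37 & ~7 == 32   // drop to the previous multiple
//   32 + 8 == 40    // round up; the overflow check against KMP_SIZE_T_MAX
//                   // guards the addition
// A size that is already a multiple of val (e.g. 40) is returned unchanged.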

// __kmp_task_alloc: Allocate the taskdata and task data structures for a task
//
// loc_ref: source location information
// gtid: global thread number.
// flags: include tiedness & task type (explicit vs. implicit) of the 'new'
// task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
// sizeof_kmp_task_t: Size in bytes of kmp_task_t data structure including
// private vars accessed in task.
// sizeof_shareds: Size in bytes of array of pointers to shared vars accessed
// in task.
// task_entry: Pointer to task code entry point generated by compiler.
// returns: a pointer to the allocated kmp_task_t structure (task).
kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
                             kmp_tasking_flags_t *flags,
                             size_t sizeof_kmp_task_t, size_t sizeof_shareds,
                             kmp_routine_entry_t task_entry) {
  kmp_task_t *task;
  kmp_taskdata_t *taskdata;
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_team_t *team = thread->th.th_team;
  kmp_taskdata_t *parent_task = thread->th.th_current_task;
  size_t shareds_offset;

  KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
                "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
                gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
                sizeof_shareds, task_entry));

  if (parent_task->td_flags.final) {
    if (flags->merged_if0) {
    }
    flags->final = 1;
  }
  if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
    // An untied task encountered causes the TSC algorithm to check the entire
    // deque of the victim thread. If no untied task is encountered, then
    // checking the head of the deque should be enough.
    KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
  }

#if OMP_45_ENABLED
  if (flags->proxy == TASK_PROXY) {
    flags->tiedness = TASK_UNTIED;
    flags->merged_if0 = 1;

    /* are we running in a sequential parallel or tskm_immediate_exec... we need
       tasking support enabled */
    if ((thread->th.th_task_team) == NULL) {
      /* This should only happen if the team is serialized
         setup a task team and propagate it to the thread */
      KMP_DEBUG_ASSERT(team->t.t_serialized);
      KA_TRACE(30,
               ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
                gtid));
      __kmp_task_team_setup(
          thread, team,
          1); // 1 indicates setup the current team regardless of nthreads
      thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
    }
    kmp_task_team_t *task_team = thread->th.th_task_team;

    /* tasking must be enabled now as the task might not be pushed */
    if (!KMP_TASKING_ENABLED(task_team)) {
      KA_TRACE(
          30,
          ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
      __kmp_enable_tasking(task_team, thread);
      kmp_int32 tid = thread->th.th_info.ds.ds_tid;
      kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
      // No lock needed since only owner can allocate
      if (thread_data->td.td_deque == NULL) {
        __kmp_alloc_task_deque(thread, thread_data);
      }
    }

    if (task_team->tt.tt_found_proxy_tasks == FALSE)
      TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
  }
#endif

  // Calculate shared structure offset including padding after kmp_task_t struct
  // to align pointers in shared struct
  shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
  shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
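
  // Resulting single-allocation layout (illustrative; the shareds block is
  // aligned to pointer size as computed above):
  //
  //   |<------------------ shareds_offset ------------------>|
  //   +----------------+---------------------------+---------+--------------+
  //   | kmp_taskdata_t | kmp_task_t + private vars | padding | shared ptrs  |
  //   +----------------+---------------------------+---------+--------------+
  //   ^ taskdata       ^ task                                ^ task->shareds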

  // Allocate a kmp_taskdata_t block and a kmp_task_t block.
  KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
                shareds_offset));
  KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
                sizeof_shareds));

// Avoid double allocation here by combining shareds with taskdata
#if USE_FAST_MEMORY
  taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
                                                               sizeof_shareds);
#else /* ! USE_FAST_MEMORY */
  taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
                                                               sizeof_shareds);
#endif /* USE_FAST_MEMORY */
  ANNOTATE_HAPPENS_AFTER(taskdata);

  task = KMP_TASKDATA_TO_TASK(taskdata);

// Make sure task & taskdata are aligned appropriately
#if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
#else
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
  KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
#endif
  if (sizeof_shareds > 0) {
    // Avoid double allocation here by combining shareds with taskdata
    task->shareds = &((char *)taskdata)[shareds_offset];
    // Make sure shareds struct is aligned to pointer size
    KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
                     0);
  } else {
    task->shareds = NULL;
  }
  task->routine = task_entry;
  task->part_id = 0; // AC: Always start with 0 part id
Jim Cownie5e8470a2013-09-27 10:38:44 +00001126
Jonathan Peyton30419822017-05-12 18:01:32 +00001127 taskdata->td_task_id = KMP_GEN_TASK_ID();
1128 taskdata->td_team = team;
1129 taskdata->td_alloc_thread = thread;
1130 taskdata->td_parent = parent_task;
1131 taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1132 taskdata->td_untied_count = 0;
1133 taskdata->td_ident = loc_ref;
1134 taskdata->td_taskwait_ident = NULL;
1135 taskdata->td_taskwait_counter = 0;
1136 taskdata->td_taskwait_thread = 0;
1137 KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001138#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001139 // avoid copying icvs for proxy tasks
1140 if (flags->proxy == TASK_FULL)
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001141#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001142 copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001143
Jonathan Peyton30419822017-05-12 18:01:32 +00001144 taskdata->td_flags.tiedness = flags->tiedness;
1145 taskdata->td_flags.final = flags->final;
1146 taskdata->td_flags.merged_if0 = flags->merged_if0;
Jim Cownie181b4bb2013-12-23 17:28:57 +00001147#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001148 taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
Jim Cownie181b4bb2013-12-23 17:28:57 +00001149#endif // OMP_40_ENABLED
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001150#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001151 taskdata->td_flags.proxy = flags->proxy;
1152 taskdata->td_task_team = thread->th.th_task_team;
1153 taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001154#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001155 taskdata->td_flags.tasktype = TASK_EXPLICIT;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001156
Jonathan Peyton30419822017-05-12 18:01:32 +00001157 // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1158 taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001159
Jonathan Peyton30419822017-05-12 18:01:32 +00001160 // GEH - TODO: fix this to copy parent task's value of team_serial flag
1161 taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001162
  // GEH - Note we serialize the task if the team is serialized to make sure
  // implicit parallel region tasks are not left to execute until program
  // termination. Also, executing immediately helps locality.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001166
Jonathan Peyton30419822017-05-12 18:01:32 +00001167 taskdata->td_flags.task_serial =
1168 (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1169 taskdata->td_flags.tasking_ser);
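  // For example, a task created inside a serialized parallel region, under
  // tskm_immediate_exec, or as a child of a final task is marked task_serial
  // and will be executed immediately rather than deferred to a deque.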
Jim Cownie5e8470a2013-09-27 10:38:44 +00001170
Jonathan Peyton30419822017-05-12 18:01:32 +00001171 taskdata->td_flags.started = 0;
1172 taskdata->td_flags.executing = 0;
1173 taskdata->td_flags.complete = 0;
1174 taskdata->td_flags.freed = 0;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001175
Jonathan Peyton30419822017-05-12 18:01:32 +00001176 taskdata->td_flags.native = flags->native;
1177
1178 taskdata->td_incomplete_child_tasks = 0;
  // start at one because the count includes the current task and its children
  taskdata->td_allocated_child_tasks = 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001181#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001182 taskdata->td_taskgroup =
1183 parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1184 taskdata->td_dephash = NULL;
1185 taskdata->td_depnode = NULL;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001186#endif
Andrey Churbanova756cb22017-11-16 10:45:07 +00001187 if (flags->tiedness == TASK_UNTIED)
1188 taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1189 else
1190 taskdata->td_last_tied = taskdata;
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001191
// Child task counts only need to be tracked if the team is parallel and
// tasking is not serialized, or if the task is a proxy task
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001194#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001195 if (flags->proxy == TASK_PROXY ||
1196 !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001197#else
Jonathan Peyton30419822017-05-12 18:01:32 +00001198 if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001199#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001200 {
Andrey Churbanov5ba90c72017-07-17 09:03:14 +00001201 KMP_TEST_THEN_INC32(&parent_task->td_incomplete_child_tasks);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001202#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001203 if (parent_task->td_taskgroup)
1204 KMP_TEST_THEN_INC32((kmp_int32 *)(&parent_task->td_taskgroup->count));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001205#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001206 // Only need to keep track of allocated child tasks for explicit tasks since
1207 // implicit not deallocated
1208 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
Andrey Churbanov5ba90c72017-07-17 09:03:14 +00001209 KMP_TEST_THEN_INC32(&taskdata->td_parent->td_allocated_child_tasks);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001210 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001211 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00001212
Jonathan Peyton30419822017-05-12 18:01:32 +00001213 KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1214 gtid, taskdata, taskdata->td_parent));
1215 ANNOTATE_HAPPENS_BEFORE(task);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001216
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001217#if OMPT_SUPPORT
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001218 if (UNLIKELY(ompt_enabled.enabled))
Joachim Protze82e94a52017-11-01 10:08:30 +00001219 __ompt_task_init(taskdata, gtid);
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001220#endif
1221
Jonathan Peyton30419822017-05-12 18:01:32 +00001222 return task;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001223}
1224
Jonathan Peyton30419822017-05-12 18:01:32 +00001225kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1226 kmp_int32 flags, size_t sizeof_kmp_task_t,
1227 size_t sizeof_shareds,
1228 kmp_routine_entry_t task_entry) {
1229 kmp_task_t *retval;
1230 kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001231
Jonathan Peyton30419822017-05-12 18:01:32 +00001232 input_flags->native = FALSE;
1233// __kmp_task_alloc() sets up all other runtime flags
Jim Cownie5e8470a2013-09-27 10:38:44 +00001234
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001235#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001236 KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s) "
1237 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1238 gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1239 input_flags->proxy ? "proxy" : "", sizeof_kmp_task_t,
1240 sizeof_shareds, task_entry));
Jonathan Peyton1c9e6432015-06-03 18:24:02 +00001241#else
Jonathan Peyton30419822017-05-12 18:01:32 +00001242 KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s) "
1243 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1244 gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1245 sizeof_kmp_task_t, sizeof_shareds, task_entry));
Jonathan Peyton1c9e6432015-06-03 18:24:02 +00001246#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001247
Jonathan Peyton30419822017-05-12 18:01:32 +00001248 retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1249 sizeof_shareds, task_entry);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001250
Jonathan Peyton30419822017-05-12 18:01:32 +00001251 KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001252
Jonathan Peyton30419822017-05-12 18:01:32 +00001253 return retval;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001254}
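
// A sketch of how compiler-generated code might drive this entry point; the
// identifiers loc, my_shareds, private_size and my_task_entry are
// illustrative, not part of this file:
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, /*flags=*/1 /*tied*/,
//                                         sizeof(kmp_task_t) + private_size,
//                                         sizeof(my_shareds), &my_task_entry);
//   ((my_shareds *)t->shareds)->a_ptr = &a; // publish shared variables
//   __kmpc_omp_task(&loc, gtid, t);         // hand the task to the runtime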
1255
Jim Cownie5e8470a2013-09-27 10:38:44 +00001256// __kmp_invoke_task: invoke the specified task
1257//
1258// gtid: global thread ID of caller
1259// task: the task to invoke
1260// current_task: the task to resume after task invokation
Jonathan Peyton30419822017-05-12 18:01:32 +00001261static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1262 kmp_taskdata_t *current_task) {
1263 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1264 kmp_uint64 cur_time;
Jim Cownie181b4bb2013-12-23 17:28:57 +00001265#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001266 int discard = 0 /* false */;
Jim Cownie181b4bb2013-12-23 17:28:57 +00001267#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001268 KA_TRACE(
1269 30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1270 gtid, taskdata, current_task));
1271 KMP_DEBUG_ASSERT(task);
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001272#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001273 if (taskdata->td_flags.proxy == TASK_PROXY &&
1274 taskdata->td_flags.complete == 1) {
1275 // This is a proxy task that was already completed but it needs to run
1276 // its bottom-half finish
1277 KA_TRACE(
1278 30,
1279 ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1280 gtid, taskdata));
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001281
Jonathan Peyton30419822017-05-12 18:01:32 +00001282 __kmp_bottom_half_finish_proxy(gtid, task);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001283
Jonathan Peyton30419822017-05-12 18:01:32 +00001284 KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1285 "proxy task %p, resuming task %p\n",
1286 gtid, taskdata, current_task));
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001287
Jonathan Peyton30419822017-05-12 18:01:32 +00001288 return;
1289 }
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001290#endif
1291
Jonathan Peyton99ef4d02016-04-14 16:06:49 +00001292#if USE_ITT_BUILD && USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00001293 if (__kmp_forkjoin_frames_mode == 3) {
1294 // Get the current time stamp to measure task execution time to correct
1295 // barrier imbalance time
1296 cur_time = __itt_get_timestamp();
1297 }
Jonathan Peyton99ef4d02016-04-14 16:06:49 +00001298#endif
1299
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001300#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001301 // Proxy tasks are not handled by the runtime
1302 if (taskdata->td_flags.proxy != TASK_PROXY) {
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001303#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001304 ANNOTATE_HAPPENS_AFTER(task);
Joachim Protze82e94a52017-11-01 10:08:30 +00001305 __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
Jonas Hahnfeld50fed042016-11-07 15:58:36 +00001306#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001307 }
Jonas Hahnfeld50fed042016-11-07 15:58:36 +00001308#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001309
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001310#if OMPT_SUPPORT
Jonathan Peyton30419822017-05-12 18:01:32 +00001311 ompt_thread_info_t oldInfo;
1312 kmp_info_t *thread;
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001313 if (UNLIKELY(ompt_enabled.enabled)) {
    // Store the thread's state and restore it after the task
1315 thread = __kmp_threads[gtid];
1316 oldInfo = thread->th.ompt_thread_info;
1317 thread->th.ompt_thread_info.wait_id = 0;
Joachim Protze82e94a52017-11-01 10:08:30 +00001318 thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1319 ? omp_state_work_serial
1320 : omp_state_work_parallel;
Joachim Protzec255ca72017-11-05 14:11:10 +00001321 taskdata->ompt_task_info.frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
Jonathan Peyton30419822017-05-12 18:01:32 +00001322 }
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001323#endif
1324
Jim Cownie181b4bb2013-12-23 17:28:57 +00001325#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001326 // TODO: cancel tasks if the parallel region has also been cancelled
1327 // TODO: check if this sequence can be hoisted above __kmp_task_start
1328 // if cancellation has been enabled for this run ...
1329 if (__kmp_omp_cancellation) {
1330 kmp_info_t *this_thr = __kmp_threads[gtid];
1331 kmp_team_t *this_team = this_thr->th.th_team;
1332 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1333 if ((taskgroup && taskgroup->cancel_request) ||
1334 (this_team->t.t_cancel_request == cancel_parallel)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00001335#if OMPT_SUPPORT && OMPT_OPTIONAL
1336 ompt_data_t *task_data;
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001337 if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00001338 __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1339 ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1340 task_data,
1341 ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1342 : ompt_cancel_parallel) |
1343 ompt_cancel_discarded_task,
1344 NULL);
1345 }
1346#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001347 KMP_COUNT_BLOCK(TASK_cancelled);
1348 // this task belongs to a task group and we need to cancel it
1349 discard = 1 /* true */;
Jim Cownie181b4bb2013-12-23 17:28:57 +00001350 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001351 }
Jim Cownie181b4bb2013-12-23 17:28:57 +00001352
Jonathan Peyton30419822017-05-12 18:01:32 +00001353 // Invoke the task routine and pass in relevant data.
1354 // Thunks generated by gcc take a different argument list.
1355 if (!discard) {
Andrey Churbanova756cb22017-11-16 10:45:07 +00001356 if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1357 taskdata->td_last_tied = current_task->td_last_tied;
1358 KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1359 }
Jonathan Peyton11dc82f2016-05-05 16:15:57 +00001360#if KMP_STATS_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001361 KMP_COUNT_BLOCK(TASK_executed);
1362 switch (KMP_GET_THREAD_STATE()) {
1363 case FORK_JOIN_BARRIER:
1364 KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1365 break;
1366 case PLAIN_BARRIER:
1367 KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1368 break;
1369 case TASKYIELD:
1370 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1371 break;
1372 case TASKWAIT:
1373 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1374 break;
1375 case TASKGROUP:
1376 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1377 break;
1378 default:
1379 KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1380 break;
1381 }
Jonathan Peyton11dc82f2016-05-05 16:15:57 +00001382#endif // KMP_STATS_ENABLED
Jim Cownie181b4bb2013-12-23 17:28:57 +00001383#endif // OMP_40_ENABLED
Jonathan Peytonadee8c52015-11-11 17:49:50 +00001384
Joachim Protze82e94a52017-11-01 10:08:30 +00001385// OMPT task begin
1386#if OMPT_SUPPORT
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001387 if (UNLIKELY(ompt_enabled.enabled))
Joachim Protze82e94a52017-11-01 10:08:30 +00001388 __ompt_task_start(task, current_task, gtid);
Jonathan Peytonadee8c52015-11-11 17:49:50 +00001389#endif
1390
Jim Cownie5e8470a2013-09-27 10:38:44 +00001391#ifdef KMP_GOMP_COMPAT
Jonathan Peyton30419822017-05-12 18:01:32 +00001392 if (taskdata->td_flags.native) {
1393 ((void (*)(void *))(*(task->routine)))(task->shareds);
1394 } else
Jim Cownie5e8470a2013-09-27 10:38:44 +00001395#endif /* KMP_GOMP_COMPAT */
Jonathan Peyton30419822017-05-12 18:01:32 +00001396 {
1397 (*(task->routine))(gtid, task);
1398 }
1399 KMP_POP_PARTITIONED_TIMER();
Jonathan Peytonadee8c52015-11-11 17:49:50 +00001400
Joachim Protze82e94a52017-11-01 10:08:30 +00001401#if OMPT_SUPPORT
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001402 if (UNLIKELY(ompt_enabled.enabled))
Joachim Protze82e94a52017-11-01 10:08:30 +00001403 __ompt_task_finish(task, current_task);
Jonathan Peytonadee8c52015-11-11 17:49:50 +00001404#endif
Jim Cownie181b4bb2013-12-23 17:28:57 +00001405#if OMP_40_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001406 }
Jim Cownie181b4bb2013-12-23 17:28:57 +00001407#endif // OMP_40_ENABLED
Jim Cownie5e8470a2013-09-27 10:38:44 +00001408
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001409#if OMPT_SUPPORT
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001410 if (UNLIKELY(ompt_enabled.enabled)) {
Jonathan Peyton30419822017-05-12 18:01:32 +00001411 thread->th.ompt_thread_info = oldInfo;
Joachim Protzec255ca72017-11-05 14:11:10 +00001412 taskdata->ompt_task_info.frame.exit_frame = NULL;
Jonathan Peyton30419822017-05-12 18:01:32 +00001413 }
Andrey Churbanovd7d088f2015-04-29 16:42:24 +00001414#endif
1415
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001416#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001417 // Proxy tasks are not handled by the runtime
1418 if (taskdata->td_flags.proxy != TASK_PROXY) {
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001419#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001420 ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
Joachim Protze82e94a52017-11-01 10:08:30 +00001421 __kmp_task_finish(gtid, task, current_task); // OMPT only if not discarded
Jonas Hahnfeld50fed042016-11-07 15:58:36 +00001422#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001423 }
Jonas Hahnfeld50fed042016-11-07 15:58:36 +00001424#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001425
Jonathan Peyton99ef4d02016-04-14 16:06:49 +00001426#if USE_ITT_BUILD && USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00001427 // Barrier imbalance - correct arrive time after the task finished
1428 if (__kmp_forkjoin_frames_mode == 3) {
1429 kmp_info_t *this_thr = __kmp_threads[gtid];
1430 if (this_thr->th.th_bar_arrive_time) {
1431 this_thr->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
Jonathan Peyton99ef4d02016-04-14 16:06:49 +00001432 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001433 }
Jonathan Peyton99ef4d02016-04-14 16:06:49 +00001434#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001435 KA_TRACE(
1436 30,
1437 ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1438 gtid, taskdata, current_task));
1439 return;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001440}
1441
Jim Cownie5e8470a2013-09-27 10:38:44 +00001442// __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1443//
1444// loc_ref: location of original task pragma (ignored)
1445// gtid: Global Thread ID of encountering thread
1446// new_task: task thunk allocated by __kmp_omp_task_alloc() for the ''new task''
1447// Returns:
// TASK_CURRENT_NOT_QUEUED (0) if it did not suspend and queue the current
// task to be resumed later.
// TASK_CURRENT_QUEUED (1) if it suspended and queued the current task to be
// resumed later.
1452kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1453 kmp_task_t *new_task) {
1454 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001455
Jonathan Peyton30419822017-05-12 18:01:32 +00001456 KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1457 loc_ref, new_taskdata));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001458
Joachim Protze82e94a52017-11-01 10:08:30 +00001459#if OMPT_SUPPORT
  kmp_taskdata_t *parent = NULL;
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001461 if (UNLIKELY(ompt_enabled.enabled)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00001462 parent = new_taskdata->td_parent;
1463 if (ompt_enabled.ompt_callback_task_create) {
1464 ompt_data_t task_data = ompt_data_none;
1465 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1466 parent ? &(parent->ompt_task_info.task_data) : &task_data,
1467 parent ? &(parent->ompt_task_info.frame) : NULL,
1468 &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1469 OMPT_GET_RETURN_ADDRESS(0));
1470 }
1471 }
1472#endif
1473
Jonathan Peyton30419822017-05-12 18:01:32 +00001474 /* Should we execute the new task or queue it? For now, let's just always try
1475 to queue it. If the queue fills up, then we'll execute it. */
Jim Cownie5e8470a2013-09-27 10:38:44 +00001476
Jonathan Peyton30419822017-05-12 18:01:32 +00001477 if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1478 { // Execute this task immediately
1479 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1480 new_taskdata->td_flags.task_serial = 1;
1481 __kmp_invoke_task(gtid, new_task, current_task);
1482 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00001483
Jonathan Peyton30419822017-05-12 18:01:32 +00001484 KA_TRACE(
1485 10,
1486 ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1487 "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
1488 gtid, loc_ref, new_taskdata));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001489
Jonathan Peyton30419822017-05-12 18:01:32 +00001490 ANNOTATE_HAPPENS_BEFORE(new_task);
Joachim Protze82e94a52017-11-01 10:08:30 +00001491#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
    parent->ompt_task_info.frame.enter_frame = NULL;
  }
1495#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001496 return TASK_CURRENT_NOT_QUEUED;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001497}
1498
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001499// __kmp_omp_task: Schedule a non-thread-switchable task for execution
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001500//
Jonathan Peyton30419822017-05-12 18:01:32 +00001501// gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmp_omp_task_alloc()
1503// serialize_immediate: if TRUE then if the task is executed immediately its
1504// execution will be serialized
1505// Returns:
// TASK_CURRENT_NOT_QUEUED (0) if it did not suspend and queue the current
// task to be resumed later.
// TASK_CURRENT_QUEUED (1) if it suspended and queued the current task to be
// resumed later.
1510kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1511 bool serialize_immediate) {
1512 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001513
Jonathan Peyton30419822017-05-12 18:01:32 +00001514/* Should we execute the new task or queue it? For now, let's just always try to
1515 queue it. If the queue fills up, then we'll execute it. */
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001516#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001517 if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1518 __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001519#else
Jonathan Peyton30419822017-05-12 18:01:32 +00001520 if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001521#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001522 { // Execute this task immediately
1523 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1524 if (serialize_immediate)
1525 new_taskdata->td_flags.task_serial = 1;
1526 __kmp_invoke_task(gtid, new_task, current_task);
1527 }
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001528
Jonathan Peyton30419822017-05-12 18:01:32 +00001529 ANNOTATE_HAPPENS_BEFORE(new_task);
1530 return TASK_CURRENT_NOT_QUEUED;
Jim Cownie4cc4bb42014-10-07 16:25:50 +00001531}
Jim Cownie5e8470a2013-09-27 10:38:44 +00001532
Jonathan Peyton30419822017-05-12 18:01:32 +00001533// __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1534// non-thread-switchable task from the parent thread only!
1535//
Jim Cownie5e8470a2013-09-27 10:38:44 +00001536// loc_ref: location of original task pragma (ignored)
1537// gtid: Global Thread ID of encountering thread
Jonathan Peyton30419822017-05-12 18:01:32 +00001538// new_task: non-thread-switchable task thunk allocated by
1539// __kmp_omp_task_alloc()
1540// Returns:
// TASK_CURRENT_NOT_QUEUED (0) if it did not suspend and queue the current
// task to be resumed later.
// TASK_CURRENT_QUEUED (1) if it suspended and queued the current task to be
// resumed later.
1545kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1546 kmp_task_t *new_task) {
1547 kmp_int32 res;
1548 KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001549
Joachim Protze82e94a52017-11-01 10:08:30 +00001550#if KMP_DEBUG || OMPT_SUPPORT
Jonathan Peyton30419822017-05-12 18:01:32 +00001551 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
Jonathan Peytond2eb3c72015-08-26 20:02:21 +00001552#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001553 KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1554 new_taskdata));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001555
Joachim Protze82e94a52017-11-01 10:08:30 +00001556#if OMPT_SUPPORT
1557 kmp_taskdata_t *parent = NULL;
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001558 if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00001559 OMPT_STORE_RETURN_ADDRESS(gtid);
1560 parent = new_taskdata->td_parent;
Joachim Protzec255ca72017-11-05 14:11:10 +00001561 if (!parent->ompt_task_info.frame.enter_frame)
1562 parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
Joachim Protze82e94a52017-11-01 10:08:30 +00001563 if (ompt_enabled.ompt_callback_task_create) {
1564 ompt_data_t task_data = ompt_data_none;
1565 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1566 parent ? &(parent->ompt_task_info.task_data) : &task_data,
1567 parent ? &(parent->ompt_task_info.frame) : NULL,
1568 &(new_taskdata->ompt_task_info.task_data),
1569 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1570 OMPT_LOAD_RETURN_ADDRESS(gtid));
1571 }
1572 }
1573#endif
1574
Jonathan Peyton30419822017-05-12 18:01:32 +00001575 res = __kmp_omp_task(gtid, new_task, true);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001576
Jonathan Peyton30419822017-05-12 18:01:32 +00001577 KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1578 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1579 gtid, loc_ref, new_taskdata));
Joachim Protze82e94a52017-11-01 10:08:30 +00001580#if OMPT_SUPPORT
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001581 if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
Joachim Protzec255ca72017-11-05 14:11:10 +00001582 parent->ompt_task_info.frame.enter_frame = NULL;
Joachim Protze82e94a52017-11-01 10:08:30 +00001583 }
1584#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001585 return res;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001586}
1587
Joachim Protze82e94a52017-11-01 10:08:30 +00001588template <bool ompt>
1589static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1590 void *frame_address,
1591 void *return_address) {
Jonathan Peyton30419822017-05-12 18:01:32 +00001592 kmp_taskdata_t *taskdata;
1593 kmp_info_t *thread;
1594 int thread_finished = FALSE;
1595 KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001596
Jonathan Peyton30419822017-05-12 18:01:32 +00001597 KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001598
Jonathan Peyton30419822017-05-12 18:01:32 +00001599 if (__kmp_tasking_mode != tskm_immediate_exec) {
Jonathan Peyton30419822017-05-12 18:01:32 +00001600 thread = __kmp_threads[gtid];
1601 taskdata = thread->th.th_current_task;
Jonathan Peyton61118492016-05-20 19:03:38 +00001602
Joachim Protze82e94a52017-11-01 10:08:30 +00001603#if OMPT_SUPPORT && OMPT_OPTIONAL
1604 ompt_data_t *my_task_data;
1605 ompt_data_t *my_parallel_data;
Jonathan Peyton61118492016-05-20 19:03:38 +00001606
Joachim Protze82e94a52017-11-01 10:08:30 +00001607 if (ompt) {
1608 my_task_data = &(taskdata->ompt_task_info.task_data);
1609 my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1610
Joachim Protzec255ca72017-11-05 14:11:10 +00001611 taskdata->ompt_task_info.frame.enter_frame = frame_address;
Joachim Protze82e94a52017-11-01 10:08:30 +00001612
1613 if (ompt_enabled.ompt_callback_sync_region) {
1614 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1615 ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1616 my_task_data, return_address);
1617 }
1618
1619 if (ompt_enabled.ompt_callback_sync_region_wait) {
1620 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1621 ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1622 my_task_data, return_address);
Jonathan Peyton30419822017-05-12 18:01:32 +00001623 }
1624 }
Joachim Protze82e94a52017-11-01 10:08:30 +00001625#endif // OMPT_SUPPORT && OMPT_OPTIONAL
Jonathan Peyton960ea2f2015-11-09 15:57:04 +00001626
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001629#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00001630// Note: These values are used by ITT events as well.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001631#endif /* USE_ITT_BUILD */
Jonathan Peyton30419822017-05-12 18:01:32 +00001632 taskdata->td_taskwait_counter += 1;
1633 taskdata->td_taskwait_ident = loc_ref;
1634 taskdata->td_taskwait_thread = gtid + 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001635
1636#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00001637 void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1638 if (itt_sync_obj != NULL)
1639 __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001640#endif /* USE_ITT_BUILD */
1641
Jonathan Peyton30419822017-05-12 18:01:32 +00001642 bool must_wait =
1643 !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
Andrey Churbanovdd313b02016-11-01 08:33:36 +00001644
Jonathan Peytondf6818b2016-06-14 17:57:47 +00001645#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001646 must_wait = must_wait || (thread->th.th_task_team != NULL &&
1647 thread->th.th_task_team->tt.tt_found_proxy_tasks);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00001648#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001649 if (must_wait) {
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00001650 kmp_flag_32 flag(
1651 RCAST(volatile kmp_uint32 *, &taskdata->td_incomplete_child_tasks),
1652 0U);
Jonathan Peyton30419822017-05-12 18:01:32 +00001653 while (TCR_4(taskdata->td_incomplete_child_tasks) != 0) {
1654 flag.execute_tasks(thread, gtid, FALSE,
1655 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1656 __kmp_task_stealing_constraint);
1657 }
1658 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00001659#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00001660 if (itt_sync_obj != NULL)
1661 __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001662#endif /* USE_ITT_BUILD */
1663
Jonathan Peyton30419822017-05-12 18:01:32 +00001664 // Debugger: The taskwait is completed. Location remains, but thread is
1665 // negated.
1666 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
Jonathan Peyton960ea2f2015-11-09 15:57:04 +00001667
Joachim Protze82e94a52017-11-01 10:08:30 +00001668#if OMPT_SUPPORT && OMPT_OPTIONAL
1669 if (ompt) {
1670 if (ompt_enabled.ompt_callback_sync_region_wait) {
1671 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1672 ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1673 my_task_data, return_address);
1674 }
1675 if (ompt_enabled.ompt_callback_sync_region) {
1676 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1677 ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1678 my_task_data, return_address);
Jonathan Peyton30419822017-05-12 18:01:32 +00001679 }
Joachim Protzec255ca72017-11-05 14:11:10 +00001680 taskdata->ompt_task_info.frame.enter_frame = NULL;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001681 }
Joachim Protze82e94a52017-11-01 10:08:30 +00001682#endif // OMPT_SUPPORT && OMPT_OPTIONAL
1683
Jonathan Peyton30419822017-05-12 18:01:32 +00001684 ANNOTATE_HAPPENS_AFTER(taskdata);
1685 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00001686
Jonathan Peyton30419822017-05-12 18:01:32 +00001687 KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1688 "returning TASK_CURRENT_NOT_QUEUED\n",
1689 gtid, taskdata));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001690
Jonathan Peyton30419822017-05-12 18:01:32 +00001691 return TASK_CURRENT_NOT_QUEUED;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001692}
1693
Joachim Protze82e94a52017-11-01 10:08:30 +00001694#if OMPT_SUPPORT
1695OMPT_NOINLINE
1696static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1697 void *frame_address,
1698 void *return_address) {
1699 return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1700 return_address);
1701}
1702#endif // OMPT_SUPPORT
1703
1704// __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1705// complete
1706kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1707#if OMPT_SUPPORT && OMPT_OPTIONAL
1708 if (UNLIKELY(ompt_enabled.enabled)) {
1709 OMPT_STORE_RETURN_ADDRESS(gtid);
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001710 return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(1),
1711 OMPT_LOAD_RETURN_ADDRESS(gtid));
Joachim Protze82e94a52017-11-01 10:08:30 +00001712 }
1713#endif
1714 return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
1715}
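
// For reference, a compiler typically lowers "#pragma omp taskwait" to a
// single call of this entry point (sketch, with loc illustrative):
//   __kmpc_omp_taskwait(&loc, __kmpc_global_thread_num(&loc));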
1716
Jim Cownie5e8470a2013-09-27 10:38:44 +00001717// __kmpc_omp_taskyield: switch to a different task
Jonathan Peyton30419822017-05-12 18:01:32 +00001718kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
1719 kmp_taskdata_t *taskdata;
1720 kmp_info_t *thread;
1721 int thread_finished = FALSE;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001722
Jonathan Peyton30419822017-05-12 18:01:32 +00001723 KMP_COUNT_BLOCK(OMP_TASKYIELD);
1724 KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001725
Jonathan Peyton30419822017-05-12 18:01:32 +00001726 KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
1727 gtid, loc_ref, end_part));
Jonathan Peyton45be4502015-08-11 21:36:41 +00001728
Jonathan Peyton30419822017-05-12 18:01:32 +00001729 if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
Jonathan Peyton30419822017-05-12 18:01:32 +00001730 thread = __kmp_threads[gtid];
1731 taskdata = thread->th.th_current_task;
1732// Should we model this as a task wait or not?
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001735#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00001736// Note: These values are used by ITT events as well.
Jim Cownie5e8470a2013-09-27 10:38:44 +00001737#endif /* USE_ITT_BUILD */
Jonathan Peyton30419822017-05-12 18:01:32 +00001738 taskdata->td_taskwait_counter += 1;
1739 taskdata->td_taskwait_ident = loc_ref;
1740 taskdata->td_taskwait_thread = gtid + 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001741
1742#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00001743 void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1744 if (itt_sync_obj != NULL)
1745 __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001746#endif /* USE_ITT_BUILD */
Jonathan Peyton30419822017-05-12 18:01:32 +00001747 if (!taskdata->td_flags.team_serial) {
1748 kmp_task_team_t *task_team = thread->th.th_task_team;
1749 if (task_team != NULL) {
1750 if (KMP_TASKING_ENABLED(task_team)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00001751#if OMPT_SUPPORT
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001752 if (UNLIKELY(ompt_enabled.enabled))
Joachim Protze82e94a52017-11-01 10:08:30 +00001753 thread->th.ompt_thread_info.ompt_task_yielded = 1;
1754#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001755 __kmp_execute_tasks_32(
1756 thread, gtid, NULL, FALSE,
1757 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1758 __kmp_task_stealing_constraint);
Joachim Protze82e94a52017-11-01 10:08:30 +00001759#if OMPT_SUPPORT
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001760 if (UNLIKELY(ompt_enabled.enabled))
Joachim Protze82e94a52017-11-01 10:08:30 +00001761 thread->th.ompt_thread_info.ompt_task_yielded = 0;
1762#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001763 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001764 }
1765 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00001766#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00001767 if (itt_sync_obj != NULL)
1768 __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
Jim Cownie5e8470a2013-09-27 10:38:44 +00001769#endif /* USE_ITT_BUILD */
1770
Jonathan Peyton30419822017-05-12 18:01:32 +00001771 // Debugger: The taskwait is completed. Location remains, but thread is
1772 // negated.
1773 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1774 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00001775
Jonathan Peyton30419822017-05-12 18:01:32 +00001776 KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
1777 "returning TASK_CURRENT_NOT_QUEUED\n",
1778 gtid, taskdata));
Jim Cownie5e8470a2013-09-27 10:38:44 +00001779
Jonathan Peyton30419822017-05-12 18:01:32 +00001780 return TASK_CURRENT_NOT_QUEUED;
Jim Cownie5e8470a2013-09-27 10:38:44 +00001781}
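
// For reference, "#pragma omp taskyield" is typically lowered to a single
// call of this entry point (sketch, with loc illustrative):
//   __kmpc_omp_taskyield(&loc, __kmpc_global_thread_num(&loc), /*end_part=*/0);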
1782
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001783// TODO: change to OMP_50_ENABLED, need to change build tools for this to work
1784#if OMP_45_ENABLED
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001785// Task Reduction implementation
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001786
1787typedef struct kmp_task_red_flags {
Jonathan Peyton30419822017-05-12 18:01:32 +00001788 unsigned lazy_priv : 1; // hint: (1) use lazy allocation (big objects)
1789 unsigned reserved31 : 31;
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001790} kmp_task_red_flags_t;
1791
1792// internal structure for reduction data item related info
1793typedef struct kmp_task_red_data {
Jonathan Peyton30419822017-05-12 18:01:32 +00001794 void *reduce_shar; // shared reduction item
1795 size_t reduce_size; // size of data item
1796 void *reduce_priv; // thread specific data
1797 void *reduce_pend; // end of private data for comparison op
1798 void *reduce_init; // data initialization routine
1799 void *reduce_fini; // data finalization routine
1800 void *reduce_comb; // data combiner routine
1801 kmp_task_red_flags_t flags; // flags for additional info from compiler
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001802} kmp_task_red_data_t;
1803
1804// structure sent us by compiler - one per reduction item
1805typedef struct kmp_task_red_input {
Jonathan Peyton30419822017-05-12 18:01:32 +00001806 void *reduce_shar; // shared reduction item
1807 size_t reduce_size; // size of data item
1808 void *reduce_init; // data initialization routine
1809 void *reduce_fini; // data finalization routine
1810 void *reduce_comb; // data combiner routine
1811 kmp_task_red_flags_t flags; // flags for additional info from compiler
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001812} kmp_task_red_input_t;
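
// A sketch of the input a compiler might build for
//   #pragma omp taskgroup task_reduction(+: sum)
// with an int sum; red_init and red_comb are illustrative names, not part of
// this file:
//
//   static void red_init(void *p) { *(int *)p = 0; }
//   static void red_comb(void *lhs, void *rhs) { *(int *)lhs += *(int *)rhs; }
//
//   kmp_task_red_input_t in;
//   in.reduce_shar = &sum;
//   in.reduce_size = sizeof(int);
//   in.reduce_init = (void *)red_init;
//   in.reduce_fini = NULL; // no finalizer needed for a plain int
//   in.reduce_comb = (void *)red_comb;
//   in.flags.lazy_priv = 0; // small item: allocate all copies eagerly
//   void *tg = __kmpc_task_reduction_init(gtid, /*num=*/1, &in);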
1813
1814/*!
1815@ingroup TASKING
1816@param gtid Global thread ID
1817@param num Number of data items to reduce
1818@param data Array of data for reduction
1819@return The taskgroup identifier
1820
1821Initialize task reduction for the taskgroup.
1822*/
Jonathan Peyton30419822017-05-12 18:01:32 +00001823void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
1824 kmp_info_t *thread = __kmp_threads[gtid];
1825 kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
1826 kmp_int32 nth = thread->th.th_team_nproc;
1827 kmp_task_red_input_t *input = (kmp_task_red_input_t *)data;
1828 kmp_task_red_data_t *arr;
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001829
Jonathan Peyton30419822017-05-12 18:01:32 +00001830 // check input data just in case
1831 KMP_ASSERT(tg != NULL);
1832 KMP_ASSERT(data != NULL);
1833 KMP_ASSERT(num > 0);
1834 if (nth == 1) {
1835 KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
1836 gtid, tg));
1837 return (void *)tg;
1838 }
1839 KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
1840 gtid, tg, num));
1841 arr = (kmp_task_red_data_t *)__kmp_thread_malloc(
1842 thread, num * sizeof(kmp_task_red_data_t));
1843 for (int i = 0; i < num; ++i) {
1844 void (*f_init)(void *) = (void (*)(void *))(input[i].reduce_init);
1845 size_t size = input[i].reduce_size - 1;
    // round the size up to a cache-line multiple per thread-specific item
1847 size += CACHE_LINE - size % CACHE_LINE;
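    // For example, assuming CACHE_LINE == 64: a 4-byte item is padded to 64
    // bytes, a 64-byte item stays at 64, and a 65-byte item grows to 128, so
    // each thread's private copy occupies its own cache line(s) and false
    // sharing between threads is avoided.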
1848 KMP_ASSERT(input[i].reduce_comb != NULL); // combiner is mandatory
1849 arr[i].reduce_shar = input[i].reduce_shar;
1850 arr[i].reduce_size = size;
1851 arr[i].reduce_init = input[i].reduce_init;
1852 arr[i].reduce_fini = input[i].reduce_fini;
1853 arr[i].reduce_comb = input[i].reduce_comb;
1854 arr[i].flags = input[i].flags;
1855 if (!input[i].flags.lazy_priv) {
1856 // allocate cache-line aligned block and fill it with zeros
1857 arr[i].reduce_priv = __kmp_allocate(nth * size);
1858 arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
1859 if (f_init != NULL) {
1860 // initialize thread-specific items
1861 for (int j = 0; j < nth; ++j) {
1862 f_init((char *)(arr[i].reduce_priv) + j * size);
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001863 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001864 }
1865 } else {
1866 // only allocate space for pointers now,
1867 // objects will be lazily allocated/initialized once requested
1868 arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001869 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001870 }
1871 tg->reduce_data = (void *)arr;
1872 tg->reduce_num_data = num;
1873 return (void *)tg;
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001874}
1875
1876/*!
1877@ingroup TASKING
1878@param gtid Global thread ID
1879@param tskgrp The taskgroup ID (optional)
1880@param data Shared location of the item
1881@return The pointer to per-thread data
1882
1883Get thread-specific location of data item
1884*/
Jonathan Peyton30419822017-05-12 18:01:32 +00001885void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
1886 kmp_info_t *thread = __kmp_threads[gtid];
1887 kmp_int32 nth = thread->th.th_team_nproc;
1888 if (nth == 1)
1889 return data; // nothing to do
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001890
Jonathan Peyton30419822017-05-12 18:01:32 +00001891 kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
1892 if (tg == NULL)
1893 tg = thread->th.th_current_task->td_taskgroup;
1894 KMP_ASSERT(tg != NULL);
1895 kmp_task_red_data_t *arr = (kmp_task_red_data_t *)(tg->reduce_data);
1896 kmp_int32 num = tg->reduce_num_data;
1897 kmp_int32 tid = thread->th.th_info.ds.ds_tid;
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001898
Jonathan Peyton30419822017-05-12 18:01:32 +00001899 KMP_ASSERT(data != NULL);
1900 while (tg != NULL) {
1901 for (int i = 0; i < num; ++i) {
1902 if (!arr[i].flags.lazy_priv) {
1903 if (data == arr[i].reduce_shar ||
1904 (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
1905 return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
1906 } else {
1907 // check shared location first
1908 void **p_priv = (void **)(arr[i].reduce_priv);
1909 if (data == arr[i].reduce_shar)
1910 goto found;
        // check if we were passed a thread-specific location as the parameter
1912 for (int j = 0; j < nth; ++j)
1913 if (data == p_priv[j])
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001914 goto found;
Jonathan Peyton30419822017-05-12 18:01:32 +00001915 continue; // not found, continue search
1916 found:
1917 if (p_priv[tid] == NULL) {
1918 // allocate thread specific object lazily
1919 void (*f_init)(void *) = (void (*)(void *))(arr[i].reduce_init);
1920 p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
1921 if (f_init != NULL) {
1922 f_init(p_priv[tid]);
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001923 }
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001924 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001925 return p_priv[tid];
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001926 }
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001927 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001928 tg = tg->parent;
1929 arr = (kmp_task_red_data_t *)(tg->reduce_data);
1930 num = tg->reduce_num_data;
1931 }
1932 KMP_ASSERT2(0, "Unknown task reduction item");
1933 return NULL; // ERROR, this line never executed
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001934}
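
// Inside each participating task body, compiler-generated code then fetches
// the thread-specific copy before updating it (sketch; sum and tg as in the
// example above, local_contribution illustrative):
//   int *priv = (int *)__kmpc_task_reduction_get_th_data(gtid, tg, &sum);
//   *priv += local_contribution; // combined back into sum at taskgroup end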
1935
1936// Finalize task reduction.
1937// Called from __kmpc_end_taskgroup()
Jonathan Peyton30419822017-05-12 18:01:32 +00001938static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
1939 kmp_int32 nth = th->th.th_team_nproc;
1940 KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
1941 kmp_task_red_data_t *arr = (kmp_task_red_data_t *)tg->reduce_data;
1942 kmp_int32 num = tg->reduce_num_data;
1943 for (int i = 0; i < num; ++i) {
1944 void *sh_data = arr[i].reduce_shar;
1945 void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
1946 void (*f_comb)(void *, void *) =
1947 (void (*)(void *, void *))(arr[i].reduce_comb);
1948 if (!arr[i].flags.lazy_priv) {
1949 void *pr_data = arr[i].reduce_priv;
1950 size_t size = arr[i].reduce_size;
1951 for (int j = 0; j < nth; ++j) {
1952 void *priv_data = (char *)pr_data + j * size;
1953 f_comb(sh_data, priv_data); // combine results
1954 if (f_fini)
1955 f_fini(priv_data); // finalize if needed
1956 }
1957 } else {
1958 void **pr_data = (void **)(arr[i].reduce_priv);
1959 for (int j = 0; j < nth; ++j) {
1960 if (pr_data[j] != NULL) {
1961 f_comb(sh_data, pr_data[j]); // combine results
1962 if (f_fini)
1963 f_fini(pr_data[j]); // finalize if needed
1964 __kmp_free(pr_data[j]);
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001965 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001966 }
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001967 }
Jonathan Peyton30419822017-05-12 18:01:32 +00001968 __kmp_free(arr[i].reduce_priv);
1969 }
1970 __kmp_thread_free(th, arr);
1971 tg->reduce_data = NULL;
1972 tg->reduce_num_data = 0;
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001973}
1974#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00001975
1976#if OMP_40_ENABLED
Jim Cownie5e8470a2013-09-27 10:38:44 +00001977// __kmpc_taskgroup: Start a new taskgroup
Jonathan Peyton30419822017-05-12 18:01:32 +00001978void __kmpc_taskgroup(ident_t *loc, int gtid) {
1979 kmp_info_t *thread = __kmp_threads[gtid];
1980 kmp_taskdata_t *taskdata = thread->th.th_current_task;
1981 kmp_taskgroup_t *tg_new =
1982 (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
1983 KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
1984 tg_new->count = 0;
1985 tg_new->cancel_request = cancel_noreq;
1986 tg_new->parent = taskdata->td_taskgroup;
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001987// TODO: change to OMP_50_ENABLED, need to change build tools for this to work
1988#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00001989 tg_new->reduce_data = NULL;
1990 tg_new->reduce_num_data = 0;
Andrey Churbanov72ba2102017-02-16 17:49:49 +00001991#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00001992 taskdata->td_taskgroup = tg_new;
Joachim Protze82e94a52017-11-01 10:08:30 +00001993
1994#if OMPT_SUPPORT && OMPT_OPTIONAL
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00001995 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00001996 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
1997 if (!codeptr)
1998 codeptr = OMPT_GET_RETURN_ADDRESS(0);
1999 kmp_team_t *team = thread->th.th_team;
2000 ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2001 // FIXME: I think this is wrong for lwt!
2002 ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2003
2004 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2005 ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2006 &(my_task_data), codeptr);
2007 }
2008#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00002009}
2010
Jim Cownie5e8470a2013-09-27 10:38:44 +00002011// __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2012// and its descendants are complete
Jonathan Peyton30419822017-05-12 18:01:32 +00002013void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2014 kmp_info_t *thread = __kmp_threads[gtid];
2015 kmp_taskdata_t *taskdata = thread->th.th_current_task;
2016 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2017 int thread_finished = FALSE;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002018
Joachim Protze82e94a52017-11-01 10:08:30 +00002019#if OMPT_SUPPORT && OMPT_OPTIONAL
2020 kmp_team_t *team;
2021 ompt_data_t my_task_data;
2022 ompt_data_t my_parallel_data;
2023 void *codeptr;
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00002024 if (UNLIKELY(ompt_enabled.enabled)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00002025 team = thread->th.th_team;
2026 my_task_data = taskdata->ompt_task_info.task_data;
2027 // FIXME: I think this is wrong for lwt!
2028 my_parallel_data = team->t.ompt_team_info.parallel_data;
2029 codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2030 if (!codeptr)
2031 codeptr = OMPT_GET_RETURN_ADDRESS(0);
2032 }
2033#endif
2034
Jonathan Peyton30419822017-05-12 18:01:32 +00002035 KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2036 KMP_DEBUG_ASSERT(taskgroup != NULL);
2037 KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002038
Jonathan Peyton30419822017-05-12 18:01:32 +00002039 if (__kmp_tasking_mode != tskm_immediate_exec) {
    // mark the task as waiting (on the taskgroup, not on a barrier)
2041 taskdata->td_taskwait_counter += 1;
2042 taskdata->td_taskwait_ident = loc;
2043 taskdata->td_taskwait_thread = gtid + 1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002044#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00002045 // For ITT the taskgroup wait is similar to taskwait until we need to
2046 // distinguish them
2047 void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
2048 if (itt_sync_obj != NULL)
2049 __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002050#endif /* USE_ITT_BUILD */
2051
Joachim Protze82e94a52017-11-01 10:08:30 +00002052#if OMPT_SUPPORT && OMPT_OPTIONAL
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00002053 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00002054 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2055 ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2056 &(my_task_data), codeptr);
2057 }
2058#endif
2059
Jonathan Peytondf6818b2016-06-14 17:57:47 +00002060#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00002061 if (!taskdata->td_flags.team_serial ||
2062 (thread->th.th_task_team != NULL &&
2063 thread->th.th_task_team->tt.tt_found_proxy_tasks))
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00002064#else
Jonathan Peyton30419822017-05-12 18:01:32 +00002065 if (!taskdata->td_flags.team_serial)
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00002066#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00002067 {
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002068 kmp_flag_32 flag(RCAST(kmp_uint32 *, &taskgroup->count), 0U);
Jonathan Peyton30419822017-05-12 18:01:32 +00002069 while (TCR_4(taskgroup->count) != 0) {
2070 flag.execute_tasks(thread, gtid, FALSE,
2071 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2072 __kmp_task_stealing_constraint);
2073 }
2074 }
Andrey Churbanova756cb22017-11-16 10:45:07 +00002075 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
Jim Cownie5e8470a2013-09-27 10:38:44 +00002076
Joachim Protze82e94a52017-11-01 10:08:30 +00002077#if OMPT_SUPPORT && OMPT_OPTIONAL
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00002078 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00002079 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2080 ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2081 &(my_task_data), codeptr);
2082 }
2083#endif
2084
Jim Cownie5e8470a2013-09-27 10:38:44 +00002085#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00002086 if (itt_sync_obj != NULL)
2087 __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002088#endif /* USE_ITT_BUILD */
Jonathan Peyton30419822017-05-12 18:01:32 +00002089 }
2090 KMP_DEBUG_ASSERT(taskgroup->count == 0);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002091
Andrey Churbanov72ba2102017-02-16 17:49:49 +00002092// TODO: change to OMP_50_ENABLED, need to change build tools for this to work
2093#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00002094 if (taskgroup->reduce_data != NULL) // need to reduce?
2095 __kmp_task_reduction_fini(thread, taskgroup);
Andrey Churbanov72ba2102017-02-16 17:49:49 +00002096#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00002097 // Restore parent taskgroup for the current task
2098 taskdata->td_taskgroup = taskgroup->parent;
2099 __kmp_thread_free(thread, taskgroup);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002100
Jonathan Peyton30419822017-05-12 18:01:32 +00002101 KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2102 gtid, taskdata));
2103 ANNOTATE_HAPPENS_AFTER(taskdata);
Joachim Protze82e94a52017-11-01 10:08:30 +00002104
2105#if OMPT_SUPPORT && OMPT_OPTIONAL
Jonas Hahnfeldf0a1c652017-11-03 18:28:19 +00002106 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
Joachim Protze82e94a52017-11-01 10:08:30 +00002107 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2108 ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2109 &(my_task_data), codeptr);
2110 }
2111#endif
Jim Cownie5e8470a2013-09-27 10:38:44 +00002112}
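
// For reference, a "#pragma omp taskgroup" region is typically lowered to a
// bracketing pair of calls around its body (sketch):
//   __kmpc_taskgroup(&loc, gtid);
//   ... create tasks ...
//   __kmpc_end_taskgroup(&loc, gtid);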
2113#endif
2114
Jim Cownie5e8470a2013-09-27 10:38:44 +00002115// __kmp_remove_my_task: remove a task from my own deque
Jonathan Peyton30419822017-05-12 18:01:32 +00002116static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2117 kmp_task_team_t *task_team,
2118 kmp_int32 is_constrained) {
2119 kmp_task_t *task;
2120 kmp_taskdata_t *taskdata;
2121 kmp_thread_data_t *thread_data;
2122 kmp_uint32 tail;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002123
Jonathan Peyton30419822017-05-12 18:01:32 +00002124 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2125 KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2126 NULL); // Caller should check this condition
Jim Cownie5e8470a2013-09-27 10:38:44 +00002127
Jonathan Peyton30419822017-05-12 18:01:32 +00002128 thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
Jim Cownie5e8470a2013-09-27 10:38:44 +00002129
Jonathan Peyton30419822017-05-12 18:01:32 +00002130 KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2131 gtid, thread_data->td.td_deque_ntasks,
2132 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002133
Jonathan Peyton30419822017-05-12 18:01:32 +00002134 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2135 KA_TRACE(10,
2136 ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2137 "ntasks=%d head=%u tail=%u\n",
2138 gtid, thread_data->td.td_deque_ntasks,
2139 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2140 return NULL;
2141 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002142
Jonathan Peyton30419822017-05-12 18:01:32 +00002143 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2144
2145 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2146 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2147 KA_TRACE(10,
2148 ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2149 "ntasks=%d head=%u tail=%u\n",
2150 gtid, thread_data->td.td_deque_ntasks,
2151 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2152 return NULL;
2153 }
2154
2155 tail = (thread_data->td.td_deque_tail - 1) &
2156 TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2157 taskdata = thread_data->td.td_deque[tail];
2158
2159 if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
Andrey Churbanova756cb22017-11-16 10:45:07 +00002160 // we need to check if the candidate obeys the task scheduling constraint (TSC)
2161 // only a descendant of all deferred tied tasks can be scheduled; checking
2162 // the last one is enough, as it in turn is the descendant of all the others
2163 kmp_taskdata_t *current = thread->th.th_current_task->td_last_tied;
2164 KMP_DEBUG_ASSERT(current != NULL);
2165 // check if last tied task is not suspended on barrier
2166 if (current->td_flags.tasktype == TASK_EXPLICIT ||
2167 current->td_taskwait_thread > 0) { // <= 0 on barrier
2168 kmp_int32 level = current->td_level;
2169 kmp_taskdata_t *parent = taskdata->td_parent;
2170 while (parent != current && parent->td_level > level) {
2171 parent = parent->td_parent; // check generation up to the level of the
2172 // current task
2173 KMP_DEBUG_ASSERT(parent != NULL);
2174 }
2175 if (parent != current) {
2176 // The TSC does not allow stealing the victim task
2177 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2178 KA_TRACE(10, ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: "
2179 "ntasks=%d head=%u tail=%u\n",
2180 gtid, thread_data->td.td_deque_ntasks,
2181 thread_data->td.td_deque_head,
2182 thread_data->td.td_deque_tail));
2183 return NULL;
2184 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002185 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002186 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002187
Jonathan Peyton30419822017-05-12 18:01:32 +00002188 thread_data->td.td_deque_tail = tail;
2189 TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002190
Jonathan Peyton30419822017-05-12 18:01:32 +00002191 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002192
Jonathan Peyton30419822017-05-12 18:01:32 +00002193 KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
2194 "ntasks=%d head=%u tail=%u\n",
2195 gtid, taskdata, thread_data->td.td_deque_ntasks,
2196 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002197
Jonathan Peyton30419822017-05-12 18:01:32 +00002198 task = KMP_TASKDATA_TO_TASK(taskdata);
2199 return task;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002200}
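// Illustration of the tail arithmetic above (a sketch with assumed numbers,
// not code in this file): deque sizes are kept powers of two, so
// TASK_DEQUE_MASK == size - 1 and the decrement wraps for free, e.g. with
// size = 256:
//
//   tail = (0 - 1) & 255; // == 255, wraps to the last slot
//   tail = (7 - 1) & 255; // == 6, the ordinary non-wrapping case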
2201
Jim Cownie5e8470a2013-09-27 10:38:44 +00002202// __kmp_steal_task: remove a task from another thread's deque
2203// Assumes the calling thread has already checked that the task_team's
2204// thread_data exists before calling this routine.
Andrey Churbanova756cb22017-11-16 10:45:07 +00002205static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002206 kmp_task_team_t *task_team,
2207 volatile kmp_int32 *unfinished_threads,
2208 int *thread_finished,
2209 kmp_int32 is_constrained) {
Jonathan Peyton30419822017-05-12 18:01:32 +00002210 kmp_task_t *task;
2211 kmp_taskdata_t *taskdata;
Andrey Churbanova756cb22017-11-16 10:45:07 +00002212 kmp_taskdata_t *current;
Jonathan Peyton30419822017-05-12 18:01:32 +00002213 kmp_thread_data_t *victim_td, *threads_data;
Andrey Churbanova756cb22017-11-16 10:45:07 +00002214 kmp_int32 level, target;
Jonathan Peyton30419822017-05-12 18:01:32 +00002215 kmp_int32 victim_tid;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002216
Jonathan Peyton30419822017-05-12 18:01:32 +00002217 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002218
Jonathan Peyton30419822017-05-12 18:01:32 +00002219 threads_data = task_team->tt.tt_threads_data;
2220 KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
Jim Cownie5e8470a2013-09-27 10:38:44 +00002221
Andrey Churbanova756cb22017-11-16 10:45:07 +00002222 victim_tid = victim_thr->th.th_info.ds.ds_tid;
Jonathan Peyton30419822017-05-12 18:01:32 +00002223 victim_td = &threads_data[victim_tid];
Jim Cownie5e8470a2013-09-27 10:38:44 +00002224
Jonathan Peyton30419822017-05-12 18:01:32 +00002225 KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
Andrey Churbanova756cb22017-11-16 10:45:07 +00002226 "task_team=%p ntasks=%d head=%u tail=%u\n",
2227 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
Jonathan Peyton30419822017-05-12 18:01:32 +00002228 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2229 victim_td->td.td_deque_tail));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002230
Andrey Churbanova756cb22017-11-16 10:45:07 +00002231 if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
Jonathan Peyton30419822017-05-12 18:01:32 +00002232 KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
Andrey Churbanova756cb22017-11-16 10:45:07 +00002233 "task_team=%p ntasks=%d head=%u tail=%u\n",
2234 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
Jim Cownie5e8470a2013-09-27 10:38:44 +00002235 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
Jonathan Peyton30419822017-05-12 18:01:32 +00002236 victim_td->td.td_deque_tail));
2237 return NULL;
2238 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002239
Jonathan Peyton30419822017-05-12 18:01:32 +00002240 __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2241
Andrey Churbanova756cb22017-11-16 10:45:07 +00002242 int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
Jonathan Peyton30419822017-05-12 18:01:32 +00002243 // Check again after we acquire the lock
Andrey Churbanova756cb22017-11-16 10:45:07 +00002244 if (ntasks == 0) {
Jonathan Peyton30419822017-05-12 18:01:32 +00002245 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2246 KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
Andrey Churbanova756cb22017-11-16 10:45:07 +00002247 "task_team=%p ntasks=%d head=%u tail=%u\n",
2248 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2249 victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
Jonathan Peyton30419822017-05-12 18:01:32 +00002250 return NULL;
2251 }
2252
2253 KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2254
2255 taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
Andrey Churbanova756cb22017-11-16 10:45:07 +00002256 if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
2257 // we need to check if the candidate obeys the task scheduling constraint (TSC)
2258 // only a descendant of all deferred tied tasks can be scheduled; checking
2259 // the last one is enough, as it in turn is the descendant of all the others
2260 current = __kmp_threads[gtid]->th.th_current_task->td_last_tied;
2261 KMP_DEBUG_ASSERT(current != NULL);
2262 // check if last tied task is not suspended on barrier
2263 if (current->td_flags.tasktype == TASK_EXPLICIT ||
2264 current->td_taskwait_thread > 0) { // <= 0 on barrier
2265 level = current->td_level;
2266 kmp_taskdata_t *parent = taskdata->td_parent;
2267 while (parent != current && parent->td_level > level) {
2268 parent = parent->td_parent; // check generation up to the level of the
2269 // current task
2270 KMP_DEBUG_ASSERT(parent != NULL);
2271 }
2272 if (parent != current) {
2273 if (!task_team->tt.tt_untied_task_encountered) {
2274 // The TSC does not allow stealing the victim task
2275 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2276 KA_TRACE(10,
2277 ("__kmp_steal_task(exit #3): T#%d could not steal from "
2278 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2279 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2280 victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2281 return NULL;
2282 }
2283 taskdata = NULL; // will check other tasks in victim's deque
2284 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002285 }
Andrey Churbanova756cb22017-11-16 10:45:07 +00002286 }
2287 if (taskdata != NULL) {
2288 // Bump head pointer and Wrap.
2289 victim_td->td.td_deque_head =
2290 (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2291 } else {
2292 int i;
2293 // walk through victim's deque trying to steal any task
2294 target = victim_td->td.td_deque_head;
2295 for (i = 1; i < ntasks; ++i) {
2296 target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2297 taskdata = victim_td->td.td_deque[target];
2298 if (taskdata->td_flags.tiedness == TASK_TIED) {
2299 // check if the candidate obeys the TSC
2300 kmp_taskdata_t *parent = taskdata->td_parent;
2301 // check generation up to the level of the current task
2302 while (parent != current && parent->td_level > level) {
2303 parent = parent->td_parent;
2304 KMP_DEBUG_ASSERT(parent != NULL);
2305 }
2306 if (parent != current) {
2307 // The TSC does not allow stealing the candidate
2308 taskdata = NULL;
2309 continue;
2310 } else {
2311 // found victim tied task
2312 break;
2313 }
2314 } else {
2315 // found victim untied task
2316 break;
2317 }
2318 }
2319 if (taskdata == NULL) {
2320 // No appropriate candidate to steal found
Jonathan Peyton30419822017-05-12 18:01:32 +00002321 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
Andrey Churbanova756cb22017-11-16 10:45:07 +00002322 KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2323 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2324 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
Jonathan Peyton30419822017-05-12 18:01:32 +00002325 victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2326 return NULL;
2327 }
Andrey Churbanova756cb22017-11-16 10:45:07 +00002328 int prev = target;
2329 for (i = i + 1; i < ntasks; ++i) {
2330 // shift remaining tasks in the deque left by 1
2331 target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2332 victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2333 prev = target;
2334 }
2335 KMP_DEBUG_ASSERT(victim_td->td.td_deque_tail ==
2336 ((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
2337 victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
Jonathan Peyton30419822017-05-12 18:01:32 +00002338 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002339 if (*thread_finished) {
2340 // We need to un-mark this victim as a finished victim. This must be done
2341 // before releasing the lock, or else other threads (starting with the
2342 // master victim) might be prematurely released from the barrier!!!
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002343 kmp_int32 count;
Jonathan Peyton30419822017-05-12 18:01:32 +00002344
Andrey Churbanov5ba90c72017-07-17 09:03:14 +00002345 count = KMP_TEST_THEN_INC32(unfinished_threads);
Jonathan Peyton30419822017-05-12 18:01:32 +00002346
2347 KA_TRACE(
2348 20,
2349 ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2350 gtid, count + 1, task_team));
2351
2352 *thread_finished = FALSE;
2353 }
Andrey Churbanova756cb22017-11-16 10:45:07 +00002354 TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
Jonathan Peyton30419822017-05-12 18:01:32 +00002355
Jonathan Peyton30419822017-05-12 18:01:32 +00002356 __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2357
2358 KMP_COUNT_BLOCK(TASK_stolen);
Andrey Churbanova756cb22017-11-16 10:45:07 +00002359 KA_TRACE(10,
2360 ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2361 "task_team=%p ntasks=%d head=%u tail=%u\n",
2362 gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2363 ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
Jonathan Peyton30419822017-05-12 18:01:32 +00002364
2365 task = KMP_TASKDATA_TO_TASK(taskdata);
2366 return task;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002367}
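// Note on the two removal routines above: __kmp_remove_my_task pops from the
// tail of the owner's deque, while __kmp_steal_task normally takes from the
// head. This is the classic work-stealing split: the owner keeps running the
// newest (cache-hot) tasks while thieves drain the oldest ones.
//
//   head -> [ oldest ... newest ] <- tail
//   (thieves)              (owner)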
2368
Jonathan Peyton30419822017-05-12 18:01:32 +00002369// __kmp_execute_tasks_template: Choose and execute tasks until either the
2370// condition is satisfied (return true) or there are none left (return false).
2371//
Jim Cownie5e8470a2013-09-27 10:38:44 +00002372// final_spin is TRUE if this is the spin at the release barrier.
2373// thread_finished indicates whether the thread is finished executing all
2374// the tasks it has on its deque, and is at the release barrier.
2375// spinner is the location on which to spin.
2376// spinner == NULL means only execute a single task and return.
2377// checker is the value to check to terminate the spin.
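// Rough shape of the template below, as a reading aid only (ITT hooks and
// proxy-task details elided):
//
//   while (1) {                       // outer: retry for target/proxy work
//     while (1) {                     // inner: find and run one task
//       task = pop own deque, else steal (last victim, else random victim);
//       if (task == NULL) break;
//       __kmp_invoke_task(gtid, task, current_task);
//       if (flag && !final_spin && flag->done_check()) return TRUE;
//     }
//     if (final_spin) { decrement *unfinished_threads once; re-check flag; }
//     if (thread->th.th_task_team == NULL) return FALSE;
//   }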
Jim Cownie4cc4bb42014-10-07 16:25:50 +00002378template <class C>
Jonathan Peyton30419822017-05-12 18:01:32 +00002379static inline int __kmp_execute_tasks_template(
2380 kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2381 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2382 kmp_int32 is_constrained) {
2383 kmp_task_team_t *task_team = thread->th.th_task_team;
2384 kmp_thread_data_t *threads_data;
2385 kmp_task_t *task;
2386 kmp_info_t *other_thread;
2387 kmp_taskdata_t *current_task = thread->th.th_current_task;
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002388 volatile kmp_int32 *unfinished_threads;
Andrey Churbanova756cb22017-11-16 10:45:07 +00002389 kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
Jonathan Peyton30419822017-05-12 18:01:32 +00002390 tid = thread->th.th_info.ds.ds_tid;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002391
Jonathan Peyton30419822017-05-12 18:01:32 +00002392 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2393 KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002394
Jonathan Peyton30419822017-05-12 18:01:32 +00002395 if (task_team == NULL)
2396 return FALSE;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002397
Jonathan Peyton30419822017-05-12 18:01:32 +00002398 KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2399 "*thread_finished=%d\n",
2400 gtid, final_spin, *thread_finished));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002401
Jonathan Peyton30419822017-05-12 18:01:32 +00002402 thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2403 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2404 KMP_DEBUG_ASSERT(threads_data != NULL);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002405
Jonathan Peyton30419822017-05-12 18:01:32 +00002406 nthreads = task_team->tt.tt_nproc;
2407 unfinished_threads = &(task_team->tt.tt_unfinished_threads);
Jonathan Peytondf6818b2016-06-14 17:57:47 +00002408#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00002409 KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00002410#else
Jonathan Peyton30419822017-05-12 18:01:32 +00002411 KMP_DEBUG_ASSERT(nthreads > 1);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00002412#endif
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002413 KMP_DEBUG_ASSERT(TCR_4(*unfinished_threads) >= 0);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002414
Jonathan Peyton30419822017-05-12 18:01:32 +00002415 while (1) { // Outer loop keeps trying to find tasks in case of single thread
2416 // getting tasks from target constructs
2417 while (1) { // Inner loop to find a task and execute it
2418 task = NULL;
2419 if (use_own_tasks) { // check on own queue first
2420 task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2421 }
2422 if ((task == NULL) && (nthreads > 1)) { // Steal a task
2423 int asleep = 1;
2424 use_own_tasks = 0;
2425 // Try to steal from the last place I stole from successfully.
Andrey Churbanova756cb22017-11-16 10:45:07 +00002426 if (victim_tid == -2) { // haven't stolen anything yet
2427 victim_tid = threads_data[tid].td.td_deque_last_stolen;
2428 if (victim_tid !=
Jonathan Peyton30419822017-05-12 18:01:32 +00002429 -1) // if we have a last-stolen-from victim, get its thread
Andrey Churbanova756cb22017-11-16 10:45:07 +00002430 other_thread = threads_data[victim_tid].td.td_thr;
Jonathan Peyton30419822017-05-12 18:01:32 +00002431 }
Andrey Churbanova756cb22017-11-16 10:45:07 +00002432 if (victim_tid != -1) { // found last victim
Jonathan Peyton30419822017-05-12 18:01:32 +00002433 asleep = 0;
2434 } else if (!new_victim) { // no recent steals and we haven't already
2435 // used a new victim; select a random thread
2436 do { // Find a different thread to steal work from.
2437 // Pick a random thread. Initial plan was to cycle through all the
2438 // threads, and only return if we tried to steal from every thread,
2439 // and failed. Arch says that's not such a great idea.
Andrey Churbanova756cb22017-11-16 10:45:07 +00002440 victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2441 if (victim_tid >= tid) {
2442 ++victim_tid; // Adjusts random distribution to exclude self
Jim Cownie5e8470a2013-09-27 10:38:44 +00002443 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002444 // Found a potential victim
Andrey Churbanova756cb22017-11-16 10:45:07 +00002445 other_thread = threads_data[victim_tid].td.td_thr;
Jonathan Peyton30419822017-05-12 18:01:32 +00002446 // There is a slight chance that __kmp_enable_tasking() did not wake
2447 // up all threads waiting at the barrier. If victim is sleeping,
2448 // then wake it up. Since we were going to pay the cache miss
2449 // penalty for referencing another thread's kmp_info_t struct
2450 // anyway, the check shouldn't cost too much performance at this point. In
2452 // extra barrier mode, tasks do not sleep at the separate tasking
2453 // barrier, so this isn't a problem.
2454 asleep = 0;
2455 if ((__kmp_tasking_mode == tskm_task_teams) &&
2456 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002457 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2458 NULL)) {
Jonathan Peyton30419822017-05-12 18:01:32 +00002459 asleep = 1;
2460 __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2461 other_thread->th.th_sleep_loc);
2462 // A sleeping thread should not have any tasks on its queue.
2463 // There is a slight possibility that it resumes, steals a task
2464 // from another thread, which spawns more tasks, all in the time
2465 // that it takes this thread to check => don't write an assertion
2466 // that the victim's queue is empty. Try stealing from a
2467 // different thread.
Jonathan Peytonc4c722a2016-06-09 18:27:03 +00002468 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002469 } while (asleep);
2470 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002471
Jonathan Peyton30419822017-05-12 18:01:32 +00002472 if (!asleep) {
2473 // We have a victim to try to steal from
2474 task = __kmp_steal_task(other_thread, gtid, task_team,
2475 unfinished_threads, thread_finished,
2476 is_constrained);
2477 }
2478 if (task != NULL) { // set last stolen to victim
Andrey Churbanova756cb22017-11-16 10:45:07 +00002479 if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2480 threads_data[tid].td.td_deque_last_stolen = victim_tid;
Jonathan Peyton30419822017-05-12 18:01:32 +00002481 // The pre-refactored code did not try more than 1 successful new
2482 // victim, unless the last one generated more local tasks;
2483 // new_victim keeps track of this
2484 new_victim = 1;
2485 }
2486 } else { // No tasks found; unset last_stolen
2487 KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
Andrey Churbanova756cb22017-11-16 10:45:07 +00002488 victim_tid = -2; // no successful victim found
Jonathan Peyton30419822017-05-12 18:01:32 +00002489 }
2490 }
Jonathan Peytone8104ad2015-06-08 18:56:33 +00002491
Jonathan Peyton30419822017-05-12 18:01:32 +00002492 if (task == NULL) // break out of tasking loop
2493 break;
2494
2495// Found a task; execute it
Jim Cownie5e8470a2013-09-27 10:38:44 +00002496#if USE_ITT_BUILD && USE_ITT_NOTIFY
Jonathan Peyton30419822017-05-12 18:01:32 +00002497 if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2498 if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2499 // get the object reliably
2500 itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2501 }
2502 __kmp_itt_task_starting(itt_sync_obj);
2503 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002504#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
Jonathan Peyton30419822017-05-12 18:01:32 +00002505 __kmp_invoke_task(gtid, task, current_task);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002506#if USE_ITT_BUILD
Jonathan Peyton30419822017-05-12 18:01:32 +00002507 if (itt_sync_obj != NULL)
2508 __kmp_itt_task_finished(itt_sync_obj);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002509#endif /* USE_ITT_BUILD */
Jonathan Peyton30419822017-05-12 18:01:32 +00002510 // If this thread is only partway through the barrier and the condition is
2511 // met, then return now, so that the barrier gather/release pattern can
2512 // proceed. If this thread is in the last spin loop in the barrier,
2513 // waiting to be released, we know that the termination condition will not
2514 // be satisfied, so don't waste any cycles checking it.
2515 if (flag == NULL || (!final_spin && flag->done_check())) {
2516 KA_TRACE(
2517 15,
2518 ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2519 gtid));
2520 return TRUE;
2521 }
2522 if (thread->th.th_task_team == NULL) {
2523 break;
2524 }
2525 // Yield before executing next task
2526 KMP_YIELD(__kmp_library == library_throughput);
2527 // If execution of a stolen task results in more tasks being placed on our
2528 // run queue, reset use_own_tasks
2529 if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
2530 KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
2531 "other tasks, restart\n",
2532 gtid));
2533 use_own_tasks = 1;
2534 new_victim = 0;
2535 }
Jonathan Peytonc4c722a2016-06-09 18:27:03 +00002536 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002537
2538// The task source has been exhausted. If in final spin loop of barrier, check
2539// if termination condition is satisfied.
2540#if OMP_45_ENABLED
2541 // The work queue may be empty but there might be proxy tasks still
2542 // executing
2543 if (final_spin && TCR_4(current_task->td_incomplete_child_tasks) == 0)
2544#else
2545 if (final_spin)
2546#endif
2547 {
2548 // First, decrement the #unfinished threads, if that has not already been
2549 // done. This decrement might be to the spin location, and result in the
2550 // termination condition being satisfied.
2551 if (!*thread_finished) {
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002552 kmp_int32 count;
Jonathan Peyton30419822017-05-12 18:01:32 +00002553
Andrey Churbanov5ba90c72017-07-17 09:03:14 +00002554 count = KMP_TEST_THEN_DEC32(unfinished_threads) - 1;
Jonathan Peyton30419822017-05-12 18:01:32 +00002555 KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
2556 "unfinished_threads to %d task_team=%p\n",
2557 gtid, count, task_team));
2558 *thread_finished = TRUE;
2559 }
2560
2561 // It is now unsafe to reference thread->th.th_team !!!
2562 // Decrementing task_team->tt.tt_unfinished_threads can allow the master
2563 // thread to pass through the barrier, where it might reset each thread's
2564 // th.th_team field for the next parallel region. If we can steal more
2565 // work, we know that this has not happened yet.
2566 if (flag != NULL && flag->done_check()) {
2567 KA_TRACE(
2568 15,
2569 ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2570 gtid));
2571 return TRUE;
2572 }
2573 }
2574
2575 // If this thread's task team is NULL, master has recognized that there are
2576 // no more tasks; bail out
2577 if (thread->th.th_task_team == NULL) {
2578 KA_TRACE(15,
2579 ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
2580 return FALSE;
2581 }
2582
2583#if OMP_45_ENABLED
2584 // We could be getting tasks from target constructs; if this is the only
2585 // thread, keep trying to execute tasks from own queue
2586 if (nthreads == 1)
2587 use_own_tasks = 1;
2588 else
2589#endif
2590 {
2591 KA_TRACE(15,
2592 ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
2593 return FALSE;
2594 }
2595 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002596}
2597
Jonathan Peyton30419822017-05-12 18:01:32 +00002598int __kmp_execute_tasks_32(
2599 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin,
2600 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2601 kmp_int32 is_constrained) {
2602 return __kmp_execute_tasks_template(
2603 thread, gtid, flag, final_spin,
2604 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00002605}
2606
Jonathan Peyton30419822017-05-12 18:01:32 +00002607int __kmp_execute_tasks_64(
2608 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin,
2609 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2610 kmp_int32 is_constrained) {
2611 return __kmp_execute_tasks_template(
2612 thread, gtid, flag, final_spin,
2613 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00002614}
2615
Jonathan Peyton30419822017-05-12 18:01:32 +00002616int __kmp_execute_tasks_oncore(
2617 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
2618 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2619 kmp_int32 is_constrained) {
2620 return __kmp_execute_tasks_template(
2621 thread, gtid, flag, final_spin,
2622 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
Jim Cownie4cc4bb42014-10-07 16:25:50 +00002623}
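// Usage sketch for the three wrappers above: callers typically spin on a
// flag object and let it dispatch here, as the taskgroup wait earlier in
// this file does. The flag construction below mirrors the one in
// __kmp_task_team_wait and is illustrative only:
//
//   kmp_flag_32 flag(RCAST(volatile kmp_uint32 *, &taskgroup->count), 0U);
//   while (TCR_4(taskgroup->count) != 0)
//     flag.execute_tasks(thread, gtid, FALSE,
//                        &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
//                        __kmp_task_stealing_constraint);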
2624
Jim Cownie5e8470a2013-09-27 10:38:44 +00002625// __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
2626// next barrier so they can assist in executing enqueued tasks.
2627// The first thread to arrive allocates the task team atomically.
Jonathan Peyton30419822017-05-12 18:01:32 +00002628static void __kmp_enable_tasking(kmp_task_team_t *task_team,
2629 kmp_info_t *this_thr) {
2630 kmp_thread_data_t *threads_data;
2631 int nthreads, i, is_init_thread;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002632
Jonathan Peyton30419822017-05-12 18:01:32 +00002633 KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
2634 __kmp_gtid_from_thread(this_thr)));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002635
Jonathan Peyton30419822017-05-12 18:01:32 +00002636 KMP_DEBUG_ASSERT(task_team != NULL);
2637 KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002638
Jonathan Peyton30419822017-05-12 18:01:32 +00002639 nthreads = task_team->tt.tt_nproc;
2640 KMP_DEBUG_ASSERT(nthreads > 0);
2641 KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002642
Jonathan Peyton30419822017-05-12 18:01:32 +00002643 // Allocate or increase the size of threads_data if necessary
2644 is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002645
Jonathan Peyton30419822017-05-12 18:01:32 +00002646 if (!is_init_thread) {
2647 // Some other thread already set up the array.
2648 KA_TRACE(
2649 20,
2650 ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
2651 __kmp_gtid_from_thread(this_thr)));
2652 return;
2653 }
2654 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2655 KMP_DEBUG_ASSERT(threads_data != NULL);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002656
Jonathan Peyton30419822017-05-12 18:01:32 +00002657 if ((__kmp_tasking_mode == tskm_task_teams) &&
2658 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
2659 // Release any threads sleeping at the barrier, so that they can steal
2660 // tasks and execute them. In extra barrier mode, tasks do not sleep
2661 // at the separate tasking barrier, so this isn't a problem.
2662 for (i = 0; i < nthreads; i++) {
2663 volatile void *sleep_loc;
2664 kmp_info_t *thread = threads_data[i].td.td_thr;
2665
2666 if (i == this_thr->th.th_info.ds.ds_tid) {
2667 continue;
2668 }
2669 // Since we haven't locked the thread's suspend mutex lock at this
2670 // point, there is a small window where a thread might be putting
2671 // itself to sleep, but hasn't set the th_sleep_loc field yet.
2672 // To work around this, __kmp_execute_tasks_template() periodically checks
2673 // to see if other threads are sleeping (using the same random mechanism that
2674 // is used for task stealing) and awakens them if they are.
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00002675 if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
2676 NULL) {
Jonathan Peyton30419822017-05-12 18:01:32 +00002677 KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
2678 __kmp_gtid_from_thread(this_thr),
2679 __kmp_gtid_from_thread(thread)));
2680 __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
2681 } else {
2682 KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
2683 __kmp_gtid_from_thread(this_thr),
2684 __kmp_gtid_from_thread(thread)));
2685 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002686 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002687 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002688
Jonathan Peyton30419822017-05-12 18:01:32 +00002689 KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
2690 __kmp_gtid_from_thread(this_thr)));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002691}
2692
Jim Cownie4cc4bb42014-10-07 16:25:50 +00002693/* // TODO: Check the comment consistency
Jim Cownie5e8470a2013-09-27 10:38:44 +00002694 * Utility routines for "task teams". A task team (kmp_task_team_t) is kind of
2695 * like a shadow of the kmp_team_t data struct, with a different lifetime.
2696 * After a child thread checks into a barrier and calls __kmp_release() from
2697 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
2698 * longer assume that the kmp_team_t structure is intact (at any moment, the
2699 * master thread may exit the barrier code and free the team data structure,
2700 * and return the threads to the thread pool).
2701 *
2702 * This does not work with the tasking code, as the thread is still
2703 * expected to participate in the execution of any tasks that may have been
2704 * spawned by a member of the team, and the thread still needs access to
2705 * each of the other threads in the team, so that it can steal work from them.
2706 *
2707 * Enter the existence of the kmp_task_team_t struct. It employs a reference
2708 * counting mechanism, and is allocated by the master thread before calling
2709 * __kmp_<barrier_kind>_release, and is then released by the last thread to
2710 * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
2711 * of the kmp_task_team_t structs for consecutive barriers can overlap
2712 * (and will, unless the master thread is the last thread to exit the barrier
2713 * release phase, which is not typical).
2714 *
2715 * The existence of such a struct is useful outside the context of tasking,
2716 * but for now, I'm trying to keep it specific to the OMP_30_ENABLED macro,
2717 * so that any performance differences show up when comparing the 2.5 vs. 3.0
2718 * libraries.
2719 *
2720 * We currently use the existence of the threads array as an indicator that
2721 * tasks were spawned since the last barrier. If the structure is to be
2722 * useful outside the context of tasking, then this will have to change, but
2723 * not setting the field minimizes the performance impact of tasking on
2724 * barriers, when no explicit tasks were spawned (pushed, actually).
2725 */
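// In the scheme actually implemented below (__kmp_task_team_setup /
// __kmp_task_team_sync), each kmp_team_t carries two task teams,
// t_task_team[0] and t_task_team[1], and each thread's th_task_state selects
// its current one. The state bit flips at every barrier, so consecutive
// regions use alternating slots, for example:
//
//   barrier n:   workers drain t_task_team[0]; master sets up [1]
//   barrier n+1: workers drain t_task_team[1]; master recycles [0]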
2726
Jonathan Peyton30419822017-05-12 18:01:32 +00002727static kmp_task_team_t *__kmp_free_task_teams =
2728 NULL; // Free list for task_team data structures
Jim Cownie5e8470a2013-09-27 10:38:44 +00002729// Lock for task team data structures
Jonathan Peytoneaa9e402018-01-10 18:21:48 +00002730kmp_bootstrap_lock_t __kmp_task_team_lock =
Jonathan Peyton30419822017-05-12 18:01:32 +00002731 KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002732
Jim Cownie5e8470a2013-09-27 10:38:44 +00002733// __kmp_alloc_task_deque:
2734// Allocates a task deque for a particular thread, and initializes the necessary
2735// data structures relating to the deque. This only happens once per thread
Jonathan Peyton30419822017-05-12 18:01:32 +00002736// per task team since task teams are recycled. No lock is needed during
2737// allocation since each thread allocates its own deque.
2738static void __kmp_alloc_task_deque(kmp_info_t *thread,
2739 kmp_thread_data_t *thread_data) {
2740 __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
2741 KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002742
Jonathan Peyton30419822017-05-12 18:01:32 +00002743 // Initialize last stolen task field to "none"
2744 thread_data->td.td_deque_last_stolen = -1;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002745
Jonathan Peyton30419822017-05-12 18:01:32 +00002746 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
2747 KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
2748 KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002749
Jonathan Peyton30419822017-05-12 18:01:32 +00002750 KE_TRACE(
2751 10,
2752 ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
2753 __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
2754 // Allocate space for task deque, and zero the deque
2755 // Cannot use __kmp_thread_calloc() because threads not around for
2756 // kmp_reap_task_team( ).
2757 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
2758 INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
2759 thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002760}
2761
Jonathan Peytonf4f96952016-05-31 19:07:00 +00002762// __kmp_realloc_task_deque:
Jonathan Peyton30419822017-05-12 18:01:32 +00002763// Re-allocates a task deque for a particular thread, copies the content from
2764// the old deque and adjusts the necessary data structures relating to the
2765// deque. This operation must be done with the deque_lock held
2766static void __kmp_realloc_task_deque(kmp_info_t *thread,
2767 kmp_thread_data_t *thread_data) {
2768 kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
2769 kmp_int32 new_size = 2 * size;
Jonathan Peytonf4f96952016-05-31 19:07:00 +00002770
Jonathan Peyton30419822017-05-12 18:01:32 +00002771 KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
2772 "%d] for thread_data %p\n",
2773 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
Jonathan Peytonf4f96952016-05-31 19:07:00 +00002774
Jonathan Peyton30419822017-05-12 18:01:32 +00002775 kmp_taskdata_t **new_deque =
2776 (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
Jonathan Peytonf4f96952016-05-31 19:07:00 +00002777
Jonathan Peyton30419822017-05-12 18:01:32 +00002778 int i, j;
2779 for (i = thread_data->td.td_deque_head, j = 0; j < size;
2780 i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
2781 new_deque[j] = thread_data->td.td_deque[i];
Jonathan Peytonf4f96952016-05-31 19:07:00 +00002782
Jonathan Peyton30419822017-05-12 18:01:32 +00002783 __kmp_free(thread_data->td.td_deque);
Jonathan Peytonf4f96952016-05-31 19:07:00 +00002784
Jonathan Peyton30419822017-05-12 18:01:32 +00002785 thread_data->td.td_deque_head = 0;
2786 thread_data->td.td_deque_tail = size;
2787 thread_data->td.td_deque = new_deque;
2788 thread_data->td.td_deque_size = new_size;
Jonathan Peytonf4f96952016-05-31 19:07:00 +00002789}
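// Worked example for the copy loop above (illustrative numbers): with
// size = 4, head = 2, tail = 2 (full, wrapped), slots are visited in the
// order 2, 3, 0, 1 and land at new indices 0..3, linearizing the deque:
//
//   old: [c d a b] head=2 tail=2  ->  new: [a b c d . . . .] head=0 tail=4
//   (a is the oldest task; new_size = 8)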
Jim Cownie5e8470a2013-09-27 10:38:44 +00002790
Jim Cownie5e8470a2013-09-27 10:38:44 +00002791// __kmp_free_task_deque:
Jonathan Peyton30419822017-05-12 18:01:32 +00002792// Deallocates a task deque for a particular thread. Happens at library
2793// deallocation, so we don't need to reset all thread data fields.
2794static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
Jonathan Peyton30419822017-05-12 18:01:32 +00002795 if (thread_data->td.td_deque != NULL) {
Jonathan Peyton1b536722017-08-02 20:06:32 +00002796 __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
Jonathan Peyton30419822017-05-12 18:01:32 +00002797 TCW_4(thread_data->td.td_deque_ntasks, 0);
2798 __kmp_free(thread_data->td.td_deque);
2799 thread_data->td.td_deque = NULL;
Jonathan Peyton1b536722017-08-02 20:06:32 +00002800 __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
Jonathan Peyton30419822017-05-12 18:01:32 +00002801 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002802
2803#ifdef BUILD_TIED_TASK_STACK
Jonathan Peyton30419822017-05-12 18:01:32 +00002804 // GEH: Figure out what to do here for td_susp_tied_tasks
2805 if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
2806 __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
2807 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002808#endif // BUILD_TIED_TASK_STACK
2809}
2810
Jim Cownie5e8470a2013-09-27 10:38:44 +00002811// __kmp_realloc_task_threads_data:
Jonathan Peyton30419822017-05-12 18:01:32 +00002812// Allocates a threads_data array for a task team, either by allocating an
2813// initial array or enlarging an existing array. Only the first thread to get
2814// the lock allocates or enlarges the array and re-initializes its elements.
Jim Cownie5e8470a2013-09-27 10:38:44 +00002815// That thread returns "TRUE", the rest return "FALSE".
2816// Assumes that the new array size is given by task_team -> tt.tt_nproc.
2817// The current size is given by task_team -> tt.tt_max_threads.
Jonathan Peyton30419822017-05-12 18:01:32 +00002818static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
2819 kmp_task_team_t *task_team) {
2820 kmp_thread_data_t **threads_data_p;
2821 kmp_int32 nthreads, maxthreads;
2822 int is_init_thread = FALSE;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002823
Jonathan Peyton30419822017-05-12 18:01:32 +00002824 if (TCR_4(task_team->tt.tt_found_tasks)) {
2825 // Already reallocated and initialized.
2826 return FALSE;
2827 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002828
Jonathan Peyton30419822017-05-12 18:01:32 +00002829 threads_data_p = &task_team->tt.tt_threads_data;
2830 nthreads = task_team->tt.tt_nproc;
2831 maxthreads = task_team->tt.tt_max_threads;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002832
Jonathan Peyton30419822017-05-12 18:01:32 +00002833 // All threads must lock when they encounter the first task of the implicit
2834 // task region to make sure threads_data fields are (re)initialized before
2835 // being used.
2836 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002837
Jonathan Peyton30419822017-05-12 18:01:32 +00002838 if (!TCR_4(task_team->tt.tt_found_tasks)) {
2839 // first thread to enable tasking
2840 kmp_team_t *team = thread->th.th_team;
2841 int i;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002842
Jonathan Peyton30419822017-05-12 18:01:32 +00002843 is_init_thread = TRUE;
2844 if (maxthreads < nthreads) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00002845
Jonathan Peyton30419822017-05-12 18:01:32 +00002846 if (*threads_data_p != NULL) {
2847 kmp_thread_data_t *old_data = *threads_data_p;
2848 kmp_thread_data_t *new_data = NULL;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002849
Jonathan Peyton30419822017-05-12 18:01:32 +00002850 KE_TRACE(
2851 10,
2852 ("__kmp_realloc_task_threads_data: T#%d reallocating "
2853 "threads data for task_team %p, new_size = %d, old_size = %d\n",
2854 __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
2855 // Reallocate threads_data to have more elements than current array
2856 // Cannot use __kmp_thread_realloc() because threads not around for
2857 // kmp_reap_task_team( ). Note all new array entries are initialized
2858 // to zero by __kmp_allocate().
2859 new_data = (kmp_thread_data_t *)__kmp_allocate(
2860 nthreads * sizeof(kmp_thread_data_t));
2861 // copy old data to new data
2862 KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
Andrey Churbanov71483f22017-07-18 11:56:16 +00002863 (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002864
2865#ifdef BUILD_TIED_TASK_STACK
Jonathan Peyton30419822017-05-12 18:01:32 +00002866 // GEH: Figure out if this is the right thing to do
2867 for (i = maxthreads; i < nthreads; i++) {
2868 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2869 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2870 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002871#endif // BUILD_TIED_TASK_STACK
Jonathan Peyton30419822017-05-12 18:01:32 +00002872 // Install the new data and free the old data
2873 (*threads_data_p) = new_data;
2874 __kmp_free(old_data);
2875 } else {
2876 KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
2877 "threads data for task_team %p, size = %d\n",
2878 __kmp_gtid_from_thread(thread), task_team, nthreads));
2879 // Make the initial allocate for threads_data array, and zero entries
2880 // Cannot use __kmp_thread_calloc() because threads not around for
2881 // kmp_reap_task_team( ).
2882 ANNOTATE_IGNORE_WRITES_BEGIN();
2883 *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
2884 nthreads * sizeof(kmp_thread_data_t));
2885 ANNOTATE_IGNORE_WRITES_END();
Jim Cownie5e8470a2013-09-27 10:38:44 +00002886#ifdef BUILD_TIED_TASK_STACK
Jonathan Peyton30419822017-05-12 18:01:32 +00002887 // GEH: Figure out if this is the right thing to do
Jim Cownie5e8470a2013-09-27 10:38:44 +00002888 for (i = 0; i < nthreads; i++) {
Jonathan Peyton30419822017-05-12 18:01:32 +00002889 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2890 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002891 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002892#endif // BUILD_TIED_TASK_STACK
2893 }
2894 task_team->tt.tt_max_threads = nthreads;
2895 } else {
2896 // If array has (more than) enough elements, go ahead and use it
2897 KMP_DEBUG_ASSERT(*threads_data_p != NULL);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002898 }
2899
Jonathan Peyton30419822017-05-12 18:01:32 +00002900 // initialize threads_data pointers back to thread_info structures
2901 for (i = 0; i < nthreads; i++) {
2902 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2903 thread_data->td.td_thr = team->t.t_threads[i];
2904
2905 if (thread_data->td.td_deque_last_stolen >= nthreads) {
2906 // The last stolen field survives across teams / barrier, and the number
2907 // of threads may have changed. It's possible (likely?) that a new
2908 // parallel region will exhibit the same behavior as the previous region.
2909 thread_data->td.td_deque_last_stolen = -1;
2910 }
2911 }
2912
2913 KMP_MB();
2914 TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
2915 }
2916
2917 __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
2918 return is_init_thread;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002919}
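// The routine above is a double-checked publication: tt_found_tasks is
// tested once without the lock (fast path), re-tested under tt_threads_lock,
// and only set via TCW_SYNC_4 after the KMP_MB(), so a thread that observes
// tt_found_tasks == TRUE also observes fully initialized threads_data.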
2920
Jim Cownie5e8470a2013-09-27 10:38:44 +00002921// __kmp_free_task_threads_data:
2922// Deallocates a threads_data array for a task team, including any attached
2923// tasking deques. Only occurs at library shutdown.
Jonathan Peyton30419822017-05-12 18:01:32 +00002924static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
2925 __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
2926 if (task_team->tt.tt_threads_data != NULL) {
2927 int i;
2928 for (i = 0; i < task_team->tt.tt_max_threads; i++) {
2929 __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002930 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002931 __kmp_free(task_team->tt.tt_threads_data);
2932 task_team->tt.tt_threads_data = NULL;
2933 }
2934 __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002935}
2936
Jim Cownie5e8470a2013-09-27 10:38:44 +00002937// __kmp_allocate_task_team:
2938// Allocates a task team associated with a specific team, taking it from
Jonathan Peyton30419822017-05-12 18:01:32 +00002939// the global task team free list if possible. Also initializes data
2940// structures.
2941static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
2942 kmp_team_t *team) {
2943 kmp_task_team_t *task_team = NULL;
2944 int nthreads;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002945
Jonathan Peyton30419822017-05-12 18:01:32 +00002946 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
2947 (thread ? __kmp_gtid_from_thread(thread) : -1), team));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002948
Jonathan Peyton30419822017-05-12 18:01:32 +00002949 if (TCR_PTR(__kmp_free_task_teams) != NULL) {
2950 // Take a task team from the task team pool
2951 __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
2952 if (__kmp_free_task_teams != NULL) {
2953 task_team = __kmp_free_task_teams;
2954 TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
2955 task_team->tt.tt_next = NULL;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002956 }
Jonathan Peyton30419822017-05-12 18:01:32 +00002957 __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
2958 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002959
Jonathan Peyton30419822017-05-12 18:01:32 +00002960 if (task_team == NULL) {
2961 KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
2962 "task team for team %p\n",
2963 __kmp_gtid_from_thread(thread), team));
2964 // Allocate a new task team if one is not available.
2965 // Cannot use __kmp_thread_malloc() because threads not around for
2966 // kmp_reap_task_team( ).
2967 task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
2968 __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
2969 // AC: __kmp_allocate zeroes returned memory
2970 // task_team -> tt.tt_threads_data = NULL;
2971 // task_team -> tt.tt_max_threads = 0;
2972 // task_team -> tt.tt_next = NULL;
2973 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00002974
Jonathan Peyton30419822017-05-12 18:01:32 +00002975 TCW_4(task_team->tt.tt_found_tasks, FALSE);
Jonathan Peytondf6818b2016-06-14 17:57:47 +00002976#if OMP_45_ENABLED
Jonathan Peyton30419822017-05-12 18:01:32 +00002977 TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
Andrey Churbanov535b6fa2015-05-07 17:41:51 +00002978#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00002979 task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002980
Jonathan Peyton30419822017-05-12 18:01:32 +00002981 TCW_4(task_team->tt.tt_unfinished_threads, nthreads);
2982 TCW_4(task_team->tt.tt_active, TRUE);
Jim Cownie5e8470a2013-09-27 10:38:44 +00002983
Jonathan Peyton30419822017-05-12 18:01:32 +00002984 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
2985 "unfinished_threads init'd to %d\n",
2986 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
2987 task_team->tt.tt_unfinished_threads));
2988 return task_team;
Jim Cownie5e8470a2013-09-27 10:38:44 +00002989}
2990
Jim Cownie5e8470a2013-09-27 10:38:44 +00002991// __kmp_free_task_team:
2992// Frees the task team associated with a specific thread, and adds it
2993// to the global task team free list.
Jonathan Peyton30419822017-05-12 18:01:32 +00002994void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
2995 KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
2996 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
Jim Cownie5e8470a2013-09-27 10:38:44 +00002997
Jonathan Peyton30419822017-05-12 18:01:32 +00002998 // Put task team back on free list
2999 __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003000
Jonathan Peyton30419822017-05-12 18:01:32 +00003001 KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3002 task_team->tt.tt_next = __kmp_free_task_teams;
3003 TCW_PTR(__kmp_free_task_teams, task_team);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003004
Jonathan Peyton30419822017-05-12 18:01:32 +00003005 __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003006}
3007
Jim Cownie5e8470a2013-09-27 10:38:44 +00003008// __kmp_reap_task_teams:
3009// Free all the task teams on the task team free list.
3010// Should only be done during library shutdown.
Jonathan Peyton30419822017-05-12 18:01:32 +00003011// Cannot do anything that needs a thread structure or gtid since they are
3012// already gone.
3013void __kmp_reap_task_teams(void) {
3014 kmp_task_team_t *task_team;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003015
Jonathan Peyton30419822017-05-12 18:01:32 +00003016 if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3017 // Free all task_teams on the free list
3018 __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3019 while ((task_team = __kmp_free_task_teams) != NULL) {
3020 __kmp_free_task_teams = task_team->tt.tt_next;
3021 task_team->tt.tt_next = NULL;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003022
Jonathan Peyton30419822017-05-12 18:01:32 +00003023 // Free threads_data if necessary
3024 if (task_team->tt.tt_threads_data != NULL) {
3025 __kmp_free_task_threads_data(task_team);
3026 }
3027 __kmp_free(task_team);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003028 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003029 __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3030 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003031}
3032
Jim Cownie5e8470a2013-09-27 10:38:44 +00003033// __kmp_wait_to_unref_task_teams:
3034// Some threads could still be in the fork barrier release code, possibly
3035// trying to steal tasks. Wait for each thread to unreference its task team.
Jonathan Peyton30419822017-05-12 18:01:32 +00003036void __kmp_wait_to_unref_task_teams(void) {
3037 kmp_info_t *thread;
3038 kmp_uint32 spins;
3039 int done;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003040
Jonathan Peyton30419822017-05-12 18:01:32 +00003041 KMP_INIT_YIELD(spins);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003042
Jonathan Peyton30419822017-05-12 18:01:32 +00003043 for (;;) {
3044 done = TRUE;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003045
Jonathan Peyton30419822017-05-12 18:01:32 +00003046 // TODO: GEH - this may be wrong because some sync would be necessary
3047 // in case threads are added to the pool during the traversal. Need to
3048 // verify that lock for thread pool is held when calling this routine.
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00003049 for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
Jonathan Peyton30419822017-05-12 18:01:32 +00003050 thread = thread->th.th_next_pool) {
Jim Cownie5e8470a2013-09-27 10:38:44 +00003051#if KMP_OS_WINDOWS
Jonathan Peyton30419822017-05-12 18:01:32 +00003052 DWORD exit_val;
Jim Cownie5e8470a2013-09-27 10:38:44 +00003053#endif
Jonathan Peyton30419822017-05-12 18:01:32 +00003054 if (TCR_PTR(thread->th.th_task_team) == NULL) {
3055 KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3056 __kmp_gtid_from_thread(thread)));
3057 continue;
3058 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003059#if KMP_OS_WINDOWS
Jonathan Peyton30419822017-05-12 18:01:32 +00003060 // TODO: GEH - add this check for Linux* OS / OS X* as well?
3061 if (!__kmp_is_thread_alive(thread, &exit_val)) {
3062 thread->th.th_task_team = NULL;
3063 continue;
3064 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003065#endif
3066
Jonathan Peyton30419822017-05-12 18:01:32 +00003067 done = FALSE; // Because th_task_team pointer is not NULL for this thread
Jim Cownie5e8470a2013-09-27 10:38:44 +00003068
Jonathan Peyton30419822017-05-12 18:01:32 +00003069 KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3070 "unreference task_team\n",
3071 __kmp_gtid_from_thread(thread)));
Jim Cownie5e8470a2013-09-27 10:38:44 +00003072
Jonathan Peyton30419822017-05-12 18:01:32 +00003073 if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3074 volatile void *sleep_loc;
3075 // If the thread is sleeping, awaken it.
Andrey Churbanovc47afcd2017-07-03 11:24:08 +00003076 if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3077 NULL) {
Jonathan Peyton30419822017-05-12 18:01:32 +00003078 KA_TRACE(
3079 10,
3080 ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3081 __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3082 __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
Jim Cownie5e8470a2013-09-27 10:38:44 +00003083 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003084 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003085 }
Jonathan Peyton30419822017-05-12 18:01:32 +00003086 if (done) {
3087 break;
3088 }
3089
3090 // If we are oversubscribed, or have waited a bit (and library mode is
3091 // throughput), yield. Pause is in the following code.
3092 KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
3093 KMP_YIELD_SPIN(spins); // Yields only if KMP_LIBRARY=throughput
3094 }
Jim Cownie5e8470a2013-09-27 10:38:44 +00003095}

// __kmp_task_team_setup: Create a task_team for the current team, but use
// an already created, unused one if it already exists.
void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);

  // If this task_team hasn't been created yet, allocate it. It will be used in
  // the region after the next.
  // If it exists, it is the current task team and shouldn't be touched yet as
  // it may still be in use.
  if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
      (always || team->t.t_nproc > 1)) {
    team->t.t_task_team[this_thr->th.th_task_state] =
        __kmp_allocate_task_team(this_thr, team);
    KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p "
                  "for team %d at parity=%d\n",
                  __kmp_gtid_from_thread(this_thr),
                  team->t.t_task_team[this_thr->th.th_task_state],
                  ((team != NULL) ? team->t.t_id : -1),
                  this_thr->th.th_task_state));
  }

  // After threads exit the release, they will call sync, and then point to this
  // other task_team; make sure it is allocated and properly initialized. As
  // threads spin in the barrier release phase, they will continue to use the
  // previous task_team struct (above), until they receive the signal to stop
  // checking for tasks (they can't safely reference the kmp_team_t struct,
  // which could be reallocated by the master thread). No task teams are formed
  // for serialized teams.
  if (team->t.t_nproc > 1) {
    int other_team = 1 - this_thr->th.th_task_state;
    if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
      team->t.t_task_team[other_team] =
          __kmp_allocate_task_team(this_thr, team);
      KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new "
                    "task_team %p for team %d at parity=%d\n",
                    __kmp_gtid_from_thread(this_thr),
                    team->t.t_task_team[other_team],
                    ((team != NULL) ? team->t.t_id : -1), other_team));
    } else { // Leave the old task team struct in place for the upcoming region;
      // adjust as needed
      kmp_task_team_t *task_team = team->t.t_task_team[other_team];
      if (!task_team->tt.tt_active ||
          team->t.t_nproc != task_team->tt.tt_nproc) {
        TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
        TCW_4(task_team->tt.tt_found_tasks, FALSE);
#if OMP_45_ENABLED
        TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
#endif
        TCW_4(task_team->tt.tt_unfinished_threads, team->t.t_nproc);
        TCW_4(task_team->tt.tt_active, TRUE);
      }
      // if team size has changed, the first thread to enable tasking will
      // realloc threads_data if necessary
      KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team "
                    "%p for team %d at parity=%d\n",
                    __kmp_gtid_from_thread(this_thr),
                    team->t.t_task_team[other_team],
                    ((team != NULL) ? team->t.t_id : -1), other_team));
    }
  }
}

// __kmp_task_team_sync: Propagation of task team data from team to threads
// which happens just after the release phase of a team barrier. This may be
// called by any thread, but only for teams with # threads > 1.
void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);

  // Toggle the th_task_state field, to switch which task_team this thread
  // refers to
  this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
  // It is now safe to propagate the task team pointer from the team struct to
  // the current thread.
  TCW_PTR(this_thr->th.th_task_team,
          team->t.t_task_team[this_thr->th.th_task_state]);
  KA_TRACE(20,
           ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
            "%p from Team #%d (parity=%d)\n",
            __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
            ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
}
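
// A sketch of the double-buffering scheme implemented by the two routines
// above: t_task_team[2] is indexed by th_task_state parity, so for a team of
// N > 1 threads each barrier cycle looks like
//
//   parity p:  threads push and execute tasks via t_task_team[p]
//   gather:    master waits on t_task_team[p] in __kmp_task_team_wait() and
//              then deactivates it
//   release:   each thread calls __kmp_task_team_sync(), flips th_task_state
//              to 1-p, and adopts t_task_team[1-p], which
//              __kmp_task_team_setup() allocated or reset ahead of time
//
// This is why __kmp_task_team_setup() only resets the "other" slot when both
// exist: the current slot may still be referenced by spinning workers.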

// __kmp_task_team_wait: Master thread waits for outstanding tasks after the
// barrier gather phase. Only called by master thread if #threads in team > 1 or
// if proxy tasks were created.
//
// wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
// by passing in 0 optionally as the last argument. When wait is zero, master
// thread does not wait for unfinished_threads to reach 0.
void __kmp_task_team_wait(
    kmp_info_t *this_thr,
    kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
  kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];

  KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
  KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);

  if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
    if (wait) {
      KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
                    "(for unfinished_threads to reach 0) on task_team = %p\n",
                    __kmp_gtid_from_thread(this_thr), task_team));
      // Worker threads may have dropped through to release phase, but could
      // still be executing tasks. Wait here for tasks to complete. To avoid
      // memory contention, only master thread checks termination condition.
      kmp_flag_32 flag(
          RCAST(volatile kmp_uint32 *, &task_team->tt.tt_unfinished_threads),
          0U);
      flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
    }
    // Deactivate the old task team, so that the worker threads will stop
    // referencing it while spinning.
    KA_TRACE(
        20,
        ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
         "setting active to false, setting local and team's pointer to NULL\n",
         __kmp_gtid_from_thread(this_thr), task_team));
#if OMP_45_ENABLED
    KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
                     task_team->tt.tt_found_proxy_tasks == TRUE);
    TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
#else
    KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1);
#endif
    KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
    TCW_SYNC_4(task_team->tt.tt_active, FALSE);
    KMP_MB();

    TCW_PTR(this_thr->th.th_task_team, NULL);
  }
}

// __kmp_tasking_barrier:
// This routine may only be called when __kmp_tasking_mode == tskm_extra_barrier.
// Internal function to execute all tasks prior to a regular barrier or a join
// barrier. It is a full barrier itself, which unfortunately turns regular
// barriers into double barriers and join barriers into 1 1/2 barriers.
void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
  volatile kmp_uint32 *spin = RCAST(
      volatile kmp_uint32 *,
      &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
  int flag = FALSE;
  KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);

#if USE_ITT_BUILD
  KMP_FSYNC_SPIN_INIT(spin, (kmp_uint32 *)NULL);
#endif /* USE_ITT_BUILD */
  kmp_flag_32 spin_flag(spin, 0U);
  while (!spin_flag.execute_tasks(thread, gtid, TRUE,
                                  &flag USE_ITT_BUILD_ARG(NULL), 0)) {
#if USE_ITT_BUILD
    // TODO: What about itt_sync_obj??
    KMP_FSYNC_SPIN_PREPARE(CCAST(kmp_uint32 *, spin));
#endif /* USE_ITT_BUILD */

    if (TCR_4(__kmp_global.g.g_done)) {
      if (__kmp_global.g.g_abort)
        __kmp_abort_thread();
      break;
    }
    KMP_YIELD(TRUE); // GH: We always yield here
  }
#if USE_ITT_BUILD
  KMP_FSYNC_SPIN_ACQUIRED(CCAST(kmp_uint32 *, spin));
#endif /* USE_ITT_BUILD */
}

#if OMP_45_ENABLED

// __kmp_give_task puts a task into a given thread queue if:
//  - the queue for that thread was created
//  - there's space in that queue
// Because of this, __kmp_push_task needs to check if there's space after
// getting the lock
static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
                            kmp_int32 pass) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  kmp_task_team_t *task_team = taskdata->td_task_team;

  KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
                taskdata, tid));

  // If task_team is NULL something went really bad...
  KMP_DEBUG_ASSERT(task_team != NULL);

  bool result = false;
  kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];

  if (thread_data->td.td_deque == NULL) {
    // There's no queue in this thread, go find another one
    // We're guaranteed that at least one thread has a queue
    KA_TRACE(30,
             ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
              tid, taskdata));
    return result;
  }

  if (TCR_4(thread_data->td.td_deque_ntasks) >=
      TASK_DEQUE_SIZE(thread_data->td)) {
    KA_TRACE(
        30,
        ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
         taskdata, tid));

    // if this deque is bigger than the pass ratio give a chance to another
    // thread
    if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
      return result;

    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
    __kmp_realloc_task_deque(thread, thread_data);

  } else {

    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);

    if (TCR_4(thread_data->td.td_deque_ntasks) >=
        TASK_DEQUE_SIZE(thread_data->td)) {
      KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
                    "thread %d.\n",
                    taskdata, tid));

      // if this deque is bigger than the pass ratio give a chance to another
      // thread
      if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
        goto release_and_exit;

      __kmp_realloc_task_deque(thread, thread_data);
    }
  }

  // lock is held here, and there is space in the deque

  thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
  // Wrap index.
  thread_data->td.td_deque_tail =
      (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
  TCW_4(thread_data->td.td_deque_ntasks,
        TCR_4(thread_data->td.td_deque_ntasks) + 1);

  result = true;
  KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
                taskdata, tid));

release_and_exit:
  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);

  return result;
}

/* The finish of the proxy tasks is divided in two pieces:
   - the top half is the one that can be done from a thread outside the team
   - the bottom half must be run from a thread within the team

   In order to run the bottom half the task gets queued back into one of the
   threads of the team. Once the td_incomplete_child_tasks counter of the parent
   is decremented the threads can leave the barriers. So, the bottom half needs
   to be queued before the counter is decremented. The top half is therefore
   divided in two parts:
   - things that can be run before queuing the bottom half
   - things that must be run after queuing the bottom half

   This creates a second race as the bottom half can free the task before the
   second top half is executed. To avoid this we use the
   td_incomplete_child_tasks of the proxy task to synchronize the top and bottom
   halves. */
static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
  KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
  KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);

  taskdata->td_flags.complete = 1; // mark the task as completed

  if (taskdata->td_taskgroup)
    KMP_TEST_THEN_DEC32(&taskdata->td_taskgroup->count);

  // Create an imaginary child for this task so the bottom half cannot
  // release the task before we have completed the second top half
  TCI_4(taskdata->td_incomplete_child_tasks);
}

static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
  kmp_int32 children = 0;

  // Predecrement simulated by "- 1" calculation
  children =
      KMP_TEST_THEN_DEC32(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
  KMP_DEBUG_ASSERT(children >= 0);

  // Remove the imaginary child
  TCD_4(taskdata->td_incomplete_child_tasks);
}

static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
  kmp_info_t *thread = __kmp_threads[gtid];

  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
  KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
                   1); // top half must run before bottom half

  // We need to wait to make sure the top half is finished
  // Spinning here should be ok as this should happen quickly
  while (TCR_4(taskdata->td_incomplete_child_tasks) > 0)
    ;

  __kmp_release_deps(gtid, taskdata);
  __kmp_free_task_and_ancestors(gtid, taskdata, thread);
}
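
// Putting the three helpers together, the required ordering for finishing a
// proxy task is:
//
//   __kmp_first_top_half_finish_proxy(td);   // mark complete, add an
//                                            // imaginary child to pin td
//   /* queue the bottom half into a team thread, or run it directly */
//   __kmp_second_top_half_finish_proxy(td);  // decrement parent's counter,
//                                            // remove the imaginary child
//   __kmp_bottom_half_finish_proxy(gtid, t); // waits for the imaginary child
//                                            // to vanish, then releases deps
//                                            // and frees the task
//
// The bottom half may start concurrently with the second top half (the
// out-of-team case); the imaginary child is what keeps it from freeing the
// task too early.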

/*!
@ingroup TASKING
@param gtid Global Thread ID of encountering thread
@param ptask Task whose execution is completed

Execute the completion of a proxy task from a thread that is part of the
team. Run the top and bottom halves directly.
*/
void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
  KMP_DEBUG_ASSERT(ptask != NULL);
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
  KA_TRACE(
      10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
           gtid, taskdata));

  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);

  __kmp_first_top_half_finish_proxy(taskdata);
  __kmp_second_top_half_finish_proxy(taskdata);
  __kmp_bottom_half_finish_proxy(gtid, ptask);

  KA_TRACE(10,
           ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
            gtid, taskdata));
}

/*!
@ingroup TASKING
@param ptask Task whose execution is completed

Execute the completion of a proxy task from a thread that could not belong to
the team.
*/
void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
  KMP_DEBUG_ASSERT(ptask != NULL);
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);

  KA_TRACE(
      10,
      ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
       taskdata));

  KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);

  __kmp_first_top_half_finish_proxy(taskdata);

  // Enqueue task to complete bottom half completion from a thread within the
  // corresponding team
  kmp_team_t *team = taskdata->td_team;
  kmp_int32 nthreads = team->t.t_nproc;
  kmp_info_t *thread;

  // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
  // but we cannot use __kmp_get_random here
  kmp_int32 start_k = 0;
  kmp_int32 pass = 1;
  kmp_int32 k = start_k;

  do {
    // For now we're just linearly trying to find a thread
    thread = team->t.t_threads[k];
    k = (k + 1) % nthreads;

    // we did a full pass through all the threads
    if (k == start_k)
      pass = pass << 1;

  } while (!__kmp_give_task(thread, k, ptask, pass));

  __kmp_second_top_half_finish_proxy(taskdata);

  KA_TRACE(
      10,
      ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
       taskdata));
}
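
// Note on the search loop above: `pass` starts at 1 and doubles after every
// full sweep over the team, and __kmp_give_task() only reallocates a full
// deque when TASK_DEQUE_SIZE(td) / INITIAL_TASK_DEQUE_SIZE < pass. Early
// sweeps therefore prefer any thread with free space; later sweeps
// progressively allow deques to grow to 2x, 4x, ... of the initial size.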

// __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
// for taskloop
//
// thread: allocating thread
// task_src: pointer to source task to be duplicated
// returns: a pointer to the allocated kmp_task_t structure (task).
kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
  kmp_task_t *task;
  kmp_taskdata_t *taskdata;
  kmp_taskdata_t *taskdata_src;
  kmp_taskdata_t *parent_task = thread->th.th_current_task;
  size_t shareds_offset;
  size_t task_size;

  KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
                task_src));
  taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
  KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
                   TASK_FULL); // it should not be a proxy task
  KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
  task_size = taskdata_src->td_size_alloc;

  // Allocate a kmp_taskdata_t block and a kmp_task_t block.
  KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
                task_size));
#if USE_FAST_MEMORY
  taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
#else
  taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
#endif /* USE_FAST_MEMORY */
  KMP_MEMCPY(taskdata, taskdata_src, task_size);

  task = KMP_TASKDATA_TO_TASK(taskdata);

  // Initialize new task (only specific fields not affected by memcpy)
  taskdata->td_task_id = KMP_GEN_TASK_ID();
  if (task->shareds != NULL) { // need to set up the shareds pointer
    shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
    task->shareds = &((char *)taskdata)[shareds_offset];
    KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
                     0);
  }
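  // The fixup above re-bases task->shareds by preserving its byte offset
  // within the copied block: if shareds sat at, say, (char *)taskdata_src + 96
  // in the source, it now points at (char *)taskdata + 96 in the copy.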
  taskdata->td_alloc_thread = thread;
  taskdata->td_parent = parent_task;
  taskdata->td_taskgroup =
      parent_task
          ->td_taskgroup; // task inherits the taskgroup from the parent task

  // Only need to keep track of child task counts if team parallel and tasking
  // not serialized
  if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
    KMP_TEST_THEN_INC32(&parent_task->td_incomplete_child_tasks);
    if (parent_task->td_taskgroup)
      KMP_TEST_THEN_INC32(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks
    // since implicit ones are not deallocated
    if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
      KMP_TEST_THEN_INC32(&taskdata->td_parent->td_allocated_child_tasks);
  }

  KA_TRACE(20,
           ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
            thread, taskdata, taskdata->td_parent));
#if OMPT_SUPPORT
  if (UNLIKELY(ompt_enabled.enabled))
    __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
#endif
  return task;
}

// Routine optionally generated by the compiler for setting the lastprivate flag
// and calling needed constructors for private/firstprivate objects
// (used to form taskloop tasks from the pattern task)
// Parameters: dest task, src task, lastprivate flag.
typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);

// __kmp_taskloop_linear: Start tasks of the taskloop linearly
//
// loc Source location information
// gtid Global thread ID
// task Pattern task, exposes the loop iteration range
// lb Pointer to loop lower bound in task structure
// ub Pointer to loop upper bound in task structure
// st Loop stride
// ub_glob Global upper bound (used for lastprivate check)
// num_tasks Number of tasks to execute
// grainsize Number of loop iterations per task
// extras Number of chunks with grainsize+1 iterations
// tc Iterations count
// task_dup Tasks duplication routine
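//
// A worked example of the chunking done below: tc = 10 iterations split into
// num_tasks = 3 gives grainsize = 3 and extras = 1, so the chunks are 4, 3, 3
// (the first `extras` tasks get grainsize+1 iterations) and the invariant
// tc == num_tasks * grainsize + extras holds.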
void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
                           kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                           kmp_uint64 ub_glob, kmp_uint64 num_tasks,
                           kmp_uint64 grainsize, kmp_uint64 extras,
                           kmp_uint64 tc, void *task_dup) {
  KMP_COUNT_BLOCK(OMP_TASKLOOP);
  KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
  kmp_uint64 lower = *lb; // compiler provides global bounds here
  kmp_uint64 upper = *ub;
  kmp_uint64 i;
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;
  kmp_task_t *next_task;
  kmp_int32 lastpriv = 0;
  size_t lower_offset =
      (char *)lb - (char *)task; // remember offset of lb in the task structure
  size_t upper_offset =
      (char *)ub - (char *)task; // remember offset of ub in the task structure

  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
  KMP_DEBUG_ASSERT(num_tasks > extras);
  KMP_DEBUG_ASSERT(num_tasks > 0);
  KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
                "extras %lld, i=%lld,%lld(%d)%lld, dup %p\n",
                gtid, num_tasks, grainsize, extras, lower, upper, ub_glob, st,
                task_dup));

  // Launch num_tasks tasks, assign grainsize iterations to each task
  for (i = 0; i < num_tasks; ++i) {
    kmp_uint64 chunk_minus_1;
    if (extras == 0) {
      chunk_minus_1 = grainsize - 1;
    } else {
      chunk_minus_1 = grainsize;
      --extras; // first `extras` tasks get a bigger chunk (grainsize+1)
    }
    upper = lower + st * chunk_minus_1;
    if (i == num_tasks - 1) {
      // schedule the last task, set lastprivate flag if needed
      if (st == 1) { // most common case
        KMP_DEBUG_ASSERT(upper == *ub);
        if (upper == ub_glob)
          lastpriv = 1;
      } else if (st > 0) { // positive loop stride
        KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
        if ((kmp_uint64)st > ub_glob - upper)
          lastpriv = 1;
      } else { // negative loop stride
        KMP_DEBUG_ASSERT(upper + st < *ub);
        if (upper - ub_glob < (kmp_uint64)(-st))
          lastpriv = 1;
      }
    }
    next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
    // adjust task-specific bounds
    *(kmp_uint64 *)((char *)next_task + lower_offset) = lower;
    *(kmp_uint64 *)((char *)next_task + upper_offset) = upper;
    if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates, etc.
      ptask_dup(next_task, task, lastpriv);
    KA_TRACE(40, ("__kmp_taskloop_linear: T#%d; task %p: lower %lld, "
                  "upper %lld (offsets %p %p)\n",
                  gtid, next_task, lower, upper, lower_offset, upper_offset));
    __kmp_omp_task(gtid, next_task, true); // schedule new task
    lower = upper + st; // adjust lower bound for the next iteration
  }
  // free the pattern task and exit
  __kmp_task_start(gtid, task, current_task); // make internal bookkeeping
  // do not execute the pattern task, just do internal bookkeeping
  __kmp_task_finish(gtid, task, current_task);
}

// Structure to keep taskloop parameters for auxiliary task
// kept in the shareds of the task structure.
typedef struct __taskloop_params {
  kmp_task_t *task;
  kmp_uint64 *lb;
  kmp_uint64 *ub;
  void *task_dup;
  kmp_int64 st;
  kmp_uint64 ub_glob;
  kmp_uint64 num_tasks;
  kmp_uint64 grainsize;
  kmp_uint64 extras;
  kmp_uint64 tc;
  kmp_uint64 num_t_min;
} __taskloop_params_t;

void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
                          kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
                          kmp_uint64, kmp_uint64, kmp_uint64, kmp_uint64,
                          void *);

// Execute part of the taskloop submitted as a task.
int __kmp_taskloop_task(int gtid, void *ptask) {
  __taskloop_params_t *p =
      (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
  kmp_task_t *task = p->task;
  kmp_uint64 *lb = p->lb;
  kmp_uint64 *ub = p->ub;
  void *task_dup = p->task_dup;
  // p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
  kmp_int64 st = p->st;
  kmp_uint64 ub_glob = p->ub_glob;
  kmp_uint64 num_tasks = p->num_tasks;
  kmp_uint64 grainsize = p->grainsize;
  kmp_uint64 extras = p->extras;
  kmp_uint64 tc = p->tc;
  kmp_uint64 num_t_min = p->num_t_min;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);
  KA_TRACE(20, ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
                " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
                gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
                task_dup));
#endif
  KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
  if (num_tasks > num_t_min)
    __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
                         grainsize, extras, tc, num_t_min, task_dup);
  else
    __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, tc, task_dup);

  KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
  return 0;
}

// Schedule part of the taskloop as a task,
// execute the rest of the taskloop.
//
// loc Source location information
// gtid Global thread ID
// task Pattern task, exposes the loop iteration range
// lb Pointer to loop lower bound in task structure
// ub Pointer to loop upper bound in task structure
// st Loop stride
// ub_glob Global upper bound (used for lastprivate check)
// num_tasks Number of tasks to execute
// grainsize Number of loop iterations per task
// extras Number of chunks with grainsize+1 iterations
// tc Iterations count
// num_t_min Threshold to launch tasks recursively
// task_dup Tasks duplication routine
void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
                          kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                          kmp_uint64 ub_glob, kmp_uint64 num_tasks,
                          kmp_uint64 grainsize, kmp_uint64 extras,
                          kmp_uint64 tc, kmp_uint64 num_t_min, void *task_dup) {
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);
  KMP_DEBUG_ASSERT(num_tasks > num_t_min);
  KA_TRACE(20, ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
                " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
                gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
                task_dup));
#endif
  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
  kmp_uint64 lower = *lb;
  kmp_uint64 upper = *ub;
  kmp_info_t *thread = __kmp_threads[gtid];
  // kmp_taskdata_t *current_task = thread->th.th_current_task;
  kmp_task_t *next_task;
  kmp_int32 lastpriv = 0;
  size_t lower_offset =
      (char *)lb - (char *)task; // remember offset of lb in the task structure
  size_t upper_offset =
      (char *)ub - (char *)task; // remember offset of ub in the task structure

  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
  KMP_DEBUG_ASSERT(num_tasks > extras);
  KMP_DEBUG_ASSERT(num_tasks > 0);

  // split the loop in two halves
  kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
  kmp_uint64 gr_size0 = grainsize;
  kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
  kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
  if (n_tsk0 <= extras) {
    gr_size0++; // integrate extras into grainsize
    ext0 = 0; // no extra iters in 1st half
    ext1 = extras - n_tsk0; // remaining extras
    tc0 = gr_size0 * n_tsk0;
    tc1 = tc - tc0;
  } else { // n_tsk0 > extras
    ext1 = 0; // no extra iters in 2nd half
    ext0 = extras;
    tc1 = grainsize * n_tsk1;
    tc0 = tc - tc1;
  }
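  // A worked example of the split above: num_tasks = 5, grainsize = 3,
  // extras = 2, tc = 17 gives n_tsk0 = 2 and n_tsk1 = 3; since n_tsk0 <=
  // extras, the first half folds its extras into the grainsize (gr_size0 = 4,
  // ext0 = 0), leaving ext1 = 0, tc0 = 8, tc1 = 9. Each half preserves the
  // invariant tc_i == n_tsk_i * grainsize_i + ext_i.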
  ub0 = lower + st * (tc0 - 1);
  lb1 = ub0 + st;

  // create pattern task for 2nd half of the loop
  next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
  // adjust lower bound (upper bound is not changed) for the 2nd half
  *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
  if (ptask_dup != NULL) // construct firstprivates, etc.
    ptask_dup(next_task, task, 0);
  *ub = ub0; // adjust upper bound for the 1st half

  // create auxiliary task for 2nd half of the loop
  kmp_task_t *new_task =
      __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
                            sizeof(__taskloop_params_t), &__kmp_taskloop_task);
  __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
  p->task = next_task;
  p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
  p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
  p->task_dup = task_dup;
  p->st = st;
  p->ub_glob = ub_glob;
  p->num_tasks = n_tsk1;
  p->grainsize = grainsize;
  p->extras = ext1;
  p->tc = tc1;
  p->num_t_min = num_t_min;
  __kmp_omp_task(gtid, new_task, true); // schedule new task

  // execute the 1st half of current subrange
  if (n_tsk0 > num_t_min)
    __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
                         ext0, tc0, num_t_min, task_dup);
  else
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
                          gr_size0, ext0, tc0, task_dup);

  KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
}

/*!
@ingroup TASKING
@param loc Source location information
@param gtid Global thread ID
@param task Task structure
@param if_val Value of the if clause
@param lb Pointer to loop lower bound in task structure
@param ub Pointer to loop upper bound in task structure
@param st Loop stride
@param nogroup Flag, 1 if nogroup clause specified, 0 otherwise
@param sched Schedule specified 0/1/2 for none/grainsize/num_tasks
@param grainsize Schedule value if specified
@param task_dup Tasks duplication routine

Execute the taskloop construct.
*/
void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
                     kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
                     int sched, kmp_uint64 grainsize, void *task_dup) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);

  KA_TRACE(20, ("__kmpc_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
                "grain %llu(%d), dup %p\n",
                gtid, taskdata, *lb, *ub, st, grainsize, sched, task_dup));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
  ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), 0, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif

  if (nogroup == 0) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmpc_taskgroup(loc, gtid);
  }

  // =========================================================================
  // calculate loop parameters
  kmp_uint64 tc;
  kmp_uint64 lower = *lb; // compiler provides global bounds here
  kmp_uint64 upper = *ub;
  kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
  kmp_uint64 num_tasks = 0, extras = 0;
  kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

  // compute trip count
  if (st == 1) { // most common case
    tc = upper - lower + 1;
  } else if (st < 0) {
    tc = (lower - upper) / (-st) + 1;
  } else { // st > 0
    tc = (upper - lower) / st + 1;
  }
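  // e.g. lb = 0, ub = 9, st = 3 yields tc = (9 - 0) / 3 + 1 = 4 iterations
  // (0, 3, 6, 9); the compiler-provided bounds are inclusive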
  if (tc == 0) {
    KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
    // free the pattern task and exit
    __kmp_task_start(gtid, task, current_task);
    // do not execute anything for zero-trip loop
    __kmp_task_finish(gtid, task, current_task);
    return;
  }
  if (num_tasks_min == 0)
    // TODO: can we choose a better default heuristic?
    num_tasks_min =
        KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);

  // compute num_tasks/grainsize based on the input provided
  switch (sched) {
  case 0: // no schedule clause specified, we can choose the default
    // let's try to schedule (team_size*10) tasks
    grainsize = thread->th.th_team_nproc * 10;
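    // no break here: control falls through to the num_tasks case, so the
    // default behaves as if num_tasks = team_size * 10 had been requested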
  case 2: // num_tasks provided
    if (grainsize > tc) {
      num_tasks = tc; // too big num_tasks requested, adjust values
      grainsize = 1;
      extras = 0;
    } else {
      num_tasks = grainsize;
      grainsize = tc / num_tasks;
      extras = tc % num_tasks;
    }
    break;
  case 1: // grainsize provided
    if (grainsize > tc) {
      num_tasks = 1; // too big grainsize requested, adjust values
      grainsize = tc;
      extras = 0;
    } else {
      num_tasks = tc / grainsize;
      // adjust grainsize for balanced distribution of iterations
      grainsize = tc / num_tasks;
      extras = tc % num_tasks;
    }
    break;
  default:
    KMP_ASSERT2(0, "unknown scheduling of taskloop");
  }
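  // Example of the case 1 rebalancing: grainsize = 4 with tc = 10 gives
  // num_tasks = 10 / 4 = 2, then grainsize = 10 / 2 = 5 and extras = 0,
  // i.e. chunks of 5 and 5 instead of the unbalanced 4, 4, 2.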
  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
  KMP_DEBUG_ASSERT(num_tasks > extras);
  KMP_DEBUG_ASSERT(num_tasks > 0);
  // =========================================================================

  // check if clause value first
  if (if_val == 0) { // if(0) specified, mark task as serial
    taskdata->td_flags.task_serial = 1;
    taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    // always start serial tasks linearly
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, tc, task_dup);
  } else if (num_tasks > num_tasks_min) {
    KA_TRACE(20, ("__kmpc_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
                  "(%lld), grain %llu, extras %llu\n",
                  gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                         grainsize, extras, tc, num_tasks_min, task_dup);
  } else {
    KA_TRACE(20, ("__kmpc_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
                  "(%lld), grain %llu, extras %llu\n",
                  gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, tc, task_dup);
  }

  if (nogroup == 0) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmpc_end_taskgroup(loc, gtid);
  }
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
        &(task_info->task_data), 0, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif
  KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
}
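
// For reference, a minimal sketch of the call sequence a compiler might emit
// for "#pragma omp taskloop grainsize(4)"; the outlined entry point and the
// exact bound-field layout below are illustrative assumptions, not
// definitions from this file:
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, /*flags=*/1,
//                                         sizeof_task_with_bounds,
//                                         sizeof_shareds, &outlined_entry);
//   /* store lb/ub into the bound fields embedded in *t, then: */
//   __kmpc_taskloop(&loc, gtid, t, /*if_val=*/1, &t_lb, &t_ub, /*st=*/1,
//                   /*nogroup=*/0, /*sched=*/1 /*grainsize*/,
//                   /*grainsize=*/4, /*task_dup=*/NULL);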

#endif