/*
 * kmp_taskdeps.cpp
 */


//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//


//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"

#if OMP_40_ENABLED

//TODO: Improve memory allocation? keep a list of pre-allocated structures? allocate in blocks? re-use finished list entries?
//TODO: don't use atomic ref counters for stack-allocated nodes.
//TODO: find an alternative to atomic refs for heap-allocated nodes?
//TODO: Finish graph output support
//TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other runtime locks
//TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static kmp_int32 kmp_node_id_seed = 0;
#endif

static void
__kmp_init_node ( kmp_depnode_t *node )
{
    node->dn.task = NULL; // set to NULL initially; it will point to the right task once dependences have been processed
    node->dn.successors = NULL;
    __kmp_init_lock(&node->dn.lock);
    node->dn.nrefs = 1; // init creates the first reference to the node
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    node->dn.id = KMP_TEST_THEN_INC32(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *
__kmp_node_ref ( kmp_depnode_t *node )
{
    KMP_TEST_THEN_INC32(&node->dn.nrefs);
    return node;
}

static inline void
__kmp_node_deref ( kmp_info_t *thread, kmp_depnode_t *node )
{
    if (!node) return;

    kmp_int32 n = KMP_TEST_THEN_DEC32(&node->dn.nrefs) - 1;
    if ( n == 0 ) {
        KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,node);
#else
        __kmp_thread_free(thread,node);
#endif
    }
}
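
/* Reference-counting protocol (a summary of the code above, not new logic):
 * __kmp_init_node() creates a node with nrefs == 1, the creator's reference;
 * every successors/last_ins list entry takes an extra reference through
 * __kmp_node_ref(); __kmp_node_deref() drops one reference and frees the node
 * once the count reaches zero. For example, a node held in two successor
 * lists has nrefs == 3 until those lists are freed and the creator derefs it. */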

#define KMP_ACQUIRE_DEPNODE(gtid,n) __kmp_acquire_lock(&(n)->dn.lock,(gtid))
#define KMP_RELEASE_DEPNODE(gtid,n) __kmp_release_lock(&(n)->dn.lock,(gtid))

static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list );

enum {
    KMP_DEPHASH_OTHER_SIZE = 97,
    KMP_DEPHASH_MASTER_SIZE = 997
};

static inline kmp_int32
__kmp_dephash_hash ( kmp_intptr_t addr, size_t hsize )
{
    //TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) % m_num_sets );
    return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
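
/* Worked example (illustrative values, not from the source): with
 * addr = 0x1000 and hsize = KMP_DEPHASH_OTHER_SIZE (97):
 *   (0x1000 >> 6) ^ (0x1000 >> 2) = 64 ^ 1024 = 1088, and 1088 % 97 = 21,
 * so that address lands in bucket 21. The two shifts fold in bits above the
 * usual object alignment so neighboring addresses spread across buckets. */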

static kmp_dephash_t *
__kmp_dephash_create ( kmp_info_t *thread, kmp_taskdata_t *current_task )
{
    kmp_dephash_t *h;

    size_t h_size;

    if ( current_task->td_flags.tasktype == TASK_IMPLICIT )
        h_size = KMP_DEPHASH_MASTER_SIZE;
    else
        h_size = KMP_DEPHASH_OTHER_SIZE;

    kmp_int32 size = h_size * sizeof(kmp_dephash_entry_t) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
    h = (kmp_dephash_t *) __kmp_fast_allocate( thread, size );
#else
    h = (kmp_dephash_t *) __kmp_thread_malloc( thread, size );
#endif
    h->size = h_size;

#ifdef KMP_DEBUG
    h->nelements = 0;
    h->nconflicts = 0;
#endif
    h->buckets = (kmp_dephash_entry **)(h+1);

    for ( size_t i = 0; i < h_size; i++ )
        h->buckets[i] = 0;

    return h;
}
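
/* Layout note (restating what the code above does): the table is one
 * allocation, with the kmp_dephash_t header followed directly by the bucket
 * array:  [ kmp_dephash_t | buckets[0] ... buckets[h_size-1] ]
 * which is why h->buckets is simply set to (kmp_dephash_entry **)(h + 1). */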

static void
__kmp_dephash_free ( kmp_info_t *thread, kmp_dephash_t *h )
{
    for ( size_t i=0; i < h->size; i++ ) {
        if ( h->buckets[i] ) {
            kmp_dephash_entry_t *next;
            for ( kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next ) {
                next = entry->next_in_bucket;
                __kmp_depnode_list_free(thread,entry->last_ins);
                __kmp_node_deref(thread,entry->last_out);
#if USE_FAST_MEMORY
                __kmp_fast_free(thread,entry);
#else
                __kmp_thread_free(thread,entry);
#endif
            }
        }
    }
#if USE_FAST_MEMORY
    __kmp_fast_free(thread,h);
#else
    __kmp_thread_free(thread,h);
#endif
}

static kmp_dephash_entry *
__kmp_dephash_find ( kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr )
{
    kmp_int32 bucket = __kmp_dephash_hash(addr,h->size);

    kmp_dephash_entry_t *entry;
    for ( entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket )
        if ( entry->addr == addr ) break;

    if ( entry == NULL ) {
        // create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
        entry = (kmp_dephash_entry_t *) __kmp_fast_allocate( thread, sizeof(kmp_dephash_entry_t) );
#else
        entry = (kmp_dephash_entry_t *) __kmp_thread_malloc( thread, sizeof(kmp_dephash_entry_t) );
#endif
        entry->addr = addr;
        entry->last_out = NULL;
        entry->last_ins = NULL;
        entry->next_in_bucket = h->buckets[bucket];
        h->buckets[bucket] = entry;
#ifdef KMP_DEBUG
        h->nelements++;
        if ( entry->next_in_bucket ) h->nconflicts++;
#endif
    }
    return entry;
}

static kmp_depnode_list_t *
__kmp_add_node ( kmp_info_t *thread, kmp_depnode_list_t *list, kmp_depnode_t *node )
{
    kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
    new_head = (kmp_depnode_list_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_list_t));
#else
    new_head = (kmp_depnode_list_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_list_t));
#endif

    new_head->node = __kmp_node_ref(node);
    new_head->next = list;

    return new_head;
}

static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list )
{
    kmp_depnode_list *next;

    for ( ; list ; list = next ) {
        next = list->next;

        __kmp_node_deref(thread,list->node);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,list);
#else
        __kmp_thread_free(thread,list);
#endif
    }
}

static inline void
__kmp_track_dependence ( kmp_depnode_t *source, kmp_depnode_t *sink,
                         kmp_task_t *sink_task )
{
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    kmp_taskdata_t * task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t * task_sink = KMP_TASK_TO_TASKDATA(sink->dn.task); // this can be NULL when if(0) ...

    __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id, task_source->td_ident->psource, sink->dn.id, task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_TRACE
    /* OMPT tracks dependences between tasks (a=source, b=sink) in which
       task a blocks the execution of task b through the ompt_new_dependence_callback */
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_task_dependence_pair))
    {
        kmp_taskdata_t * task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
        kmp_taskdata_t * task_sink = KMP_TASK_TO_TASKDATA(sink_task);

        ompt_callbacks.ompt_callback(ompt_event_task_dependence_pair)(
            task_source->ompt_task_info.task_id,
            task_sink->ompt_task_info.task_id);
    }
#endif /* OMPT_SUPPORT && OMPT_TRACE */
}

template< bool filter >
static inline kmp_int32
__kmp_process_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                     bool dep_barrier, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                     kmp_task_t *task )
{
    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : dep_barrier = %d\n", filter, gtid, ndeps, dep_barrier ) );

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_int32 npredecessors=0;
    for ( kmp_int32 i = 0; i < ndeps ; i++ ) {
        const kmp_depend_info_t * dep = &dep_list[i];

        KMP_DEBUG_ASSERT(dep->flags.in);

        if ( filter && dep->base_addr == 0 ) continue;  // skip filtered entries

        kmp_dephash_entry_t *info = __kmp_dephash_find(thread,hash,dep->base_addr);
        kmp_depnode_t *last_out = info->last_out;

        if ( dep->flags.out && info->last_ins ) {
            for ( kmp_depnode_list_t * p = info->last_ins; p; p = p->next ) {
                kmp_depnode_t * indep = p->node;
                if ( indep->dn.task ) {
                    KMP_ACQUIRE_DEPNODE(gtid,indep);
                    if ( indep->dn.task ) {
                        __kmp_track_dependence(indep,node,task);
                        indep->dn.successors = __kmp_add_node(thread, indep->dn.successors, node);
                        KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                                     filter,gtid, KMP_TASK_TO_TASKDATA(indep->dn.task), KMP_TASK_TO_TASKDATA(node->dn.task)));
                        npredecessors++;
                    }
                    KMP_RELEASE_DEPNODE(gtid,indep);
                }
            }

            __kmp_depnode_list_free(thread,info->last_ins);
            info->last_ins = NULL;

        } else if ( last_out && last_out->dn.task ) {
            KMP_ACQUIRE_DEPNODE(gtid,last_out);
            if ( last_out->dn.task ) {
                __kmp_track_dependence(last_out,node,task);
                last_out->dn.successors = __kmp_add_node(thread, last_out->dn.successors, node);
                KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                             filter,gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task), KMP_TASK_TO_TASKDATA(node->dn.task)));

                npredecessors++;
            }
            KMP_RELEASE_DEPNODE(gtid,last_out);
        }

        if ( dep_barrier ) {
            // if this is a sync point in the serial sequence, then the previous outputs are guaranteed to be completed after
            // the execution of this task, so the previous output nodes can be cleared.
            __kmp_node_deref(thread,last_out);
            info->last_out = NULL;
        } else {
            if ( dep->flags.out ) {
                __kmp_node_deref(thread,last_out);
                info->last_out = __kmp_node_ref(node);
            } else
                info->last_ins = __kmp_add_node(thread, info->last_ins, node);
        }

    }

    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter, gtid, npredecessors ) );

    return npredecessors;
}
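
/* Illustrative trace of the rules above for a single address x (example
 * tasks, not from the source):
 *   task A depend(out: x)  -> recorded as info->last_out
 *   task B depend(in: x)   -> edge A->B; B appended to info->last_ins
 *   task C depend(in: x)   -> edge A->C; C appended to info->last_ins
 *   task D depend(out: x)  -> edges B->D and C->D from last_ins; last_ins is
 *                             freed and D becomes the new last_out
 * B and C may run concurrently once A finishes; D waits for both. No direct
 * A->D edge is needed because the ordering follows transitively. */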

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool
__kmp_check_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_task_t *task, kmp_dephash_t *hash, bool dep_barrier,
                   kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                   kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    int i;

#if KMP_DEBUG
    kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
    KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d possibly aliased dependencies, %d non-aliased dependencies : dep_barrier=%d .\n", gtid, taskdata, ndeps, ndeps_noalias, dep_barrier ) );

    // Filter deps in dep_list
    // TODO: Different algorithm for large dep_list ( > 10 ? )
    for ( i = 0; i < ndeps; i ++ ) {
        if ( dep_list[i].base_addr != 0 )
            for ( int j = i+1; j < ndeps; j++ )
                if ( dep_list[i].base_addr == dep_list[j].base_addr ) {
                    dep_list[i].flags.in |= dep_list[j].flags.in;
                    dep_list[i].flags.out |= dep_list[j].flags.out;
                    dep_list[j].base_addr = 0; // Mark j element as void
                }
    }

    // doesn't need to be atomic as no other thread is going to be accessing this node just yet
    // npredecessors is set to -1 to ensure that none of the releasing tasks queues this task before we have finished processing all the dependencies
    node->dn.npredecessors = -1;

    // used to pack all npredecessors additions into a single atomic operation at the end
    int npredecessors;

    npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier,
                                             ndeps, dep_list, task);
    npredecessors += __kmp_process_deps<false>(gtid, node, hash, dep_barrier,
                                               ndeps_noalias, noalias_dep_list, task);

    node->dn.task = task;
    KMP_MB();

    // Account for our initial fake value
    npredecessors++;

    // Update predecessors and obtain current value to check if there are still any outstanding dependences (some tasks may have finished while we processed the dependences)
    npredecessors = KMP_TEST_THEN_ADD32(&node->dn.npredecessors, npredecessors) + npredecessors;

    KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n", gtid, npredecessors, taskdata ) );

    // beyond this point the task could be queued (and executed) by a releasing task...
    return npredecessors > 0 ? true : false;
}
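
/* Worked example of the counter protocol (illustrative numbers): suppose the
 * two __kmp_process_deps calls find n = 3 predecessors, and 2 of them finish
 * and decrement dn.npredecessors while we are still scanning. The counter
 * then holds -1 - 2 = -3; the atomic add of n + 1 = 4 leaves it at 1, i.e.
 * one still-outstanding dependence. The initial -1 is what prevents a
 * releasing task from reaching 0 and queueing this task before processing
 * is done. */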

void
__kmp_release_deps ( kmp_int32 gtid, kmp_taskdata_t *task )
{
    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_depnode_t *node = task->td_depnode;

    if ( task->td_dephash ) {
        KA_TRACE(40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n", gtid, task ) );
        __kmp_dephash_free(thread,task->td_dephash);
    }

    if ( !node ) return;

    KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n", gtid, task ) );

    KMP_ACQUIRE_DEPNODE(gtid,node);
    node->dn.task = NULL; // mark this task as finished, so no new dependencies are generated
    KMP_RELEASE_DEPNODE(gtid,node);

    kmp_depnode_list_t *next;
    for ( kmp_depnode_list_t *p = node->dn.successors; p; p = next ) {
        kmp_depnode_t *successor = p->node;
        kmp_int32 npredecessors = KMP_TEST_THEN_DEC32(&successor->dn.npredecessors) - 1;

        // successor task can be NULL for wait_deps or because deps are still being processed
        if ( npredecessors == 0 ) {
            KMP_MB();
            if ( successor->dn.task ) {
                KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled for execution.\n", gtid, successor->dn.task, task ) );
                __kmp_omp_task(gtid,successor->dn.task,false);
            }
        }

        next = p->next;
        __kmp_node_deref(thread,p->node);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,p);
#else
        __kmp_thread_free(thread,p);
#endif
    }

    __kmp_node_deref(thread,node);

    KA_TRACE(20, ("__kmp_release_deps: T#%d all successors of %p notified of completion\n", gtid, task ) );
}
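
/* For orientation, a minimal sketch of user code that reaches the entry point
 * below (assuming a clang-style lowering; the exact call sequence is compiler
 * dependent):
 *
 *   #pragma omp task depend(in: x) depend(out: y)
 *   { y = f(x); }
 *
 * The compiler allocates the task with __kmpc_omp_task_alloc() and, because
 * depend clauses are present, calls __kmpc_omp_task_with_deps() with a
 * dep_list describing x (in) and y (out) instead of plain __kmpc_omp_task().
 */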

/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param new_task task thunk allocated by __kmp_omp_task_alloc() for the ''new task''
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

@return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued

Schedule a non-thread-switchable task with dependences for execution
*/
kmp_int32
__kmpc_omp_task_with_deps( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t * new_task,
                           kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                           kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{

    kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata ) );

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;

#if OMPT_SUPPORT && OMPT_TRACE
    /* OMPT: grab all dependences if requested by the tool */
    if (ompt_enabled && ndeps+ndeps_noalias > 0 &&
        ompt_callbacks.ompt_callback(ompt_event_task_dependences))
    {
        kmp_int32 i;

        new_taskdata->ompt_task_info.ndeps = ndeps+ndeps_noalias;
        new_taskdata->ompt_task_info.deps = (ompt_task_dependence_t *)
            KMP_OMPT_DEPS_ALLOC(thread,
                                (ndeps+ndeps_noalias)*sizeof(ompt_task_dependence_t));

        KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);

        for (i = 0; i < ndeps; i++)
        {
            new_taskdata->ompt_task_info.deps[i].variable_addr =
                (void*) dep_list[i].base_addr;
            if (dep_list[i].flags.in && dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[i].dependence_flags =
                    ompt_task_dependence_type_inout;
            else if (dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[i].dependence_flags =
                    ompt_task_dependence_type_out;
            else if (dep_list[i].flags.in)
                new_taskdata->ompt_task_info.deps[i].dependence_flags =
                    ompt_task_dependence_type_in;
        }
        for (i = 0; i < ndeps_noalias; i++)
        {
            new_taskdata->ompt_task_info.deps[ndeps+i].variable_addr =
                (void*) noalias_dep_list[i].base_addr;
            if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[ndeps+i].dependence_flags =
                    ompt_task_dependence_type_inout;
            else if (noalias_dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[ndeps+i].dependence_flags =
                    ompt_task_dependence_type_out;
            else if (noalias_dep_list[i].flags.in)
                new_taskdata->ompt_task_info.deps[ndeps+i].dependence_flags =
                    ompt_task_dependence_type_in;
        }
    }
#endif /* OMPT_SUPPORT && OMPT_TRACE */

    bool serial = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
#if OMP_41_ENABLED
    serial = serial && !(new_taskdata->td_flags.proxy == TASK_PROXY);
#endif

    if ( !serial && ( ndeps > 0 || ndeps_noalias > 0 )) {
        /* if no dependencies have been tracked yet, create the dependence hash */
        if ( current_task->td_dephash == NULL )
            current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_t));
#else
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_t));
#endif

        __kmp_init_node(node);
        new_taskdata->td_depnode = node;

        if ( __kmp_check_deps( gtid, node, new_task, current_task->td_dephash, NO_DEP_BARRIER,
                               ndeps, dep_list, ndeps_noalias, noalias_dep_list ) ) {
            KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking dependencies: "
                          "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n", gtid, loc_ref,
                          new_taskdata ) );
            return TASK_CURRENT_NOT_QUEUED;
        }
    } else {
#if OMP_41_ENABLED
        kmp_task_team_t * task_team = thread->th.th_task_team;
        if ( task_team && task_team->tt.tt_found_proxy_tasks )
            __kmpc_omp_wait_deps ( loc_ref, gtid, ndeps, dep_list, ndeps_noalias, noalias_dep_list );
        else
#endif
            KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies for task (serialized) "
                          "loc=%p task=%p\n", gtid, loc_ref, new_taskdata ) );
    }

    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking dependencies : "
                  "loc=%p task=%p, transferring to __kmpc_omp_task\n", gtid, loc_ref,
                  new_taskdata ) );

    return __kmpc_omp_task(loc_ref,gtid,new_task);
}
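
/* For orientation, a hedged sketch of user code that reaches the entry point
 * below (assuming a clang-style lowering; details vary by compiler): an
 * undeferred task with depend clauses, e.g.
 *
 *   #pragma omp task if(0) depend(in: x)
 *   { use(x); }
 *
 * is lowered to a call to __kmpc_omp_wait_deps() to block until x's
 * dependences are fulfilled, followed by inline execution of the task body.
 */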

/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

Blocks the current task until all specified dependencies have been fulfilled.
*/
void
__kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                       kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref) );

    if ( ndeps == 0 && ndeps_noalias == 0 ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to wait upon : loc=%p\n", gtid, loc_ref) );
        return;
    }

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;

    // We can return immediately as:
    // - dependences are not computed in serial teams (except if we have proxy tasks)
    // - if the dephash is not yet created it means we have nothing to wait for
    bool ignore = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
#if OMP_41_ENABLED
    ignore = ignore && thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
#endif
    ignore = ignore || current_task->td_dephash == NULL;

    if ( ignore ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n", gtid, loc_ref) );
        return;
    }

    kmp_depnode_t node;
    __kmp_init_node(&node);

    if (!__kmp_check_deps( gtid, &node, NULL, current_task->td_dephash, DEP_BARRIER,
                           ndeps, dep_list, ndeps_noalias, noalias_dep_list )) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n", gtid, loc_ref) );
        return;
    }

    int thread_finished = FALSE;
    kmp_flag_32 flag((volatile kmp_uint32 *)&(node.dn.npredecessors), 0U);
    while ( node.dn.npredecessors > 0 ) {
        flag.execute_tasks(thread, gtid, FALSE, &thread_finished,
#if USE_ITT_BUILD
                           NULL,
#endif
                           __kmp_task_stealing_constraint );
    }

    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n", gtid, loc_ref) );
}

#endif /* OMP_40_ENABLED */
