1 /*
2  * kmp_taskdeps.cpp
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 //#define KMP_SUPPORT_GRAPH_OUTPUT 1
14 
15 #include "kmp.h"
16 #include "kmp_io.h"
17 #include "kmp_wait_release.h"
18 #include "kmp_taskdeps.h"
19 #if OMPT_SUPPORT
20 #include "ompt-specific.h"
21 #endif
22 
23 // TODO: Improve memory allocation? keep a list of pre-allocated structures?
24 // allocate in blocks? re-use list finished list entries?
25 // TODO: don't use atomic ref counters for stack-allocated nodes.
26 // TODO: find an alternate to atomic refs for heap-allocated nodes?
27 // TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
// runtime locks
30 // TODO: Any ITT support needed?
31 
32 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
33 static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
34 #endif
35 
36 static void __kmp_init_node(kmp_depnode_t *node) {
37   node->dn.successors = NULL;
38   node->dn.task = NULL; // will point to the right task
39   // once dependences have been processed
40   for (int i = 0; i < MAX_MTX_DEPS; ++i)
41     node->dn.mtx_locks[i] = NULL;
42   node->dn.mtx_num_locks = 0;
43   __kmp_init_lock(&node->dn.lock);
44   KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
45 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
46   node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
47 #endif
48 }
49 
50 static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
51   KMP_ATOMIC_INC(&node->dn.nrefs);
52   return node;
53 }
54 
55 enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };
56 
static size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071,
                         270029};
static const size_t MAX_GEN = 8;
59 
60 static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
61   // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
62   // m_num_sets );
63   return ((addr >> 6) ^ (addr >> 2)) % hsize;
64 }
65 
66 static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
67                                            kmp_dephash_t *current_dephash) {
68   kmp_dephash_t *h;
69 
70   size_t gen = current_dephash->generation + 1;
71   if (gen >= MAX_GEN)
72     return current_dephash;
73   size_t new_size = sizes[gen];
74 
75   size_t size_to_allocate =
76       new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);
77 
78 #if USE_FAST_MEMORY
79   h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
80 #else
81   h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
82 #endif
83 
84   h->size = new_size;
85   h->nelements = current_dephash->nelements;
86   h->buckets = (kmp_dephash_entry **)(h + 1);
87   h->generation = gen;
88   h->nconflicts = 0;
89 
90   // make sure buckets are properly initialized
91   for (size_t i = 0; i < new_size; i++) {
92     h->buckets[i] = NULL;
93   }
94 
95   // insert existing elements in the new table
96   for (size_t i = 0; i < current_dephash->size; i++) {
97     kmp_dephash_entry_t *next, *entry;
98     for (entry = current_dephash->buckets[i]; entry; entry = next) {
99       next = entry->next_in_bucket;
100       // Compute the new hash using the new size, and insert the entry in
101       // the new bucket.
102       size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
103       entry->next_in_bucket = h->buckets[new_bucket];
104       if (entry->next_in_bucket) {
105         h->nconflicts++;
106       }
107       h->buckets[new_bucket] = entry;
108     }
109   }
110 
111   // Free old hash table
112 #if USE_FAST_MEMORY
113   __kmp_fast_free(thread, current_dephash);
114 #else
115   __kmp_thread_free(thread, current_dephash);
116 #endif
117 
118   return h;
119 }
120 
121 static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
122                                            kmp_taskdata_t *current_task) {
123   kmp_dephash_t *h;
124 
125   size_t h_size;
126 
127   if (current_task->td_flags.tasktype == TASK_IMPLICIT)
128     h_size = KMP_DEPHASH_MASTER_SIZE;
129   else
130     h_size = KMP_DEPHASH_OTHER_SIZE;
131 
132   size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);
133 
134 #if USE_FAST_MEMORY
135   h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
136 #else
137   h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
138 #endif
139   h->size = h_size;
140 
141   h->generation = 0;
142   h->nelements = 0;
143   h->nconflicts = 0;
144   h->buckets = (kmp_dephash_entry **)(h + 1);
145 
146   for (size_t i = 0; i < h_size; i++)
147     h->buckets[i] = 0;
148 
149   return h;
150 }
151 
152 #define ENTRY_LAST_INS 0
153 #define ENTRY_LAST_MTXS 1
154 
static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
157   kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
160     *hash = __kmp_dephash_extend(thread, h);
161     h = *hash;
162   }
163   size_t bucket = __kmp_dephash_hash(addr, h->size);
164 
165   kmp_dephash_entry_t *entry;
166   for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
167     if (entry->addr == addr)
168       break;
169 
170   if (entry == NULL) {
171 // create entry. This is only done by one thread so no locking required
172 #if USE_FAST_MEMORY
173     entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
174         thread, sizeof(kmp_dephash_entry_t));
175 #else
176     entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
177         thread, sizeof(kmp_dephash_entry_t));
178 #endif
179     entry->addr = addr;
180     entry->last_out = NULL;
181     entry->last_ins = NULL;
182     entry->last_mtxs = NULL;
183     entry->last_flag = ENTRY_LAST_INS;
184     entry->mtx_lock = NULL;
185     entry->next_in_bucket = h->buckets[bucket];
186     h->buckets[bucket] = entry;
187     h->nelements++;
188     if (entry->next_in_bucket)
189       h->nconflicts++;
190   }
191   return entry;
192 }
193 
194 static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
195                                           kmp_depnode_list_t *list,
196                                           kmp_depnode_t *node) {
197   kmp_depnode_list_t *new_head;
198 
199 #if USE_FAST_MEMORY
200   new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
201       thread, sizeof(kmp_depnode_list_t));
202 #else
203   new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
204       thread, sizeof(kmp_depnode_list_t));
205 #endif
206 
207   new_head->node = __kmp_node_ref(node);
208   new_head->next = list;
209 
210   return new_head;
211 }
212 
static inline void __kmp_track_dependence(kmp_int32 gtid,
                                          kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
216 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
217   kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
218   // do not use sink->dn.task as that is only filled after the dependencies
219   // are already processed!
220   kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);
221 
222   __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
223                task_source->td_ident->psource, sink->dn.id,
224                task_sink->td_ident->psource);
225 #endif
226 #if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
     task a blocks the execution of b through the
     ompt_callback_task_dependence callback */
230   if (ompt_enabled.ompt_callback_task_dependence) {
231     kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
232     ompt_data_t *sink_data;
233     if (sink_task)
234       sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
235     else
236       sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;
237 
238     ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
239         &(task_source->ompt_task_info.task_data), sink_data);
240   }
241 #endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
242 }
243 
244 static inline kmp_int32
245 __kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
246                              kmp_task_t *task, kmp_depnode_t *node,
247                              kmp_depnode_list_t *plist) {
248   if (!plist)
249     return 0;
250   kmp_int32 npredecessors = 0;
251   // link node as successor of list elements
252   for (kmp_depnode_list_t *p = plist; p; p = p->next) {
253     kmp_depnode_t *dep = p->node;
254     if (dep->dn.task) {
255       KMP_ACQUIRE_DEPNODE(gtid, dep);
256       if (dep->dn.task) {
257         __kmp_track_dependence(gtid, dep, node, task);
258         dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
259         KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
260                       "%p\n",
261                       gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
262                       KMP_TASK_TO_TASKDATA(task)));
263         npredecessors++;
264       }
265       KMP_RELEASE_DEPNODE(gtid, dep);
266     }
267   }
268   return npredecessors;
269 }
270 
271 static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
272                                                      kmp_info_t *thread,
273                                                      kmp_task_t *task,
274                                                      kmp_depnode_t *source,
275                                                      kmp_depnode_t *sink) {
276   if (!sink)
277     return 0;
278   kmp_int32 npredecessors = 0;
279   if (sink->dn.task) {
    // synchronously add source to sink's list of successors
281     KMP_ACQUIRE_DEPNODE(gtid, sink);
282     if (sink->dn.task) {
283       __kmp_track_dependence(gtid, sink, source, task);
284       sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
285       KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
286                     "%p\n",
287                     gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
288                     KMP_TASK_TO_TASKDATA(task)));
289       npredecessors++;
290     }
291     KMP_RELEASE_DEPNODE(gtid, sink);
292   }
293   return npredecessors;
294 }
295 
296 template <bool filter>
297 static inline kmp_int32
298 __kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
299                    bool dep_barrier, kmp_int32 ndeps,
300                    kmp_depend_info_t *dep_list, kmp_task_t *task) {
301   KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
302                 "dep_barrier = %d\n",
303                 filter, gtid, ndeps, dep_barrier));
304 
305   kmp_info_t *thread = __kmp_threads[gtid];
306   kmp_int32 npredecessors = 0;
307   for (kmp_int32 i = 0; i < ndeps; i++) {
308     const kmp_depend_info_t *dep = &dep_list[i];
309 
310     if (filter && dep->base_addr == 0)
311       continue; // skip filtered entries
312 
313     kmp_dephash_entry_t *info =
314         __kmp_dephash_find(thread, hash, dep->base_addr);
315     kmp_depnode_t *last_out = info->last_out;
316     kmp_depnode_list_t *last_ins = info->last_ins;
317     kmp_depnode_list_t *last_mtxs = info->last_mtxs;
318 
319     if (dep->flags.out) { // out --> clean lists of ins and mtxs if any
320       if (last_ins || last_mtxs) {
321         if (info->last_flag == ENTRY_LAST_INS) { // INS were last
322           npredecessors +=
323               __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
324         } else { // MTXS were last
325           npredecessors +=
326               __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
327         }
328         __kmp_depnode_list_free(thread, last_ins);
329         __kmp_depnode_list_free(thread, last_mtxs);
330         info->last_ins = NULL;
331         info->last_mtxs = NULL;
332       } else {
333         npredecessors +=
334             __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
335       }
336       __kmp_node_deref(thread, last_out);
337       if (dep_barrier) {
338         // if this is a sync point in the serial sequence, then the previous
339         // outputs are guaranteed to be completed after the execution of this
340         // task so the previous output nodes can be cleared.
341         info->last_out = NULL;
342       } else {
343         info->last_out = __kmp_node_ref(node);
344       }
345     } else if (dep->flags.in) {
346       // in --> link node to either last_out or last_mtxs, clean earlier deps
347       if (last_mtxs) {
348         npredecessors +=
349             __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
350         __kmp_node_deref(thread, last_out);
351         info->last_out = NULL;
352         if (info->last_flag == ENTRY_LAST_MTXS && last_ins) { // MTXS were last
353           // clean old INS before creating new list
354           __kmp_depnode_list_free(thread, last_ins);
355           info->last_ins = NULL;
356         }
357       } else {
358         // link node as successor of the last_out if any
359         npredecessors +=
360             __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
361       }
362       info->last_flag = ENTRY_LAST_INS;
363       info->last_ins = __kmp_add_node(thread, info->last_ins, node);
364     } else {
365       KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
366       // mtx --> link node to either last_out or last_ins, clean earlier deps
367       if (last_ins) {
368         npredecessors +=
369             __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
370         __kmp_node_deref(thread, last_out);
371         info->last_out = NULL;
372         if (info->last_flag == ENTRY_LAST_INS && last_mtxs) { // INS were last
373           // clean old MTXS before creating new list
374           __kmp_depnode_list_free(thread, last_mtxs);
375           info->last_mtxs = NULL;
376         }
377       } else {
378         // link node as successor of the last_out if any
379         npredecessors +=
380             __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
381       }
382       info->last_flag = ENTRY_LAST_MTXS;
383       info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
384       if (info->mtx_lock == NULL) {
385         info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
386         __kmp_init_lock(info->mtx_lock);
387       }
388       KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
389       kmp_int32 m;
390       // Save lock in node's array
391       for (m = 0; m < MAX_MTX_DEPS; ++m) {
392         // sort pointers in decreasing order to avoid potential livelock
393         if (node->dn.mtx_locks[m] < info->mtx_lock) {
394           KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
395           for (int n = node->dn.mtx_num_locks; n > m; --n) {
396             // shift right all lesser non-NULL pointers
397             KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
398             node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
399           }
400           node->dn.mtx_locks[m] = info->mtx_lock;
401           break;
402         }
403       }
404       KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
405       node->dn.mtx_num_locks++;
406     }
407   }
408   KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
409                 gtid, npredecessors));
410   return npredecessors;
411 }
412 
413 #define NO_DEP_BARRIER (false)
414 #define DEP_BARRIER (true)
415 
416 // returns true if the task has any outstanding dependence
417 static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
418                              kmp_task_t *task, kmp_dephash_t **hash,
419                              bool dep_barrier, kmp_int32 ndeps,
420                              kmp_depend_info_t *dep_list,
421                              kmp_int32 ndeps_noalias,
422                              kmp_depend_info_t *noalias_dep_list) {
423   int i, n_mtxs = 0;
424 #if KMP_DEBUG
425   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
426 #endif
427   KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
428                 "possibly aliased dependencies, %d non-aliased dependencies : "
429                 "dep_barrier=%d .\n",
430                 gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));
431 
432   // Filter deps in dep_list
433   // TODO: Different algorithm for large dep_list ( > 10 ? )
434   for (i = 0; i < ndeps; i++) {
435     if (dep_list[i].base_addr != 0) {
436       for (int j = i + 1; j < ndeps; j++) {
437         if (dep_list[i].base_addr == dep_list[j].base_addr) {
438           dep_list[i].flags.in |= dep_list[j].flags.in;
439           dep_list[i].flags.out |=
440               (dep_list[j].flags.out ||
441                (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
442                (dep_list[i].flags.mtx && dep_list[j].flags.in));
          dep_list[i].flags.mtx =
              (dep_list[i].flags.mtx | dep_list[j].flags.mtx) &&
              !dep_list[i].flags.out;
446           dep_list[j].base_addr = 0; // Mark j element as void
447         }
448       }
449       if (dep_list[i].flags.mtx) {
450         // limit number of mtx deps to MAX_MTX_DEPS per node
451         if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
452           ++n_mtxs;
453         } else {
454           dep_list[i].flags.in = 1; // downgrade mutexinoutset to inout
455           dep_list[i].flags.out = 1;
456           dep_list[i].flags.mtx = 0;
457         }
458       }
459     }
460   }
461 
462   // doesn't need to be atomic as no other thread is going to be accessing this
463   // node just yet.
  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependencies
466   node->dn.npredecessors = -1;
467 
468   // used to pack all npredecessors additions into a single atomic operation at
469   // the end
470   int npredecessors;
471 
472   npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
473                                            dep_list, task);
474   npredecessors += __kmp_process_deps<false>(
475       gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);
476 
477   node->dn.task = task;
478   KMP_MB();
479 
480   // Account for our initial fake value
481   npredecessors++;
482 
483   // Update predecessors and obtain current value to check if there are still
484   // any outstanding dependences (some tasks may have finished while we
485   // processed the dependences)
486   npredecessors =
487       node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;
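  // Worked example (hypothetical counts): 3 predecessors were linked and 2 of
  // them completed while we were processing, so the node already holds
  // -1 - 2 = -3; with the local count 3 + 1 = 4, fetch_add leaves the counter
  // (and npredecessors) at 1: one dependence is still outstanding.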
488 
489   KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
490                 gtid, npredecessors, taskdata));
491 
492   // beyond this point the task could be queued (and executed) by a releasing
493   // task...
  return npredecessors > 0;
495 }
496 
497 /*!
498 @ingroup TASKING
499 @param loc_ref location of the original task directive
500 @param gtid Global Thread ID of encountering thread
501 @param new_task task thunk allocated by __kmp_omp_task_alloc() for the ''new
502 task''
503 @param ndeps Number of depend items with possible aliasing
504 @param dep_list List of depend items with possible aliasing
505 @param ndeps_noalias Number of depend items with no aliasing
506 @param noalias_dep_list List of depend items with no aliasing
507 
508 @return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not
509 suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued
510 
511 Schedule a non-thread-switchable task with dependences for execution
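
As an illustrative sketch (the exact lowering is compiler-specific), a
construct such as

@code
#pragma omp task depend(inout: x)
{ body(); }
@endcode

reaches this entry point with ndeps == 1 and a single kmp_depend_info_t
whose flags.in and flags.out bits are both set.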
512 */
513 kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
514                                     kmp_task_t *new_task, kmp_int32 ndeps,
515                                     kmp_depend_info_t *dep_list,
516                                     kmp_int32 ndeps_noalias,
517                                     kmp_depend_info_t *noalias_dep_list) {
518 
519   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
520   KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
521                 loc_ref, new_taskdata));
522   __kmp_assert_valid_gtid(gtid);
523   kmp_info_t *thread = __kmp_threads[gtid];
524   kmp_taskdata_t *current_task = thread->th.th_current_task;
525 
526 #if OMPT_SUPPORT
527   if (ompt_enabled.enabled) {
528     if (!current_task->ompt_task_info.frame.enter_frame.ptr)
529       current_task->ompt_task_info.frame.enter_frame.ptr =
530           OMPT_GET_FRAME_ADDRESS(0);
531     if (ompt_enabled.ompt_callback_task_create) {
532       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
533           &(current_task->ompt_task_info.task_data),
534           &(current_task->ompt_task_info.frame),
535           &(new_taskdata->ompt_task_info.task_data),
536           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
537           OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
538     }
539 
    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
541   }
542 
543 #if OMPT_OPTIONAL
544   /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
547     kmp_int32 i;
548 
549     int ompt_ndeps = ndeps + ndeps_noalias;
550     ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
551         thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));
552 
553     KMP_ASSERT(ompt_deps != NULL);
554 
555     for (i = 0; i < ndeps; i++) {
556       ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
557       if (dep_list[i].flags.in && dep_list[i].flags.out)
558         ompt_deps[i].dependence_type = ompt_dependence_type_inout;
559       else if (dep_list[i].flags.out)
560         ompt_deps[i].dependence_type = ompt_dependence_type_out;
561       else if (dep_list[i].flags.in)
562         ompt_deps[i].dependence_type = ompt_dependence_type_in;
563       else if (dep_list[i].flags.mtx)
564         ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
565     }
566     for (i = 0; i < ndeps_noalias; i++) {
567       ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
568       if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
569         ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
570       else if (noalias_dep_list[i].flags.out)
571         ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
572       else if (noalias_dep_list[i].flags.in)
573         ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
574       else if (noalias_dep_list[i].flags.mtx)
575         ompt_deps[ndeps + i].dependence_type =
576             ompt_dependence_type_mutexinoutset;
577     }
578     ompt_callbacks.ompt_callback(ompt_callback_dependences)(
579         &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
580     /* We can now free the allocated memory for the dependencies */
581     /* For OMPD we might want to delay the free until end of this function */
582     KMP_OMPT_DEPS_FREE(thread, ompt_deps);
583   }
584 #endif /* OMPT_OPTIONAL */
585 #endif /* OMPT_SUPPORT */
586 
587   bool serial = current_task->td_flags.team_serial ||
588                 current_task->td_flags.tasking_ser ||
589                 current_task->td_flags.final;
590   kmp_task_team_t *task_team = thread->th.th_task_team;
591   serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);
592 
593   if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
594     /* if no dependencies have been tracked yet, create the dependence hash */
595     if (current_task->td_dephash == NULL)
596       current_task->td_dephash = __kmp_dephash_create(thread, current_task);
597 
598 #if USE_FAST_MEMORY
599     kmp_depnode_t *node =
600         (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
601 #else
602     kmp_depnode_t *node =
603         (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
604 #endif
605 
606     __kmp_init_node(node);
607     new_taskdata->td_depnode = node;
608 
609     if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
610                          NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
611                          noalias_dep_list)) {
612       KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
613                     "dependencies: "
614                     "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
615                     gtid, loc_ref, new_taskdata));
616 #if OMPT_SUPPORT
617       if (ompt_enabled.enabled) {
618         current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
619       }
620 #endif
621       return TASK_CURRENT_NOT_QUEUED;
622     }
623   } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
628   }
629 
630   KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
631                 "dependencies : "
632                 "loc=%p task=%p, transferring to __kmp_omp_task\n",
633                 gtid, loc_ref, new_taskdata));
634 
635   kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
636 #if OMPT_SUPPORT
637   if (ompt_enabled.enabled) {
638     current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
639   }
640 #endif
641   return ret;
642 }
643 
644 #if OMPT_SUPPORT
645 void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
646                                 ompt_data_t *taskwait_task_data) {
647   if (ompt_enabled.ompt_callback_task_schedule) {
648     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
649         &(current_task->ompt_task_info.task_data), ompt_task_switch,
650         taskwait_task_data);
651     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
652         taskwait_task_data, ompt_task_complete,
653         &(current_task->ompt_task_info.task_data));
654   }
655   current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
656   *taskwait_task_data = ompt_data_none;
657 }
658 #endif /* OMPT_SUPPORT */
659 
660 /*!
661 @ingroup TASKING
662 @param loc_ref location of the original task directive
663 @param gtid Global Thread ID of encountering thread
664 @param ndeps Number of depend items with possible aliasing
665 @param dep_list List of depend items with possible aliasing
666 @param ndeps_noalias Number of depend items with no aliasing
667 @param noalias_dep_list List of depend items with no aliasing
668 
Blocks the current task until all specified dependencies have been fulfilled.
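
As an illustrative sketch (the exact lowering is compiler-specific), the
dependence-wait part of a construct such as

@code
#pragma omp task if(0) depend(in: x)
{}
@endcode

maps onto this entry point with a single 'in' dependence.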
670 */
671 void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
672                           kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
673                           kmp_depend_info_t *noalias_dep_list) {
674   KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));
675 
676   if (ndeps == 0 && ndeps_noalias == 0) {
677     KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
678                   "wait upon : loc=%p\n",
679                   gtid, loc_ref));
680     return;
681   }
682   __kmp_assert_valid_gtid(gtid);
683   kmp_info_t *thread = __kmp_threads[gtid];
684   kmp_taskdata_t *current_task = thread->th.th_current_task;
685 
686 #if OMPT_SUPPORT
687   // this function represents a taskwait construct with depend clause
688   // We signal 4 events:
689   //  - creation of the taskwait task
690   //  - dependences of the taskwait task
691   //  - schedule and finish of the taskwait task
692   ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
693   KMP_ASSERT(taskwait_task_data->ptr == NULL);
694   if (ompt_enabled.enabled) {
695     if (!current_task->ompt_task_info.frame.enter_frame.ptr)
696       current_task->ompt_task_info.frame.enter_frame.ptr =
697           OMPT_GET_FRAME_ADDRESS(0);
698     if (ompt_enabled.ompt_callback_task_create) {
699       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
700           &(current_task->ompt_task_info.task_data),
701           &(current_task->ompt_task_info.frame), taskwait_task_data,
702           ompt_task_explicit | ompt_task_undeferred | ompt_task_mergeable, 1,
703           OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
704     }
705   }
706 
707 #if OMPT_OPTIONAL
708   /* OMPT grab all dependences if requested by the tool */
709   if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
710     kmp_int32 i;
711 
712     int ompt_ndeps = ndeps + ndeps_noalias;
713     ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
714         thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));
715 
716     KMP_ASSERT(ompt_deps != NULL);
717 
718     for (i = 0; i < ndeps; i++) {
719       ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
720       if (dep_list[i].flags.in && dep_list[i].flags.out)
721         ompt_deps[i].dependence_type = ompt_dependence_type_inout;
722       else if (dep_list[i].flags.out)
723         ompt_deps[i].dependence_type = ompt_dependence_type_out;
724       else if (dep_list[i].flags.in)
725         ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
729     }
730     for (i = 0; i < ndeps_noalias; i++) {
731       ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
732       if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
733         ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
734       else if (noalias_dep_list[i].flags.out)
735         ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
736       else if (noalias_dep_list[i].flags.in)
737         ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
738       else if (noalias_dep_list[i].flags.mtx)
739         ompt_deps[ndeps + i].dependence_type =
740             ompt_dependence_type_mutexinoutset;
741     }
742     ompt_callbacks.ompt_callback(ompt_callback_dependences)(
743         taskwait_task_data, ompt_deps, ompt_ndeps);
744     /* We can now free the allocated memory for the dependencies */
745     /* For OMPD we might want to delay the free until end of this function */
746     KMP_OMPT_DEPS_FREE(thread, ompt_deps);
747     ompt_deps = NULL;
748   }
749 #endif /* OMPT_OPTIONAL */
750 #endif /* OMPT_SUPPORT */
751 
752   // We can return immediately as:
753   // - dependences are not computed in serial teams (except with proxy tasks)
754   // - if the dephash is not yet created it means we have nothing to wait for
755   bool ignore = current_task->td_flags.team_serial ||
756                 current_task->td_flags.tasking_ser ||
757                 current_task->td_flags.final;
758   ignore = ignore && thread->th.th_task_team != NULL &&
759            thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
760   ignore = ignore || current_task->td_dephash == NULL;
761 
762   if (ignore) {
763     KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
764                   "dependencies : loc=%p\n",
765                   gtid, loc_ref));
766 #if OMPT_SUPPORT
767     __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
768 #endif /* OMPT_SUPPORT */
769     return;
770   }
771 
772   kmp_depnode_t node = {0};
773   __kmp_init_node(&node);
774   // the stack owns the node
775   __kmp_node_ref(&node);
776 
777   if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
778                         DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
779                         noalias_dep_list)) {
780     KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
781                   "dependencies : loc=%p\n",
782                   gtid, loc_ref));
783 #if OMPT_SUPPORT
784     __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
785 #endif /* OMPT_SUPPORT */
786     return;
787   }
788 
789   int thread_finished = FALSE;
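  // Rather than blocking, the waiting thread keeps executing other ready
  // tasks until every predecessor has completed and decremented
  // node.dn.npredecessors to zero.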
790   kmp_flag_32<false, false> flag(
791       (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
792   while (node.dn.npredecessors > 0) {
793     flag.execute_tasks(thread, gtid, FALSE,
794                        &thread_finished USE_ITT_BUILD_ARG(NULL),
795                        __kmp_task_stealing_constraint);
796   }
797 
798 #if OMPT_SUPPORT
799   __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
800 #endif /* OMPT_SUPPORT */
801   KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
802                 gtid, loc_ref));
803 }
804