1 /*
2  * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_i18n.h"
15 #include "kmp_itt.h"
16 #include "kmp_stats.h"
17 #include "kmp_wait_release.h"
18 #include "kmp_taskdeps.h"
19 
20 #if OMPT_SUPPORT
21 #include "ompt-specific.h"
22 #endif
23 
24 /* forward declaration */
25 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
26                                  kmp_info_t *this_thr);
27 static void __kmp_alloc_task_deque(kmp_info_t *thread,
28                                    kmp_thread_data_t *thread_data);
29 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
30                                            kmp_task_team_t *task_team);
31 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
32 
33 #ifdef BUILD_TIED_TASK_STACK
34 
35 //  __kmp_trace_task_stack: print the tied tasks from the task stack in order
//  from top to bottom
37 //
38 //  gtid: global thread identifier for thread containing stack
39 //  thread_data: thread data for task team thread containing stack
40 //  threshold: value above which the trace statement triggers
41 //  location: string identifying call site of this function (for trace)
42 static void __kmp_trace_task_stack(kmp_int32 gtid,
43                                    kmp_thread_data_t *thread_data,
44                                    int threshold, char *location) {
45   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
46   kmp_taskdata_t **stack_top = task_stack->ts_top;
47   kmp_int32 entries = task_stack->ts_entries;
48   kmp_taskdata_t *tied_task;
49 
50   KA_TRACE(
51       threshold,
52       ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
53        "first_block = %p, stack_top = %p \n",
54        location, gtid, entries, task_stack->ts_first_block, stack_top));
55 
56   KMP_DEBUG_ASSERT(stack_top != NULL);
57   KMP_DEBUG_ASSERT(entries > 0);
58 
59   while (entries != 0) {
60     KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
61     // fix up ts_top if we need to pop from previous block
    if ((entries & TASK_STACK_INDEX_MASK) == 0) {
63       kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
64 
65       stack_block = stack_block->sb_prev;
66       stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
67     }
68 
69     // finish bookkeeping
70     stack_top--;
71     entries--;
72 
73     tied_task = *stack_top;
74 
75     KMP_DEBUG_ASSERT(tied_task != NULL);
76     KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
77 
78     KA_TRACE(threshold,
79              ("__kmp_trace_task_stack(%s):             gtid=%d, entry=%d, "
80               "stack_top=%p, tied_task=%p\n",
81               location, gtid, entries, stack_top, tied_task));
82   }
83   KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
84 
85   KA_TRACE(threshold,
86            ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
87             location, gtid));
88 }
89 
90 //  __kmp_init_task_stack: initialize the task stack for the first time
91 //  after a thread_data structure is created.
92 //  It should not be necessary to do this again (assuming the stack works).
93 //
94 //  gtid: global thread identifier of calling thread
95 //  thread_data: thread data for task team thread containing stack
96 static void __kmp_init_task_stack(kmp_int32 gtid,
97                                   kmp_thread_data_t *thread_data) {
98   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
99   kmp_stack_block_t *first_block;
100 
101   // set up the first block of the stack
102   first_block = &task_stack->ts_first_block;
103   task_stack->ts_top = (kmp_taskdata_t **)first_block;
104   memset((void *)first_block, '\0',
105          TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
106 
107   // initialize the stack to be empty
108   task_stack->ts_entries = TASK_STACK_EMPTY;
109   first_block->sb_next = NULL;
110   first_block->sb_prev = NULL;
111 }
112 
113 //  __kmp_free_task_stack: free the task stack when thread_data is destroyed.
114 //
115 //  gtid: global thread identifier for calling thread
116 //  thread_data: thread info for thread containing stack
117 static void __kmp_free_task_stack(kmp_int32 gtid,
118                                   kmp_thread_data_t *thread_data) {
  kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
  kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
  kmp_info_t *thread = __kmp_threads[gtid]; // needed by __kmp_thread_free below
121 
122   KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
  // walk the block list, freeing every block except the embedded first one
124   while (stack_block != NULL) {
125     kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
126 
127     stack_block->sb_next = NULL;
128     stack_block->sb_prev = NULL;
129     if (stack_block != &task_stack->ts_first_block) {
130       __kmp_thread_free(thread,
131                         stack_block); // free the block, if not the first
132     }
133     stack_block = next_block;
134   }
135   // initialize the stack to be empty
136   task_stack->ts_entries = 0;
137   task_stack->ts_top = NULL;
138 }
139 
140 //  __kmp_push_task_stack: Push the tied task onto the task stack.
141 //     Grow the stack if necessary by allocating another block.
142 //
143 //  gtid: global thread identifier for calling thread
144 //  thread: thread info for thread containing stack
145 //  tied_task: the task to push on the stack
146 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
147                                   kmp_taskdata_t *tied_task) {
148   // GEH - need to consider what to do if tt_threads_data not allocated yet
149   kmp_thread_data_t *thread_data =
150       &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
151   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
152 
153   if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
154     return; // Don't push anything on stack if team or team tasks are serialized
155   }
156 
157   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
158   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
159 
160   KA_TRACE(20,
161            ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
162             gtid, thread, tied_task));
163   // Store entry
164   *(task_stack->ts_top) = tied_task;
165 
166   // Do bookkeeping for next push
167   task_stack->ts_top++;
168   task_stack->ts_entries++;
169 
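  // A zero within-block index (ts_entries & TASK_STACK_INDEX_MASK) means the
  // push above filled the current block, so move ts_top into the next block,
  // allocating one if it does not exist yet.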
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
171     // Find beginning of this task block
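    // ts_top points one past the last slot of the just-filled block, so
    // stepping back TASK_STACK_BLOCK_SIZE entries yields its first slot; the
    // cast below assumes sb_block is the first member of kmp_stack_block_t.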
172     kmp_stack_block_t *stack_block =
173         (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
174 
175     // Check if we already have a block
176     if (stack_block->sb_next !=
177         NULL) { // reset ts_top to beginning of next block
178       task_stack->ts_top = &stack_block->sb_next->sb_block[0];
179     } else { // Alloc new block and link it up
180       kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
181           thread, sizeof(kmp_stack_block_t));
182 
183       task_stack->ts_top = &new_block->sb_block[0];
184       stack_block->sb_next = new_block;
185       new_block->sb_prev = stack_block;
186       new_block->sb_next = NULL;
187 
188       KA_TRACE(
189           30,
190           ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
191            gtid, tied_task, new_block));
192     }
193   }
194   KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
195                 tied_task));
196 }
197 
//  __kmp_pop_task_stack: Pop the tied task from the task stack.  Don't return
//  the task, just check to make sure it matches the ending task passed in.
//
//  gtid: global thread identifier for the calling thread
//  thread: thread info structure containing stack
//  ending_task: the task that is ending (should match the task popped off the
//  stack)
205 static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
206                                  kmp_taskdata_t *ending_task) {
207   // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
210   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
211   kmp_taskdata_t *tied_task;
212 
213   if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
214     // Don't pop anything from stack if team or team tasks are serialized
215     return;
216   }
217 
218   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
219   KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
220 
221   KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
222                 thread));
223 
224   // fix up ts_top if we need to pop from previous block
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
226     kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
227 
228     stack_block = stack_block->sb_prev;
229     task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
230   }
231 
232   // finish bookkeeping
233   task_stack->ts_top--;
234   task_stack->ts_entries--;
235 
236   tied_task = *(task_stack->ts_top);
237 
238   KMP_DEBUG_ASSERT(tied_task != NULL);
239   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
240   KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
241 
242   KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
243                 tied_task));
244   return;
245 }
246 #endif /* BUILD_TIED_TASK_STACK */
247 
248 // returns 1 if new task is allowed to execute, 0 otherwise
249 // checks Task Scheduling constraint (if requested) and
250 // mutexinoutset dependencies if any
251 static bool __kmp_task_is_allowed(int gtid, const kmp_int32 is_constrained,
252                                   const kmp_taskdata_t *tasknew,
253                                   const kmp_taskdata_t *taskcurr) {
254   if (is_constrained && (tasknew->td_flags.tiedness == TASK_TIED)) {
255     // Check if the candidate obeys the Task Scheduling Constraints (TSC)
256     // only descendant of all deferred tied tasks can be scheduled, checking
257     // the last one is enough, as it in turn is the descendant of all others
258     kmp_taskdata_t *current = taskcurr->td_last_tied;
259     KMP_DEBUG_ASSERT(current != NULL);
260     // check if the task is not suspended on barrier
261     if (current->td_flags.tasktype == TASK_EXPLICIT ||
262         current->td_taskwait_thread > 0) { // <= 0 on barrier
263       kmp_int32 level = current->td_level;
264       kmp_taskdata_t *parent = tasknew->td_parent;
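      // Walk up tasknew's ancestors, but no higher than current's nesting
      // level; if 'current' is not reached, tasknew is not its descendant and
      // the Task Scheduling Constraint forbids executing it now.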
265       while (parent != current && parent->td_level > level) {
266         // check generation up to the level of the current task
267         parent = parent->td_parent;
268         KMP_DEBUG_ASSERT(parent != NULL);
269       }
270       if (parent != current)
271         return false;
272     }
273   }
274   // Check mutexinoutset dependencies, acquire locks
275   kmp_depnode_t *node = tasknew->td_depnode;
276   if (UNLIKELY(node && (node->dn.mtx_num_locks > 0))) {
277     for (int i = 0; i < node->dn.mtx_num_locks; ++i) {
278       KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
279       if (__kmp_test_lock(node->dn.mtx_locks[i], gtid))
280         continue;
281       // could not get the lock, release previous locks
282       for (int j = i - 1; j >= 0; --j)
283         __kmp_release_lock(node->dn.mtx_locks[j], gtid);
284       return false;
285     }
286     // negative num_locks means all locks acquired successfully
287     node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
288   }
289   return true;
290 }
291 
292 // __kmp_realloc_task_deque:
293 // Re-allocates a task deque for a particular thread, copies the content from
294 // the old deque and adjusts the necessary data structures relating to the
295 // deque. This operation must be done with the deque_lock being held
296 static void __kmp_realloc_task_deque(kmp_info_t *thread,
297                                      kmp_thread_data_t *thread_data) {
298   kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
299   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == size);
300   kmp_int32 new_size = 2 * size;
301 
302   KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
303                 "%d] for thread_data %p\n",
304                 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
305 
306   kmp_taskdata_t **new_deque =
307       (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
308 
309   int i, j;
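  // Copy the live entries in deque order, starting at the old head, so they
  // occupy slots 0..size-1 of the new deque; head and tail are re-based below.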
310   for (i = thread_data->td.td_deque_head, j = 0; j < size;
311        i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
312     new_deque[j] = thread_data->td.td_deque[i];
313 
314   __kmp_free(thread_data->td.td_deque);
315 
316   thread_data->td.td_deque_head = 0;
317   thread_data->td.td_deque_tail = size;
318   thread_data->td.td_deque = new_deque;
319   thread_data->td.td_deque_size = new_size;
320 }
321 
322 //  __kmp_push_task: Add a task to the thread's deque
323 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
324   kmp_info_t *thread = __kmp_threads[gtid];
325   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
326 
327   // If we encounter a hidden helper task, and the current thread is not a
328   // hidden helper thread, we have to give the task to any hidden helper thread
329   // starting from its shadow one.
330   if (UNLIKELY(taskdata->td_flags.hidden_helper &&
331                !KMP_HIDDEN_HELPER_THREAD(gtid))) {
332     kmp_int32 shadow_gtid = KMP_GTID_TO_SHADOW_GTID(gtid);
333     __kmpc_give_task(task, __kmp_tid_from_gtid(shadow_gtid));
334     // Signal the hidden helper threads.
335     __kmp_hidden_helper_worker_thread_signal();
336     return TASK_SUCCESSFULLY_PUSHED;
337   }
338 
339   kmp_task_team_t *task_team = thread->th.th_task_team;
340   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
341   kmp_thread_data_t *thread_data;
342 
343   KA_TRACE(20,
344            ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
345 
346   if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
347     // untied task needs to increment counter so that the task structure is not
348     // freed prematurely
349     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
350     KMP_DEBUG_USE_VAR(counter);
351     KA_TRACE(
352         20,
353         ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
354          gtid, counter, taskdata));
355   }
356 
357   // The first check avoids building task_team thread data if serialized
358   if (UNLIKELY(taskdata->td_flags.task_serial)) {
359     KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
360                   "TASK_NOT_PUSHED for task %p\n",
361                   gtid, taskdata));
362     return TASK_NOT_PUSHED;
363   }
364 
365   // Now that serialized tasks have returned, we can assume that we are not in
366   // immediate exec mode
367   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
368   if (UNLIKELY(!KMP_TASKING_ENABLED(task_team))) {
369     __kmp_enable_tasking(task_team, thread);
370   }
371   KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
372   KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
373 
374   // Find tasking deque specific to encountering thread
375   thread_data = &task_team->tt.tt_threads_data[tid];
376 
  // No lock needed since only the owner can allocate. If the task is
  // hidden_helper, we don't need it either because the deque for hidden
  // helper thread data has already been initialized.
380   if (UNLIKELY(thread_data->td.td_deque == NULL)) {
381     __kmp_alloc_task_deque(thread, thread_data);
382   }
383 
384   int locked = 0;
385   // Check if deque is full
386   if (TCR_4(thread_data->td.td_deque_ntasks) >=
387       TASK_DEQUE_SIZE(thread_data->td)) {
388     if (__kmp_enable_task_throttling &&
389         __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
390                               thread->th.th_current_task)) {
391       KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
392                     "TASK_NOT_PUSHED for task %p\n",
393                     gtid, taskdata));
394       return TASK_NOT_PUSHED;
395     } else {
396       __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
397       locked = 1;
398       if (TCR_4(thread_data->td.td_deque_ntasks) >=
399           TASK_DEQUE_SIZE(thread_data->td)) {
400         // expand deque to push the task which is not allowed to execute
401         __kmp_realloc_task_deque(thread, thread_data);
402       }
403     }
404   }
405   // Lock the deque for the task push operation
406   if (!locked) {
407     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
408     // Need to recheck as we can get a proxy task from thread outside of OpenMP
409     if (TCR_4(thread_data->td.td_deque_ntasks) >=
410         TASK_DEQUE_SIZE(thread_data->td)) {
411       if (__kmp_enable_task_throttling &&
412           __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
413                                 thread->th.th_current_task)) {
414         __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
415         KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; "
416                       "returning TASK_NOT_PUSHED for task %p\n",
417                       gtid, taskdata));
418         return TASK_NOT_PUSHED;
419       } else {
420         // expand deque to push the task which is not allowed to execute
421         __kmp_realloc_task_deque(thread, thread_data);
422       }
423     }
424   }
  // Must have room, since only the calling thread can add tasks to this deque
426   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
427                    TASK_DEQUE_SIZE(thread_data->td));
428 
429   thread_data->td.td_deque[thread_data->td.td_deque_tail] =
430       taskdata; // Push taskdata
431   // Wrap index.
432   thread_data->td.td_deque_tail =
433       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
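  // TASK_DEQUE_MASK implements the circular wrap with a bitwise AND, which
  // relies on the deque size staying a power of two (it doubles on realloc).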
434   TCW_4(thread_data->td.td_deque_ntasks,
435         TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
436   KMP_FSYNC_RELEASING(thread->th.th_current_task); // releasing self
437   KMP_FSYNC_RELEASING(taskdata); // releasing child
438   KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
439                 "task=%p ntasks=%d head=%u tail=%u\n",
440                 gtid, taskdata, thread_data->td.td_deque_ntasks,
441                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
442 
443   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
444 
445   return TASK_SUCCESSFULLY_PUSHED;
446 }
447 
// __kmp_pop_current_task_from_thread: restore the calling thread's current
// task to its parent when a team ends
450 //
451 // this_thr: thread structure to set current_task in.
452 void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
453   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
454                 "this_thread=%p, curtask=%p, "
455                 "curtask_parent=%p\n",
456                 0, this_thr, this_thr->th.th_current_task,
457                 this_thr->th.th_current_task->td_parent));
458 
459   this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
460 
461   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
462                 "this_thread=%p, curtask=%p, "
463                 "curtask_parent=%p\n",
464                 0, this_thr, this_thr->th.th_current_task,
465                 this_thr->th.th_current_task->td_parent));
466 }
467 
// __kmp_push_current_task_to_thread: set up the current task in the given
// thread for a new team
470 //
471 // this_thr: thread structure to set up
472 // team: team for implicit task data
473 // tid: thread within team to set up
474 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
475                                        int tid) {
476   // current task of the thread is a parent of the new just created implicit
477   // tasks of new team
478   KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
479                 "curtask=%p "
480                 "parent_task=%p\n",
481                 tid, this_thr, this_thr->th.th_current_task,
482                 team->t.t_implicit_task_taskdata[tid].td_parent));
483 
484   KMP_DEBUG_ASSERT(this_thr != NULL);
485 
486   if (tid == 0) {
487     if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
488       team->t.t_implicit_task_taskdata[0].td_parent =
489           this_thr->th.th_current_task;
490       this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
491     }
492   } else {
493     team->t.t_implicit_task_taskdata[tid].td_parent =
494         team->t.t_implicit_task_taskdata[0].td_parent;
495     this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
496   }
497 
498   KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
499                 "curtask=%p "
500                 "parent_task=%p\n",
501                 tid, this_thr, this_thr->th.th_current_task,
502                 team->t.t_implicit_task_taskdata[tid].td_parent));
503 }
504 
505 // __kmp_task_start: bookkeeping for a task starting execution
506 //
507 // GTID: global thread id of calling thread
508 // task: task starting execution
509 // current_task: task suspending
510 static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
511                              kmp_taskdata_t *current_task) {
512   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
513   kmp_info_t *thread = __kmp_threads[gtid];
514 
515   KA_TRACE(10,
516            ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
517             gtid, taskdata, current_task));
518 
519   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
520 
521   // mark currently executing task as suspended
522   // TODO: GEH - make sure root team implicit task is initialized properly.
523   // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
524   current_task->td_flags.executing = 0;
525 
526 // Add task to stack if tied
527 #ifdef BUILD_TIED_TASK_STACK
528   if (taskdata->td_flags.tiedness == TASK_TIED) {
529     __kmp_push_task_stack(gtid, thread, taskdata);
530   }
531 #endif /* BUILD_TIED_TASK_STACK */
532 
533   // mark starting task as executing and as current task
534   thread->th.th_current_task = taskdata;
535 
536   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
537                    taskdata->td_flags.tiedness == TASK_UNTIED);
538   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
539                    taskdata->td_flags.tiedness == TASK_UNTIED);
540   taskdata->td_flags.started = 1;
541   taskdata->td_flags.executing = 1;
542   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
543   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
544 
545   // GEH TODO: shouldn't we pass some sort of location identifier here?
546   // APT: yes, we will pass location here.
547   // need to store current thread state (in a thread or taskdata structure)
548   // before setting work_state, otherwise wrong state is set after end of task
549 
550   KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
551 
552   return;
553 }
554 
555 #if OMPT_SUPPORT
556 //------------------------------------------------------------------------------
557 // __ompt_task_init:
558 //   Initialize OMPT fields maintained by a task. This will only be called after
559 //   ompt_start_tool, so we already know whether ompt is enabled or not.
560 
561 static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
562   // The calls to __ompt_task_init already have the ompt_enabled condition.
563   task->ompt_task_info.task_data.value = 0;
564   task->ompt_task_info.frame.exit_frame = ompt_data_none;
565   task->ompt_task_info.frame.enter_frame = ompt_data_none;
566   task->ompt_task_info.frame.exit_frame_flags =
567       ompt_frame_runtime | ompt_frame_framepointer;
568   task->ompt_task_info.frame.enter_frame_flags =
569       ompt_frame_runtime | ompt_frame_framepointer;
570 }
571 
572 // __ompt_task_start:
573 //   Build and trigger task-begin event
574 static inline void __ompt_task_start(kmp_task_t *task,
575                                      kmp_taskdata_t *current_task,
576                                      kmp_int32 gtid) {
577   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
578   ompt_task_status_t status = ompt_task_switch;
579   if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
580     status = ompt_task_yield;
581     __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
582   }
583   /* let OMPT know that we're about to run this task */
584   if (ompt_enabled.ompt_callback_task_schedule) {
585     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
586         &(current_task->ompt_task_info.task_data), status,
587         &(taskdata->ompt_task_info.task_data));
588   }
589   taskdata->ompt_task_info.scheduling_parent = current_task;
590 }
591 
592 // __ompt_task_finish:
593 //   Build and trigger final task-schedule event
594 static inline void __ompt_task_finish(kmp_task_t *task,
595                                       kmp_taskdata_t *resumed_task,
596                                       ompt_task_status_t status) {
597   if (ompt_enabled.ompt_callback_task_schedule) {
598     kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
599     if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
600         taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
601       status = ompt_task_cancel;
602     }
603 
604     /* let OMPT know that we're returning to the callee task */
605     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
606         &(taskdata->ompt_task_info.task_data), status,
607         (resumed_task ? &(resumed_task->ompt_task_info.task_data) : NULL));
608   }
609 }
610 #endif
611 
612 template <bool ompt>
613 static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
614                                                kmp_task_t *task,
615                                                void *frame_address,
616                                                void *return_address) {
617   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
618   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
619 
620   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
621                 "current_task=%p\n",
622                 gtid, loc_ref, taskdata, current_task));
623 
624   if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
625     // untied task needs to increment counter so that the task structure is not
626     // freed prematurely
627     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
628     KMP_DEBUG_USE_VAR(counter);
629     KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
630                   "incremented for task %p\n",
631                   gtid, counter, taskdata));
632   }
633 
634   taskdata->td_flags.task_serial =
635       1; // Execute this task immediately, not deferred.
636   __kmp_task_start(gtid, task, current_task);
637 
638 #if OMPT_SUPPORT
639   if (ompt) {
640     if (current_task->ompt_task_info.frame.enter_frame.ptr == NULL) {
641       current_task->ompt_task_info.frame.enter_frame.ptr =
642           taskdata->ompt_task_info.frame.exit_frame.ptr = frame_address;
643       current_task->ompt_task_info.frame.enter_frame_flags =
644           taskdata->ompt_task_info.frame.exit_frame_flags =
645               ompt_frame_application | ompt_frame_framepointer;
646     }
647     if (ompt_enabled.ompt_callback_task_create) {
648       ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
649       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
650           &(parent_info->task_data), &(parent_info->frame),
651           &(taskdata->ompt_task_info.task_data),
652           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
653           return_address);
654     }
655     __ompt_task_start(task, current_task, gtid);
656   }
657 #endif // OMPT_SUPPORT
658 
659   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
660                 loc_ref, taskdata));
661 }
662 
663 #if OMPT_SUPPORT
664 OMPT_NOINLINE
665 static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
666                                            kmp_task_t *task,
667                                            void *frame_address,
668                                            void *return_address) {
669   __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
670                                            return_address);
671 }
672 #endif // OMPT_SUPPORT
673 
674 // __kmpc_omp_task_begin_if0: report that a given serialized task has started
675 // execution
676 //
677 // loc_ref: source location information; points to beginning of task block.
678 // gtid: global thread number.
679 // task: task thunk for the started task.
680 void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
681                                kmp_task_t *task) {
682 #if OMPT_SUPPORT
683   if (UNLIKELY(ompt_enabled.enabled)) {
684     OMPT_STORE_RETURN_ADDRESS(gtid);
685     __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
686                                    OMPT_GET_FRAME_ADDRESS(1),
687                                    OMPT_LOAD_RETURN_ADDRESS(gtid));
688     return;
689   }
690 #endif
691   __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
692 }
693 
694 #ifdef TASK_UNUSED
695 // __kmpc_omp_task_begin: report that a given task has started execution
696 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
697 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
698   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
699 
700   KA_TRACE(
701       10,
702       ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
703        gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
704 
705   __kmp_task_start(gtid, task, current_task);
706 
707   KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
708                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
709   return;
710 }
711 #endif // TASK_UNUSED
712 
713 // __kmp_free_task: free the current task space and the space for shareds
714 //
715 // gtid: Global thread ID of calling thread
716 // taskdata: task to free
717 // thread: thread data structure of caller
718 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
719                             kmp_info_t *thread) {
720   KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
721                 taskdata));
722 
723   // Check to make sure all flags and counters have the correct values
724   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
725   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
726   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
727   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
728   KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
729                    taskdata->td_flags.task_serial == 1);
730   KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
731 
732   taskdata->td_flags.freed = 1;
733 // deallocate the taskdata and shared variable blocks associated with this task
734 #if USE_FAST_MEMORY
735   __kmp_fast_free(thread, taskdata);
736 #else /* ! USE_FAST_MEMORY */
737   __kmp_thread_free(thread, taskdata);
738 #endif
739   KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
740 }
741 
742 // __kmp_free_task_and_ancestors: free the current task and ancestors without
743 // children
744 //
745 // gtid: Global thread ID of calling thread
746 // taskdata: task to free
747 // thread: thread data structure of caller
748 static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
749                                           kmp_taskdata_t *taskdata,
750                                           kmp_info_t *thread) {
751   // Proxy tasks must always be allowed to free their parents
752   // because they can be run in background even in serial mode.
753   kmp_int32 team_serial =
754       (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
755       !taskdata->td_flags.proxy;
756   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
757 
758   kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
759   KMP_DEBUG_ASSERT(children >= 0);
760 
761   // Now, go up the ancestor tree to see if any ancestors can now be freed.
762   while (children == 0) {
763     kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
764 
765     KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
766                   "and freeing itself\n",
767                   gtid, taskdata));
768 
769     // --- Deallocate my ancestor task ---
770     __kmp_free_task(gtid, taskdata, thread);
771 
772     taskdata = parent_taskdata;
773 
774     if (team_serial)
775       return;
776     // Stop checking ancestors at implicit task instead of walking up ancestor
777     // tree to avoid premature deallocation of ancestors.
778     if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
779       if (taskdata->td_dephash) { // do we need to cleanup dephash?
780         int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
781         kmp_tasking_flags_t flags_old = taskdata->td_flags;
782         if (children == 0 && flags_old.complete == 1) {
783           kmp_tasking_flags_t flags_new = flags_old;
784           flags_new.complete = 0;
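          // The flags bitfield is compared-and-swapped as one 32-bit word, so
          // only the thread whose CAS flips 'complete' back to 0 goes on to
          // free the dephash entries; concurrent callers simply skip cleanup.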
785           if (KMP_COMPARE_AND_STORE_ACQ32(
786                   RCAST(kmp_int32 *, &taskdata->td_flags),
787                   *RCAST(kmp_int32 *, &flags_old),
788                   *RCAST(kmp_int32 *, &flags_new))) {
789             KA_TRACE(100, ("__kmp_free_task_and_ancestors: T#%d cleans "
790                            "dephash of implicit task %p\n",
791                            gtid, taskdata));
792             // cleanup dephash of finished implicit task
793             __kmp_dephash_free_entries(thread, taskdata->td_dephash);
794           }
795         }
796       }
797       return;
798     }
799     // Predecrement simulated by "- 1" calculation
800     children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
801     KMP_DEBUG_ASSERT(children >= 0);
802   }
803 
804   KA_TRACE(
805       20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
806            "not freeing it yet\n",
807            gtid, taskdata, children));
808 }
809 
810 // Only need to keep track of child task counts if any of the following:
811 // 1. team parallel and tasking not serialized;
812 // 2. it is a proxy or detachable or hidden helper task
813 // 3. the children counter of its parent task is greater than 0.
// The reason for the 3rd one is a serialized team that encountered a detached
// or hidden helper task T. In that case the execution of T is still deferred,
// and a regular task may depend on T; if we don't track the children here,
// task synchronization would be broken.
818 static bool __kmp_track_children_task(kmp_taskdata_t *taskdata) {
819   kmp_tasking_flags_t flags = taskdata->td_flags;
820   bool ret = !(flags.team_serial || flags.tasking_ser);
821   ret = ret || flags.proxy == TASK_PROXY ||
822         flags.detachable == TASK_DETACHABLE || flags.hidden_helper;
823   ret = ret ||
824         KMP_ATOMIC_LD_ACQ(&taskdata->td_parent->td_incomplete_child_tasks) > 0;
825   return ret;
826 }
827 
828 // __kmp_task_finish: bookkeeping to do when a task finishes execution
829 //
830 // gtid: global thread ID for calling thread
831 // task: task to be finished
832 // resumed_task: task to be resumed.  (may be NULL if task is serialized)
833 //
834 // template<ompt>: effectively ompt_enabled.enabled!=0
// the version with ompt=false is inlined, allowing the compiler to optimize
// away all OMPT code in this case
837 template <bool ompt>
838 static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
839                               kmp_taskdata_t *resumed_task) {
840   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
841   kmp_info_t *thread = __kmp_threads[gtid];
842   kmp_task_team_t *task_team =
843       thread->th.th_task_team; // might be NULL for serial teams...
844 #if KMP_DEBUG
845   kmp_int32 children = 0;
846 #endif
847   KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
848                 "task %p\n",
849                 gtid, taskdata, resumed_task));
850 
851   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
852 
853 // Pop task from stack if tied
854 #ifdef BUILD_TIED_TASK_STACK
855   if (taskdata->td_flags.tiedness == TASK_TIED) {
856     __kmp_pop_task_stack(gtid, thread, taskdata);
857   }
858 #endif /* BUILD_TIED_TASK_STACK */
859 
860   if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
861     // untied task needs to check the counter so that the task structure is not
862     // freed prematurely
863     kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
864     KA_TRACE(
865         20,
866         ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
867          gtid, counter, taskdata));
868     if (counter > 0) {
869       // untied task is not done, to be continued possibly by other thread, do
870       // not free it now
871       if (resumed_task == NULL) {
872         KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
873         resumed_task = taskdata->td_parent; // In a serialized task, the resumed
874         // task is the parent
875       }
876       thread->th.th_current_task = resumed_task; // restore current_task
877       resumed_task->td_flags.executing = 1; // resume previous task
878       KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
879                     "resuming task %p\n",
880                     gtid, taskdata, resumed_task));
881       return;
882     }
883   }
884 
885   // bookkeeping for resuming task:
886   // GEH - note tasking_ser => task_serial
887   KMP_DEBUG_ASSERT(
888       (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
889       taskdata->td_flags.task_serial);
890   if (taskdata->td_flags.task_serial) {
891     if (resumed_task == NULL) {
892       resumed_task = taskdata->td_parent; // In a serialized task, the resumed
893       // task is the parent
894     }
895   } else {
896     KMP_DEBUG_ASSERT(resumed_task !=
897                      NULL); // verify that resumed task is passed as argument
898   }
899 
  /* If the task's destructor thunk flag has been set, we need to invoke the
901      destructor thunk that has been generated by the compiler. The code is
902      placed here, since at this point other tasks might have been released
903      hence overlapping the destructor invocations with some other work in the
904      released tasks.  The OpenMP spec is not specific on when the destructors
905      are invoked, so we should be free to choose. */
906   if (UNLIKELY(taskdata->td_flags.destructors_thunk)) {
907     kmp_routine_entry_t destr_thunk = task->data1.destructors;
908     KMP_ASSERT(destr_thunk);
909     destr_thunk(gtid, task);
910   }
911 
912   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
913   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
914   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
915 
916   bool detach = false;
917   if (UNLIKELY(taskdata->td_flags.detachable == TASK_DETACHABLE)) {
918     if (taskdata->td_allow_completion_event.type ==
919         KMP_EVENT_ALLOW_COMPLETION) {
920       // event hasn't been fulfilled yet. Try to detach task.
921       __kmp_acquire_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
922       if (taskdata->td_allow_completion_event.type ==
923           KMP_EVENT_ALLOW_COMPLETION) {
924         // task finished execution
925         KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
926         taskdata->td_flags.executing = 0; // suspend the finishing task
927 
928 #if OMPT_SUPPORT
        // For a detached task that has not yet completed, report the task
        // switch here with ompt_task_detach; omp_fulfill_event will signal
        // completion later. Locking is necessary to avoid a race with
        // ompt_task_late_fulfill.
932         if (ompt)
933           __ompt_task_finish(task, resumed_task, ompt_task_detach);
934 #endif
935 
936         // no access to taskdata after this point!
937         // __kmp_fulfill_event might free taskdata at any time from now
938 
939         taskdata->td_flags.proxy = TASK_PROXY; // proxify!
940         detach = true;
941       }
942       __kmp_release_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
943     }
944   }
945 
946   if (!detach) {
947     taskdata->td_flags.complete = 1; // mark the task as completed
948 
949 #if OMPT_SUPPORT
950     // This is not a detached task, we are done here
951     if (ompt)
952       __ompt_task_finish(task, resumed_task, ompt_task_complete);
953 #endif
954     // TODO: What would be the balance between the conditions in the function
955     // and an atomic operation?
956     if (__kmp_track_children_task(taskdata)) {
957       __kmp_release_deps(gtid, taskdata);
958       // Predecrement simulated by "- 1" calculation
959 #if KMP_DEBUG
960       children = -1 +
961 #endif
962           KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
963       KMP_DEBUG_ASSERT(children >= 0);
964       if (taskdata->td_taskgroup)
965         KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
966     } else if (task_team && (task_team->tt.tt_found_proxy_tasks ||
967                              task_team->tt.tt_hidden_helper_task_encountered)) {
968       // if we found proxy or hidden helper tasks there could exist a dependency
969       // chain with the proxy task as origin
970       __kmp_release_deps(gtid, taskdata);
971     }
    // td_flags.executing must be marked as 0 after __kmp_release_deps has been
    // called. Otherwise, if a task is executed immediately from the
    // release_deps code, the flag will be reset to 1 again by this same
    // function
976     KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
977     taskdata->td_flags.executing = 0; // suspend the finishing task
978   }
979 
980   KA_TRACE(
981       20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
982            gtid, taskdata, children));
983 
984   // Free this task and then ancestor tasks if they have no children.
985   // Restore th_current_task first as suggested by John:
986   // johnmc: if an asynchronous inquiry peers into the runtime system
987   // it doesn't see the freed task as the current task.
988   thread->th.th_current_task = resumed_task;
989   if (!detach)
990     __kmp_free_task_and_ancestors(gtid, taskdata, thread);
991 
992   // TODO: GEH - make sure root team implicit task is initialized properly.
993   // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
994   resumed_task->td_flags.executing = 1; // resume previous task
995 
996   KA_TRACE(
997       10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
998            gtid, taskdata, resumed_task));
999 
1000   return;
1001 }
1002 
1003 template <bool ompt>
1004 static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
1005                                                   kmp_int32 gtid,
1006                                                   kmp_task_t *task) {
1007   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
1008                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
1009   KMP_DEBUG_ASSERT(gtid >= 0);
1010   // this routine will provide task to resume
1011   __kmp_task_finish<ompt>(gtid, task, NULL);
1012 
1013   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
1014                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
1015 
1016 #if OMPT_SUPPORT
1017   if (ompt) {
1018     ompt_frame_t *ompt_frame;
1019     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
1020     ompt_frame->enter_frame = ompt_data_none;
1021     ompt_frame->enter_frame_flags =
1022         ompt_frame_runtime | ompt_frame_framepointer;
1023   }
1024 #endif
1025 
1026   return;
1027 }
1028 
1029 #if OMPT_SUPPORT
1030 OMPT_NOINLINE
1031 void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
1032                                        kmp_task_t *task) {
1033   __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
1034 }
1035 #endif // OMPT_SUPPORT
1036 
1037 // __kmpc_omp_task_complete_if0: report that a task has completed execution
1038 //
1039 // loc_ref: source location information; points to end of task block.
1040 // gtid: global thread number.
1041 // task: task thunk for the completed task.
1042 void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
1043                                   kmp_task_t *task) {
1044 #if OMPT_SUPPORT
1045   if (UNLIKELY(ompt_enabled.enabled)) {
1046     __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
1047     return;
1048   }
1049 #endif
1050   __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
1051 }
1052 
1053 #ifdef TASK_UNUSED
1054 // __kmpc_omp_task_complete: report that a task has completed execution
1055 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
1056 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
1057                               kmp_task_t *task) {
1058   KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
1059                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1060 
1061   __kmp_task_finish<false>(gtid, task,
1062                            NULL); // Not sure how to find task to resume
1063 
1064   KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
1065                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1066   return;
1067 }
1068 #endif // TASK_UNUSED
1069 
1070 // __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
1071 // task for a given thread
1072 //
1073 // loc_ref:  reference to source location of parallel region
1074 // this_thr:  thread data structure corresponding to implicit task
1075 // team: team for this_thr
1076 // tid: thread id of given thread within team
1077 // set_curr_task: TRUE if need to push current task to thread
1078 // NOTE: Routine does not set up the implicit task ICVS.  This is assumed to
1079 // have already been done elsewhere.
1080 // TODO: Get better loc_ref.  Value passed in may be NULL
1081 void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
1082                               kmp_team_t *team, int tid, int set_curr_task) {
1083   kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
1084 
1085   KF_TRACE(
1086       10,
1087       ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
1088        tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
1089 
1090   task->td_task_id = KMP_GEN_TASK_ID();
1091   task->td_team = team;
1092   //    task->td_parent   = NULL;  // fix for CQ230101 (broken parent task info
1093   //    in debugger)
1094   task->td_ident = loc_ref;
1095   task->td_taskwait_ident = NULL;
1096   task->td_taskwait_counter = 0;
1097   task->td_taskwait_thread = 0;
1098 
1099   task->td_flags.tiedness = TASK_TIED;
1100   task->td_flags.tasktype = TASK_IMPLICIT;
1101   task->td_flags.proxy = TASK_FULL;
1102 
1103   // All implicit tasks are executed immediately, not deferred
1104   task->td_flags.task_serial = 1;
1105   task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1106   task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1107 
1108   task->td_flags.started = 1;
1109   task->td_flags.executing = 1;
1110   task->td_flags.complete = 0;
1111   task->td_flags.freed = 0;
1112 
1113   task->td_depnode = NULL;
1114   task->td_last_tied = task;
1115   task->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1116 
1117   if (set_curr_task) { // only do this init first time thread is created
1118     KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
1119     // Not used: don't need to deallocate implicit task
1120     KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
1121     task->td_taskgroup = NULL; // An implicit task does not have taskgroup
1122     task->td_dephash = NULL;
1123     __kmp_push_current_task_to_thread(this_thr, team, tid);
1124   } else {
1125     KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
1126     KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
1127   }
1128 
1129 #if OMPT_SUPPORT
1130   if (UNLIKELY(ompt_enabled.enabled))
1131     __ompt_task_init(task, tid);
1132 #endif
1133 
1134   KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
1135                 team, task));
1136 }
1137 
// __kmp_finish_implicit_task: Release resources associated with implicit tasks
1139 // at the end of parallel regions. Some resources are kept for reuse in the next
1140 // parallel region.
1141 //
1142 // thread:  thread data structure corresponding to implicit task
1143 void __kmp_finish_implicit_task(kmp_info_t *thread) {
1144   kmp_taskdata_t *task = thread->th.th_current_task;
1145   if (task->td_dephash) {
1146     int children;
1147     task->td_flags.complete = 1;
1148     children = KMP_ATOMIC_LD_ACQ(&task->td_incomplete_child_tasks);
1149     kmp_tasking_flags_t flags_old = task->td_flags;
1150     if (children == 0 && flags_old.complete == 1) {
1151       kmp_tasking_flags_t flags_new = flags_old;
1152       flags_new.complete = 0;
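      // Same CAS-on-the-whole-flags-word idiom as in
      // __kmp_free_task_and_ancestors: only the CAS winner frees the dephash
      // entries.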
1153       if (KMP_COMPARE_AND_STORE_ACQ32(RCAST(kmp_int32 *, &task->td_flags),
1154                                       *RCAST(kmp_int32 *, &flags_old),
1155                                       *RCAST(kmp_int32 *, &flags_new))) {
1156         KA_TRACE(100, ("__kmp_finish_implicit_task: T#%d cleans "
1157                        "dephash of implicit task %p\n",
1158                        thread->th.th_info.ds.ds_gtid, task));
1159         __kmp_dephash_free_entries(thread, task->td_dephash);
1160       }
1161     }
1162   }
1163 }
1164 
// __kmp_free_implicit_task: Release resources associated with implicit tasks
// when these tasks are destroyed
1167 //
1168 // thread:  thread data structure corresponding to implicit task
1169 void __kmp_free_implicit_task(kmp_info_t *thread) {
1170   kmp_taskdata_t *task = thread->th.th_current_task;
1171   if (task && task->td_dephash) {
1172     __kmp_dephash_free(thread, task->td_dephash);
1173     task->td_dephash = NULL;
1174   }
1175 }
1176 
// Round up a size to a multiple of val, where val is a power of two: used to
// insert padding between structures co-allocated using a single malloc() call
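// For example, __kmp_round_up_to_val(13, 8) returns 16, while a size that is
// already a multiple of val is returned unchanged.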
1179 static size_t __kmp_round_up_to_val(size_t size, size_t val) {
1180   if (size & (val - 1)) {
1181     size &= ~(val - 1);
1182     if (size <= KMP_SIZE_T_MAX - val) {
1183       size += val; // Round up if there is no overflow.
1184     }
1185   }
1186   return size;
} // __kmp_round_up_to_val
1188 
1189 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1190 //
1191 // loc_ref: source location information
1192 // gtid: global thread number.
1193 // flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1194 // task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1195 // sizeof_kmp_task_t:  Size in bytes of kmp_task_t data structure including
1196 // private vars accessed in task.
1197 // sizeof_shareds:  Size in bytes of array of pointers to shared vars accessed
1198 // in task.
1199 // task_entry: Pointer to task code entry point generated by compiler.
1200 // returns: a pointer to the allocated kmp_task_t structure (task).
1201 kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1202                              kmp_tasking_flags_t *flags,
1203                              size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1204                              kmp_routine_entry_t task_entry) {
1205   kmp_task_t *task;
1206   kmp_taskdata_t *taskdata;
1207   kmp_info_t *thread = __kmp_threads[gtid];
1208   kmp_team_t *team = thread->th.th_team;
1209   kmp_taskdata_t *parent_task = thread->th.th_current_task;
1210   size_t shareds_offset;
1211 
1212   if (UNLIKELY(!TCR_4(__kmp_init_middle)))
1213     __kmp_middle_initialize();
1214 
1215   if (flags->hidden_helper) {
1216     if (__kmp_enable_hidden_helper) {
1217       if (!TCR_4(__kmp_init_hidden_helper))
1218         __kmp_hidden_helper_initialize();
1219     } else {
1220       // If the hidden helper task is not enabled, reset the flag to FALSE.
1221       flags->hidden_helper = FALSE;
1222     }
1223   }
1224 
1225   KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1226                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1227                 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1228                 sizeof_shareds, task_entry));
1229 
1230   KMP_DEBUG_ASSERT(parent_task);
1231   if (parent_task->td_flags.final) {
1232     if (flags->merged_if0) {
1233     }
1234     flags->final = 1;
1235   }
1236 
1237   if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
1238     // Untied task encountered causes the TSC algorithm to check entire deque of
1239     // the victim thread. If no untied task encountered, then checking the head
1240     // of the deque should be enough.
1241     KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1242   }
1243 
  // Detachable tasks are not proxy tasks yet but could be in the future.
  // Doing the tasking setup when that happens is too late.
1247   if (UNLIKELY(flags->proxy == TASK_PROXY ||
1248                flags->detachable == TASK_DETACHABLE || flags->hidden_helper)) {
1249     if (flags->proxy == TASK_PROXY) {
1250       flags->tiedness = TASK_UNTIED;
1251       flags->merged_if0 = 1;
1252     }
1253     /* are we running in a sequential parallel or tskm_immediate_exec... we need
1254        tasking support enabled */
1255     if ((thread->th.th_task_team) == NULL) {
      /* This should only happen if the team is serialized;
         set up a task team and propagate it to the thread */
1258       KMP_DEBUG_ASSERT(team->t.t_serialized);
1259       KA_TRACE(30,
1260                ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1261                 gtid));
1262       // 1 indicates setup the current team regardless of nthreads
1263       __kmp_task_team_setup(thread, team, 1);
1264       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1265     }
1266     kmp_task_team_t *task_team = thread->th.th_task_team;
1267 
1268     /* tasking must be enabled now as the task might not be pushed */
1269     if (!KMP_TASKING_ENABLED(task_team)) {
1270       KA_TRACE(
1271           30,
1272           ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1273       __kmp_enable_tasking(task_team, thread);
1274       kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1275       kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1276       // No lock needed since only owner can allocate
1277       if (thread_data->td.td_deque == NULL) {
1278         __kmp_alloc_task_deque(thread, thread_data);
1279       }
1280     }
1281 
1282     if ((flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE) &&
1283         task_team->tt.tt_found_proxy_tasks == FALSE)
1284       TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1285     if (flags->hidden_helper &&
1286         task_team->tt.tt_hidden_helper_task_encountered == FALSE)
1287       TCW_4(task_team->tt.tt_hidden_helper_task_encountered, TRUE);
1288   }
1289 
1290   // Calculate shared structure offset including padding after kmp_task_t struct
1291   // to align pointers in shared struct
1292   shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1293   shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
1294 
  // Allocate a single block holding the kmp_taskdata_t, the kmp_task_t, and,
  // after pointer-size padding, the shareds.
  KA_TRACE(30, ("__kmp_task_alloc: T#%d taskdata+task size: %ld\n", gtid,
                shareds_offset));
  KA_TRACE(30, ("__kmp_task_alloc: T#%d shareds size: %ld\n", gtid,
                sizeof_shareds));
1300 
1301   // Avoid double allocation here by combining shareds with taskdata
1302 #if USE_FAST_MEMORY
1303   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1304                                                                sizeof_shareds);
1305 #else /* ! USE_FAST_MEMORY */
1306   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1307                                                                sizeof_shareds);
1308 #endif /* USE_FAST_MEMORY */
1309 
1310   task = KMP_TASKDATA_TO_TASK(taskdata);
1311 
1312 // Make sure task & taskdata are aligned appropriately
1313 #if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
1314   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1315   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1316 #else
1317   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1318   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1319 #endif
1320   if (sizeof_shareds > 0) {
1321     // Avoid double allocation here by combining shareds with taskdata
1322     task->shareds = &((char *)taskdata)[shareds_offset];
1323     // Make sure shareds struct is aligned to pointer size
1324     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1325                      0);
1326   } else {
1327     task->shareds = NULL;
1328   }
1329   task->routine = task_entry;
1330   task->part_id = 0; // AC: Always start with 0 part id
1331 
1332   taskdata->td_task_id = KMP_GEN_TASK_ID();
1333   taskdata->td_team = thread->th.th_team;
1334   taskdata->td_alloc_thread = thread;
1335   taskdata->td_parent = parent_task;
1336   taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1337   KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1338   taskdata->td_ident = loc_ref;
1339   taskdata->td_taskwait_ident = NULL;
1340   taskdata->td_taskwait_counter = 0;
1341   taskdata->td_taskwait_thread = 0;
1342   KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1343   // avoid copying icvs for proxy tasks
1344   if (flags->proxy == TASK_FULL)
1345     copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1346 
1347   taskdata->td_flags = *flags;
1348   taskdata->td_task_team = thread->th.th_task_team;
1349   taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1350   taskdata->td_flags.tasktype = TASK_EXPLICIT;
1351   // If it is hidden helper task, we need to set the team and task team
1352   // correspondingly.
1353   if (flags->hidden_helper) {
1354     kmp_info_t *shadow_thread = __kmp_threads[KMP_GTID_TO_SHADOW_GTID(gtid)];
1355     taskdata->td_team = shadow_thread->th.th_team;
1356     taskdata->td_task_team = shadow_thread->th.th_task_team;
1357   }
1358 
1359   // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1360   taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1361 
1362   // GEH - TODO: fix this to copy parent task's value of team_serial flag
1363   taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1364 
1365   // GEH - Note we serialize the task if the team is serialized to make sure
1366   // implicit parallel region tasks are not left until program termination to
1367   // execute. Also, it helps locality to execute immediately.
1368 
1369   taskdata->td_flags.task_serial =
1370       (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1371        taskdata->td_flags.tasking_ser || flags->merged_if0);
1372 
1373   taskdata->td_flags.started = 0;
1374   taskdata->td_flags.executing = 0;
1375   taskdata->td_flags.complete = 0;
1376   taskdata->td_flags.freed = 0;
1377 
1378   KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
  // start at one because it counts the current task and its children
1380   KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1381   taskdata->td_taskgroup =
1382       parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1383   taskdata->td_dephash = NULL;
1384   taskdata->td_depnode = NULL;
1385   if (flags->tiedness == TASK_UNTIED)
1386     taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1387   else
1388     taskdata->td_last_tied = taskdata;
1389   taskdata->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1390 #if OMPT_SUPPORT
1391   if (UNLIKELY(ompt_enabled.enabled))
1392     __ompt_task_init(taskdata, gtid);
1393 #endif
1394   // TODO: What would be the balance between the conditions in the function and
1395   // an atomic operation?
1396   if (__kmp_track_children_task(taskdata)) {
1397     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1398     if (parent_task->td_taskgroup)
1399       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks
    // since implicit tasks are not deallocated
1402     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1403       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1404     }
1405     if (flags->hidden_helper) {
1406       taskdata->td_flags.task_serial = FALSE;
1407       // Increment the number of hidden helper tasks to be executed
1408       KMP_ATOMIC_INC(&__kmp_unexecuted_hidden_helper_tasks);
1409     }
1410   }
1411 
1412   KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1413                 gtid, taskdata, taskdata->td_parent));
1414 
1415   return task;
1416 }
1417 
1418 kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1419                                   kmp_int32 flags, size_t sizeof_kmp_task_t,
1420                                   size_t sizeof_shareds,
1421                                   kmp_routine_entry_t task_entry) {
1422   kmp_task_t *retval;
1423   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1424   __kmp_assert_valid_gtid(gtid);
1425   input_flags->native = FALSE;
1426   // __kmp_task_alloc() sets up all other runtime flags
1427   KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s %s) "
1428                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1429                 gtid, loc_ref, input_flags->tiedness ? "tied  " : "untied",
1430                 input_flags->proxy ? "proxy" : "",
1431                 input_flags->detachable ? "detachable" : "", sizeof_kmp_task_t,
1432                 sizeof_shareds, task_entry));
1433 
1434   retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1435                             sizeof_shareds, task_entry);
1436 
1437   KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1438 
1439   return retval;
1440 }
1441 
1442 kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1443                                          kmp_int32 flags,
1444                                          size_t sizeof_kmp_task_t,
1445                                          size_t sizeof_shareds,
1446                                          kmp_routine_entry_t task_entry,
1447                                          kmp_int64 device_id) {
1448   auto &input_flags = reinterpret_cast<kmp_tasking_flags_t &>(flags);
  // A target task is untied, as defined in the specification.
1450   input_flags.tiedness = TASK_UNTIED;
1451 
1452   if (__kmp_enable_hidden_helper)
1453     input_flags.hidden_helper = TRUE;
1454 
1455   return __kmpc_omp_task_alloc(loc_ref, gtid, flags, sizeof_kmp_task_t,
1456                                sizeof_shareds, task_entry);
1457 }
1458 
1459 /*!
1460 @ingroup TASKING
1461 @param loc_ref location of the original task directive
1462 @param gtid Global Thread ID of encountering thread
1463 @param new_task task thunk allocated by __kmpc_omp_task_alloc() for the ''new
1464 task''
1465 @param naffins Number of affinity items
1466 @param affin_list List of affinity items
@return Returns non-zero if registering affinity information was not
successful. Returns 0 if registration was successful.
1469 This entry registers the affinity information attached to a task with the task
1470 thunk structure kmp_taskdata_t.
1471 */
1472 kmp_int32
1473 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid,
1474                                   kmp_task_t *new_task, kmp_int32 naffins,
1475                                   kmp_task_affinity_info_t *affin_list) {
1476   return 0;
1477 }
1478 
1479 //  __kmp_invoke_task: invoke the specified task
1480 //
1481 // gtid: global thread ID of caller
1482 // task: the task to invoke
1483 // current_task: the task to resume after task invocation
1484 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1485                               kmp_taskdata_t *current_task) {
1486   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1487   kmp_info_t *thread;
1488   int discard = 0 /* false */;
1489   KA_TRACE(
1490       30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1491            gtid, taskdata, current_task));
1492   KMP_DEBUG_ASSERT(task);
1493   if (UNLIKELY(taskdata->td_flags.proxy == TASK_PROXY &&
1494                taskdata->td_flags.complete == 1)) {
1495     // This is a proxy task that was already completed but it needs to run
1496     // its bottom-half finish
1497     KA_TRACE(
1498         30,
1499         ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1500          gtid, taskdata));
1501 
1502     __kmp_bottom_half_finish_proxy(gtid, task);
1503 
1504     KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1505                   "proxy task %p, resuming task %p\n",
1506                   gtid, taskdata, current_task));
1507 
1508     return;
1509   }
1510 
1511 #if OMPT_SUPPORT
1512   // For untied tasks, the first task executed only calls __kmpc_omp_task and
1513   // does not execute code.
1514   ompt_thread_info_t oldInfo;
1515   if (UNLIKELY(ompt_enabled.enabled)) {
1516     // Store the threads states and restore them after the task
1517     thread = __kmp_threads[gtid];
1518     oldInfo = thread->th.ompt_thread_info;
1519     thread->th.ompt_thread_info.wait_id = 0;
1520     thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1521                                             ? ompt_state_work_serial
1522                                             : ompt_state_work_parallel;
1523     taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1524   }
1525 #endif
1526 
  // Decrement the counter of hidden helper tasks to be executed
1528   if (taskdata->td_flags.hidden_helper) {
1529     // Hidden helper tasks can only be executed by hidden helper threads
1530     KMP_ASSERT(KMP_HIDDEN_HELPER_THREAD(gtid));
1531     KMP_ATOMIC_DEC(&__kmp_unexecuted_hidden_helper_tasks);
1532   }
1533 
1534   // Proxy tasks are not handled by the runtime
1535   if (taskdata->td_flags.proxy != TASK_PROXY) {
1536     __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1537   }
1538 
1539   // TODO: cancel tasks if the parallel region has also been cancelled
1540   // TODO: check if this sequence can be hoisted above __kmp_task_start
1541   // if cancellation has been enabled for this run ...
1542   if (UNLIKELY(__kmp_omp_cancellation)) {
1543     thread = __kmp_threads[gtid];
1544     kmp_team_t *this_team = thread->th.th_team;
1545     kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1546     if ((taskgroup && taskgroup->cancel_request) ||
1547         (this_team->t.t_cancel_request == cancel_parallel)) {
1548 #if OMPT_SUPPORT && OMPT_OPTIONAL
1549       ompt_data_t *task_data;
1550       if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1551         __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1552         ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1553             task_data,
1554             ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1555                                                       : ompt_cancel_parallel) |
1556                 ompt_cancel_discarded_task,
1557             NULL);
1558       }
1559 #endif
1560       KMP_COUNT_BLOCK(TASK_cancelled);
1561       // this task belongs to a task group and we need to cancel it
1562       discard = 1 /* true */;
1563     }
1564   }
1565 
1566   // Invoke the task routine and pass in relevant data.
1567   // Thunks generated by gcc take a different argument list.
1568   if (!discard) {
1569     if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1570       taskdata->td_last_tied = current_task->td_last_tied;
1571       KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1572     }
1573 #if KMP_STATS_ENABLED
1574     KMP_COUNT_BLOCK(TASK_executed);
1575     switch (KMP_GET_THREAD_STATE()) {
1576     case FORK_JOIN_BARRIER:
1577       KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1578       break;
1579     case PLAIN_BARRIER:
1580       KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1581       break;
1582     case TASKYIELD:
1583       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1584       break;
1585     case TASKWAIT:
1586       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1587       break;
1588     case TASKGROUP:
1589       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1590       break;
1591     default:
1592       KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1593       break;
1594     }
1595 #endif // KMP_STATS_ENABLED
1596 
1597 // OMPT task begin
1598 #if OMPT_SUPPORT
1599     if (UNLIKELY(ompt_enabled.enabled))
1600       __ompt_task_start(task, current_task, gtid);
1601 #endif
1602 
1603 #if OMPD_SUPPORT
1604     if (ompd_state & OMPD_ENABLE_BP)
1605       ompd_bp_task_begin();
1606 #endif
1607 
1608 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1609     kmp_uint64 cur_time;
1610     kmp_int32 kmp_itt_count_task =
1611         __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1612         current_task->td_flags.tasktype == TASK_IMPLICIT;
1613     if (kmp_itt_count_task) {
1614       thread = __kmp_threads[gtid];
1615       // Time outer level explicit task on barrier for adjusting imbalance time
1616       if (thread->th.th_bar_arrive_time)
1617         cur_time = __itt_get_timestamp();
1618       else
1619         kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1620     }
1621     KMP_FSYNC_ACQUIRED(taskdata); // acquired self (new task)
1622 #endif
1623 
1624 #ifdef KMP_GOMP_COMPAT
1625     if (taskdata->td_flags.native) {
1626       ((void (*)(void *))(*(task->routine)))(task->shareds);
1627     } else
1628 #endif /* KMP_GOMP_COMPAT */
1629     {
1630       (*(task->routine))(gtid, task);
1631     }
1632     KMP_POP_PARTITIONED_TIMER();
1633 
1634 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1635     if (kmp_itt_count_task) {
1636       // Barrier imbalance - adjust arrive time with the task duration
1637       thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1638     }
1639     KMP_FSYNC_CANCEL(taskdata); // destroy self (just executed)
1640     KMP_FSYNC_RELEASING(taskdata->td_parent); // releasing parent
1641 #endif
1642   }
1643 
1644 #if OMPD_SUPPORT
1645   if (ompd_state & OMPD_ENABLE_BP)
1646     ompd_bp_task_end();
1647 #endif
1648 
1649   // Proxy tasks are not handled by the runtime
1650   if (taskdata->td_flags.proxy != TASK_PROXY) {
1651 #if OMPT_SUPPORT
1652     if (UNLIKELY(ompt_enabled.enabled)) {
1653       thread->th.ompt_thread_info = oldInfo;
1654       if (taskdata->td_flags.tiedness == TASK_TIED) {
1655         taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1656       }
1657       __kmp_task_finish<true>(gtid, task, current_task);
1658     } else
1659 #endif
1660       __kmp_task_finish<false>(gtid, task, current_task);
1661   }
1662 
1663   KA_TRACE(
1664       30,
1665       ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1666        gtid, taskdata, current_task));
1667   return;
1668 }
1669 
1670 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1671 //
1672 // loc_ref: location of original task pragma (ignored)
1673 // gtid: Global Thread ID of encountering thread
// new_task: task thunk allocated by __kmpc_omp_task_alloc() for the ''new
// task''
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the call did not suspend and queue the
//    current task to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the call suspended and queued the current
//    task to be resumed later.
1680 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1681                                 kmp_task_t *new_task) {
1682   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1683 
1684   KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1685                 loc_ref, new_taskdata));
1686 
1687 #if OMPT_SUPPORT
1688   kmp_taskdata_t *parent;
1689   if (UNLIKELY(ompt_enabled.enabled)) {
1690     parent = new_taskdata->td_parent;
1691     if (ompt_enabled.ompt_callback_task_create) {
1692       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1693           &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame),
1694           &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1695           OMPT_GET_RETURN_ADDRESS(0));
1696     }
1697   }
1698 #endif
1699 
1700   /* Should we execute the new task or queue it? For now, let's just always try
1701      to queue it.  If the queue fills up, then we'll execute it.  */
1702 
1703   if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1704   { // Execute this task immediately
1705     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1706     new_taskdata->td_flags.task_serial = 1;
1707     __kmp_invoke_task(gtid, new_task, current_task);
1708   }
1709 
1710   KA_TRACE(
1711       10,
1712       ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1713        "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
1714        gtid, loc_ref, new_taskdata));
1715 
1716 #if OMPT_SUPPORT
1717   if (UNLIKELY(ompt_enabled.enabled)) {
1718     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1719   }
1720 #endif
1721   return TASK_CURRENT_NOT_QUEUED;
1722 }
1723 
1724 // __kmp_omp_task: Schedule a non-thread-switchable task for execution
1725 //
1726 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
1728 // serialize_immediate: if TRUE then if the task is executed immediately its
1729 // execution will be serialized
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the call did not suspend and queue the
//    current task to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the call suspended and queued the current
//    task to be resumed later.
1735 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1736                          bool serialize_immediate) {
1737   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1738 
1739   /* Should we execute the new task or queue it? For now, let's just always try
1740      to queue it.  If the queue fills up, then we'll execute it.  */
1741   if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1742       __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1743   { // Execute this task immediately
1744     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1745     if (serialize_immediate)
1746       new_taskdata->td_flags.task_serial = 1;
1747     __kmp_invoke_task(gtid, new_task, current_task);
1748   }
1749 
1750   return TASK_CURRENT_NOT_QUEUED;
1751 }
1752 
1753 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1754 // non-thread-switchable task from the parent thread only!
1755 //
1756 // loc_ref: location of original task pragma (ignored)
1757 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the call did not suspend and queue the
//    current task to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the call suspended and queued the current
//    task to be resumed later.
1765 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1766                           kmp_task_t *new_task) {
1767   kmp_int32 res;
1768   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1769 
1770 #if KMP_DEBUG || OMPT_SUPPORT
1771   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1772 #endif
1773   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1774                 new_taskdata));
1775   __kmp_assert_valid_gtid(gtid);
1776 
1777 #if OMPT_SUPPORT
1778   kmp_taskdata_t *parent = NULL;
1779   if (UNLIKELY(ompt_enabled.enabled)) {
1780     if (!new_taskdata->td_flags.started) {
1781       OMPT_STORE_RETURN_ADDRESS(gtid);
1782       parent = new_taskdata->td_parent;
1783       if (!parent->ompt_task_info.frame.enter_frame.ptr) {
1784         parent->ompt_task_info.frame.enter_frame.ptr =
1785             OMPT_GET_FRAME_ADDRESS(0);
1786       }
1787       if (ompt_enabled.ompt_callback_task_create) {
1788         ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1789             &(parent->ompt_task_info.task_data),
1790             &(parent->ompt_task_info.frame),
1791             &(new_taskdata->ompt_task_info.task_data),
1792             ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1793             OMPT_LOAD_RETURN_ADDRESS(gtid));
1794       }
1795     } else {
1796       // We are scheduling the continuation of an UNTIED task.
1797       // Scheduling back to the parent task.
1798       __ompt_task_finish(new_task,
1799                          new_taskdata->ompt_task_info.scheduling_parent,
1800                          ompt_task_switch);
1801       new_taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1802     }
1803   }
1804 #endif
1805 
1806   res = __kmp_omp_task(gtid, new_task, true);
1807 
1808   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1809                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1810                 gtid, loc_ref, new_taskdata));
1811 #if OMPT_SUPPORT
1812   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1813     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1814   }
1815 #endif
1816   return res;
1817 }
1818 
1819 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
1820 // a taskloop task with the correct OMPT return address
1821 //
1822 // loc_ref: location of original task pragma (ignored)
1823 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
// codeptr_ra: return address for OMPT callback
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the call did not suspend and queue the
//    current task to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the call suspended and queued the current
//    task to be resumed later.
1832 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
1833                                   kmp_task_t *new_task, void *codeptr_ra) {
1834   kmp_int32 res;
1835   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1836 
1837 #if KMP_DEBUG || OMPT_SUPPORT
1838   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1839 #endif
1840   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1841                 new_taskdata));
1842 
1843 #if OMPT_SUPPORT
1844   kmp_taskdata_t *parent = NULL;
1845   if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
1846     parent = new_taskdata->td_parent;
1847     if (!parent->ompt_task_info.frame.enter_frame.ptr)
1848       parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1849     if (ompt_enabled.ompt_callback_task_create) {
1850       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1851           &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame),
1852           &(new_taskdata->ompt_task_info.task_data),
1853           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1854           codeptr_ra);
1855     }
1856   }
1857 #endif
1858 
1859   res = __kmp_omp_task(gtid, new_task, true);
1860 
1861   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1862                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1863                 gtid, loc_ref, new_taskdata));
1864 #if OMPT_SUPPORT
1865   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1866     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1867   }
1868 #endif
1869   return res;
1870 }
1871 
1872 template <bool ompt>
1873 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1874                                               void *frame_address,
1875                                               void *return_address) {
1876   kmp_taskdata_t *taskdata = nullptr;
1877   kmp_info_t *thread;
1878   int thread_finished = FALSE;
1879   KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
1880 
1881   KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
1882   KMP_DEBUG_ASSERT(gtid >= 0);
1883 
1884   if (__kmp_tasking_mode != tskm_immediate_exec) {
1885     thread = __kmp_threads[gtid];
1886     taskdata = thread->th.th_current_task;
1887 
1888 #if OMPT_SUPPORT && OMPT_OPTIONAL
1889     ompt_data_t *my_task_data;
1890     ompt_data_t *my_parallel_data;
1891 
1892     if (ompt) {
1893       my_task_data = &(taskdata->ompt_task_info.task_data);
1894       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1895 
1896       taskdata->ompt_task_info.frame.enter_frame.ptr = frame_address;
1897 
1898       if (ompt_enabled.ompt_callback_sync_region) {
1899         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1900             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1901             my_task_data, return_address);
1902       }
1903 
1904       if (ompt_enabled.ompt_callback_sync_region_wait) {
1905         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1906             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1907             my_task_data, return_address);
1908       }
1909     }
1910 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1911 
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1914 #if USE_ITT_BUILD
1915 // Note: These values are used by ITT events as well.
1916 #endif /* USE_ITT_BUILD */
1917     taskdata->td_taskwait_counter += 1;
1918     taskdata->td_taskwait_ident = loc_ref;
1919     taskdata->td_taskwait_thread = gtid + 1;
1920 
1921 #if USE_ITT_BUILD
1922     void *itt_sync_obj = NULL;
1923 #if USE_ITT_NOTIFY
1924     KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
1925 #endif /* USE_ITT_NOTIFY */
1926 #endif /* USE_ITT_BUILD */
1927 
1928     bool must_wait =
1929         !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
1930 
1931     must_wait = must_wait || (thread->th.th_task_team != NULL &&
1932                               thread->th.th_task_team->tt.tt_found_proxy_tasks);
    // If a hidden helper task has been encountered, we must also wait here.
1934     must_wait =
1935         must_wait ||
1936         (__kmp_enable_hidden_helper && thread->th.th_task_team != NULL &&
1937          thread->th.th_task_team->tt.tt_hidden_helper_task_encountered);
1938 
1939     if (must_wait) {
1940       kmp_flag_32<false, false> flag(
1941           RCAST(std::atomic<kmp_uint32> *,
1942                 &(taskdata->td_incomplete_child_tasks)),
1943           0U);
1944       while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
1945         flag.execute_tasks(thread, gtid, FALSE,
1946                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1947                            __kmp_task_stealing_constraint);
1948       }
1949     }
1950 #if USE_ITT_BUILD
1951     KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
1952     KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with children
1953 #endif /* USE_ITT_BUILD */
1954 
    // Debugger: The taskwait is completed. Location remains, but thread is
    // negated.
1957     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1958 
1959 #if OMPT_SUPPORT && OMPT_OPTIONAL
1960     if (ompt) {
1961       if (ompt_enabled.ompt_callback_sync_region_wait) {
1962         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1963             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1964             my_task_data, return_address);
1965       }
1966       if (ompt_enabled.ompt_callback_sync_region) {
1967         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1968             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1969             my_task_data, return_address);
1970       }
1971       taskdata->ompt_task_info.frame.enter_frame = ompt_data_none;
1972     }
1973 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1974 
1975   }
1976 
1977   KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1978                 "returning TASK_CURRENT_NOT_QUEUED\n",
1979                 gtid, taskdata));
1980 
1981   return TASK_CURRENT_NOT_QUEUED;
1982 }
1983 
1984 #if OMPT_SUPPORT && OMPT_OPTIONAL
1985 OMPT_NOINLINE
1986 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1987                                           void *frame_address,
1988                                           void *return_address) {
1989   return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1990                                             return_address);
1991 }
1992 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1993 
1994 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1995 // complete
1996 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1997 #if OMPT_SUPPORT && OMPT_OPTIONAL
1998   if (UNLIKELY(ompt_enabled.enabled)) {
1999     OMPT_STORE_RETURN_ADDRESS(gtid);
2000     return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(0),
2001                                     OMPT_LOAD_RETURN_ADDRESS(gtid));
2002   }
2003 #endif
2004   return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
2005 }
2006 
2007 // __kmpc_omp_taskyield: switch to a different task
2008 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
2009   kmp_taskdata_t *taskdata = NULL;
2010   kmp_info_t *thread;
2011   int thread_finished = FALSE;
2012 
2013   KMP_COUNT_BLOCK(OMP_TASKYIELD);
2014   KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
2015 
2016   KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
2017                 gtid, loc_ref, end_part));
2018   __kmp_assert_valid_gtid(gtid);
2019 
2020   if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
2021     thread = __kmp_threads[gtid];
2022     taskdata = thread->th.th_current_task;
2023 // Should we model this as a task wait or not?
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
2026 #if USE_ITT_BUILD
2027 // Note: These values are used by ITT events as well.
2028 #endif /* USE_ITT_BUILD */
2029     taskdata->td_taskwait_counter += 1;
2030     taskdata->td_taskwait_ident = loc_ref;
2031     taskdata->td_taskwait_thread = gtid + 1;
2032 
2033 #if USE_ITT_BUILD
2034     void *itt_sync_obj = NULL;
2035 #if USE_ITT_NOTIFY
2036     KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2037 #endif /* USE_ITT_NOTIFY */
2038 #endif /* USE_ITT_BUILD */
2039     if (!taskdata->td_flags.team_serial) {
2040       kmp_task_team_t *task_team = thread->th.th_task_team;
2041       if (task_team != NULL) {
2042         if (KMP_TASKING_ENABLED(task_team)) {
2043 #if OMPT_SUPPORT
2044           if (UNLIKELY(ompt_enabled.enabled))
2045             thread->th.ompt_thread_info.ompt_task_yielded = 1;
2046 #endif
2047           __kmp_execute_tasks_32(
2048               thread, gtid, (kmp_flag_32<> *)NULL, FALSE,
2049               &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2050               __kmp_task_stealing_constraint);
2051 #if OMPT_SUPPORT
2052           if (UNLIKELY(ompt_enabled.enabled))
2053             thread->th.ompt_thread_info.ompt_task_yielded = 0;
2054 #endif
2055         }
2056       }
2057     }
2058 #if USE_ITT_BUILD
2059     KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2060 #endif /* USE_ITT_BUILD */
2061 
    // Debugger: The taskwait is completed. Location remains, but thread is
    // negated.
2064     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
2065   }
2066 
2067   KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
2068                 "returning TASK_CURRENT_NOT_QUEUED\n",
2069                 gtid, taskdata));
2070 
2071   return TASK_CURRENT_NOT_QUEUED;
2072 }
2073 
2074 // Task Reduction implementation
2075 //
// Note: the initial implementation did not take into account the possibility
// of specifying omp_orig for the initializer of a UDR (user-defined
// reduction). The corrected implementation takes the omp_orig object into
// account. The compiler is free to use the old implementation if omp_orig is
// not specified.
2080 
2081 /*!
2082 @ingroup BASIC_TYPES
2083 @{
2084 */
2085 
2086 /*!
2087 Flags for special info per task reduction item.
2088 */
2089 typedef struct kmp_taskred_flags {
2090   /*! 1 - use lazy alloc/init (e.g. big objects, #tasks < #threads) */
2091   unsigned lazy_priv : 1;
2092   unsigned reserved31 : 31;
2093 } kmp_taskred_flags_t;
2094 
2095 /*!
2096 Internal struct for reduction data item related info set up by compiler.
2097 */
2098 typedef struct kmp_task_red_input {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2100   size_t reduce_size; /**< size of data item in bytes */
2101   // three compiler-generated routines (init, fini are optional):
2102   void *reduce_init; /**< data initialization routine (single parameter) */
2103   void *reduce_fini; /**< data finalization routine */
2104   void *reduce_comb; /**< data combiner routine */
2105   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2106 } kmp_task_red_input_t;
2107 
2108 /*!
2109 Internal struct for reduction data item related info saved by the library.
2110 */
2111 typedef struct kmp_taskred_data {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2113   size_t reduce_size; /**< size of data item */
2114   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2115   void *reduce_priv; /**< array of thread specific items */
2116   void *reduce_pend; /**< end of private data for faster comparison op */
2117   // three compiler-generated routines (init, fini are optional):
2118   void *reduce_comb; /**< data combiner routine */
2119   void *reduce_init; /**< data initialization routine (two parameters) */
2120   void *reduce_fini; /**< data finalization routine */
2121   void *reduce_orig; /**< original item (can be used in UDR initializer) */
2122 } kmp_taskred_data_t;
2123 
2124 /*!
2125 Internal struct for reduction data item related info set up by compiler.
2126 
2127 New interface: added reduce_orig field to provide omp_orig for UDR initializer.
2128 */
2129 typedef struct kmp_taskred_input {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2131   void *reduce_orig; /**< original reduction item used for initialization */
2132   size_t reduce_size; /**< size of data item */
2133   // three compiler-generated routines (init, fini are optional):
2134   void *reduce_init; /**< data initialization routine (two parameters) */
2135   void *reduce_fini; /**< data finalization routine */
2136   void *reduce_comb; /**< data combiner routine */
2137   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2138 } kmp_taskred_input_t;
2139 /*!
2140 @}
2141 */
2142 
2143 template <typename T> void __kmp_assign_orig(kmp_taskred_data_t &item, T &src);
2144 template <>
2145 void __kmp_assign_orig<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2146                                              kmp_task_red_input_t &src) {
2147   item.reduce_orig = NULL;
2148 }
2149 template <>
2150 void __kmp_assign_orig<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2151                                             kmp_taskred_input_t &src) {
2152   if (src.reduce_orig != NULL) {
2153     item.reduce_orig = src.reduce_orig;
2154   } else {
2155     item.reduce_orig = src.reduce_shar;
2156   } // non-NULL reduce_orig means new interface used
2157 }
2158 
2159 template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, size_t j);
2160 template <>
2161 void __kmp_call_init<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2162                                            size_t offset) {
2163   ((void (*)(void *))item.reduce_init)((char *)(item.reduce_priv) + offset);
2164 }
2165 template <>
2166 void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2167                                           size_t offset) {
2168   ((void (*)(void *, void *))item.reduce_init)(
2169       (char *)(item.reduce_priv) + offset, item.reduce_orig);
2170 }
2171 
2172 template <typename T>
2173 void *__kmp_task_reduction_init(int gtid, int num, T *data) {
2174   __kmp_assert_valid_gtid(gtid);
2175   kmp_info_t *thread = __kmp_threads[gtid];
2176   kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
2177   kmp_uint32 nth = thread->th.th_team_nproc;
2178   kmp_taskred_data_t *arr;
2179 
2180   // check input data just in case
2181   KMP_ASSERT(tg != NULL);
2182   KMP_ASSERT(data != NULL);
2183   KMP_ASSERT(num > 0);
2184   if (nth == 1) {
2185     KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
2186                   gtid, tg));
2187     return (void *)tg;
2188   }
2189   KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
2190                 gtid, tg, num));
2191   arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2192       thread, num * sizeof(kmp_taskred_data_t));
2193   for (int i = 0; i < num; ++i) {
2194     size_t size = data[i].reduce_size - 1;
2195     // round the size up to cache line per thread-specific item
2196     size += CACHE_LINE - size % CACHE_LINE;
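    // Worked example of the rounding above, assuming CACHE_LINE is 64:
    //   reduce_size 10  -> size 9   -> 9 + (64 - 9 % 64)     = 64
    //   reduce_size 128 -> size 127 -> 127 + (64 - 127 % 64) = 128 (unchanged)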
2197     KMP_ASSERT(data[i].reduce_comb != NULL); // combiner is mandatory
2198     arr[i].reduce_shar = data[i].reduce_shar;
2199     arr[i].reduce_size = size;
2200     arr[i].flags = data[i].flags;
2201     arr[i].reduce_comb = data[i].reduce_comb;
2202     arr[i].reduce_init = data[i].reduce_init;
2203     arr[i].reduce_fini = data[i].reduce_fini;
2204     __kmp_assign_orig<T>(arr[i], data[i]);
2205     if (!arr[i].flags.lazy_priv) {
2206       // allocate cache-line aligned block and fill it with zeros
2207       arr[i].reduce_priv = __kmp_allocate(nth * size);
2208       arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
2209       if (arr[i].reduce_init != NULL) {
2210         // initialize all thread-specific items
2211         for (size_t j = 0; j < nth; ++j) {
2212           __kmp_call_init<T>(arr[i], j * size);
2213         }
2214       }
2215     } else {
2216       // only allocate space for pointers now,
2217       // objects will be lazily allocated/initialized if/when requested
2218       // note that __kmp_allocate zeroes the allocated memory
2219       arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
2220     }
2221   }
2222   tg->reduce_data = (void *)arr;
2223   tg->reduce_num_data = num;
2224   return (void *)tg;
2225 }
2226 
2227 /*!
2228 @ingroup TASKING
2229 @param gtid      Global thread ID
2230 @param num       Number of data items to reduce
2231 @param data      Array of data for reduction
2232 @return The taskgroup identifier
2233 
2234 Initialize task reduction for the taskgroup.
2235 
Note: this entry assumes the optional compiler-generated initializer routine
has a single parameter, a pointer to the object to be initialized. That means
the reduction either does not use the omp_orig object, or omp_orig is
accessible without the help of the runtime library.
2240 */
2241 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
2242   return __kmp_task_reduction_init(gtid, num, (kmp_task_red_input_t *)data);
2243 }
2244 
2245 /*!
2246 @ingroup TASKING
2247 @param gtid      Global thread ID
2248 @param num       Number of data items to reduce
2249 @param data      Array of data for reduction
2250 @return The taskgroup identifier
2251 
2252 Initialize task reduction for the taskgroup.
2253 
Note: this entry assumes the optional compiler-generated initializer routine
has two parameters, a pointer to the object to be initialized and a pointer to
omp_orig.
2256 */
2257 void *__kmpc_taskred_init(int gtid, int num, void *data) {
2258   return __kmp_task_reduction_init(gtid, num, (kmp_taskred_input_t *)data);
2259 }
2260 
2261 // Copy task reduction data (except for shared pointers).
2262 template <typename T>
2263 void __kmp_task_reduction_init_copy(kmp_info_t *thr, int num, T *data,
2264                                     kmp_taskgroup_t *tg, void *reduce_data) {
2265   kmp_taskred_data_t *arr;
2266   KA_TRACE(20, ("__kmp_task_reduction_init_copy: Th %p, init taskgroup %p,"
2267                 " from data %p\n",
2268                 thr, tg, reduce_data));
2269   arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2270       thr, num * sizeof(kmp_taskred_data_t));
2271   // threads will share private copies, thunk routines, sizes, flags, etc.:
2272   KMP_MEMCPY(arr, reduce_data, num * sizeof(kmp_taskred_data_t));
2273   for (int i = 0; i < num; ++i) {
2274     arr[i].reduce_shar = data[i].reduce_shar; // init unique shared pointers
2275   }
2276   tg->reduce_data = (void *)arr;
2277   tg->reduce_num_data = num;
2278 }
2279 
2280 /*!
2281 @ingroup TASKING
2282 @param gtid    Global thread ID
2283 @param tskgrp  The taskgroup ID (optional)
2284 @param data    Shared location of the item
2285 @return The pointer to per-thread data
2286 
Get the thread-specific location of a data item.
2288 */
2289 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
2290   __kmp_assert_valid_gtid(gtid);
2291   kmp_info_t *thread = __kmp_threads[gtid];
2292   kmp_int32 nth = thread->th.th_team_nproc;
2293   if (nth == 1)
2294     return data; // nothing to do
2295 
2296   kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
2297   if (tg == NULL)
2298     tg = thread->th.th_current_task->td_taskgroup;
2299   KMP_ASSERT(tg != NULL);
2300   kmp_taskred_data_t *arr = (kmp_taskred_data_t *)(tg->reduce_data);
2301   kmp_int32 num = tg->reduce_num_data;
2302   kmp_int32 tid = thread->th.th_info.ds.ds_tid;
2303 
2304   KMP_ASSERT(data != NULL);
2305   while (tg != NULL) {
2306     for (int i = 0; i < num; ++i) {
2307       if (!arr[i].flags.lazy_priv) {
2308         if (data == arr[i].reduce_shar ||
2309             (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
2310           return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
2311       } else {
2312         // check shared location first
2313         void **p_priv = (void **)(arr[i].reduce_priv);
2314         if (data == arr[i].reduce_shar)
2315           goto found;
2316         // check if we get some thread specific location as parameter
2317         for (int j = 0; j < nth; ++j)
2318           if (data == p_priv[j])
2319             goto found;
2320         continue; // not found, continue search
2321       found:
2322         if (p_priv[tid] == NULL) {
2323           // allocate thread specific object lazily
2324           p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
2325           if (arr[i].reduce_init != NULL) {
2326             if (arr[i].reduce_orig != NULL) { // new interface
2327               ((void (*)(void *, void *))arr[i].reduce_init)(
2328                   p_priv[tid], arr[i].reduce_orig);
2329             } else { // old interface (single parameter)
2330               ((void (*)(void *))arr[i].reduce_init)(p_priv[tid]);
2331             }
2332           }
2333         }
2334         return p_priv[tid];
2335       }
2336     }
2337     tg = tg->parent;
2338     arr = (kmp_taskred_data_t *)(tg->reduce_data);
2339     num = tg->reduce_num_data;
2340   }
2341   KMP_ASSERT2(0, "Unknown task reduction item");
2342   return NULL; // ERROR, this line never executed
2343 }
2344 
2345 // Finalize task reduction.
2346 // Called from __kmpc_end_taskgroup()
2347 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2348   kmp_int32 nth = th->th.th_team_nproc;
2349   KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
2350   kmp_taskred_data_t *arr = (kmp_taskred_data_t *)tg->reduce_data;
2351   kmp_int32 num = tg->reduce_num_data;
2352   for (int i = 0; i < num; ++i) {
2353     void *sh_data = arr[i].reduce_shar;
2354     void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2355     void (*f_comb)(void *, void *) =
2356         (void (*)(void *, void *))(arr[i].reduce_comb);
2357     if (!arr[i].flags.lazy_priv) {
2358       void *pr_data = arr[i].reduce_priv;
2359       size_t size = arr[i].reduce_size;
2360       for (int j = 0; j < nth; ++j) {
2361         void *priv_data = (char *)pr_data + j * size;
2362         f_comb(sh_data, priv_data); // combine results
2363         if (f_fini)
2364           f_fini(priv_data); // finalize if needed
2365       }
2366     } else {
2367       void **pr_data = (void **)(arr[i].reduce_priv);
2368       for (int j = 0; j < nth; ++j) {
2369         if (pr_data[j] != NULL) {
2370           f_comb(sh_data, pr_data[j]); // combine results
2371           if (f_fini)
2372             f_fini(pr_data[j]); // finalize if needed
2373           __kmp_free(pr_data[j]);
2374         }
2375       }
2376     }
2377     __kmp_free(arr[i].reduce_priv);
2378   }
2379   __kmp_thread_free(th, arr);
2380   tg->reduce_data = NULL;
2381   tg->reduce_num_data = 0;
2382 }
2383 
// Cleanup task reduction data for parallel or worksharing;
// do not touch task-private data that other threads are still working with.
2386 // Called from __kmpc_end_taskgroup()
2387 static void __kmp_task_reduction_clean(kmp_info_t *th, kmp_taskgroup_t *tg) {
2388   __kmp_thread_free(th, tg->reduce_data);
2389   tg->reduce_data = NULL;
2390   tg->reduce_num_data = 0;
2391 }
2392 
2393 template <typename T>
2394 void *__kmp_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2395                                          int num, T *data) {
2396   __kmp_assert_valid_gtid(gtid);
2397   kmp_info_t *thr = __kmp_threads[gtid];
2398   kmp_int32 nth = thr->th.th_team_nproc;
2399   __kmpc_taskgroup(loc, gtid); // form new taskgroup first
2400   if (nth == 1) {
2401     KA_TRACE(10,
2402              ("__kmpc_reduction_modifier_init: T#%d, tg %p, exiting nth=1\n",
2403               gtid, thr->th.th_current_task->td_taskgroup));
2404     return (void *)thr->th.th_current_task->td_taskgroup;
2405   }
2406   kmp_team_t *team = thr->th.th_team;
2407   void *reduce_data;
2408   kmp_taskgroup_t *tg;
2409   reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]);
2410   if (reduce_data == NULL &&
2411       __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
2412                                  (void *)1)) {
2413     // single thread enters this block to initialize common reduction data
2414     KMP_DEBUG_ASSERT(reduce_data == NULL);
2415     // first initialize own data, then make a copy other threads can use
2416     tg = (kmp_taskgroup_t *)__kmp_task_reduction_init<T>(gtid, num, data);
2417     reduce_data = __kmp_thread_malloc(thr, num * sizeof(kmp_taskred_data_t));
2418     KMP_MEMCPY(reduce_data, tg->reduce_data, num * sizeof(kmp_taskred_data_t));
2419     // fini counters should be 0 at this point
2420     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[0]) == 0);
2421     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[1]) == 0);
2422     KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data);
2423   } else {
2424     while (
2425         (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) ==
2426         (void *)1) { // wait for task reduction initialization
2427       KMP_CPU_PAUSE();
2428     }
2429     KMP_DEBUG_ASSERT(reduce_data > (void *)1); // should be valid pointer here
2430     tg = thr->th.th_current_task->td_taskgroup;
2431     __kmp_task_reduction_init_copy<T>(thr, num, data, tg, reduce_data);
2432   }
2433   return tg;
2434 }
2435 
2436 /*!
2437 @ingroup TASKING
2438 @param loc       Source location info
2439 @param gtid      Global thread ID
2440 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2441 @param num       Number of data items to reduce
2442 @param data      Array of data for reduction
2443 @return The taskgroup identifier
2444 
Initialize task reduction for a parallel or worksharing construct.

Note: this entry assumes the optional compiler-generated initializer routine
has a single parameter, a pointer to the object to be initialized. That means
the reduction either does not use the omp_orig object, or omp_orig is
accessible without the help of the runtime library.
2451 */
2452 void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2453                                           int num, void *data) {
2454   return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2455                                             (kmp_task_red_input_t *)data);
2456 }
2457 
2458 /*!
2459 @ingroup TASKING
2460 @param loc       Source location info
2461 @param gtid      Global thread ID
2462 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2463 @param num       Number of data items to reduce
2464 @param data      Array of data for reduction
2465 @return The taskgroup identifier
2466 
Initialize task reduction for a parallel or worksharing construct.

Note: this entry assumes the optional compiler-generated initializer routine
has two parameters, a pointer to the object to be initialized and a pointer to
omp_orig.
2471 */
2472 void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num,
2473                                    void *data) {
2474   return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2475                                             (kmp_taskred_input_t *)data);
2476 }
2477 
2478 /*!
2479 @ingroup TASKING
2480 @param loc       Source location info
2481 @param gtid      Global thread ID
2482 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2483 
Finalize task reduction for a parallel or worksharing construct.
2485 */
2486 void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws) {
2487   __kmpc_end_taskgroup(loc, gtid);
2488 }
2489 
2490 // __kmpc_taskgroup: Start a new taskgroup
2491 void __kmpc_taskgroup(ident_t *loc, int gtid) {
2492   __kmp_assert_valid_gtid(gtid);
2493   kmp_info_t *thread = __kmp_threads[gtid];
2494   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2495   kmp_taskgroup_t *tg_new =
2496       (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2497   KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2498   KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2499   KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2500   tg_new->parent = taskdata->td_taskgroup;
2501   tg_new->reduce_data = NULL;
2502   tg_new->reduce_num_data = 0;
2503   tg_new->gomp_data = NULL;
2504   taskdata->td_taskgroup = tg_new;
2505 
2506 #if OMPT_SUPPORT && OMPT_OPTIONAL
2507   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2508     void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2509     if (!codeptr)
2510       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2511     kmp_team_t *team = thread->th.th_team;
2512     ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2513     // FIXME: I think this is wrong for lwt!
2514     ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2515 
2516     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2517         ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2518         &(my_task_data), codeptr);
2519   }
2520 #endif
2521 }
2522 
2523 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2524 //                       and its descendants are complete
2525 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2526   __kmp_assert_valid_gtid(gtid);
2527   kmp_info_t *thread = __kmp_threads[gtid];
2528   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2529   kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2530   int thread_finished = FALSE;
2531 
2532 #if OMPT_SUPPORT && OMPT_OPTIONAL
2533   kmp_team_t *team;
2534   ompt_data_t my_task_data;
2535   ompt_data_t my_parallel_data;
2536   void *codeptr = nullptr;
2537   if (UNLIKELY(ompt_enabled.enabled)) {
2538     team = thread->th.th_team;
2539     my_task_data = taskdata->ompt_task_info.task_data;
2540     // FIXME: I think this is wrong for lwt!
2541     my_parallel_data = team->t.ompt_team_info.parallel_data;
2542     codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2543     if (!codeptr)
2544       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2545   }
2546 #endif
2547 
2548   KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2549   KMP_DEBUG_ASSERT(taskgroup != NULL);
2550   KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2551 
2552   if (__kmp_tasking_mode != tskm_immediate_exec) {
    // mark the task as waiting (not on a barrier)
2554     taskdata->td_taskwait_counter += 1;
2555     taskdata->td_taskwait_ident = loc;
2556     taskdata->td_taskwait_thread = gtid + 1;
2557 #if USE_ITT_BUILD
2558     // For ITT the taskgroup wait is similar to taskwait until we need to
2559     // distinguish them
2560     void *itt_sync_obj = NULL;
2561 #if USE_ITT_NOTIFY
2562     KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2563 #endif /* USE_ITT_NOTIFY */
2564 #endif /* USE_ITT_BUILD */
2565 
2566 #if OMPT_SUPPORT && OMPT_OPTIONAL
2567     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2568       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2569           ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2570           &(my_task_data), codeptr);
2571     }
2572 #endif
2573 
2574     if (!taskdata->td_flags.team_serial ||
2575         (thread->th.th_task_team != NULL &&
2576          (thread->th.th_task_team->tt.tt_found_proxy_tasks ||
2577           thread->th.th_task_team->tt.tt_hidden_helper_task_encountered))) {
2578       kmp_flag_32<false, false> flag(
2579           RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)), 0U);
2580       while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
2581         flag.execute_tasks(thread, gtid, FALSE,
2582                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2583                            __kmp_task_stealing_constraint);
2584       }
2585     }
2586     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2587 
2588 #if OMPT_SUPPORT && OMPT_OPTIONAL
2589     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2590       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2591           ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2592           &(my_task_data), codeptr);
2593     }
2594 #endif
2595 
2596 #if USE_ITT_BUILD
2597     KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2598     KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with descendants
2599 #endif /* USE_ITT_BUILD */
2600   }
2601   KMP_DEBUG_ASSERT(taskgroup->count == 0);
2602 
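  // Task reduction bookkeeping: t_tg_reduce_data[0] holds reduction data
  // registered with a parallel modifier, t_tg_reduce_data[1] with a
  // worksharing modifier; matching the <priv> pointer of the first reduction
  // variable tells which (if either) this taskgroup is participating in.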
2603   if (taskgroup->reduce_data != NULL &&
2604       !taskgroup->gomp_data) { // need to reduce?
2605     int cnt;
2606     void *reduce_data;
2607     kmp_team_t *t = thread->th.th_team;
2608     kmp_taskred_data_t *arr = (kmp_taskred_data_t *)taskgroup->reduce_data;
    // check if <priv> data of the first reduction variable is shared for the
    // team
2610     void *priv0 = arr[0].reduce_priv;
2611     if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[0])) != NULL &&
2612         ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2613       // finishing task reduction on parallel
2614       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[0]);
2615       if (cnt == thread->th.th_team_nproc - 1) {
2616         // we are the last thread passing __kmpc_reduction_modifier_fini()
2617         // finalize task reduction:
2618         __kmp_task_reduction_fini(thread, taskgroup);
2619         // cleanup fields in the team structure:
2620         // TODO: is relaxed store enough here (whole barrier should follow)?
2621         __kmp_thread_free(thread, reduce_data);
2622         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[0], NULL);
2623         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[0], 0);
2624       } else {
2625         // we are not the last thread passing __kmpc_reduction_modifier_fini(),
2626         // so do not finalize reduction, just clean own copy of the data
2627         __kmp_task_reduction_clean(thread, taskgroup);
2628       }
2629     } else if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[1])) !=
2630                    NULL &&
2631                ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2632       // finishing task reduction on worksharing
2633       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[1]);
2634       if (cnt == thread->th.th_team_nproc - 1) {
2635         // we are the last thread passing __kmpc_reduction_modifier_fini()
2636         __kmp_task_reduction_fini(thread, taskgroup);
2637         // cleanup fields in team structure:
2638         // TODO: is relaxed store enough here (whole barrier should follow)?
2639         __kmp_thread_free(thread, reduce_data);
2640         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[1], NULL);
2641         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[1], 0);
2642       } else {
2643         // we are not the last thread passing __kmpc_reduction_modifier_fini(),
2644         // so do not finalize reduction, just clean own copy of the data
2645         __kmp_task_reduction_clean(thread, taskgroup);
2646       }
2647     } else {
2648       // finishing task reduction on taskgroup
2649       __kmp_task_reduction_fini(thread, taskgroup);
2650     }
2651   }
2652   // Restore parent taskgroup for the current task
2653   taskdata->td_taskgroup = taskgroup->parent;
2654   __kmp_thread_free(thread, taskgroup);
2655 
2656   KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2657                 gtid, taskdata));
2658 
2659 #if OMPT_SUPPORT && OMPT_OPTIONAL
2660   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2661     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2662         ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2663         &(my_task_data), codeptr);
2664   }
2665 #endif
2666 }
2667 
2668 // __kmp_remove_my_task: remove a task from my own deque
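// The owner pops from the tail (LIFO), while thieves in __kmp_steal_task take
// from the head (FIFO), so the owner and thieves mostly work on opposite ends
// of the deque.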
2669 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2670                                         kmp_task_team_t *task_team,
2671                                         kmp_int32 is_constrained) {
2672   kmp_task_t *task;
2673   kmp_taskdata_t *taskdata;
2674   kmp_thread_data_t *thread_data;
2675   kmp_uint32 tail;
2676 
2677   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2678   KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2679                    NULL); // Caller should check this condition
2680 
2681   thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2682 
2683   KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2684                 gtid, thread_data->td.td_deque_ntasks,
2685                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2686 
2687   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2688     KA_TRACE(10,
2689              ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2690               "ntasks=%d head=%u tail=%u\n",
2691               gtid, thread_data->td.td_deque_ntasks,
2692               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2693     return NULL;
2694   }
2695 
2696   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2697 
2698   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2699     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2700     KA_TRACE(10,
2701              ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2702               "ntasks=%d head=%u tail=%u\n",
2703               gtid, thread_data->td.td_deque_ntasks,
2704               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2705     return NULL;
2706   }
2707 
2708   tail = (thread_data->td.td_deque_tail - 1) &
2709          TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2710   taskdata = thread_data->td.td_deque[tail];
2711 
2712   if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata,
2713                              thread->th.th_current_task)) {
    // The TSC does not allow taking the tail task
2715     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2716     KA_TRACE(10,
2717              ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: "
2718               "ntasks=%d head=%u tail=%u\n",
2719               gtid, thread_data->td.td_deque_ntasks,
2720               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2721     return NULL;
2722   }
2723 
2724   thread_data->td.td_deque_tail = tail;
2725   TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2726 
2727   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2728 
2729   KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
2730                 "ntasks=%d head=%u tail=%u\n",
2731                 gtid, taskdata, thread_data->td.td_deque_ntasks,
2732                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2733 
2734   task = KMP_TASKDATA_TO_TASK(taskdata);
2735   return task;
2736 }
2737 
2738 // __kmp_steal_task: remove a task from another thread's deque
// Assumes that the calling thread has already checked that the task_team's
// thread_data exists before calling this routine.
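// The task at the victim's head is taken if the task scheduling constraint
// allows it; otherwise, if untied tasks have been encountered, the whole deque
// is scanned for any allowed task and the remaining entries are shifted toward
// the head to close the gap.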
2741 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
2742                                     kmp_task_team_t *task_team,
2743                                     std::atomic<kmp_int32> *unfinished_threads,
2744                                     int *thread_finished,
2745                                     kmp_int32 is_constrained) {
2746   kmp_task_t *task;
2747   kmp_taskdata_t *taskdata;
2748   kmp_taskdata_t *current;
2749   kmp_thread_data_t *victim_td, *threads_data;
2750   kmp_int32 target;
2751   kmp_int32 victim_tid;
2752 
2753   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2754 
2755   threads_data = task_team->tt.tt_threads_data;
2756   KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
2757 
2758   victim_tid = victim_thr->th.th_info.ds.ds_tid;
2759   victim_td = &threads_data[victim_tid];
2760 
2761   KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
2762                 "task_team=%p ntasks=%d head=%u tail=%u\n",
2763                 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2764                 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2765                 victim_td->td.td_deque_tail));
2766 
2767   if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2768     KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
2769                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2770                   gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2771                   victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2772                   victim_td->td.td_deque_tail));
2773     return NULL;
2774   }
2775 
2776   __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2777 
2778   int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2779   // Check again after we acquire the lock
2780   if (ntasks == 0) {
2781     __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2782     KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
2783                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2784                   gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2785                   victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2786     return NULL;
2787   }
2788 
2789   KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2790   current = __kmp_threads[gtid]->th.th_current_task;
2791   taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2792   if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
    // Bump the head pointer and wrap.
2794     victim_td->td.td_deque_head =
2795         (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2796   } else {
2797     if (!task_team->tt.tt_untied_task_encountered) {
      // The TSC does not allow stealing the victim's task
2799       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2800       KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d could not steal from "
2801                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2802                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2803                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2804       return NULL;
2805     }
2806     int i;
2807     // walk through victim's deque trying to steal any task
2808     target = victim_td->td.td_deque_head;
2809     taskdata = NULL;
2810     for (i = 1; i < ntasks; ++i) {
2811       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2812       taskdata = victim_td->td.td_deque[target];
2813       if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
2814         break; // found victim task
2815       } else {
2816         taskdata = NULL;
2817       }
2818     }
2819     if (taskdata == NULL) {
2820       // No appropriate candidate to steal found
2821       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2822       KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2823                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2824                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2825                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2826       return NULL;
2827     }
2828     int prev = target;
2829     for (i = i + 1; i < ntasks; ++i) {
2830       // shift remaining tasks in the deque left by 1
2831       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2832       victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2833       prev = target;
2834     }
2835     KMP_DEBUG_ASSERT(
2836         victim_td->td.td_deque_tail ==
2837         (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
    victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
2839   }
2840   if (*thread_finished) {
2841     // We need to un-mark this victim as a finished victim.  This must be done
2842     // before releasing the lock, or else other threads (starting with the
2843     // primary thread victim) might be prematurely released from the barrier!!!
2844 #if KMP_DEBUG
2845     kmp_int32 count =
2846 #endif
2847         KMP_ATOMIC_INC(unfinished_threads);
2848     KA_TRACE(
2849         20,
2850         ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2851          gtid, count + 1, task_team));
2852     *thread_finished = FALSE;
2853   }
2854   TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2855 
2856   __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2857 
2858   KMP_COUNT_BLOCK(TASK_stolen);
2859   KA_TRACE(10,
2860            ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2861             "task_team=%p ntasks=%d head=%u tail=%u\n",
2862             gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2863             ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2864 
2865   task = KMP_TASKDATA_TO_TASK(taskdata);
2866   return task;
2867 }
2868 
2869 // __kmp_execute_tasks_template: Choose and execute tasks until either the
// condition is satisfied (return true) or there are none left (return false).
2871 //
2872 // final_spin is TRUE if this is the spin at the release barrier.
2873 // thread_finished indicates whether the thread is finished executing all
2874 // the tasks it has on its deque, and is at the release barrier.
2875 // spinner is the location on which to spin.
2876 // spinner == NULL means only execute a single task and return.
2877 // checker is the value to check to terminate the spin.
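// Rough structure: the inner loop first takes tasks from the thread's own
// deque, then falls back to stealing (preferring the last successful victim,
// otherwise a random one); the outer loop only repeats for a single thread
// that may keep receiving tasks from target constructs or proxy tasks.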
2878 template <class C>
2879 static inline int __kmp_execute_tasks_template(
2880     kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2881     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2882     kmp_int32 is_constrained) {
2883   kmp_task_team_t *task_team = thread->th.th_task_team;
2884   kmp_thread_data_t *threads_data;
2885   kmp_task_t *task;
2886   kmp_info_t *other_thread;
2887   kmp_taskdata_t *current_task = thread->th.th_current_task;
2888   std::atomic<kmp_int32> *unfinished_threads;
2889   kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2890                       tid = thread->th.th_info.ds.ds_tid;
2891 
2892   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2893   KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
2894 
2895   if (task_team == NULL || current_task == NULL)
2896     return FALSE;
2897 
2898   KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2899                 "*thread_finished=%d\n",
2900                 gtid, final_spin, *thread_finished));
2901 
2902   thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2903   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2904 
2905   KMP_DEBUG_ASSERT(threads_data != NULL);
2906 
2907   nthreads = task_team->tt.tt_nproc;
2908   unfinished_threads = &(task_team->tt.tt_unfinished_threads);
2909   KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks ||
2910                    task_team->tt.tt_hidden_helper_task_encountered);
2911   KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
2912 
2913   while (1) { // Outer loop keeps trying to find tasks in case of single thread
2914     // getting tasks from target constructs
2915     while (1) { // Inner loop to find a task and execute it
2916       task = NULL;
2917       if (use_own_tasks) { // check on own queue first
2918         task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2919       }
2920       if ((task == NULL) && (nthreads > 1)) { // Steal a task
2921         int asleep = 1;
2922         use_own_tasks = 0;
2923         // Try to steal from the last place I stole from successfully.
2924         if (victim_tid == -2) { // haven't stolen anything yet
2925           victim_tid = threads_data[tid].td.td_deque_last_stolen;
2926           if (victim_tid !=
2927               -1) // if we have a last stolen from victim, get the thread
2928             other_thread = threads_data[victim_tid].td.td_thr;
2929         }
2930         if (victim_tid != -1) { // found last victim
2931           asleep = 0;
2932         } else if (!new_victim) { // no recent steals and we haven't already
2933           // used a new victim; select a random thread
2934           do { // Find a different thread to steal work from.
2935             // Pick a random thread. Initial plan was to cycle through all the
2936             // threads, and only return if we tried to steal from every thread,
2937             // and failed.  Arch says that's not such a great idea.
2938             victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2939             if (victim_tid >= tid) {
2940               ++victim_tid; // Adjusts random distribution to exclude self
2941             }
2942             // Found a potential victim
2943             other_thread = threads_data[victim_tid].td.td_thr;
            // There is a slight chance that __kmp_enable_tasking() did not
            // wake up all threads waiting at the barrier.  If the victim is
            // sleeping, then wake it up.  Since we were going to pay the cache
            // miss penalty for referencing another thread's kmp_info_t struct
            // anyway, the check shouldn't cost too much performance at this
            // point.  In extra barrier mode, tasks do not sleep at the
            // separate tasking barrier, so this isn't a problem.
2952             asleep = 0;
2953             if ((__kmp_tasking_mode == tskm_task_teams) &&
2954                 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2955                 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2956                  NULL)) {
2957               asleep = 1;
2958               __kmp_null_resume_wrapper(other_thread);
              // A sleeping thread should not have any tasks on its queue.
2960               // There is a slight possibility that it resumes, steals a task
2961               // from another thread, which spawns more tasks, all in the time
2962               // that it takes this thread to check => don't write an assertion
2963               // that the victim's queue is empty.  Try stealing from a
2964               // different thread.
2965             }
2966           } while (asleep);
2967         }
2968 
2969         if (!asleep) {
2970           // We have a victim to try to steal from
2971           task = __kmp_steal_task(other_thread, gtid, task_team,
2972                                   unfinished_threads, thread_finished,
2973                                   is_constrained);
2974         }
2975         if (task != NULL) { // set last stolen to victim
2976           if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2977             threads_data[tid].td.td_deque_last_stolen = victim_tid;
2978             // The pre-refactored code did not try more than 1 successful new
            // victim, unless the last one generated more local tasks;
2980             // new_victim keeps track of this
2981             new_victim = 1;
2982           }
2983         } else { // No tasks found; unset last_stolen
2984           KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2985           victim_tid = -2; // no successful victim found
2986         }
2987       }
2988 
2989       if (task == NULL)
2990         break; // break out of tasking loop
2991 
2992 // Found a task; execute it
2993 #if USE_ITT_BUILD && USE_ITT_NOTIFY
2994       if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2995         if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2996           // get the object reliably
2997           itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2998         }
2999         __kmp_itt_task_starting(itt_sync_obj);
3000       }
3001 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
3002       __kmp_invoke_task(gtid, task, current_task);
3003 #if USE_ITT_BUILD
3004       if (itt_sync_obj != NULL)
3005         __kmp_itt_task_finished(itt_sync_obj);
3006 #endif /* USE_ITT_BUILD */
3007       // If this thread is only partway through the barrier and the condition is
3008       // met, then return now, so that the barrier gather/release pattern can
3009       // proceed. If this thread is in the last spin loop in the barrier,
3010       // waiting to be released, we know that the termination condition will not
3011       // be satisfied, so don't waste any cycles checking it.
3012       if (flag == NULL || (!final_spin && flag->done_check())) {
3013         KA_TRACE(
3014             15,
3015             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3016              gtid));
3017         return TRUE;
3018       }
3019       if (thread->th.th_task_team == NULL) {
3020         break;
3021       }
3022       KMP_YIELD(__kmp_library == library_throughput); // Yield before next task
3023       // If execution of a stolen task results in more tasks being placed on our
3024       // run queue, reset use_own_tasks
3025       if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
3026         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
3027                       "other tasks, restart\n",
3028                       gtid));
3029         use_own_tasks = 1;
3030         new_victim = 0;
3031       }
3032     }
3033 
3034     // The task source has been exhausted. If in final spin loop of barrier,
3035     // check if termination condition is satisfied. The work queue may be empty
3036     // but there might be proxy tasks still executing.
3037     if (final_spin &&
3038         KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0) {
3039       // First, decrement the #unfinished threads, if that has not already been
3040       // done.  This decrement might be to the spin location, and result in the
3041       // termination condition being satisfied.
3042       if (!*thread_finished) {
3043 #if KMP_DEBUG
3044         kmp_int32 count = -1 +
3045 #endif
3046             KMP_ATOMIC_DEC(unfinished_threads);
3047         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
3048                       "unfinished_threads to %d task_team=%p\n",
3049                       gtid, count, task_team));
3050         *thread_finished = TRUE;
3051       }
3052 
3053       // It is now unsafe to reference thread->th.th_team !!!
3054       // Decrementing task_team->tt.tt_unfinished_threads can allow the primary
3055       // thread to pass through the barrier, where it might reset each thread's
3056       // th.th_team field for the next parallel region. If we can steal more
3057       // work, we know that this has not happened yet.
3058       if (flag != NULL && flag->done_check()) {
3059         KA_TRACE(
3060             15,
3061             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3062              gtid));
3063         return TRUE;
3064       }
3065     }
3066 
3067     // If this thread's task team is NULL, primary thread has recognized that
3068     // there are no more tasks; bail out
3069     if (thread->th.th_task_team == NULL) {
3070       KA_TRACE(15,
3071                ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
3072       return FALSE;
3073     }
3074 
3075     // We could be getting tasks from target constructs; if this is the only
3076     // thread, keep trying to execute tasks from own queue
3077     if (nthreads == 1 &&
3078         KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks))
3079       use_own_tasks = 1;
3080     else {
3081       KA_TRACE(15,
3082                ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
3083       return FALSE;
3084     }
3085   }
3086 }
3087 
3088 template <bool C, bool S>
3089 int __kmp_execute_tasks_32(
3090     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32<C, S> *flag, int final_spin,
3091     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3092     kmp_int32 is_constrained) {
3093   return __kmp_execute_tasks_template(
3094       thread, gtid, flag, final_spin,
3095       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3096 }
3097 
3098 template <bool C, bool S>
3099 int __kmp_execute_tasks_64(
3100     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64<C, S> *flag, int final_spin,
3101     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3102     kmp_int32 is_constrained) {
3103   return __kmp_execute_tasks_template(
3104       thread, gtid, flag, final_spin,
3105       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3106 }
3107 
3108 template <bool C, bool S>
3109 int __kmp_atomic_execute_tasks_64(
3110     kmp_info_t *thread, kmp_int32 gtid, kmp_atomic_flag_64<C, S> *flag,
3111     int final_spin, int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3112     kmp_int32 is_constrained) {
3113   return __kmp_execute_tasks_template(
3114       thread, gtid, flag, final_spin,
3115       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3116 }
3117 
3118 int __kmp_execute_tasks_oncore(
3119     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
3120     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3121     kmp_int32 is_constrained) {
3122   return __kmp_execute_tasks_template(
3123       thread, gtid, flag, final_spin,
3124       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3125 }
3126 
3127 template int
3128 __kmp_execute_tasks_32<false, false>(kmp_info_t *, kmp_int32,
3129                                      kmp_flag_32<false, false> *, int,
3130                                      int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3131 
3132 template int __kmp_execute_tasks_64<false, true>(kmp_info_t *, kmp_int32,
3133                                                  kmp_flag_64<false, true> *,
3134                                                  int,
3135                                                  int *USE_ITT_BUILD_ARG(void *),
3136                                                  kmp_int32);
3137 
3138 template int __kmp_execute_tasks_64<true, false>(kmp_info_t *, kmp_int32,
3139                                                  kmp_flag_64<true, false> *,
3140                                                  int,
3141                                                  int *USE_ITT_BUILD_ARG(void *),
3142                                                  kmp_int32);
3143 
3144 template int __kmp_atomic_execute_tasks_64<false, true>(
3145     kmp_info_t *, kmp_int32, kmp_atomic_flag_64<false, true> *, int,
3146     int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3147 
3148 template int __kmp_atomic_execute_tasks_64<true, false>(
3149     kmp_info_t *, kmp_int32, kmp_atomic_flag_64<true, false> *, int,
3150     int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3151 
3152 // __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
3153 // next barrier so they can assist in executing enqueued tasks.
3154 // First thread in allocates the task team atomically.
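// Only the thread that wins __kmp_realloc_task_threads_data() (the one that
// actually sets up the threads_data array) goes on to wake sleeping threads;
// everyone else returns immediately.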
3155 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
3156                                  kmp_info_t *this_thr) {
3157   kmp_thread_data_t *threads_data;
3158   int nthreads, i, is_init_thread;
3159 
3160   KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
3161                 __kmp_gtid_from_thread(this_thr)));
3162 
3163   KMP_DEBUG_ASSERT(task_team != NULL);
3164   KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
3165 
3166   nthreads = task_team->tt.tt_nproc;
3167   KMP_DEBUG_ASSERT(nthreads > 0);
3168   KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
3169 
3170   // Allocate or increase the size of threads_data if necessary
3171   is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
3172 
3173   if (!is_init_thread) {
3174     // Some other thread already set up the array.
3175     KA_TRACE(
3176         20,
3177         ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
3178          __kmp_gtid_from_thread(this_thr)));
3179     return;
3180   }
3181   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3182   KMP_DEBUG_ASSERT(threads_data != NULL);
3183 
3184   if (__kmp_tasking_mode == tskm_task_teams &&
3185       (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
3186     // Release any threads sleeping at the barrier, so that they can steal
3187     // tasks and execute them.  In extra barrier mode, tasks do not sleep
3188     // at the separate tasking barrier, so this isn't a problem.
3189     for (i = 0; i < nthreads; i++) {
3190       void *sleep_loc;
3191       kmp_info_t *thread = threads_data[i].td.td_thr;
3192 
3193       if (i == this_thr->th.th_info.ds.ds_tid) {
3194         continue;
3195       }
3196       // Since we haven't locked the thread's suspend mutex lock at this
3197       // point, there is a small window where a thread might be putting
3198       // itself to sleep, but hasn't set the th_sleep_loc field yet.
      // To work around this, __kmp_execute_tasks_template() periodically checks
      // to see if other threads are sleeping (using the same random mechanism
      // that is used for task stealing) and awakens them if they are.
3202       if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3203           NULL) {
3204         KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
3205                       __kmp_gtid_from_thread(this_thr),
3206                       __kmp_gtid_from_thread(thread)));
3207         __kmp_null_resume_wrapper(thread);
3208       } else {
3209         KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
3210                       __kmp_gtid_from_thread(this_thr),
3211                       __kmp_gtid_from_thread(thread)));
3212       }
3213     }
3214   }
3215 
3216   KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
3217                 __kmp_gtid_from_thread(this_thr)));
3218 }
3219 
/* // TODO: Check the comment consistency
 * Utility routines for "task teams".  A task team (kmp_task_team_t) is kind of
 * like a shadow of the kmp_team_t data struct, with a different lifetime.
 * After a child thread checks into a barrier and calls __kmp_release() from
 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
 * longer assume that the kmp_team_t structure is intact (at any moment, the
 * primary thread may exit the barrier code and free the team data structure,
 * and return the threads to the thread pool).
 *
 * This does not work with the tasking code, as the thread is still
 * expected to participate in the execution of any tasks that may have been
 * spawned by a member of the team, and the thread still needs access to
 * each thread in the team, so that it can steal work from it.
 *
 * Enter the existence of the kmp_task_team_t struct.  It employs a reference
 * counting mechanism, and is allocated by the primary thread before calling
 * __kmp_<barrier_kind>_release, and then is released by the last thread to
 * exit __kmp_<barrier_kind>_release at the next barrier.  I.e. the lifetimes
 * of the kmp_task_team_t structs for consecutive barriers can overlap
 * (and will, unless the primary thread is the last thread to exit the barrier
 * release phase, which is not typical). The existence of such a struct is
 * useful outside the context of tasking.
 *
 * We currently use the existence of the threads array as an indicator that
 * tasks were spawned since the last barrier.  If the structure is to be
 * useful outside the context of tasking, then this will have to change, but
 * not setting the field minimizes the performance impact of tasking on
 * barriers, when no explicit tasks were spawned (pushed, actually).
 */
3249 
3250 static kmp_task_team_t *__kmp_free_task_teams =
3251     NULL; // Free list for task_team data structures
3252 // Lock for task team data structures
3253 kmp_bootstrap_lock_t __kmp_task_team_lock =
3254     KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
3255 
3256 // __kmp_alloc_task_deque:
// Allocates a task deque for a particular thread, and initializes the necessary
3258 // data structures relating to the deque.  This only happens once per thread
3259 // per task team since task teams are recycled. No lock is needed during
3260 // allocation since each thread allocates its own deque.
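// The deque size stays a power of two (starting at INITIAL_TASK_DEQUE_SIZE),
// so head/tail indices can be wrapped cheaply with TASK_DEQUE_MASK.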
3261 static void __kmp_alloc_task_deque(kmp_info_t *thread,
3262                                    kmp_thread_data_t *thread_data) {
3263   __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3264   KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3265 
3266   // Initialize last stolen task field to "none"
3267   thread_data->td.td_deque_last_stolen = -1;
3268 
3269   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3270   KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3271   KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3272 
3273   KE_TRACE(
3274       10,
3275       ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3276        __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3277   // Allocate space for task deque, and zero the deque
3278   // Cannot use __kmp_thread_calloc() because threads not around for
3279   // kmp_reap_task_team( ).
3280   thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3281       INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
3282   thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
3283 }
3284 
3285 // __kmp_free_task_deque:
3286 // Deallocates a task deque for a particular thread. Happens at library
// deallocation, so we don't need to reset all thread data fields.
3288 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
3289   if (thread_data->td.td_deque != NULL) {
3290     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3291     TCW_4(thread_data->td.td_deque_ntasks, 0);
3292     __kmp_free(thread_data->td.td_deque);
3293     thread_data->td.td_deque = NULL;
3294     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3295   }
3296 
3297 #ifdef BUILD_TIED_TASK_STACK
3298   // GEH: Figure out what to do here for td_susp_tied_tasks
3299   if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
3300     __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
3301   }
3302 #endif // BUILD_TIED_TASK_STACK
3303 }
3304 
3305 // __kmp_realloc_task_threads_data:
3306 // Allocates a threads_data array for a task team, either by allocating an
3307 // initial array or enlarging an existing array.  Only the first thread to get
// the lock allocates or enlarges the array and re-initializes the array
// elements.
3309 // That thread returns "TRUE", the rest return "FALSE".
3310 // Assumes that the new array size is given by task_team -> tt.tt_nproc.
3311 // The current size is given by task_team -> tt.tt_max_threads.
3312 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
3313                                            kmp_task_team_t *task_team) {
3314   kmp_thread_data_t **threads_data_p;
3315   kmp_int32 nthreads, maxthreads;
3316   int is_init_thread = FALSE;
3317 
3318   if (TCR_4(task_team->tt.tt_found_tasks)) {
3319     // Already reallocated and initialized.
3320     return FALSE;
3321   }
3322 
3323   threads_data_p = &task_team->tt.tt_threads_data;
3324   nthreads = task_team->tt.tt_nproc;
3325   maxthreads = task_team->tt.tt_max_threads;
3326 
3327   // All threads must lock when they encounter the first task of the implicit
3328   // task region to make sure threads_data fields are (re)initialized before
3329   // used.
3330   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3331 
3332   if (!TCR_4(task_team->tt.tt_found_tasks)) {
3333     // first thread to enable tasking
3334     kmp_team_t *team = thread->th.th_team;
3335     int i;
3336 
3337     is_init_thread = TRUE;
3338     if (maxthreads < nthreads) {
3339 
3340       if (*threads_data_p != NULL) {
3341         kmp_thread_data_t *old_data = *threads_data_p;
3342         kmp_thread_data_t *new_data = NULL;
3343 
3344         KE_TRACE(
3345             10,
3346             ("__kmp_realloc_task_threads_data: T#%d reallocating "
3347              "threads data for task_team %p, new_size = %d, old_size = %d\n",
3348              __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
3349         // Reallocate threads_data to have more elements than current array
3350         // Cannot use __kmp_thread_realloc() because threads not around for
3351         // kmp_reap_task_team( ).  Note all new array entries are initialized
3352         // to zero by __kmp_allocate().
3353         new_data = (kmp_thread_data_t *)__kmp_allocate(
3354             nthreads * sizeof(kmp_thread_data_t));
3355         // copy old data to new data
3356         KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
3357                      (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
3358 
3359 #ifdef BUILD_TIED_TASK_STACK
3360         // GEH: Figure out if this is the right thing to do
3361         for (i = maxthreads; i < nthreads; i++) {
3362           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3363           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3364         }
3365 #endif // BUILD_TIED_TASK_STACK
3366        // Install the new data and free the old data
3367         (*threads_data_p) = new_data;
3368         __kmp_free(old_data);
3369       } else {
3370         KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
3371                       "threads data for task_team %p, size = %d\n",
3372                       __kmp_gtid_from_thread(thread), task_team, nthreads));
3373         // Make the initial allocate for threads_data array, and zero entries
3374         // Cannot use __kmp_thread_calloc() because threads not around for
3375         // kmp_reap_task_team( ).
3376         *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
3377             nthreads * sizeof(kmp_thread_data_t));
3378 #ifdef BUILD_TIED_TASK_STACK
3379         // GEH: Figure out if this is the right thing to do
3380         for (i = 0; i < nthreads; i++) {
3381           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3382           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3383         }
3384 #endif // BUILD_TIED_TASK_STACK
3385       }
3386       task_team->tt.tt_max_threads = nthreads;
3387     } else {
3388       // If array has (more than) enough elements, go ahead and use it
3389       KMP_DEBUG_ASSERT(*threads_data_p != NULL);
3390     }
3391 
3392     // initialize threads_data pointers back to thread_info structures
3393     for (i = 0; i < nthreads; i++) {
3394       kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3395       thread_data->td.td_thr = team->t.t_threads[i];
3396 
3397       if (thread_data->td.td_deque_last_stolen >= nthreads) {
3398         // The last stolen field survives across teams / barrier, and the number
3399         // of threads may have changed.  It's possible (likely?) that a new
        // parallel region will exhibit the same behavior as the previous
        // region.
3401         thread_data->td.td_deque_last_stolen = -1;
3402       }
3403     }
3404 
3405     KMP_MB();
3406     TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
3407   }
3408 
3409   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3410   return is_init_thread;
3411 }
3412 
3413 // __kmp_free_task_threads_data:
3414 // Deallocates a threads_data array for a task team, including any attached
3415 // tasking deques.  Only occurs at library shutdown.
3416 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3417   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3418   if (task_team->tt.tt_threads_data != NULL) {
3419     int i;
3420     for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3421       __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3422     }
3423     __kmp_free(task_team->tt.tt_threads_data);
3424     task_team->tt.tt_threads_data = NULL;
3425   }
3426   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3427 }
3428 
3429 // __kmp_allocate_task_team:
3430 // Allocates a task team associated with a specific team, taking it from
3431 // the global task team free list if possible.  Also initializes data
3432 // structures.
3433 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3434                                                  kmp_team_t *team) {
3435   kmp_task_team_t *task_team = NULL;
3436   int nthreads;
3437 
3438   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3439                 (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3440 
3441   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3442     // Take a task team from the task team pool
3443     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3444     if (__kmp_free_task_teams != NULL) {
3445       task_team = __kmp_free_task_teams;
3446       TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3447       task_team->tt.tt_next = NULL;
3448     }
3449     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3450   }
3451 
3452   if (task_team == NULL) {
3453     KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3454                   "task team for team %p\n",
3455                   __kmp_gtid_from_thread(thread), team));
3456     // Allocate a new task team if one is not available. Cannot use
3457     // __kmp_thread_malloc because threads not around for kmp_reap_task_team.
3458     task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3459     __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3460 #if USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG
    // suppress race condition detection on synchronization flags in debug mode
    // this helps to analyze library internals by eliminating false positives
3463     __itt_suppress_mark_range(
3464         __itt_suppress_range, __itt_suppress_threading_errors,
3465         &task_team->tt.tt_found_tasks, sizeof(task_team->tt.tt_found_tasks));
3466     __itt_suppress_mark_range(__itt_suppress_range,
3467                               __itt_suppress_threading_errors,
3468                               CCAST(kmp_uint32 *, &task_team->tt.tt_active),
3469                               sizeof(task_team->tt.tt_active));
3470 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG */
    // Note: __kmp_allocate zeroes returned memory, otherwise we would need:
3472     // task_team->tt.tt_threads_data = NULL;
3473     // task_team->tt.tt_max_threads = 0;
3474     // task_team->tt.tt_next = NULL;
3475   }
3476 
3477   TCW_4(task_team->tt.tt_found_tasks, FALSE);
3478   TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3479   task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3480 
3481   KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3482   TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
3483   TCW_4(task_team->tt.tt_active, TRUE);
3484 
3485   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
3486                 "unfinished_threads init'd to %d\n",
3487                 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
3488                 KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
3489   return task_team;
3490 }
3491 
3492 // __kmp_free_task_team:
3493 // Frees the task team associated with a specific thread, and adds it
3494 // to the global task team free list.
3495 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
3496   KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
3497                 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
3498 
3499   // Put task team back on free list
3500   __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3501 
3502   KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3503   task_team->tt.tt_next = __kmp_free_task_teams;
3504   TCW_PTR(__kmp_free_task_teams, task_team);
3505 
3506   __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3507 }
3508 
3509 // __kmp_reap_task_teams:
3510 // Free all the task teams on the task team free list.
3511 // Should only be done during library shutdown.
3512 // Cannot do anything that needs a thread structure or gtid since they are
3513 // already gone.
3514 void __kmp_reap_task_teams(void) {
3515   kmp_task_team_t *task_team;
3516 
3517   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3518     // Free all task_teams on the free list
3519     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3520     while ((task_team = __kmp_free_task_teams) != NULL) {
3521       __kmp_free_task_teams = task_team->tt.tt_next;
3522       task_team->tt.tt_next = NULL;
3523 
3524       // Free threads_data if necessary
3525       if (task_team->tt.tt_threads_data != NULL) {
3526         __kmp_free_task_threads_data(task_team);
3527       }
3528       __kmp_free(task_team);
3529     }
3530     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3531   }
3532 }
3533 
3534 // __kmp_wait_to_unref_task_teams:
3535 // Some threads could still be in the fork barrier release code, possibly
3536 // trying to steal tasks.  Wait for each thread to unreference its task team.
3537 void __kmp_wait_to_unref_task_teams(void) {
3538   kmp_info_t *thread;
3539   kmp_uint32 spins;
3540   int done;
3541 
3542   KMP_INIT_YIELD(spins);
3543 
3544   for (;;) {
3545     done = TRUE;
3546 
    // TODO: GEH - this may be wrong because some sync would be necessary
3548     // in case threads are added to the pool during the traversal. Need to
3549     // verify that lock for thread pool is held when calling this routine.
3550     for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
3551          thread = thread->th.th_next_pool) {
3552 #if KMP_OS_WINDOWS
3553       DWORD exit_val;
3554 #endif
3555       if (TCR_PTR(thread->th.th_task_team) == NULL) {
3556         KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3557                       __kmp_gtid_from_thread(thread)));
3558         continue;
3559       }
3560 #if KMP_OS_WINDOWS
3561       // TODO: GEH - add this check for Linux* OS / OS X* as well?
3562       if (!__kmp_is_thread_alive(thread, &exit_val)) {
3563         thread->th.th_task_team = NULL;
3564         continue;
3565       }
3566 #endif
3567 
3568       done = FALSE; // Because th_task_team pointer is not NULL for this thread
3569 
3570       KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3571                     "unreference task_team\n",
3572                     __kmp_gtid_from_thread(thread)));
3573 
3574       if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3575         void *sleep_loc;
3576         // If the thread is sleeping, awaken it.
3577         if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3578             NULL) {
3579           KA_TRACE(
3580               10,
3581               ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3582                __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3583           __kmp_null_resume_wrapper(thread);
3584         }
3585       }
3586     }
3587     if (done) {
3588       break;
3589     }
3590 
3591     // If oversubscribed or have waited a bit, yield.
3592     KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
3593   }
3594 }
3595 
3596 // __kmp_task_team_setup:  Create a task_team for the current team, but use
3597 // an already created, unused one if it already exists.
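// Each team keeps two task_team slots (t_task_team[0] and t_task_team[1]),
// indexed by the parity of th_task_state: one serves the current region while
// the other is prepared for the next one, and __kmp_task_team_sync flips the
// parity after each barrier release phase.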
3598 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
3599   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3600 
3601   // If this task_team hasn't been created yet, allocate it. It will be used in
3602   // the region after the next.
3603   // If it exists, it is the current task team and shouldn't be touched yet as
3604   // it may still be in use.
3605   if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
3606       (always || team->t.t_nproc > 1)) {
3607     team->t.t_task_team[this_thr->th.th_task_state] =
3608         __kmp_allocate_task_team(this_thr, team);
3609     KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created new task_team %p"
3610                   " for team %d at parity=%d\n",
3611                   __kmp_gtid_from_thread(this_thr),
3612                   team->t.t_task_team[this_thr->th.th_task_state], team->t.t_id,
3613                   this_thr->th.th_task_state));
3614   }
3615 
3616   // After threads exit the release, they will call sync, and then point to this
3617   // other task_team; make sure it is allocated and properly initialized. As
3618   // threads spin in the barrier release phase, they will continue to use the
3619   // previous task_team struct(above), until they receive the signal to stop
3620   // checking for tasks (they can't safely reference the kmp_team_t struct,
3621   // which could be reallocated by the primary thread). No task teams are formed
3622   // for serialized teams.
3623   if (team->t.t_nproc > 1) {
3624     int other_team = 1 - this_thr->th.th_task_state;
3625     KMP_DEBUG_ASSERT(other_team >= 0 && other_team < 2);
3626     if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
3627       team->t.t_task_team[other_team] =
3628           __kmp_allocate_task_team(this_thr, team);
3629       KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created second new "
3630                     "task_team %p for team %d at parity=%d\n",
3631                     __kmp_gtid_from_thread(this_thr),
3632                     team->t.t_task_team[other_team], team->t.t_id, other_team));
3633     } else { // Leave the old task team struct in place for the upcoming region;
3634       // adjust as needed
3635       kmp_task_team_t *task_team = team->t.t_task_team[other_team];
3636       if (!task_team->tt.tt_active ||
3637           team->t.t_nproc != task_team->tt.tt_nproc) {
3638         TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
3639         TCW_4(task_team->tt.tt_found_tasks, FALSE);
3640         TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3641         KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
3642                           team->t.t_nproc);
3643         TCW_4(task_team->tt.tt_active, TRUE);
3644       }
3645       // if team size has changed, the first thread to enable tasking will
3646       // realloc threads_data if necessary
3647       KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d reset next task_team "
3648                     "%p for team %d at parity=%d\n",
3649                     __kmp_gtid_from_thread(this_thr),
3650                     team->t.t_task_team[other_team], team->t.t_id, other_team));
3651     }
3652   }
3653 
  // For a regular thread, task enabling should be called when the task is
  // going to be pushed to a deque. However, for the hidden helper thread, we
  // need it ahead of time so that some operations can be performed without
  // race conditions.
3658   if (this_thr == __kmp_hidden_helper_main_thread) {
3659     for (int i = 0; i < 2; ++i) {
3660       kmp_task_team_t *task_team = team->t.t_task_team[i];
3661       if (KMP_TASKING_ENABLED(task_team)) {
3662         continue;
3663       }
3664       __kmp_enable_tasking(task_team, this_thr);
3665       for (int j = 0; j < task_team->tt.tt_nproc; ++j) {
3666         kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[j];
3667         if (thread_data->td.td_deque == NULL) {
3668           __kmp_alloc_task_deque(__kmp_hidden_helper_threads[j], thread_data);
3669         }
3670       }
3671     }
3672   }
3673 }
3674 
3675 // __kmp_task_team_sync: Propagation of task team data from team to threads
3676 // which happens just after the release phase of a team barrier.  This may be
3677 // called by any thread, but only for teams with # threads > 1.
3678 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
3679   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3680 
3681   // Toggle the th_task_state field, to switch which task_team this thread
3682   // refers to
3683   this_thr->th.th_task_state = (kmp_uint8)(1 - this_thr->th.th_task_state);
3684 
3685   // It is now safe to propagate the task team pointer from the team struct to
3686   // the current thread.
3687   TCW_PTR(this_thr->th.th_task_team,
3688           team->t.t_task_team[this_thr->th.th_task_state]);
3689   KA_TRACE(20,
3690            ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
3691             "%p from Team #%d (parity=%d)\n",
3692             __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
3693             team->t.t_id, this_thr->th.th_task_state));
3694 }
3695 
3696 // __kmp_task_team_wait: Primary thread waits for outstanding tasks after the
3697 // barrier gather phase. Only called by primary thread if #threads in team > 1
3698 // or if proxy tasks were created.
3699 //
3700 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
3701 // by passing in 0 optionally as the last argument. When wait is zero, primary
3702 // thread does not wait for unfinished_threads to reach 0.
3703 void __kmp_task_team_wait(
3704     kmp_info_t *this_thr,
3705     kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
3706   kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
3707 
3708   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3709   KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
3710 
3711   if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
3712     if (wait) {
3713       KA_TRACE(20, ("__kmp_task_team_wait: Primary T#%d waiting for all tasks "
3714                     "(for unfinished_threads to reach 0) on task_team = %p\n",
3715                     __kmp_gtid_from_thread(this_thr), task_team));
3716       // Worker threads may have dropped through to release phase, but could
3717       // still be executing tasks. Wait here for tasks to complete. To avoid
3718       // memory contention, only primary thread checks termination condition.
3719       kmp_flag_32<false, false> flag(
3720           RCAST(std::atomic<kmp_uint32> *,
3721                 &task_team->tt.tt_unfinished_threads),
3722           0U);
3723       flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
3724     }
3725     // Deactivate the old task team, so that the worker threads will stop
3726     // referencing it while spinning.
3727     KA_TRACE(
3728         20,
3729         ("__kmp_task_team_wait: Primary T#%d deactivating task_team %p: "
3730          "setting active to false, setting local and team's pointer to NULL\n",
3731          __kmp_gtid_from_thread(this_thr), task_team));
3732     KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
3733                      task_team->tt.tt_found_proxy_tasks == TRUE);
3734     TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3735     KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
3736     TCW_SYNC_4(task_team->tt.tt_active, FALSE);
3737     KMP_MB();
3738 
3739     TCW_PTR(this_thr->th.th_task_team, NULL);
3740   }
3741 }
3742 
3743 // __kmp_tasking_barrier:
3744 // This routine is called only when __kmp_tasking_mode == tskm_extra_barrier.
3745 // Internal function to execute all tasks prior to a regular barrier or a join
3746 // barrier. It is a full barrier itself, which unfortunately turns regular
3747 // barriers into double barriers and join barriers into 1 1/2 barriers.
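// Here the spin target is the task team's tt_unfinished_threads counter; the
// thread keeps executing tasks until that counter reaches zero or the library
// is shutting down.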
3748 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
3749   std::atomic<kmp_uint32> *spin = RCAST(
3750       std::atomic<kmp_uint32> *,
3751       &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
3752   int flag = FALSE;
3753   KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
3754 
3755 #if USE_ITT_BUILD
3756   KMP_FSYNC_SPIN_INIT(spin, NULL);
3757 #endif /* USE_ITT_BUILD */
3758   kmp_flag_32<false, false> spin_flag(spin, 0U);
3759   while (!spin_flag.execute_tasks(thread, gtid, TRUE,
3760                                   &flag USE_ITT_BUILD_ARG(NULL), 0)) {
3761 #if USE_ITT_BUILD
3762     // TODO: What about itt_sync_obj??
3763     KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
3764 #endif /* USE_ITT_BUILD */
3765 
3766     if (TCR_4(__kmp_global.g.g_done)) {
3767       if (__kmp_global.g.g_abort)
3768         __kmp_abort_thread();
3769       break;
3770     }
3771     KMP_YIELD(TRUE);
3772   }
3773 #if USE_ITT_BUILD
3774   KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
3775 #endif /* USE_ITT_BUILD */
3776 }
3777 
// __kmp_give_task puts a task into a given thread's queue if:
//  - the queue for that thread was created, and
//  - there's space in that queue.
// Because of this, __kmp_push_task needs to check if there's space after
// getting the lock.
3783 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
3784                             kmp_int32 pass) {
3785   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3786   kmp_task_team_t *task_team = taskdata->td_task_team;
3787 
3788   KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
3789                 taskdata, tid));
3790 
3791   // If task_team is NULL something went really bad...
3792   KMP_DEBUG_ASSERT(task_team != NULL);
3793 
3794   bool result = false;
3795   kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3796 
3797   if (thread_data->td.td_deque == NULL) {
3798     // There's no queue in this thread, go find another one
3799     // We're guaranteed that at least one thread has a queue
3800     KA_TRACE(30,
3801              ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
3802               tid, taskdata));
3803     return result;
3804   }
3805 
3806   if (TCR_4(thread_data->td.td_deque_ntasks) >=
3807       TASK_DEQUE_SIZE(thread_data->td)) {
3808     KA_TRACE(
3809         30,
3810         ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
3811          taskdata, tid));
3812 
    // if this deque has already grown past the current pass ratio, give
    // another thread a chance
3815     if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3816       return result;
3817 
3818     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3819     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3820         TASK_DEQUE_SIZE(thread_data->td)) {
      // expand the deque to make room for the task, which this thread is not
      // allowed to execute directly
3822       __kmp_realloc_task_deque(thread, thread_data);
3823     }
3824 
3825   } else {
3826 
3827     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3828 
3829     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3830         TASK_DEQUE_SIZE(thread_data->td)) {
3831       KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
3832                     "thread %d.\n",
3833                     taskdata, tid));
3834 
      // if this deque has already grown past the current pass ratio, give
      // another thread a chance
3837       if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3838         goto release_and_exit;
3839 
3840       __kmp_realloc_task_deque(thread, thread_data);
3841     }
3842   }
3843 
3844   // lock is held here, and there is space in the deque
3845 
3846   thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3847   // Wrap index.
3848   thread_data->td.td_deque_tail =
3849       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3850   TCW_4(thread_data->td.td_deque_ntasks,
3851         TCR_4(thread_data->td.td_deque_ntasks) + 1);
3852 
3853   result = true;
3854   KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
3855                 taskdata, tid));
3856 
3857 release_and_exit:
3858   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3859 
3860   return result;
3861 }
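// Worked example of the pass-ratio check above (illustrative; let N stand for
// INITIAL_TASK_DEQUE_SIZE):
//   pass = 1: a full deque of N entries has ratio N/N = 1 >= 1, so the task is
//             offered to the next thread instead of growing this deque.
//   pass = 2: a deque still at N entries (ratio 1 < 2) may now be doubled to
//             hold the task, while one already grown to 2*N (ratio 2 >= 2) is
//             again skipped.
// The pass value is doubled by __kmpc_give_task after each full sweep of the
// team, so deques grow only when no thread had room at the current limit.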
3862 
3863 #define PROXY_TASK_FLAG 0x40000000
/* The finish of a proxy task is divided into two pieces:
    - the top half, which can be done from a thread outside the team
    - the bottom half, which must be run from a thread within the team

   In order to run the bottom half, the task gets queued back into one of the
   threads of the team. Once the td_incomplete_child_tasks counter of the
   parent is decremented, the threads can leave the barriers. So, the bottom
   half needs to be queued before the counter is decremented. The top half is
   therefore divided into two parts:
    - things that can be run before queuing the bottom half
    - things that must be run after queuing the bottom half

   This creates a second race, as the bottom half can free the task before the
   second top half is executed. To avoid this we use the
   td_incomplete_child_tasks counter of the proxy task itself to synchronize
   the top and bottom halves. */
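// Sketch of the resulting ordering for a proxy task (derived from the helpers
// below; when the bottom half runs depends on when a team thread picks it up):
//   1. __kmp_first_top_half_finish_proxy:  mark the task complete, update the
//      taskgroup counter, set PROXY_TASK_FLAG in td_incomplete_child_tasks.
//   2. queue the bottom half into a team thread (e.g. via __kmpc_give_task).
//   3. __kmp_second_top_half_finish_proxy: decrement the parent's
//      td_incomplete_child_tasks and clear PROXY_TASK_FLAG.
//   4. __kmp_bottom_half_finish_proxy (team thread): spin until
//      PROXY_TASK_FLAG is cleared, then release dependences and free the task.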
3880 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3881   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3882   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3883   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3884   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3885 
3886   taskdata->td_flags.complete = 1; // mark the task as completed
3887 
3888   if (taskdata->td_taskgroup)
3889     KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
3890 
  // Create an imaginary child for this task so the bottom half cannot release
  // the task before we have completed the second top half
3893   KMP_ATOMIC_OR(&taskdata->td_incomplete_child_tasks, PROXY_TASK_FLAG);
3894 }
3895 
3896 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3897 #if KMP_DEBUG
3898   kmp_int32 children = 0;
3899   // Predecrement simulated by "- 1" calculation
3900   children = -1 +
3901 #endif
3902       KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
3903   KMP_DEBUG_ASSERT(children >= 0);
3904 
  // Remove the imaginary child
3906   KMP_ATOMIC_AND(&taskdata->td_incomplete_child_tasks, ~PROXY_TASK_FLAG);
3907 }
3908 
3909 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3910   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3911   kmp_info_t *thread = __kmp_threads[gtid];
3912 
3913   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3914   KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3915                    1); // top half must run before bottom half
3916 
3917   // We need to wait to make sure the top half is finished
3918   // Spinning here should be ok as this should happen quickly
3919   while ((KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) &
3920           PROXY_TASK_FLAG) > 0)
3921     ;
3922 
3923   __kmp_release_deps(gtid, taskdata);
3924   __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3925 }
3926 
3927 /*!
3928 @ingroup TASKING
3929 @param gtid Global Thread ID of encountering thread
@param ptask Task whose execution is completed

Execute the completion of a proxy task from a thread that is part of the
team. Runs both top halves and the bottom half directly.
3934 */
3935 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
3936   KMP_DEBUG_ASSERT(ptask != NULL);
3937   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3938   KA_TRACE(
3939       10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
3940            gtid, taskdata));
3941   __kmp_assert_valid_gtid(gtid);
3942   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3943 
3944   __kmp_first_top_half_finish_proxy(taskdata);
3945   __kmp_second_top_half_finish_proxy(taskdata);
3946   __kmp_bottom_half_finish_proxy(gtid, ptask);
3947 
3948   KA_TRACE(10,
3949            ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
3950             gtid, taskdata));
3951 }
3952 
3953 void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start = 0) {
3954   KMP_DEBUG_ASSERT(ptask != NULL);
3955   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3956 
3957   // Enqueue task to complete bottom half completion from a thread within the
3958   // corresponding team
3959   kmp_team_t *team = taskdata->td_team;
3960   kmp_int32 nthreads = team->t.t_nproc;
3961   kmp_info_t *thread;
3962 
3963   // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
3964   // but we cannot use __kmp_get_random here
3965   kmp_int32 start_k = start % nthreads;
3966   kmp_int32 pass = 1;
3967   kmp_int32 k = start_k;
3968 
3969   do {
3970     // For now we're just linearly trying to find a thread
3971     thread = team->t.t_threads[k];
3972     k = (k + 1) % nthreads;
3973 
3974     // we did a full pass through all the threads
3975     if (k == start_k)
3976       pass = pass << 1;
3977 
3978   } while (!__kmp_give_task(thread, k, ptask, pass));
3979 }
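// Illustrative walk-through (sketch): with nthreads = 4 and start = 2 the loop
// sweeps the team starting near index 2 and wrapping around. If no deque had
// room during a sweep with pass = 1, pass is doubled once k wraps back to
// start_k; on later sweeps __kmp_give_task may grow any deque whose size is
// still below pass * INITIAL_TASK_DEQUE_SIZE, so a slot is eventually found.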
3980 
3981 /*!
3982 @ingroup TASKING
@param ptask Task whose execution is completed

Execute the completion of a proxy task from a thread that may not belong to
the team.
3987 */
3988 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
3989   KMP_DEBUG_ASSERT(ptask != NULL);
3990   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3991 
3992   KA_TRACE(
3993       10,
3994       ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
3995        taskdata));
3996 
3997   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3998 
3999   __kmp_first_top_half_finish_proxy(taskdata);
4000 
4001   __kmpc_give_task(ptask);
4002 
4003   __kmp_second_top_half_finish_proxy(taskdata);
4004 
4005   KA_TRACE(
4006       10,
4007       ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
4008        taskdata));
4009 }
4010 
4011 kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid,
4012                                                 kmp_task_t *task) {
4013   kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
4014   if (td->td_allow_completion_event.type == KMP_EVENT_UNINITIALIZED) {
4015     td->td_allow_completion_event.type = KMP_EVENT_ALLOW_COMPLETION;
4016     td->td_allow_completion_event.ed.task = task;
4017     __kmp_init_tas_lock(&td->td_allow_completion_event.lock);
4018   }
4019   return &td->td_allow_completion_event;
4020 }
4021 
4022 void __kmp_fulfill_event(kmp_event_t *event) {
4023   if (event->type == KMP_EVENT_ALLOW_COMPLETION) {
4024     kmp_task_t *ptask = event->ed.task;
4025     kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4026     bool detached = false;
4027     int gtid = __kmp_get_gtid();
4028 
    // The associated task might have completed or could be completing at this
    // point. We need to take the lock to avoid races.
4032     __kmp_acquire_tas_lock(&event->lock, gtid);
4033     if (taskdata->td_flags.proxy == TASK_PROXY) {
4034       detached = true;
4035     } else {
4036 #if OMPT_SUPPORT
4037       // The OMPT event must occur under mutual exclusion,
4038       // otherwise the tool might access ptask after free
4039       if (UNLIKELY(ompt_enabled.enabled))
4040         __ompt_task_finish(ptask, NULL, ompt_task_early_fulfill);
4041 #endif
4042     }
4043     event->type = KMP_EVENT_UNINITIALIZED;
4044     __kmp_release_tas_lock(&event->lock, gtid);
4045 
4046     if (detached) {
4047 #if OMPT_SUPPORT
4048       // We free ptask afterwards and know the task is finished,
4049       // so locking is not necessary
4050       if (UNLIKELY(ompt_enabled.enabled))
4051         __ompt_task_finish(ptask, NULL, ompt_task_late_fulfill);
4052 #endif
      // If the task detached, complete the proxy task
4054       if (gtid >= 0) {
4055         kmp_team_t *team = taskdata->td_team;
4056         kmp_info_t *thread = __kmp_get_thread();
4057         if (thread->th.th_team == team) {
4058           __kmpc_proxy_task_completed(gtid, ptask);
4059           return;
4060         }
4061       }
4062 
4063       // fallback
4064       __kmpc_proxy_task_completed_ooo(ptask);
4065     }
4066   }
4067 }
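// Illustrative user-level path into this machinery (a sketch, not part of the
// runtime): the OpenMP 5.0 'detach' clause gives the program an event handle
// whose fulfillment ends up in __kmp_fulfill_event():
//   omp_event_handle_t ev;
//   #pragma omp task detach(ev)
//   { start_async_work(ev); }   // hypothetical user function; the task body
//                               // may return before the async work finishes
//   ...
//   omp_fulfill_event(ev);      // later, possibly from another thread
// The handle originates from __kmpc_task_allow_completion_event() above; if
// the task already suspended as a proxy, fulfillment completes it via
// __kmpc_proxy_task_completed() or __kmpc_proxy_task_completed_ooo().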
4068 
4069 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
4070 // for taskloop
4071 //
4072 // thread:   allocating thread
4073 // task_src: pointer to source task to be duplicated
4074 // returns:  a pointer to the allocated kmp_task_t structure (task).
4075 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
4076   kmp_task_t *task;
4077   kmp_taskdata_t *taskdata;
4078   kmp_taskdata_t *taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
4079   kmp_taskdata_t *parent_task = taskdata_src->td_parent; // same parent task
4080   size_t shareds_offset;
4081   size_t task_size;
4082 
4083   KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
4084                 task_src));
4085   KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
4086                    TASK_FULL); // it should not be proxy task
4087   KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
4088   task_size = taskdata_src->td_size_alloc;
4089 
4090   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
4091   KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
4092                 task_size));
4093 #if USE_FAST_MEMORY
4094   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
4095 #else
4096   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
4097 #endif /* USE_FAST_MEMORY */
4098   KMP_MEMCPY(taskdata, taskdata_src, task_size);
4099 
4100   task = KMP_TASKDATA_TO_TASK(taskdata);
4101 
4102   // Initialize new task (only specific fields not affected by memcpy)
4103   taskdata->td_task_id = KMP_GEN_TASK_ID();
  if (task->shareds != NULL) { // need to set up the shareds pointer
4105     shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
4106     task->shareds = &((char *)taskdata)[shareds_offset];
4107     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
4108                      0);
4109   }
4110   taskdata->td_alloc_thread = thread;
4111   taskdata->td_parent = parent_task;
4112   // task inherits the taskgroup from the parent task
4113   taskdata->td_taskgroup = parent_task->td_taskgroup;
4114   // tied task needs to initialize the td_last_tied at creation,
4115   // untied one does this when it is scheduled for execution
4116   if (taskdata->td_flags.tiedness == TASK_TIED)
4117     taskdata->td_last_tied = taskdata;
4118 
4119   // Only need to keep track of child task counts if team parallel and tasking
4120   // not serialized
4121   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
4122     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
4123     if (parent_task->td_taskgroup)
4124       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks
    // since implicit tasks are not deallocated
4127     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
4128       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
4129   }
4130 
4131   KA_TRACE(20,
4132            ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
4133             thread, taskdata, taskdata->td_parent));
4134 #if OMPT_SUPPORT
4135   if (UNLIKELY(ompt_enabled.enabled))
4136     __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
4137 #endif
4138   return task;
4139 }
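// Illustrative layout assumed by the shareds fix-up above: the task header,
// the kmp_task_t and the shareds block live in one contiguous allocation,
//   [ kmp_taskdata_t | kmp_task_t ... | shareds ]
//   ^taskdata                          ^task->shareds (shareds_offset bytes in)
// so after copying the whole block, re-adding the same byte offset to the new
// base keeps the copy's shareds pointer inside the copy.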
4140 
4141 // Routine optionally generated by the compiler for setting the lastprivate flag
4142 // and calling needed constructors for private/firstprivate objects
4143 // (used to form taskloop tasks from pattern task)
4144 // Parameters: dest task, src task, lastprivate flag.
4145 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
4146 
4147 KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
4148 
4149 // class to encapsulate manipulating loop bounds in a taskloop task.
4150 // this abstracts away the Intel vs GOMP taskloop interface for setting/getting
4151 // the loop bound variables.
4152 class kmp_taskloop_bounds_t {
4153   kmp_task_t *task;
4154   const kmp_taskdata_t *taskdata;
4155   size_t lower_offset;
4156   size_t upper_offset;
4157 
4158 public:
4159   kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
4160       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
4161         lower_offset((char *)lb - (char *)task),
4162         upper_offset((char *)ub - (char *)task) {
4163     KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
4164     KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
4165   }
4166   kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
4167       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
4168         lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
4169   size_t get_lower_offset() const { return lower_offset; }
4170   size_t get_upper_offset() const { return upper_offset; }
4171   kmp_uint64 get_lb() const {
4172     kmp_int64 retval;
4173 #if defined(KMP_GOMP_COMPAT)
4174     // Intel task just returns the lower bound normally
4175     if (!taskdata->td_flags.native) {
4176       retval = *(kmp_int64 *)((char *)task + lower_offset);
4177     } else {
4178       // GOMP task has to take into account the sizeof(long)
4179       if (taskdata->td_size_loop_bounds == 4) {
4180         kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
4181         retval = (kmp_int64)*lb;
4182       } else {
4183         kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
4184         retval = (kmp_int64)*lb;
4185       }
4186     }
4187 #else
4188     (void)taskdata;
4189     retval = *(kmp_int64 *)((char *)task + lower_offset);
4190 #endif // defined(KMP_GOMP_COMPAT)
4191     return retval;
4192   }
4193   kmp_uint64 get_ub() const {
4194     kmp_int64 retval;
4195 #if defined(KMP_GOMP_COMPAT)
4196     // Intel task just returns the upper bound normally
4197     if (!taskdata->td_flags.native) {
4198       retval = *(kmp_int64 *)((char *)task + upper_offset);
4199     } else {
4200       // GOMP task has to take into account the sizeof(long)
4201       if (taskdata->td_size_loop_bounds == 4) {
4202         kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
4203         retval = (kmp_int64)*ub;
4204       } else {
4205         kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
4206         retval = (kmp_int64)*ub;
4207       }
4208     }
4209 #else
4210     retval = *(kmp_int64 *)((char *)task + upper_offset);
4211 #endif // defined(KMP_GOMP_COMPAT)
4212     return retval;
4213   }
4214   void set_lb(kmp_uint64 lb) {
4215 #if defined(KMP_GOMP_COMPAT)
4216     // Intel task just sets the lower bound normally
4217     if (!taskdata->td_flags.native) {
4218       *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4219     } else {
4220       // GOMP task has to take into account the sizeof(long)
4221       if (taskdata->td_size_loop_bounds == 4) {
4222         kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
4223         *lower = (kmp_uint32)lb;
4224       } else {
4225         kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
4226         *lower = (kmp_uint64)lb;
4227       }
4228     }
4229 #else
4230     *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4231 #endif // defined(KMP_GOMP_COMPAT)
4232   }
4233   void set_ub(kmp_uint64 ub) {
4234 #if defined(KMP_GOMP_COMPAT)
4235     // Intel task just sets the upper bound normally
4236     if (!taskdata->td_flags.native) {
4237       *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4238     } else {
4239       // GOMP task has to take into account the sizeof(long)
4240       if (taskdata->td_size_loop_bounds == 4) {
4241         kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
4242         *upper = (kmp_uint32)ub;
4243       } else {
4244         kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
4245         *upper = (kmp_uint64)ub;
4246       }
4247     }
4248 #else
4249     *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4250 #endif // defined(KMP_GOMP_COMPAT)
4251   }
4252 };
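// Minimal usage sketch for the class above: the caller passes the pattern task
// and the compiler-provided bound pointers, then reads and writes the bounds
// without caring whether the task uses the Intel or the GOMP (native) layout:
//   kmp_taskloop_bounds_t bounds(task, lb, ub); // lb/ub point into *task
//   kmp_uint64 lo = bounds.get_lb();
//   kmp_uint64 hi = bounds.get_ub();
//   bounds.set_ub(lo + 7); // e.g. shrink the subrange handled by this task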
4253 
4254 // __kmp_taskloop_linear: Start tasks of the taskloop linearly
4255 //
4256 // loc        Source location information
4257 // gtid       Global thread ID
4258 // task       Pattern task, exposes the loop iteration range
4259 // lb         Pointer to loop lower bound in task structure
4260 // ub         Pointer to loop upper bound in task structure
4261 // st         Loop stride
4262 // ub_glob    Global upper bound (used for lastprivate check)
4263 // num_tasks  Number of tasks to execute
4264 // grainsize  Number of loop iterations per task
4265 // extras     Number of chunks with grainsize+1 iterations
4266 // last_chunk Reduction of grainsize for last task
4267 // tc         Iterations count
4268 // task_dup   Tasks duplication routine
4269 // codeptr_ra Return address for OMPT events
4270 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
4271                            kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4272                            kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4273                            kmp_uint64 grainsize, kmp_uint64 extras,
4274                            kmp_int64 last_chunk, kmp_uint64 tc,
4275 #if OMPT_SUPPORT
4276                            void *codeptr_ra,
4277 #endif
4278                            void *task_dup) {
4279   KMP_COUNT_BLOCK(OMP_TASKLOOP);
4280   KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
4281   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4282   // compiler provides global bounds here
4283   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4284   kmp_uint64 lower = task_bounds.get_lb();
4285   kmp_uint64 upper = task_bounds.get_ub();
4286   kmp_uint64 i;
4287   kmp_info_t *thread = __kmp_threads[gtid];
4288   kmp_taskdata_t *current_task = thread->th.th_current_task;
4289   kmp_task_t *next_task;
4290   kmp_int32 lastpriv = 0;
4291 
4292   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
4293                              (last_chunk < 0 ? last_chunk : extras));
4294   KMP_DEBUG_ASSERT(num_tasks > extras);
4295   KMP_DEBUG_ASSERT(num_tasks > 0);
4296   KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
4297                 "extras %lld, last_chunk %lld, i=%lld,%lld(%d)%lld, dup %p\n",
4298                 gtid, num_tasks, grainsize, extras, last_chunk, lower, upper,
4299                 ub_glob, st, task_dup));
4300 
4301   // Launch num_tasks tasks, assign grainsize iterations each task
4302   for (i = 0; i < num_tasks; ++i) {
4303     kmp_uint64 chunk_minus_1;
4304     if (extras == 0) {
4305       chunk_minus_1 = grainsize - 1;
4306     } else {
4307       chunk_minus_1 = grainsize;
      --extras; // first 'extras' chunks get the bigger size (grainsize+1)
4309     }
4310     upper = lower + st * chunk_minus_1;
4311     if (upper > *ub) {
4312       upper = *ub;
4313     }
4314     if (i == num_tasks - 1) {
4315       // schedule the last task, set lastprivate flag if needed
4316       if (st == 1) { // most common case
4317         KMP_DEBUG_ASSERT(upper == *ub);
4318         if (upper == ub_glob)
4319           lastpriv = 1;
4320       } else if (st > 0) { // positive loop stride
4321         KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
4322         if ((kmp_uint64)st > ub_glob - upper)
4323           lastpriv = 1;
4324       } else { // negative loop stride
4325         KMP_DEBUG_ASSERT(upper + st < *ub);
4326         if (upper - ub_glob < (kmp_uint64)(-st))
4327           lastpriv = 1;
4328       }
4329     }
4330     next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
4331     kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
4332     kmp_taskloop_bounds_t next_task_bounds =
4333         kmp_taskloop_bounds_t(next_task, task_bounds);
4334 
4335     // adjust task-specific bounds
4336     next_task_bounds.set_lb(lower);
4337     if (next_taskdata->td_flags.native) {
4338       next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
4339     } else {
4340       next_task_bounds.set_ub(upper);
4341     }
4342     if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates,
4343                            // etc.
4344       ptask_dup(next_task, task, lastpriv);
4345     KA_TRACE(40,
4346              ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
4347               "upper %lld stride %lld, (offsets %p %p)\n",
4348               gtid, i, next_task, lower, upper, st,
4349               next_task_bounds.get_lower_offset(),
4350               next_task_bounds.get_upper_offset()));
4351 #if OMPT_SUPPORT
4352     __kmp_omp_taskloop_task(NULL, gtid, next_task,
4353                             codeptr_ra); // schedule new task
4354 #else
4355     __kmp_omp_task(gtid, next_task, true); // schedule new task
4356 #endif
4357     lower = upper + st; // adjust lower bound for the next iteration
4358   }
4359   // free the pattern task and exit
  __kmp_task_start(gtid, task, current_task); // internal bookkeeping only
  // do not execute the pattern task, just do internal bookkeeping
4362   __kmp_task_finish<false>(gtid, task, current_task);
4363 }
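// Worked example of the chunking loop above (illustrative): tc = 10,
// num_tasks = 3, grainsize = 3, extras = 1, st = 1, lower = 0, upper = 9.
//   task 0: one extra consumed, chunk = grainsize + 1 = 4 -> iterations [0..3]
//   task 1: chunk = 3                                     -> iterations [4..6]
//   task 2: chunk = 3, upper == ub_glob so lastpriv = 1   -> iterations [7..9]
// which matches the invariant tc == num_tasks * grainsize + extras.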
4364 
4365 // Structure to keep taskloop parameters for auxiliary task
4366 // kept in the shareds of the task structure.
4367 typedef struct __taskloop_params {
4368   kmp_task_t *task;
4369   kmp_uint64 *lb;
4370   kmp_uint64 *ub;
4371   void *task_dup;
4372   kmp_int64 st;
4373   kmp_uint64 ub_glob;
4374   kmp_uint64 num_tasks;
4375   kmp_uint64 grainsize;
4376   kmp_uint64 extras;
4377   kmp_int64 last_chunk;
4378   kmp_uint64 tc;
4379   kmp_uint64 num_t_min;
4380 #if OMPT_SUPPORT
4381   void *codeptr_ra;
4382 #endif
4383 } __taskloop_params_t;
4384 
4385 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
4386                           kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
4387                           kmp_uint64, kmp_uint64, kmp_int64, kmp_uint64,
4388                           kmp_uint64,
4389 #if OMPT_SUPPORT
4390                           void *,
4391 #endif
4392                           void *);
4393 
4394 // Execute part of the taskloop submitted as a task.
4395 int __kmp_taskloop_task(int gtid, void *ptask) {
4396   __taskloop_params_t *p =
4397       (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
4398   kmp_task_t *task = p->task;
4399   kmp_uint64 *lb = p->lb;
4400   kmp_uint64 *ub = p->ub;
4401   void *task_dup = p->task_dup;
4402   //  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4403   kmp_int64 st = p->st;
4404   kmp_uint64 ub_glob = p->ub_glob;
4405   kmp_uint64 num_tasks = p->num_tasks;
4406   kmp_uint64 grainsize = p->grainsize;
4407   kmp_uint64 extras = p->extras;
4408   kmp_int64 last_chunk = p->last_chunk;
4409   kmp_uint64 tc = p->tc;
4410   kmp_uint64 num_t_min = p->num_t_min;
4411 #if OMPT_SUPPORT
4412   void *codeptr_ra = p->codeptr_ra;
4413 #endif
4414 #if KMP_DEBUG
4415   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4416   KMP_DEBUG_ASSERT(task != NULL);
4417   KA_TRACE(20,
4418            ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
4419             " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
4420             gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
4421             st, task_dup));
4422 #endif
4423   KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
4424   if (num_tasks > num_t_min)
4425     __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
4426                          grainsize, extras, last_chunk, tc, num_t_min,
4427 #if OMPT_SUPPORT
4428                          codeptr_ra,
4429 #endif
4430                          task_dup);
4431   else
4432     __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
4433                           grainsize, extras, last_chunk, tc,
4434 #if OMPT_SUPPORT
4435                           codeptr_ra,
4436 #endif
4437                           task_dup);
4438 
4439   KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
4440   return 0;
4441 }
4442 
4443 // Schedule part of the taskloop as a task,
4444 // execute the rest of the taskloop.
4445 //
4446 // loc        Source location information
4447 // gtid       Global thread ID
4448 // task       Pattern task, exposes the loop iteration range
4449 // lb         Pointer to loop lower bound in task structure
4450 // ub         Pointer to loop upper bound in task structure
4451 // st         Loop stride
4452 // ub_glob    Global upper bound (used for lastprivate check)
4453 // num_tasks  Number of tasks to execute
4454 // grainsize  Number of loop iterations per task
4455 // extras     Number of chunks with grainsize+1 iterations
4456 // last_chunk Reduction of grainsize for last task
4457 // tc         Iterations count
4458 // num_t_min  Threshold to launch tasks recursively
4459 // task_dup   Tasks duplication routine
4460 // codeptr_ra Return address for OMPT events
4461 void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
4462                           kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4463                           kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4464                           kmp_uint64 grainsize, kmp_uint64 extras,
4465                           kmp_int64 last_chunk, kmp_uint64 tc,
4466                           kmp_uint64 num_t_min,
4467 #if OMPT_SUPPORT
4468                           void *codeptr_ra,
4469 #endif
4470                           void *task_dup) {
4471   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4472   KMP_DEBUG_ASSERT(task != NULL);
4473   KMP_DEBUG_ASSERT(num_tasks > num_t_min);
4474   KA_TRACE(20,
4475            ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
4476             " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
4477             gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
4478             st, task_dup));
4479   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4480   kmp_uint64 lower = *lb;
4481   kmp_info_t *thread = __kmp_threads[gtid];
4482   //  kmp_taskdata_t *current_task = thread->th.th_current_task;
4483   kmp_task_t *next_task;
4484   size_t lower_offset =
4485       (char *)lb - (char *)task; // remember offset of lb in the task structure
4486   size_t upper_offset =
4487       (char *)ub - (char *)task; // remember offset of ub in the task structure
4488 
4489   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
4490                              (last_chunk < 0 ? last_chunk : extras));
4491   KMP_DEBUG_ASSERT(num_tasks > extras);
4492   KMP_DEBUG_ASSERT(num_tasks > 0);
4493 
4494   // split the loop in two halves
4495   kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
4496   kmp_int64 last_chunk0 = 0, last_chunk1 = 0;
4497   kmp_uint64 gr_size0 = grainsize;
4498   kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
4499   kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
4500   if (last_chunk < 0) {
4501     ext0 = ext1 = 0;
4502     last_chunk1 = last_chunk;
4503     tc0 = grainsize * n_tsk0;
4504     tc1 = tc - tc0;
4505   } else if (n_tsk0 <= extras) {
4506     gr_size0++; // integrate extras into grainsize
4507     ext0 = 0; // no extra iters in 1st half
4508     ext1 = extras - n_tsk0; // remaining extras
4509     tc0 = gr_size0 * n_tsk0;
4510     tc1 = tc - tc0;
4511   } else { // n_tsk0 > extras
4512     ext1 = 0; // no extra iters in 2nd half
4513     ext0 = extras;
4514     tc1 = grainsize * n_tsk1;
4515     tc0 = tc - tc1;
4516   }
4517   ub0 = lower + st * (tc0 - 1);
4518   lb1 = ub0 + st;
4519 
4520   // create pattern task for 2nd half of the loop
4521   next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
4522   // adjust lower bound (upper bound is not changed) for the 2nd half
4523   *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
4524   if (ptask_dup != NULL) // construct firstprivates, etc.
4525     ptask_dup(next_task, task, 0);
4526   *ub = ub0; // adjust upper bound for the 1st half
4527 
4528   // create auxiliary task for 2nd half of the loop
4529   // make sure new task has same parent task as the pattern task
4530   kmp_taskdata_t *current_task = thread->th.th_current_task;
4531   thread->th.th_current_task = taskdata->td_parent;
4532   kmp_task_t *new_task =
4533       __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
4534                             sizeof(__taskloop_params_t), &__kmp_taskloop_task);
4535   // restore current task
4536   thread->th.th_current_task = current_task;
4537   __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
4538   p->task = next_task;
4539   p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
4540   p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
4541   p->task_dup = task_dup;
4542   p->st = st;
4543   p->ub_glob = ub_glob;
4544   p->num_tasks = n_tsk1;
4545   p->grainsize = grainsize;
4546   p->extras = ext1;
4547   p->last_chunk = last_chunk1;
4548   p->tc = tc1;
4549   p->num_t_min = num_t_min;
4550 #if OMPT_SUPPORT
4551   p->codeptr_ra = codeptr_ra;
4552 #endif
4553 
4554 #if OMPT_SUPPORT
4555   // schedule new task with correct return address for OMPT events
4556   __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
4557 #else
4558   __kmp_omp_task(gtid, new_task, true); // schedule new task
4559 #endif
4560 
4561   // execute the 1st half of current subrange
4562   if (n_tsk0 > num_t_min)
4563     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
4564                          ext0, last_chunk0, tc0, num_t_min,
4565 #if OMPT_SUPPORT
4566                          codeptr_ra,
4567 #endif
4568                          task_dup);
4569   else
4570     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
4571                           gr_size0, ext0, last_chunk0, tc0,
4572 #if OMPT_SUPPORT
4573                           codeptr_ra,
4574 #endif
4575                           task_dup);
4576 
4577   KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
4578 }
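// Worked example of the split above (illustrative): num_tasks = 7,
// grainsize = 3, extras = 2, last_chunk = 0, tc = 23.
//   n_tsk0 = 3, n_tsk1 = 4, and since n_tsk0 > extras:
//     ext0 = 2, ext1 = 0, tc1 = 3 * 4 = 12, tc0 = 23 - 12 = 11
// so the half executed here keeps 3 tasks carrying both extra iterations
// (tc0 = 11), while the scheduled half gets 4 even tasks of 3 iterations
// (tc1 = 12).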
4579 
4580 static void __kmp_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
4581                            kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4582                            int nogroup, int sched, kmp_uint64 grainsize,
4583                            int modifier, void *task_dup) {
4584   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4585   KMP_DEBUG_ASSERT(task != NULL);
4586   if (nogroup == 0) {
4587 #if OMPT_SUPPORT && OMPT_OPTIONAL
4588     OMPT_STORE_RETURN_ADDRESS(gtid);
4589 #endif
4590     __kmpc_taskgroup(loc, gtid);
4591   }
4592 
4593   // =========================================================================
4594   // calculate loop parameters
4595   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4596   kmp_uint64 tc;
4597   // compiler provides global bounds here
4598   kmp_uint64 lower = task_bounds.get_lb();
4599   kmp_uint64 upper = task_bounds.get_ub();
4600   kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
4601   kmp_uint64 num_tasks = 0, extras = 0;
4602   kmp_int64 last_chunk =
4603       0; // reduce grainsize of last task by last_chunk in strict mode
4604   kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
4605   kmp_info_t *thread = __kmp_threads[gtid];
4606   kmp_taskdata_t *current_task = thread->th.th_current_task;
4607 
4608   KA_TRACE(20, ("__kmp_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
4609                 "grain %llu(%d, %d), dup %p\n",
4610                 gtid, taskdata, lower, upper, st, grainsize, sched, modifier,
4611                 task_dup));
4612 
4613   // compute trip count
4614   if (st == 1) { // most common case
4615     tc = upper - lower + 1;
4616   } else if (st < 0) {
4617     tc = (lower - upper) / (-st) + 1;
4618   } else { // st > 0
4619     tc = (upper - lower) / st + 1;
4620   }
4621   if (tc == 0) {
4622     KA_TRACE(20, ("__kmp_taskloop(exit): T#%d zero-trip loop\n", gtid));
4623     // free the pattern task and exit
4624     __kmp_task_start(gtid, task, current_task);
4625     // do not execute anything for zero-trip loop
4626     __kmp_task_finish<false>(gtid, task, current_task);
4627     return;
4628   }
4629 
4630 #if OMPT_SUPPORT && OMPT_OPTIONAL
4631   ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
4632   ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
4633   if (ompt_enabled.ompt_callback_work) {
4634     ompt_callbacks.ompt_callback(ompt_callback_work)(
4635         ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
4636         &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4637   }
4638 #endif
4639 
4640   if (num_tasks_min == 0)
    // TODO: can we choose a better default heuristic?
4642     num_tasks_min =
4643         KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
4644 
4645   // compute num_tasks/grainsize based on the input provided
4646   switch (sched) {
4647   case 0: // no schedule clause specified, we can choose the default
4648     // let's try to schedule (team_size*10) tasks
4649     grainsize = thread->th.th_team_nproc * 10;
4650     KMP_FALLTHROUGH();
4651   case 2: // num_tasks provided
4652     if (grainsize > tc) {
4653       num_tasks = tc; // too big num_tasks requested, adjust values
4654       grainsize = 1;
4655       extras = 0;
4656     } else {
4657       num_tasks = grainsize;
4658       grainsize = tc / num_tasks;
4659       extras = tc % num_tasks;
4660     }
4661     break;
4662   case 1: // grainsize provided
4663     if (grainsize > tc) {
4664       num_tasks = 1;
4665       grainsize = tc; // too big grainsize requested, adjust values
4666       extras = 0;
4667     } else {
4668       if (modifier) {
4669         num_tasks = (tc + grainsize - 1) / grainsize;
4670         last_chunk = tc - (num_tasks * grainsize);
4671         extras = 0;
4672       } else {
4673         num_tasks = tc / grainsize;
4674         // adjust grainsize for balanced distribution of iterations
4675         grainsize = tc / num_tasks;
4676         extras = tc % num_tasks;
4677       }
4678     }
4679     break;
4680   default:
4681     KMP_ASSERT2(0, "unknown scheduling of taskloop");
4682   }
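  // Worked examples of the computation above, for tc = 10 iterations:
  //   num_tasks(4)        -> num_tasks = 4, grainsize = 2, extras = 2
  //   grainsize(4)        -> num_tasks = 2, grainsize = 5, extras = 0
  //   grainsize(4) strict -> num_tasks = 3, grainsize = 4, last_chunk = -2
  // Each case satisfies the invariant checked by the asserts just below.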
4683 
4684   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
4685                              (last_chunk < 0 ? last_chunk : extras));
4686   KMP_DEBUG_ASSERT(num_tasks > extras);
4687   KMP_DEBUG_ASSERT(num_tasks > 0);
4688   // =========================================================================
4689 
  // check the value of the if clause first
  // Also force linear spawning for GOMP_taskloop (taskdata->td_flags.native)
4692   if (if_val == 0) { // if(0) specified, mark task as serial
4693     taskdata->td_flags.task_serial = 1;
4694     taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
4695     // always start serial tasks linearly
4696     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4697                           grainsize, extras, last_chunk, tc,
4698 #if OMPT_SUPPORT
4699                           OMPT_GET_RETURN_ADDRESS(0),
4700 #endif
4701                           task_dup);
4702     // !taskdata->td_flags.native => currently force linear spawning of tasks
4703     // for GOMP_taskloop
4704   } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
4705     KA_TRACE(20, ("__kmp_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
4706                   "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
4707                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
4708                   last_chunk));
4709     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4710                          grainsize, extras, last_chunk, tc, num_tasks_min,
4711 #if OMPT_SUPPORT
4712                          OMPT_GET_RETURN_ADDRESS(0),
4713 #endif
4714                          task_dup);
4715   } else {
4716     KA_TRACE(20, ("__kmp_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
4717                   "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
4718                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
4719                   last_chunk));
4720     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4721                           grainsize, extras, last_chunk, tc,
4722 #if OMPT_SUPPORT
4723                           OMPT_GET_RETURN_ADDRESS(0),
4724 #endif
4725                           task_dup);
4726   }
4727 
4728 #if OMPT_SUPPORT && OMPT_OPTIONAL
4729   if (ompt_enabled.ompt_callback_work) {
4730     ompt_callbacks.ompt_callback(ompt_callback_work)(
4731         ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
4732         &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4733   }
4734 #endif
4735 
4736   if (nogroup == 0) {
4737 #if OMPT_SUPPORT && OMPT_OPTIONAL
4738     OMPT_STORE_RETURN_ADDRESS(gtid);
4739 #endif
4740     __kmpc_end_taskgroup(loc, gtid);
4741   }
4742   KA_TRACE(20, ("__kmp_taskloop(exit): T#%d\n", gtid));
4743 }
4744 
4745 /*!
4746 @ingroup TASKING
4747 @param loc       Source location information
4748 @param gtid      Global thread ID
4749 @param task      Task structure
4750 @param if_val    Value of the if clause
4751 @param lb        Pointer to loop lower bound in task structure
4752 @param ub        Pointer to loop upper bound in task structure
4753 @param st        Loop stride
4754 @param nogroup   Flag, 1 if nogroup clause specified, 0 otherwise
4755 @param sched     Schedule specified 0/1/2 for none/grainsize/num_tasks
4756 @param grainsize Schedule value if specified
4757 @param task_dup  Tasks duplication routine
4758 
4759 Execute the taskloop construct.
4760 */
4761 void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
4762                      kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
4763                      int sched, kmp_uint64 grainsize, void *task_dup) {
4764   __kmp_assert_valid_gtid(gtid);
4765   KA_TRACE(20, ("__kmpc_taskloop(enter): T#%d\n", gtid));
4766   __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
4767                  0, task_dup);
4768   KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
4769 }
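// Illustrative source-level view (a sketch; actual compiler codegen differs in
// detail): a loop such as
//   #pragma omp taskloop num_tasks(8)
//   for (long i = lo; i < hi; ++i) body(i);
// is outlined into a pattern task whose private data carries the bounds, and
// the compiler then emits a call of the form
//   __kmpc_taskloop(loc, gtid, pattern_task, /*if_val=*/1, &lb, &ub, /*st=*/1,
//                   /*nogroup=*/0, /*sched=*/2, /*grainsize=*/8, task_dup);
// where sched = 2 means the value passed as 'grainsize' is really the
// requested number of tasks (see the sched switch in __kmp_taskloop).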
4770 
4771 /*!
4772 @ingroup TASKING
4773 @param loc       Source location information
4774 @param gtid      Global thread ID
4775 @param task      Task structure
4776 @param if_val    Value of the if clause
4777 @param lb        Pointer to loop lower bound in task structure
4778 @param ub        Pointer to loop upper bound in task structure
4779 @param st        Loop stride
4780 @param nogroup   Flag, 1 if nogroup clause specified, 0 otherwise
4781 @param sched     Schedule specified 0/1/2 for none/grainsize/num_tasks
4782 @param grainsize Schedule value if specified
@param modifier Modifier 'strict' for sched, 1 if present, 0 otherwise
4784 @param task_dup  Tasks duplication routine
4785 
4786 Execute the taskloop construct.
4787 */
4788 void __kmpc_taskloop_5(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
4789                        kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4790                        int nogroup, int sched, kmp_uint64 grainsize,
4791                        int modifier, void *task_dup) {
4792   __kmp_assert_valid_gtid(gtid);
4793   KA_TRACE(20, ("__kmpc_taskloop_5(enter): T#%d\n", gtid));
4794   __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
4795                  modifier, task_dup);
4796   KA_TRACE(20, ("__kmpc_taskloop_5(exit): T#%d\n", gtid));
4797 }
4798