1 /*
2  * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 //                     The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "kmp.h"
15 #include "kmp_i18n.h"
16 #include "kmp_itt.h"
17 #include "kmp_stats.h"
18 #include "kmp_wait_release.h"
19 
20 #if OMPT_SUPPORT
21 #include "ompt-specific.h"
22 #endif
23 
24 #include "tsan_annotations.h"
25 
26 /* forward declaration */
27 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
28                                  kmp_info_t *this_thr);
29 static void __kmp_alloc_task_deque(kmp_info_t *thread,
30                                    kmp_thread_data_t *thread_data);
31 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
32                                            kmp_task_team_t *task_team);
33 
34 #ifdef OMP_45_ENABLED
35 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
36 #endif
37 
38 #ifdef BUILD_TIED_TASK_STACK
39 
40 //  __kmp_trace_task_stack: print the tied tasks from the task stack in order
//  from top to bottom
42 //
43 //  gtid: global thread identifier for thread containing stack
44 //  thread_data: thread data for task team thread containing stack
45 //  threshold: value above which the trace statement triggers
46 //  location: string identifying call site of this function (for trace)
47 static void __kmp_trace_task_stack(kmp_int32 gtid,
48                                    kmp_thread_data_t *thread_data,
49                                    int threshold, char *location) {
50   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
51   kmp_taskdata_t **stack_top = task_stack->ts_top;
52   kmp_int32 entries = task_stack->ts_entries;
53   kmp_taskdata_t *tied_task;
54 
55   KA_TRACE(
56       threshold,
57       ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
58        "first_block = %p, stack_top = %p \n",
       location, gtid, entries, &task_stack->ts_first_block, stack_top));
60 
61   KMP_DEBUG_ASSERT(stack_top != NULL);
62   KMP_DEBUG_ASSERT(entries > 0);
63 
64   while (entries != 0) {
65     KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
66     // fix up ts_top if we need to pop from previous block
    if ((entries & TASK_STACK_INDEX_MASK) == 0) {
68       kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
69 
70       stack_block = stack_block->sb_prev;
71       stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
72     }
73 
74     // finish bookkeeping
75     stack_top--;
76     entries--;
77 
78     tied_task = *stack_top;
79 
80     KMP_DEBUG_ASSERT(tied_task != NULL);
81     KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
82 
83     KA_TRACE(threshold,
84              ("__kmp_trace_task_stack(%s):             gtid=%d, entry=%d, "
85               "stack_top=%p, tied_task=%p\n",
86               location, gtid, entries, stack_top, tied_task));
87   }
88   KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
89 
90   KA_TRACE(threshold,
91            ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
92             location, gtid));
93 }
94 
95 //  __kmp_init_task_stack: initialize the task stack for the first time
96 //  after a thread_data structure is created.
97 //  It should not be necessary to do this again (assuming the stack works).
98 //
99 //  gtid: global thread identifier of calling thread
100 //  thread_data: thread data for task team thread containing stack
101 static void __kmp_init_task_stack(kmp_int32 gtid,
102                                   kmp_thread_data_t *thread_data) {
103   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
104   kmp_stack_block_t *first_block;
105 
106   // set up the first block of the stack
107   first_block = &task_stack->ts_first_block;
108   task_stack->ts_top = (kmp_taskdata_t **)first_block;
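  // The cast works because sb_block is the first member of kmp_stack_block_t,
  // so ts_top starts out pointing at the first free slot of the first block.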
109   memset((void *)first_block, '\0',
110          TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
111 
112   // initialize the stack to be empty
113   task_stack->ts_entries = TASK_STACK_EMPTY;
114   first_block->sb_next = NULL;
115   first_block->sb_prev = NULL;
116 }
117 
118 //  __kmp_free_task_stack: free the task stack when thread_data is destroyed.
119 //
120 //  gtid: global thread identifier for calling thread
121 //  thread_data: thread info for thread containing stack
122 static void __kmp_free_task_stack(kmp_int32 gtid,
123                                   kmp_thread_data_t *thread_data) {
124   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
125   kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
126 
127   KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
128   // free from the second block of the stack
129   while (stack_block != NULL) {
130     kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
131 
132     stack_block->sb_next = NULL;
133     stack_block->sb_prev = NULL;
134     if (stack_block != &task_stack->ts_first_block) {
      __kmp_thread_free(__kmp_thread_from_gtid(gtid),
                        stack_block); // free the block, if not the first
137     }
138     stack_block = next_block;
139   }
140   // initialize the stack to be empty
141   task_stack->ts_entries = 0;
142   task_stack->ts_top = NULL;
143 }
144 
145 //  __kmp_push_task_stack: Push the tied task onto the task stack.
146 //     Grow the stack if necessary by allocating another block.
147 //
148 //  gtid: global thread identifier for calling thread
149 //  thread: thread info for thread containing stack
150 //  tied_task: the task to push on the stack
151 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
152                                   kmp_taskdata_t *tied_task) {
153   // GEH - need to consider what to do if tt_threads_data not allocated yet
154   kmp_thread_data_t *thread_data =
155       &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
156   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
157 
158   if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
159     return; // Don't push anything on stack if team or team tasks are serialized
160   }
161 
162   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
163   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
164 
165   KA_TRACE(20,
166            ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
167             gtid, thread, tied_task));
168   // Store entry
169   *(task_stack->ts_top) = tied_task;
170 
171   // Do bookkeeping for next push
172   task_stack->ts_top++;
173   task_stack->ts_entries++;
174 
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
176     // Find beginning of this task block
177     kmp_stack_block_t *stack_block =
178         (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
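    // ts_top was just advanced past the last slot of the current block, so
    // backing up TASK_STACK_BLOCK_SIZE entries lands on sb_block[0], whose
    // address is also that of the enclosing kmp_stack_block_t.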
179 
180     // Check if we already have a block
181     if (stack_block->sb_next !=
182         NULL) { // reset ts_top to beginning of next block
183       task_stack->ts_top = &stack_block->sb_next->sb_block[0];
184     } else { // Alloc new block and link it up
185       kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
186           thread, sizeof(kmp_stack_block_t));
187 
188       task_stack->ts_top = &new_block->sb_block[0];
189       stack_block->sb_next = new_block;
190       new_block->sb_prev = stack_block;
191       new_block->sb_next = NULL;
192 
193       KA_TRACE(
194           30,
195           ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
196            gtid, tied_task, new_block));
197     }
198   }
199   KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
200                 tied_task));
201 }
202 
203 //  __kmp_pop_task_stack: Pop the tied task from the task stack.  Don't return
204 //  the task, just check to make sure it matches the ending task passed in.
205 //
206 //  gtid: global thread identifier for the calling thread
207 //  thread: thread info structure containing stack
208 //  tied_task: the task popped off the stack
209 //  ending_task: the task that is ending (should match popped task)
210 static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
211                                  kmp_taskdata_t *ending_task) {
212   // GEH - need to consider what to do if tt_threads_data not allocated yet
213   kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
215   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
216   kmp_taskdata_t *tied_task;
217 
218   if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
219     // Don't pop anything from stack if team or team tasks are serialized
220     return;
221   }
222 
223   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
224   KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
225 
226   KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
227                 thread));
228 
229   // fix up ts_top if we need to pop from previous block
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
231     kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
232 
233     stack_block = stack_block->sb_prev;
234     task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
235   }
236 
237   // finish bookkeeping
238   task_stack->ts_top--;
239   task_stack->ts_entries--;
240 
241   tied_task = *(task_stack->ts_top);
242 
243   KMP_DEBUG_ASSERT(tied_task != NULL);
244   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
245   KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
246 
247   KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
248                 tied_task));
249   return;
250 }
251 #endif /* BUILD_TIED_TASK_STACK */
252 
253 //  __kmp_push_task: Add a task to the thread's deque
254 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
255   kmp_info_t *thread = __kmp_threads[gtid];
256   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
257   kmp_task_team_t *task_team = thread->th.th_task_team;
258   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
259   kmp_thread_data_t *thread_data;
260 
261   KA_TRACE(20,
262            ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
263 
264   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
265     // untied task needs to increment counter so that the task structure is not
266     // freed prematurely
267     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
268     KMP_DEBUG_USE_VAR(counter);
269     KA_TRACE(
270         20,
271         ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
272          gtid, counter, taskdata));
273   }
274 
275   // The first check avoids building task_team thread data if serialized
276   if (taskdata->td_flags.task_serial) {
277     KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
278                   "TASK_NOT_PUSHED for task %p\n",
279                   gtid, taskdata));
280     return TASK_NOT_PUSHED;
281   }
282 
283   // Now that serialized tasks have returned, we can assume that we are not in
284   // immediate exec mode
285   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
286   if (!KMP_TASKING_ENABLED(task_team)) {
287     __kmp_enable_tasking(task_team, thread);
288   }
289   KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
290   KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
291 
292   // Find tasking deque specific to encountering thread
293   thread_data = &task_team->tt.tt_threads_data[tid];
294 
295   // No lock needed since only owner can allocate
296   if (thread_data->td.td_deque == NULL) {
297     __kmp_alloc_task_deque(thread, thread_data);
298   }
299 
300   // Check if deque is full
301   if (TCR_4(thread_data->td.td_deque_ntasks) >=
302       TASK_DEQUE_SIZE(thread_data->td)) {
303     KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
304                   "TASK_NOT_PUSHED for task %p\n",
305                   gtid, taskdata));
306     return TASK_NOT_PUSHED;
307   }
308 
309   // Lock the deque for the task push operation
310   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
311 
312 #if OMP_45_ENABLED
313   // Need to recheck as we can get a proxy task from a thread outside of OpenMP
314   if (TCR_4(thread_data->td.td_deque_ntasks) >=
315       TASK_DEQUE_SIZE(thread_data->td)) {
316     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
317     KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; returning "
318                   "TASK_NOT_PUSHED for task %p\n",
319                   gtid, taskdata));
320     return TASK_NOT_PUSHED;
321   }
322 #else
  // Must have room since only the calling thread can add tasks to this deque
324   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
325                    TASK_DEQUE_SIZE(thread_data->td));
326 #endif
327 
328   thread_data->td.td_deque[thread_data->td.td_deque_tail] =
329       taskdata; // Push taskdata
330   // Wrap index.
331   thread_data->td.td_deque_tail =
332       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
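  // TASK_DEQUE_MASK is the deque size minus one; the size is kept a power of
  // two, so the bitwise AND above wraps the tail index around the circular
  // buffer.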
333   TCW_4(thread_data->td.td_deque_ntasks,
334         TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
335 
336   KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
337                 "task=%p ntasks=%d head=%u tail=%u\n",
338                 gtid, taskdata, thread_data->td.td_deque_ntasks,
339                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
340 
341   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
342 
343   return TASK_SUCCESSFULLY_PUSHED;
344 }
345 
346 // __kmp_pop_current_task_from_thread: set up current task from called thread
347 // when team ends
348 //
349 // this_thr: thread structure to set current_task in.
350 void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
351   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
352                 "this_thread=%p, curtask=%p, "
353                 "curtask_parent=%p\n",
354                 0, this_thr, this_thr->th.th_current_task,
355                 this_thr->th.th_current_task->td_parent));
356 
357   this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
358 
359   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
360                 "this_thread=%p, curtask=%p, "
361                 "curtask_parent=%p\n",
362                 0, this_thr, this_thr->th.th_current_task,
363                 this_thr->th.th_current_task->td_parent));
364 }
365 
366 // __kmp_push_current_task_to_thread: set up current task in called thread for a
367 // new team
368 //
369 // this_thr: thread structure to set up
370 // team: team for implicit task data
371 // tid: thread within team to set up
372 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
373                                        int tid) {
  // The thread's current task becomes the parent of the newly created
  // implicit tasks of the new team
376   KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
377                 "curtask=%p "
378                 "parent_task=%p\n",
379                 tid, this_thr, this_thr->th.th_current_task,
380                 team->t.t_implicit_task_taskdata[tid].td_parent));
381 
382   KMP_DEBUG_ASSERT(this_thr != NULL);
383 
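  // The master thread (tid 0) keeps its own current task as the parent of the
  // team's implicit task 0; the workers then reuse that recorded parent so all
  // implicit tasks of the team share the same ancestor.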
384   if (tid == 0) {
385     if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
386       team->t.t_implicit_task_taskdata[0].td_parent =
387           this_thr->th.th_current_task;
388       this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
389     }
390   } else {
391     team->t.t_implicit_task_taskdata[tid].td_parent =
392         team->t.t_implicit_task_taskdata[0].td_parent;
393     this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
394   }
395 
396   KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
397                 "curtask=%p "
398                 "parent_task=%p\n",
399                 tid, this_thr, this_thr->th.th_current_task,
400                 team->t.t_implicit_task_taskdata[tid].td_parent));
401 }
402 
403 // __kmp_task_start: bookkeeping for a task starting execution
404 //
// gtid: global thread id of calling thread
406 // task: task starting execution
407 // current_task: task suspending
408 static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
409                              kmp_taskdata_t *current_task) {
410   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
411   kmp_info_t *thread = __kmp_threads[gtid];
412 
413   KA_TRACE(10,
414            ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
415             gtid, taskdata, current_task));
416 
417   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
418 
419   // mark currently executing task as suspended
420   // TODO: GEH - make sure root team implicit task is initialized properly.
421   // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
422   current_task->td_flags.executing = 0;
423 
424 // Add task to stack if tied
425 #ifdef BUILD_TIED_TASK_STACK
426   if (taskdata->td_flags.tiedness == TASK_TIED) {
427     __kmp_push_task_stack(gtid, thread, taskdata);
428   }
429 #endif /* BUILD_TIED_TASK_STACK */
430 
431   // mark starting task as executing and as current task
432   thread->th.th_current_task = taskdata;
433 
434   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
435                    taskdata->td_flags.tiedness == TASK_UNTIED);
436   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
437                    taskdata->td_flags.tiedness == TASK_UNTIED);
438   taskdata->td_flags.started = 1;
439   taskdata->td_flags.executing = 1;
440   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
441   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
442 
443   // GEH TODO: shouldn't we pass some sort of location identifier here?
444   // APT: yes, we will pass location here.
445   // need to store current thread state (in a thread or taskdata structure)
446   // before setting work_state, otherwise wrong state is set after end of task
447 
448   KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
449 
450   return;
451 }
452 
453 #if OMPT_SUPPORT
454 //------------------------------------------------------------------------------
455 // __ompt_task_init:
456 //   Initialize OMPT fields maintained by a task. This will only be called after
457 //   ompt_start_tool, so we already know whether ompt is enabled or not.
458 
459 static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
460   // The calls to __ompt_task_init already have the ompt_enabled condition.
461   task->ompt_task_info.task_data.value = 0;
462   task->ompt_task_info.frame.exit_frame = NULL;
463   task->ompt_task_info.frame.enter_frame = NULL;
464 #if OMP_40_ENABLED
465   task->ompt_task_info.ndeps = 0;
466   task->ompt_task_info.deps = NULL;
467 #endif /* OMP_40_ENABLED */
468 }
469 
470 // __ompt_task_start:
471 //   Build and trigger task-begin event
472 static inline void __ompt_task_start(kmp_task_t *task,
473                                      kmp_taskdata_t *current_task,
474                                      kmp_int32 gtid) {
475   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
476   ompt_task_status_t status = ompt_task_others;
477   if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
478     status = ompt_task_yield;
479     __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
480   }
481   /* let OMPT know that we're about to run this task */
482   if (ompt_enabled.ompt_callback_task_schedule) {
483     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
484         &(current_task->ompt_task_info.task_data), status,
485         &(taskdata->ompt_task_info.task_data));
486   }
487   taskdata->ompt_task_info.scheduling_parent = current_task;
488 }
489 
490 // __ompt_task_finish:
491 //   Build and trigger final task-schedule event
492 static inline void
493 __ompt_task_finish(kmp_task_t *task, kmp_taskdata_t *resumed_task,
494                    ompt_task_status_t status = ompt_task_complete) {
495   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
496   if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
497       taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
498     status = ompt_task_cancel;
499   }
500 
501   /* let OMPT know that we're returning to the callee task */
502   if (ompt_enabled.ompt_callback_task_schedule) {
503     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
504         &(taskdata->ompt_task_info.task_data), status,
505         &((resumed_task ? resumed_task
506                         : (taskdata->ompt_task_info.scheduling_parent
507                                ? taskdata->ompt_task_info.scheduling_parent
508                                : taskdata->td_parent))
509               ->ompt_task_info.task_data));
510   }
511 }
512 #endif
513 
514 template <bool ompt>
515 static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
516                                                kmp_task_t *task,
517                                                void *frame_address,
518                                                void *return_address) {
519   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
520   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
521 
522   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
523                 "current_task=%p\n",
524                 gtid, loc_ref, taskdata, current_task));
525 
526   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
527     // untied task needs to increment counter so that the task structure is not
528     // freed prematurely
529     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
530     KMP_DEBUG_USE_VAR(counter);
531     KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
532                   "incremented for task %p\n",
533                   gtid, counter, taskdata));
534   }
535 
536   taskdata->td_flags.task_serial =
537       1; // Execute this task immediately, not deferred.
538   __kmp_task_start(gtid, task, current_task);
539 
540 #if OMPT_SUPPORT
541   if (ompt) {
542     if (current_task->ompt_task_info.frame.enter_frame == NULL) {
543       current_task->ompt_task_info.frame.enter_frame =
544           taskdata->ompt_task_info.frame.exit_frame = frame_address;
545     }
546     if (ompt_enabled.ompt_callback_task_create) {
547       ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
548       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
549           &(parent_info->task_data), &(parent_info->frame),
550           &(taskdata->ompt_task_info.task_data),
551           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
552           return_address);
553     }
554     __ompt_task_start(task, current_task, gtid);
555   }
556 #endif // OMPT_SUPPORT
557 
558   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
559                 loc_ref, taskdata));
560 }
561 
562 #if OMPT_SUPPORT
563 OMPT_NOINLINE
564 static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
565                                            kmp_task_t *task,
566                                            void *frame_address,
567                                            void *return_address) {
568   __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
569                                            return_address);
570 }
571 #endif // OMPT_SUPPORT
572 
573 // __kmpc_omp_task_begin_if0: report that a given serialized task has started
574 // execution
575 //
576 // loc_ref: source location information; points to beginning of task block.
577 // gtid: global thread number.
578 // task: task thunk for the started task.
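// ("if0" refers to undeferred tasks, e.g. those generated with an if clause
// evaluating to false: the compiler emits this call, runs the task body
// inline, then calls __kmpc_omp_task_complete_if0.)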
579 void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
580                                kmp_task_t *task) {
581 #if OMPT_SUPPORT
582   if (UNLIKELY(ompt_enabled.enabled)) {
583     OMPT_STORE_RETURN_ADDRESS(gtid);
584     __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
585                                    OMPT_GET_FRAME_ADDRESS(1),
586                                    OMPT_LOAD_RETURN_ADDRESS(gtid));
587     return;
588   }
589 #endif
590   __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
591 }
592 
593 #ifdef TASK_UNUSED
594 // __kmpc_omp_task_begin: report that a given task has started execution
595 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
596 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
597   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
598 
599   KA_TRACE(
600       10,
601       ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
602        gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
603 
604   __kmp_task_start(gtid, task, current_task);
605 
606   KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
607                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
608   return;
609 }
610 #endif // TASK_UNUSED
611 
612 // __kmp_free_task: free the current task space and the space for shareds
613 //
614 // gtid: Global thread ID of calling thread
615 // taskdata: task to free
616 // thread: thread data structure of caller
617 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
618                             kmp_info_t *thread) {
619   KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
620                 taskdata));
621 
622   // Check to make sure all flags and counters have the correct values
623   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
624   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
625   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
626   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
627   KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
628                    taskdata->td_flags.task_serial == 1);
629   KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
630 
631   taskdata->td_flags.freed = 1;
632   ANNOTATE_HAPPENS_BEFORE(taskdata);
633 // deallocate the taskdata and shared variable blocks associated with this task
634 #if USE_FAST_MEMORY
635   __kmp_fast_free(thread, taskdata);
636 #else /* ! USE_FAST_MEMORY */
637   __kmp_thread_free(thread, taskdata);
638 #endif
639 
640   KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
641 }
642 
643 // __kmp_free_task_and_ancestors: free the current task and ancestors without
644 // children
645 //
646 // gtid: Global thread ID of calling thread
647 // taskdata: task to free
648 // thread: thread data structure of caller
649 static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
650                                           kmp_taskdata_t *taskdata,
651                                           kmp_info_t *thread) {
652 #if OMP_45_ENABLED
653   // Proxy tasks must always be allowed to free their parents
654   // because they can be run in background even in serial mode.
655   kmp_int32 team_serial =
656       (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
657       !taskdata->td_flags.proxy;
658 #else
659   kmp_int32 team_serial =
660       taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser;
661 #endif
662   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
663 
664   kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
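  // KMP_ATOMIC_DEC returns the value prior to the decrement, so the "- 1"
  // above yields the number of allocated children remaining.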
665   KMP_DEBUG_ASSERT(children >= 0);
666 
667   // Now, go up the ancestor tree to see if any ancestors can now be freed.
668   while (children == 0) {
669     kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
670 
671     KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
672                   "and freeing itself\n",
673                   gtid, taskdata));
674 
675     // --- Deallocate my ancestor task ---
676     __kmp_free_task(gtid, taskdata, thread);
677 
678     taskdata = parent_taskdata;
679 
680     // Stop checking ancestors at implicit task instead of walking up ancestor
681     // tree to avoid premature deallocation of ancestors.
682     if (team_serial || taskdata->td_flags.tasktype == TASK_IMPLICIT)
683       return;
684 
685     // Predecrement simulated by "- 1" calculation
686     children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
687     KMP_DEBUG_ASSERT(children >= 0);
688   }
689 
690   KA_TRACE(
691       20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
692            "not freeing it yet\n",
693            gtid, taskdata, children));
694 }
695 
696 // __kmp_task_finish: bookkeeping to do when a task finishes execution
697 //
698 // gtid: global thread ID for calling thread
699 // task: task to be finished
700 // resumed_task: task to be resumed.  (may be NULL if task is serialized)
701 template <bool ompt>
702 static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
703                               kmp_taskdata_t *resumed_task) {
704   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
705   kmp_info_t *thread = __kmp_threads[gtid];
706   kmp_task_team_t *task_team =
707       thread->th.th_task_team; // might be NULL for serial teams...
708   kmp_int32 children = 0;
709 
710   KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
711                 "task %p\n",
712                 gtid, taskdata, resumed_task));
713 
714   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
715 
716 // Pop task from stack if tied
717 #ifdef BUILD_TIED_TASK_STACK
718   if (taskdata->td_flags.tiedness == TASK_TIED) {
719     __kmp_pop_task_stack(gtid, thread, taskdata);
720   }
721 #endif /* BUILD_TIED_TASK_STACK */
722 
723   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
724     // untied task needs to check the counter so that the task structure is not
725     // freed prematurely
726     kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
727     KA_TRACE(
728         20,
729         ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
730          gtid, counter, taskdata));
731     if (counter > 0) {
      // untied task is not done; it may be continued by another thread, so do
      // not free it yet
734       if (resumed_task == NULL) {
735         KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
736         resumed_task = taskdata->td_parent; // In a serialized task, the resumed
737         // task is the parent
738       }
739       thread->th.th_current_task = resumed_task; // restore current_task
740       resumed_task->td_flags.executing = 1; // resume previous task
741       KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
742                     "resuming task %p\n",
743                     gtid, taskdata, resumed_task));
744       return;
745     }
746   }
747 #if OMPT_SUPPORT
748   if (ompt)
749     __ompt_task_finish(task, resumed_task);
750 #endif
751 
752   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
753   taskdata->td_flags.complete = 1; // mark the task as completed
754   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
755   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
756 
757   // Only need to keep track of count if team parallel and tasking not
758   // serialized
759   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
760     // Predecrement simulated by "- 1" calculation
761     children =
762         KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
763     KMP_DEBUG_ASSERT(children >= 0);
764 #if OMP_40_ENABLED
765     if (taskdata->td_taskgroup)
766       KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
767 #if OMP_45_ENABLED
768   }
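  // With OMP_45_ENABLED the brace above closes the child-count block, and the
  // dependence release below is re-gated so that dependences originating from
  // proxy tasks are honored even when tasking is serialized.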
769   // if we found proxy tasks there could exist a dependency chain
770   // with the proxy task as origin
771   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) ||
772       (task_team && task_team->tt.tt_found_proxy_tasks)) {
773 #endif
774     __kmp_release_deps(gtid, taskdata);
775 #endif
776   }
777 
778   // td_flags.executing must be marked as 0 after __kmp_release_deps has been
  // called. Otherwise, if a task is executed immediately from the release_deps
780   // code, the flag will be reset to 1 again by this same function
781   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
782   taskdata->td_flags.executing = 0; // suspend the finishing task
783 
784   KA_TRACE(
785       20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
786            gtid, taskdata, children));
787 
788 #if OMP_40_ENABLED
789   /* If the tasks' destructor thunk flag has been set, we need to invoke the
790      destructor thunk that has been generated by the compiler. The code is
791      placed here, since at this point other tasks might have been released
     hence overlapping the destructor invocations with some other work in the
793      released tasks.  The OpenMP spec is not specific on when the destructors
794      are invoked, so we should be free to choose. */
795   if (taskdata->td_flags.destructors_thunk) {
796     kmp_routine_entry_t destr_thunk = task->data1.destructors;
797     KMP_ASSERT(destr_thunk);
798     destr_thunk(gtid, task);
799   }
800 #endif // OMP_40_ENABLED
801 
802   // bookkeeping for resuming task:
803   // GEH - note tasking_ser => task_serial
804   KMP_DEBUG_ASSERT(
805       (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
806       taskdata->td_flags.task_serial);
807   if (taskdata->td_flags.task_serial) {
808     if (resumed_task == NULL) {
809       resumed_task = taskdata->td_parent; // In a serialized task, the resumed
810       // task is the parent
811     }
812   } else {
813     KMP_DEBUG_ASSERT(resumed_task !=
                     NULL); // verify that resumed task is passed as argument
815   }
816 
817   // Free this task and then ancestor tasks if they have no children.
818   // Restore th_current_task first as suggested by John:
819   // johnmc: if an asynchronous inquiry peers into the runtime system
820   // it doesn't see the freed task as the current task.
821   thread->th.th_current_task = resumed_task;
822   __kmp_free_task_and_ancestors(gtid, taskdata, thread);
823 
824   // TODO: GEH - make sure root team implicit task is initialized properly.
825   // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
826   resumed_task->td_flags.executing = 1; // resume previous task
827 
828   KA_TRACE(
829       10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
830            gtid, taskdata, resumed_task));
831 
832   return;
833 }
834 
835 template <bool ompt>
836 static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
837                                                   kmp_int32 gtid,
838                                                   kmp_task_t *task) {
839   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
840                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
841   // this routine will provide task to resume
842   __kmp_task_finish<ompt>(gtid, task, NULL);
843 
844   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
845                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
846 
847 #if OMPT_SUPPORT
848   if (ompt) {
849     omp_frame_t *ompt_frame;
850     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
851     ompt_frame->enter_frame = NULL;
852   }
853 #endif
854 
855   return;
856 }
857 
858 #if OMPT_SUPPORT
859 OMPT_NOINLINE
860 void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
861                                        kmp_task_t *task) {
862   __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
863 }
864 #endif // OMPT_SUPPORT
865 
866 // __kmpc_omp_task_complete_if0: report that a task has completed execution
867 //
868 // loc_ref: source location information; points to end of task block.
869 // gtid: global thread number.
870 // task: task thunk for the completed task.
871 void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
872                                   kmp_task_t *task) {
873 #if OMPT_SUPPORT
874   if (UNLIKELY(ompt_enabled.enabled)) {
875     __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
876     return;
877   }
878 #endif
879   __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
880 }
881 
882 #ifdef TASK_UNUSED
883 // __kmpc_omp_task_complete: report that a task has completed execution
884 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
885 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
886                               kmp_task_t *task) {
887   KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
888                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
889 
890   __kmp_task_finish<false>(gtid, task,
891                            NULL); // Not sure how to find task to resume
892 
893   KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
894                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
895   return;
896 }
897 #endif // TASK_UNUSED
898 
899 // __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
900 // task for a given thread
901 //
902 // loc_ref:  reference to source location of parallel region
903 // this_thr:  thread data structure corresponding to implicit task
904 // team: team for this_thr
905 // tid: thread id of given thread within team
906 // set_curr_task: TRUE if need to push current task to thread
907 // NOTE: Routine does not set up the implicit task ICVS.  This is assumed to
908 // have already been done elsewhere.
909 // TODO: Get better loc_ref.  Value passed in may be NULL
910 void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
911                               kmp_team_t *team, int tid, int set_curr_task) {
912   kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
913 
914   KF_TRACE(
915       10,
916       ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
917        tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
918 
919   task->td_task_id = KMP_GEN_TASK_ID();
920   task->td_team = team;
921   //    task->td_parent   = NULL;  // fix for CQ230101 (broken parent task info
922   //    in debugger)
923   task->td_ident = loc_ref;
924   task->td_taskwait_ident = NULL;
925   task->td_taskwait_counter = 0;
926   task->td_taskwait_thread = 0;
927 
928   task->td_flags.tiedness = TASK_TIED;
929   task->td_flags.tasktype = TASK_IMPLICIT;
930 #if OMP_45_ENABLED
931   task->td_flags.proxy = TASK_FULL;
932 #endif
933 
934   // All implicit tasks are executed immediately, not deferred
935   task->td_flags.task_serial = 1;
936   task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
937   task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
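  // Of these flags: tasking_ser records that the library runs in
  // immediate-execution mode, team_serial that the enclosing team is
  // serialized, and task_serial that this particular task executes
  // immediately rather than being deferred.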
938 
939   task->td_flags.started = 1;
940   task->td_flags.executing = 1;
941   task->td_flags.complete = 0;
942   task->td_flags.freed = 0;
943 
944 #if OMP_40_ENABLED
945   task->td_depnode = NULL;
946 #endif
947   task->td_last_tied = task;
948 
949   if (set_curr_task) { // only do this init first time thread is created
950     KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
951     // Not used: don't need to deallocate implicit task
952     KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
953 #if OMP_40_ENABLED
954     task->td_taskgroup = NULL; // An implicit task does not have taskgroup
955     task->td_dephash = NULL;
956 #endif
957     __kmp_push_current_task_to_thread(this_thr, team, tid);
958   } else {
959     KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
960     KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
961   }
962 
963 #if OMPT_SUPPORT
964   if (UNLIKELY(ompt_enabled.enabled))
965     __ompt_task_init(task, tid);
966 #endif
967 
968   KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
969                 team, task));
970 }
971 
// __kmp_finish_implicit_task: Release resources associated with implicit tasks
973 // at the end of parallel regions. Some resources are kept for reuse in the next
974 // parallel region.
975 //
976 // thread:  thread data structure corresponding to implicit task
977 void __kmp_finish_implicit_task(kmp_info_t *thread) {
978   kmp_taskdata_t *task = thread->th.th_current_task;
979   if (task->td_dephash)
980     __kmp_dephash_free_entries(thread, task->td_dephash);
981 }
982 
// __kmp_free_implicit_task: Release resources associated with implicit tasks
// when these tasks are destroyed
985 //
986 // thread:  thread data structure corresponding to implicit task
987 void __kmp_free_implicit_task(kmp_info_t *thread) {
988   kmp_taskdata_t *task = thread->th.th_current_task;
989   if (task && task->td_dephash) {
990     __kmp_dephash_free(thread, task->td_dephash);
991     task->td_dephash = NULL;
992   }
993 }
994 
// Round up a size to a multiple of val, where val is a power of two: used to
// insert padding between structures co-allocated using a single malloc() call
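// For example, rounding a size of 52 up to val = 8 clears the low bits
// (52 -> 48) and then adds val, giving 56; already-aligned sizes are returned
// unchanged.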
997 static size_t __kmp_round_up_to_val(size_t size, size_t val) {
998   if (size & (val - 1)) {
999     size &= ~(val - 1);
1000     if (size <= KMP_SIZE_T_MAX - val) {
1001       size += val; // Round up if there is no overflow.
1002     }
1003   }
1004   return size;
} // __kmp_round_up_to_val
1006 
1007 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1008 //
1009 // loc_ref: source location information
1010 // gtid: global thread number.
1011 // flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1012 // task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1013 // sizeof_kmp_task_t:  Size in bytes of kmp_task_t data structure including
1014 // private vars accessed in task.
1015 // sizeof_shareds:  Size in bytes of array of pointers to shared vars accessed
1016 // in task.
1017 // task_entry: Pointer to task code entry point generated by compiler.
1018 // returns: a pointer to the allocated kmp_task_t structure (task).
1019 kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1020                              kmp_tasking_flags_t *flags,
1021                              size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1022                              kmp_routine_entry_t task_entry) {
1023   kmp_task_t *task;
1024   kmp_taskdata_t *taskdata;
1025   kmp_info_t *thread = __kmp_threads[gtid];
1026   kmp_team_t *team = thread->th.th_team;
1027   kmp_taskdata_t *parent_task = thread->th.th_current_task;
1028   size_t shareds_offset;
1029 
1030   if (!TCR_4(__kmp_init_middle))
1031     __kmp_middle_initialize();
1032 
1033   KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1034                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1035                 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1036                 sizeof_shareds, task_entry));
1037 
1038   if (parent_task->td_flags.final) {
1039     if (flags->merged_if0) {
1040     }
1041     flags->final = 1;
1042   }
1043   if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
    // An untied task encountered causes the TSC algorithm to check the entire
    // deque of the victim thread. If no untied task is encountered, then
    // checking the head of the deque should be enough.
1047     KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1048   }
1049 
1050 #if OMP_45_ENABLED
1051   if (flags->proxy == TASK_PROXY) {
1052     flags->tiedness = TASK_UNTIED;
1053     flags->merged_if0 = 1;
1054 
    /* Are we running in a serialized parallel region or in tskm_immediate_exec
       mode? Either way we need tasking support enabled. */
1057     if ((thread->th.th_task_team) == NULL) {
      /* This should only happen if the team is serialized;
         set up a task team and propagate it to the thread */
1060       KMP_DEBUG_ASSERT(team->t.t_serialized);
1061       KA_TRACE(30,
1062                ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1063                 gtid));
1064       __kmp_task_team_setup(
1065           thread, team,
1066           1); // 1 indicates setup the current team regardless of nthreads
1067       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1068     }
1069     kmp_task_team_t *task_team = thread->th.th_task_team;
1070 
1071     /* tasking must be enabled now as the task might not be pushed */
1072     if (!KMP_TASKING_ENABLED(task_team)) {
1073       KA_TRACE(
1074           30,
1075           ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1076       __kmp_enable_tasking(task_team, thread);
1077       kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1078       kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1079       // No lock needed since only owner can allocate
1080       if (thread_data->td.td_deque == NULL) {
1081         __kmp_alloc_task_deque(thread, thread_data);
1082       }
1083     }
1084 
1085     if (task_team->tt.tt_found_proxy_tasks == FALSE)
1086       TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1087   }
1088 #endif
1089 
1090   // Calculate shared structure offset including padding after kmp_task_t struct
1091   // to align pointers in shared struct
1092   shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1093   shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
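  // The single allocation is laid out as:
  //   [ kmp_taskdata_t | kmp_task_t + private data | padding | shareds ]
  // so task->shareds can be set as an offset from taskdata below.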
1094 
1095   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1096   KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1097                 shareds_offset));
1098   KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1099                 sizeof_shareds));
1100 
1101 // Avoid double allocation here by combining shareds with taskdata
1102 #if USE_FAST_MEMORY
1103   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1104                                                                sizeof_shareds);
1105 #else /* ! USE_FAST_MEMORY */
1106   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1107                                                                sizeof_shareds);
1108 #endif /* USE_FAST_MEMORY */
1109   ANNOTATE_HAPPENS_AFTER(taskdata);
1110 
1111   task = KMP_TASKDATA_TO_TASK(taskdata);
1112 
1113 // Make sure task & taskdata are aligned appropriately
1114 #if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
1115   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1116   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1117 #else
1118   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1119   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1120 #endif
1121   if (sizeof_shareds > 0) {
1122     // Avoid double allocation here by combining shareds with taskdata
1123     task->shareds = &((char *)taskdata)[shareds_offset];
1124     // Make sure shareds struct is aligned to pointer size
1125     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1126                      0);
1127   } else {
1128     task->shareds = NULL;
1129   }
1130   task->routine = task_entry;
1131   task->part_id = 0; // AC: Always start with 0 part id
1132 
1133   taskdata->td_task_id = KMP_GEN_TASK_ID();
1134   taskdata->td_team = team;
1135   taskdata->td_alloc_thread = thread;
1136   taskdata->td_parent = parent_task;
1137   taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1138   KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1139   taskdata->td_ident = loc_ref;
1140   taskdata->td_taskwait_ident = NULL;
1141   taskdata->td_taskwait_counter = 0;
1142   taskdata->td_taskwait_thread = 0;
1143   KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1144 #if OMP_45_ENABLED
1145   // avoid copying icvs for proxy tasks
1146   if (flags->proxy == TASK_FULL)
1147 #endif
1148     copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1149 
1150   taskdata->td_flags.tiedness = flags->tiedness;
1151   taskdata->td_flags.final = flags->final;
1152   taskdata->td_flags.merged_if0 = flags->merged_if0;
1153 #if OMP_40_ENABLED
1154   taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
1155 #endif // OMP_40_ENABLED
1156 #if OMP_45_ENABLED
1157   taskdata->td_flags.proxy = flags->proxy;
1158   taskdata->td_task_team = thread->th.th_task_team;
1159   taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1160 #endif
1161   taskdata->td_flags.tasktype = TASK_EXPLICIT;
1162 
1163   // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1164   taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1165 
1166   // GEH - TODO: fix this to copy parent task's value of team_serial flag
1167   taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1168 
1169   // GEH - Note we serialize the task if the team is serialized to make sure
1170   // implicit parallel region tasks are not left until program termination to
1171   // execute. Also, it helps locality to execute immediately.
1172 
1173   taskdata->td_flags.task_serial =
1174       (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1175        taskdata->td_flags.tasking_ser);
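  // A task whose generating (parent) task is final is an included task and
  // therefore must also be executed immediately.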
1176 
1177   taskdata->td_flags.started = 0;
1178   taskdata->td_flags.executing = 0;
1179   taskdata->td_flags.complete = 0;
1180   taskdata->td_flags.freed = 0;
1181 
1182   taskdata->td_flags.native = flags->native;
1183 
1184   KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
1185   // start at one because counts current task and children
1186   KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1187 #if OMP_40_ENABLED
1188   taskdata->td_taskgroup =
1189       parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1190   taskdata->td_dephash = NULL;
1191   taskdata->td_depnode = NULL;
1192 #endif
1193   if (flags->tiedness == TASK_UNTIED)
1194     taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1195   else
1196     taskdata->td_last_tied = taskdata;
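  // td_last_tied is consulted by the task scheduling constraint checks when
  // deciding whether a stolen task may be executed by a particular thread.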
1197 
1198 #if OMPT_SUPPORT
1199   if (UNLIKELY(ompt_enabled.enabled))
1200     __ompt_task_init(taskdata, gtid);
1201 #endif
1202 // Only need to keep track of child task counts if team parallel and tasking not
1203 // serialized or if it is a proxy task
1204 #if OMP_45_ENABLED
1205   if (flags->proxy == TASK_PROXY ||
1206       !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1207 #else
1208   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1209 #endif
1210   {
1211     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1212 #if OMP_40_ENABLED
1213     if (parent_task->td_taskgroup)
1214       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
1215 #endif
    // Only need to keep track of allocated child tasks for explicit tasks,
    // since implicit tasks are never deallocated
1218     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1219       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1220     }
1221   }
1222 
1223   KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1224                 gtid, taskdata, taskdata->td_parent));
1225   ANNOTATE_HAPPENS_BEFORE(task);
1226 
1227   return task;
1228 }
1229 
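// A rough sketch of how the compiler typically uses this entry point for a
// deferred "#pragma omp task" (variable names here are illustrative only):
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(&loc, gtid, /*flags=*/1 /*tied*/,
//                                         sizeof_kmp_task_t, sizeof_shareds,
//                                         &task_entry);
//   // ... copy firstprivates / shared pointers into t ...
//   __kmpc_omp_task(&loc, gtid, t); // enqueue, or execute immediately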
1230 kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1231                                   kmp_int32 flags, size_t sizeof_kmp_task_t,
1232                                   size_t sizeof_shareds,
1233                                   kmp_routine_entry_t task_entry) {
1234   kmp_task_t *retval;
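  // The compiler passes the task flags packed into a kmp_int32; reinterpret
  // that word in place as the kmp_tasking_flags_t bit-field layout.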
1235   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1236 
1237   input_flags->native = FALSE;
1238 // __kmp_task_alloc() sets up all other runtime flags
1239 
1240 #if OMP_45_ENABLED
1241   KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s) "
1242                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1243                 gtid, loc_ref, input_flags->tiedness ? "tied  " : "untied",
1244                 input_flags->proxy ? "proxy" : "", sizeof_kmp_task_t,
1245                 sizeof_shareds, task_entry));
1246 #else
1247   KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s) "
1248                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1249                 gtid, loc_ref, input_flags->tiedness ? "tied  " : "untied",
1250                 sizeof_kmp_task_t, sizeof_shareds, task_entry));
1251 #endif
1252 
1253   retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1254                             sizeof_shareds, task_entry);
1255 
1256   KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1257 
1258   return retval;
1259 }
1260 
1261 //  __kmp_invoke_task: invoke the specified task
1262 //
1263 // gtid: global thread ID of caller
1264 // task: the task to invoke
// current_task: the task to resume after task invocation
1266 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1267                               kmp_taskdata_t *current_task) {
1268   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1269   kmp_info_t *thread;
1270 #if OMP_40_ENABLED
1271   int discard = 0 /* false */;
1272 #endif
1273   KA_TRACE(
1274       30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1275            gtid, taskdata, current_task));
1276   KMP_DEBUG_ASSERT(task);
1277 #if OMP_45_ENABLED
1278   if (taskdata->td_flags.proxy == TASK_PROXY &&
1279       taskdata->td_flags.complete == 1) {
1280     // This is a proxy task that was already completed but it needs to run
1281     // its bottom-half finish
1282     KA_TRACE(
1283         30,
1284         ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1285          gtid, taskdata));
1286 
1287     __kmp_bottom_half_finish_proxy(gtid, task);
1288 
1289     KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1290                   "proxy task %p, resuming task %p\n",
1291                   gtid, taskdata, current_task));
1292 
1293     return;
1294   }
1295 #endif
1296 
1297 #if OMPT_SUPPORT
1298   // For untied tasks, the first task executed only calls __kmpc_omp_task and
1299   // does not execute code.
1300   ompt_thread_info_t oldInfo;
1301   if (UNLIKELY(ompt_enabled.enabled)) {
1302     // Store the threads states and restore them after the task
1303     thread = __kmp_threads[gtid];
1304     oldInfo = thread->th.ompt_thread_info;
1305     thread->th.ompt_thread_info.wait_id = 0;
1306     thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1307                                             ? omp_state_work_serial
1308                                             : omp_state_work_parallel;
1309     taskdata->ompt_task_info.frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
1310   }
1311 #endif
1312 
1313 #if OMP_45_ENABLED
1314   // Proxy tasks are not handled by the runtime
1315   if (taskdata->td_flags.proxy != TASK_PROXY) {
1316 #endif
1317     ANNOTATE_HAPPENS_AFTER(task);
1318     __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1319 #if OMP_45_ENABLED
1320   }
1321 #endif
1322 
1323 #if OMP_40_ENABLED
1324   // TODO: cancel tasks if the parallel region has also been cancelled
1325   // TODO: check if this sequence can be hoisted above __kmp_task_start
1326   // if cancellation has been enabled for this run ...
1327   if (__kmp_omp_cancellation) {
1328     thread = __kmp_threads[gtid];
1329     kmp_team_t *this_team = thread->th.th_team;
1330     kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1331     if ((taskgroup && taskgroup->cancel_request) ||
1332         (this_team->t.t_cancel_request == cancel_parallel)) {
1333 #if OMPT_SUPPORT && OMPT_OPTIONAL
1334       ompt_data_t *task_data;
1335       if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1336         __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1337         ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1338             task_data,
1339             ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1340                                                       : ompt_cancel_parallel) |
1341                 ompt_cancel_discarded_task,
1342             NULL);
1343       }
1344 #endif
1345       KMP_COUNT_BLOCK(TASK_cancelled);
      // this task's taskgroup or parallel region was cancelled, so discard it
1347       discard = 1 /* true */;
1348     }
1349   }
1350 
1351   // Invoke the task routine and pass in relevant data.
1352   // Thunks generated by gcc take a different argument list.
1353   if (!discard) {
1354     if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1355       taskdata->td_last_tied = current_task->td_last_tied;
1356       KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1357     }
1358 #if KMP_STATS_ENABLED
1359     KMP_COUNT_BLOCK(TASK_executed);
1360     switch (KMP_GET_THREAD_STATE()) {
1361     case FORK_JOIN_BARRIER:
1362       KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1363       break;
1364     case PLAIN_BARRIER:
1365       KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1366       break;
1367     case TASKYIELD:
1368       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1369       break;
1370     case TASKWAIT:
1371       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1372       break;
1373     case TASKGROUP:
1374       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1375       break;
1376     default:
1377       KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1378       break;
1379     }
1380 #endif // KMP_STATS_ENABLED
1381 #endif // OMP_40_ENABLED
1382 
1383 // OMPT task begin
1384 #if OMPT_SUPPORT
1385     if (UNLIKELY(ompt_enabled.enabled))
1386       __ompt_task_start(task, current_task, gtid);
1387 #endif
1388 
1389 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1390     kmp_uint64 cur_time;
1391     kmp_int32 kmp_itt_count_task =
1392         __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1393         current_task->td_flags.tasktype == TASK_IMPLICIT;
1394     if (kmp_itt_count_task) {
1395       thread = __kmp_threads[gtid];
1396       // Time outer level explicit task on barrier for adjusting imbalance time
1397       if (thread->th.th_bar_arrive_time)
1398         cur_time = __itt_get_timestamp();
1399       else
1400         kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1401     }
1402 #endif
1403 
1404 #ifdef KMP_GOMP_COMPAT
1405     if (taskdata->td_flags.native) {
1406       ((void (*)(void *))(*(task->routine)))(task->shareds);
1407     } else
1408 #endif /* KMP_GOMP_COMPAT */
1409     {
1410       (*(task->routine))(gtid, task);
1411     }
1412     KMP_POP_PARTITIONED_TIMER();
1413 
1414 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1415     if (kmp_itt_count_task) {
1416       // Barrier imbalance - adjust arrive time with the task duration
1417       thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1418     }
1419 #endif
1420 
1421 #if OMP_40_ENABLED
1422   }
1423 #endif // OMP_40_ENABLED
1424 
1426 #if OMP_45_ENABLED
1427   // Proxy tasks are not handled by the runtime
1428   if (taskdata->td_flags.proxy != TASK_PROXY) {
1429 #endif
1430     ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
1431 #if OMPT_SUPPORT
1432     if (UNLIKELY(ompt_enabled.enabled)) {
1433       thread->th.ompt_thread_info = oldInfo;
1434       if (taskdata->td_flags.tiedness == TASK_TIED) {
1435         taskdata->ompt_task_info.frame.exit_frame = NULL;
1436       }
1437       __kmp_task_finish<true>(gtid, task, current_task);
1438     } else
1439 #endif
1440       __kmp_task_finish<false>(gtid, task, current_task);
1441 #if OMP_45_ENABLED
1442   }
1443 #endif
1444 
1445   KA_TRACE(
1446       30,
1447       ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1448        gtid, taskdata, current_task));
1449   return;
1450 }
1451 
1452 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1453 //
1454 // loc_ref: location of original task pragma (ignored)
1455 // gtid: Global Thread ID of encountering thread
// new_task: task thunk allocated by __kmpc_omp_task_alloc() for the
// "new task"
1457 // Returns:
1458 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1459 //    be resumed later.
1460 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1461 //    resumed later.
1462 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1463                                 kmp_task_t *new_task) {
1464   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1465 
1466   KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1467                 loc_ref, new_taskdata));
1468 
1469 #if OMPT_SUPPORT
1470   kmp_taskdata_t *parent;
1471   if (UNLIKELY(ompt_enabled.enabled)) {
1472     parent = new_taskdata->td_parent;
1473     if (ompt_enabled.ompt_callback_task_create) {
1474       ompt_data_t task_data = ompt_data_none;
1475       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1476           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1477           parent ? &(parent->ompt_task_info.frame) : NULL,
1478           &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1479           OMPT_GET_RETURN_ADDRESS(0));
1480     }
1481   }
1482 #endif
1483 
1484   /* Should we execute the new task or queue it? For now, let's just always try
1485      to queue it.  If the queue fills up, then we'll execute it.  */
1486 
1487   if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1488   { // Execute this task immediately
1489     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1490     new_taskdata->td_flags.task_serial = 1;
1491     __kmp_invoke_task(gtid, new_task, current_task);
1492   }
1493 
1494   KA_TRACE(
1495       10,
1496       ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1497        "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
1498        gtid, loc_ref, new_taskdata));
1499 
1500   ANNOTATE_HAPPENS_BEFORE(new_task);
1501 #if OMPT_SUPPORT
1502   if (UNLIKELY(ompt_enabled.enabled)) {
1503     parent->ompt_task_info.frame.enter_frame = NULL;
1504   }
1505 #endif
1506   return TASK_CURRENT_NOT_QUEUED;
1507 }
1508 
1509 // __kmp_omp_task: Schedule a non-thread-switchable task for execution
1510 //
1511 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
1513 // serialize_immediate: if TRUE then if the task is executed immediately its
1514 // execution will be serialized
1515 // Returns:
1516 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1517 //    be resumed later.
1518 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1519 //    resumed later.
1520 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1521                          bool serialize_immediate) {
1522   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1523 
1524 /* Should we execute the new task or queue it? For now, let's just always try to
1525    queue it.  If the queue fills up, then we'll execute it.  */
1526 #if OMP_45_ENABLED
1527   if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1528       __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1529 #else
1530   if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1531 #endif
1532   { // Execute this task immediately
1533     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1534     if (serialize_immediate)
1535       new_taskdata->td_flags.task_serial = 1;
1536     __kmp_invoke_task(gtid, new_task, current_task);
1537   }
1538 
1539   ANNOTATE_HAPPENS_BEFORE(new_task);
1540   return TASK_CURRENT_NOT_QUEUED;
1541 }
1542 
1543 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1544 // non-thread-switchable task from the parent thread only!
1545 //
1546 // loc_ref: location of original task pragma (ignored)
1547 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
1550 // Returns:
1551 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1552 //    be resumed later.
1553 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1554 //    resumed later.
1555 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1556                           kmp_task_t *new_task) {
1557   kmp_int32 res;
1558   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1559 
1560 #if KMP_DEBUG || OMPT_SUPPORT
1561   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1562 #endif
1563   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1564                 new_taskdata));
1565 
1566 #if OMPT_SUPPORT
1567   kmp_taskdata_t *parent = NULL;
1568   if (UNLIKELY(ompt_enabled.enabled)) {
1569     if (!new_taskdata->td_flags.started) {
1570       OMPT_STORE_RETURN_ADDRESS(gtid);
1571       parent = new_taskdata->td_parent;
1572       if (!parent->ompt_task_info.frame.enter_frame) {
1573         parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1574       }
1575       if (ompt_enabled.ompt_callback_task_create) {
1576         ompt_data_t task_data = ompt_data_none;
1577         ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1578             parent ? &(parent->ompt_task_info.task_data) : &task_data,
1579             parent ? &(parent->ompt_task_info.frame) : NULL,
1580             &(new_taskdata->ompt_task_info.task_data),
1581             ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1582             OMPT_LOAD_RETURN_ADDRESS(gtid));
1583       }
1584     } else {
1585       // We are scheduling the continuation of an UNTIED task.
1586       // Scheduling back to the parent task.
1587       __ompt_task_finish(new_task,
1588                          new_taskdata->ompt_task_info.scheduling_parent,
1589                          ompt_task_others);
1590       new_taskdata->ompt_task_info.frame.exit_frame = NULL;
1591     }
1592   }
1593 #endif
1594 
1595   res = __kmp_omp_task(gtid, new_task, true);
1596 
1597   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1598                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1599                 gtid, loc_ref, new_taskdata));
1600 #if OMPT_SUPPORT
1601   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1602     parent->ompt_task_info.frame.enter_frame = NULL;
1603   }
1604 #endif
1605   return res;
1606 }
1607 
1608 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
1609 // a taskloop task with the correct OMPT return address
1610 //
1611 // loc_ref: location of original task pragma (ignored)
1612 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
1615 // codeptr_ra: return address for OMPT callback
1616 // Returns:
1617 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1618 //    be resumed later.
1619 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1620 //    resumed later.
1621 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
1622                                   kmp_task_t *new_task, void *codeptr_ra) {
1623   kmp_int32 res;
1624   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1625 
1626 #if KMP_DEBUG || OMPT_SUPPORT
1627   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1628 #endif
1629   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1630                 new_taskdata));
1631 
1632 #if OMPT_SUPPORT
1633   kmp_taskdata_t *parent = NULL;
1634   if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
1635     parent = new_taskdata->td_parent;
1636     if (!parent->ompt_task_info.frame.enter_frame)
1637       parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1638     if (ompt_enabled.ompt_callback_task_create) {
1639       ompt_data_t task_data = ompt_data_none;
1640       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1641           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1642           parent ? &(parent->ompt_task_info.frame) : NULL,
1643           &(new_taskdata->ompt_task_info.task_data),
1644           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1645           codeptr_ra);
1646     }
1647   }
1648 #endif
1649 
1650   res = __kmp_omp_task(gtid, new_task, true);
1651 
1652   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1653                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1654                 gtid, loc_ref, new_taskdata));
1655 #if OMPT_SUPPORT
1656   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1657     parent->ompt_task_info.frame.enter_frame = NULL;
1658   }
1659 #endif
1660   return res;
1661 }
1662 
1663 template <bool ompt>
1664 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1665                                               void *frame_address,
1666                                               void *return_address) {
1667   kmp_taskdata_t *taskdata;
1668   kmp_info_t *thread;
1669   int thread_finished = FALSE;
1670   KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
1671 
1672   KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
1673 
1674   if (__kmp_tasking_mode != tskm_immediate_exec) {
1675     thread = __kmp_threads[gtid];
1676     taskdata = thread->th.th_current_task;
1677 
1678 #if OMPT_SUPPORT && OMPT_OPTIONAL
1679     ompt_data_t *my_task_data;
1680     ompt_data_t *my_parallel_data;
1681 
1682     if (ompt) {
1683       my_task_data = &(taskdata->ompt_task_info.task_data);
1684       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1685 
1686       taskdata->ompt_task_info.frame.enter_frame = frame_address;
1687 
1688       if (ompt_enabled.ompt_callback_sync_region) {
1689         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1690             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1691             my_task_data, return_address);
1692       }
1693 
1694       if (ompt_enabled.ompt_callback_sync_region_wait) {
1695         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1696             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1697             my_task_data, return_address);
1698       }
1699     }
1700 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1701 
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1704 #if USE_ITT_BUILD
1705 // Note: These values are used by ITT events as well.
1706 #endif /* USE_ITT_BUILD */
1707     taskdata->td_taskwait_counter += 1;
1708     taskdata->td_taskwait_ident = loc_ref;
1709     taskdata->td_taskwait_thread = gtid + 1;
1710 
1711 #if USE_ITT_BUILD
1712     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1713     if (itt_sync_obj != NULL)
1714       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1715 #endif /* USE_ITT_BUILD */
1716 
1717     bool must_wait =
1718         !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
1719 
1720 #if OMP_45_ENABLED
1721     must_wait = must_wait || (thread->th.th_task_team != NULL &&
1722                               thread->th.th_task_team->tt.tt_found_proxy_tasks);
1723 #endif
1724     if (must_wait) {
1725       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
1726                              &(taskdata->td_incomplete_child_tasks)),
1727                        0U);
1728       while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
1729         flag.execute_tasks(thread, gtid, FALSE,
1730                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1731                            __kmp_task_stealing_constraint);
1732       }
1733     }
1734 #if USE_ITT_BUILD
1735     if (itt_sync_obj != NULL)
1736       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1737 #endif /* USE_ITT_BUILD */
1738 
    // Debugger: The taskwait is completed. Location remains, but the thread
    // ID is negated.
1741     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1742 
1743 #if OMPT_SUPPORT && OMPT_OPTIONAL
1744     if (ompt) {
1745       if (ompt_enabled.ompt_callback_sync_region_wait) {
1746         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1747             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1748             my_task_data, return_address);
1749       }
1750       if (ompt_enabled.ompt_callback_sync_region) {
1751         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1752             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1753             my_task_data, return_address);
1754       }
1755       taskdata->ompt_task_info.frame.enter_frame = NULL;
1756     }
1757 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1758 
1759     ANNOTATE_HAPPENS_AFTER(taskdata);
1760   }
1761 
1762   KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1763                 "returning TASK_CURRENT_NOT_QUEUED\n",
1764                 gtid, taskdata));
1765 
1766   return TASK_CURRENT_NOT_QUEUED;
1767 }
1768 
1769 #if OMPT_SUPPORT
1770 OMPT_NOINLINE
1771 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1772                                           void *frame_address,
1773                                           void *return_address) {
1774   return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1775                                             return_address);
1776 }
1777 #endif // OMPT_SUPPORT
1778 
1779 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1780 // complete
1781 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1782 #if OMPT_SUPPORT && OMPT_OPTIONAL
1783   if (UNLIKELY(ompt_enabled.enabled)) {
1784     OMPT_STORE_RETURN_ADDRESS(gtid);
1785     return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(1),
1786                                     OMPT_LOAD_RETURN_ADDRESS(gtid));
1787   }
1788 #endif
1789   return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
1790 }
1791 
1792 // __kmpc_omp_taskyield: switch to a different task
1793 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
1794   kmp_taskdata_t *taskdata;
1795   kmp_info_t *thread;
1796   int thread_finished = FALSE;
1797 
1798   KMP_COUNT_BLOCK(OMP_TASKYIELD);
1799   KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
1800 
1801   KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
1802                 gtid, loc_ref, end_part));
1803 
1804   if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
1805     thread = __kmp_threads[gtid];
1806     taskdata = thread->th.th_current_task;
1807 // Should we model this as a task wait or not?
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1810 #if USE_ITT_BUILD
1811 // Note: These values are used by ITT events as well.
1812 #endif /* USE_ITT_BUILD */
1813     taskdata->td_taskwait_counter += 1;
1814     taskdata->td_taskwait_ident = loc_ref;
1815     taskdata->td_taskwait_thread = gtid + 1;
1816 
1817 #if USE_ITT_BUILD
1818     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1819     if (itt_sync_obj != NULL)
1820       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1821 #endif /* USE_ITT_BUILD */
1822     if (!taskdata->td_flags.team_serial) {
1823       kmp_task_team_t *task_team = thread->th.th_task_team;
1824       if (task_team != NULL) {
1825         if (KMP_TASKING_ENABLED(task_team)) {
1826 #if OMPT_SUPPORT
1827           if (UNLIKELY(ompt_enabled.enabled))
1828             thread->th.ompt_thread_info.ompt_task_yielded = 1;
1829 #endif
1830           __kmp_execute_tasks_32(
1831               thread, gtid, NULL, FALSE,
1832               &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1833               __kmp_task_stealing_constraint);
1834 #if OMPT_SUPPORT
1835           if (UNLIKELY(ompt_enabled.enabled))
1836             thread->th.ompt_thread_info.ompt_task_yielded = 0;
1837 #endif
1838         }
1839       }
1840     }
1841 #if USE_ITT_BUILD
1842     if (itt_sync_obj != NULL)
1843       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1844 #endif /* USE_ITT_BUILD */
1845 
    // Debugger: The taskwait is completed. Location remains, but the thread
    // ID is negated.
1848     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1849   }
1850 
1851   KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
1852                 "returning TASK_CURRENT_NOT_QUEUED\n",
1853                 gtid, taskdata));
1854 
1855   return TASK_CURRENT_NOT_QUEUED;
1856 }
1857 
1858 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
1859 #if OMP_45_ENABLED
1860 // Task Reduction implementation
1861 
1862 typedef struct kmp_task_red_flags {
1863   unsigned lazy_priv : 1; // hint: (1) use lazy allocation (big objects)
1864   unsigned reserved31 : 31;
1865 } kmp_task_red_flags_t;
1866 
1867 // internal structure for reduction data item related info
1868 typedef struct kmp_task_red_data {
1869   void *reduce_shar; // shared reduction item
1870   size_t reduce_size; // size of data item
1871   void *reduce_priv; // thread specific data
1872   void *reduce_pend; // end of private data for comparison op
1873   void *reduce_init; // data initialization routine
1874   void *reduce_fini; // data finalization routine
1875   void *reduce_comb; // data combiner routine
1876   kmp_task_red_flags_t flags; // flags for additional info from compiler
1877 } kmp_task_red_data_t;
1878 
// structure sent to us by the compiler - one per reduction item
1880 typedef struct kmp_task_red_input {
1881   void *reduce_shar; // shared reduction item
1882   size_t reduce_size; // size of data item
1883   void *reduce_init; // data initialization routine
1884   void *reduce_fini; // data finalization routine
1885   void *reduce_comb; // data combiner routine
1886   kmp_task_red_flags_t flags; // flags for additional info from compiler
1887 } kmp_task_red_input_t;
1888 
1889 /*!
1890 @ingroup TASKING
1891 @param gtid      Global thread ID
1892 @param num       Number of data items to reduce
1893 @param data      Array of data for reduction
1894 @return The taskgroup identifier
1895 
1896 Initialize task reduction for the taskgroup.
1897 */
1898 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
1899   kmp_info_t *thread = __kmp_threads[gtid];
1900   kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
1901   kmp_int32 nth = thread->th.th_team_nproc;
1902   kmp_task_red_input_t *input = (kmp_task_red_input_t *)data;
1903   kmp_task_red_data_t *arr;
1904 
1905   // check input data just in case
1906   KMP_ASSERT(tg != NULL);
1907   KMP_ASSERT(data != NULL);
1908   KMP_ASSERT(num > 0);
1909   if (nth == 1) {
1910     KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
1911                   gtid, tg));
1912     return (void *)tg;
1913   }
1914   KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
1915                 gtid, tg, num));
1916   arr = (kmp_task_red_data_t *)__kmp_thread_malloc(
1917       thread, num * sizeof(kmp_task_red_data_t));
1918   for (int i = 0; i < num; ++i) {
1919     void (*f_init)(void *) = (void (*)(void *))(input[i].reduce_init);
1920     size_t size = input[i].reduce_size - 1;
1921     // round the size up to cache line per thread-specific item
1922     size += CACHE_LINE - size % CACHE_LINE;
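    // E.g. with reduce_size == 40 and CACHE_LINE == 64: size starts at 39,
    // 39 % 64 == 39, so size becomes 39 + (64 - 39) == 64; an item that is
    // already a multiple (reduce_size == 64) stays at 64 rather than doubling.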
1923     KMP_ASSERT(input[i].reduce_comb != NULL); // combiner is mandatory
1924     arr[i].reduce_shar = input[i].reduce_shar;
1925     arr[i].reduce_size = size;
1926     arr[i].reduce_init = input[i].reduce_init;
1927     arr[i].reduce_fini = input[i].reduce_fini;
1928     arr[i].reduce_comb = input[i].reduce_comb;
1929     arr[i].flags = input[i].flags;
1930     if (!input[i].flags.lazy_priv) {
1931       // allocate cache-line aligned block and fill it with zeros
1932       arr[i].reduce_priv = __kmp_allocate(nth * size);
1933       arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
1934       if (f_init != NULL) {
1935         // initialize thread-specific items
1936         for (int j = 0; j < nth; ++j) {
1937           f_init((char *)(arr[i].reduce_priv) + j * size);
1938         }
1939       }
1940     } else {
1941       // only allocate space for pointers now,
1942       // objects will be lazily allocated/initialized once requested
1943       arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
1944     }
1945   }
1946   tg->reduce_data = (void *)arr;
1947   tg->reduce_num_data = num;
1948   return (void *)tg;
1949 }
1950 
1951 /*!
1952 @ingroup TASKING
1953 @param gtid    Global thread ID
1954 @param tskgrp  The taskgroup ID (optional)
1955 @param data    Shared location of the item
1956 @return The pointer to per-thread data
1957 
1958 Get thread-specific location of data item
1959 */
1960 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
1961   kmp_info_t *thread = __kmp_threads[gtid];
1962   kmp_int32 nth = thread->th.th_team_nproc;
1963   if (nth == 1)
1964     return data; // nothing to do
1965 
1966   kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
1967   if (tg == NULL)
1968     tg = thread->th.th_current_task->td_taskgroup;
1969   KMP_ASSERT(tg != NULL);
1970   kmp_task_red_data_t *arr = (kmp_task_red_data_t *)(tg->reduce_data);
1971   kmp_int32 num = tg->reduce_num_data;
1972   kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1973 
1974   KMP_ASSERT(data != NULL);
1975   while (tg != NULL) {
1976     for (int i = 0; i < num; ++i) {
1977       if (!arr[i].flags.lazy_priv) {
1978         if (data == arr[i].reduce_shar ||
1979             (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
1980           return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
1981       } else {
1982         // check shared location first
1983         void **p_priv = (void **)(arr[i].reduce_priv);
1984         if (data == arr[i].reduce_shar)
1985           goto found;
        // check if a thread-specific location was passed as the parameter
1987         for (int j = 0; j < nth; ++j)
1988           if (data == p_priv[j])
1989             goto found;
1990         continue; // not found, continue search
1991       found:
1992         if (p_priv[tid] == NULL) {
1993           // allocate thread specific object lazily
1994           void (*f_init)(void *) = (void (*)(void *))(arr[i].reduce_init);
1995           p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
1996           if (f_init != NULL) {
1997             f_init(p_priv[tid]);
1998           }
1999         }
2000         return p_priv[tid];
2001       }
2002     }
2003     tg = tg->parent;
2004     arr = (kmp_task_red_data_t *)(tg->reduce_data);
2005     num = tg->reduce_num_data;
2006   }
2007   KMP_ASSERT2(0, "Unknown task reduction item");
2008   return NULL; // ERROR, this line never executed
2009 }
2010 
2011 // Finalize task reduction.
2012 // Called from __kmpc_end_taskgroup()
2013 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2014   kmp_int32 nth = th->th.th_team_nproc;
2015   KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
2016   kmp_task_red_data_t *arr = (kmp_task_red_data_t *)tg->reduce_data;
2017   kmp_int32 num = tg->reduce_num_data;
2018   for (int i = 0; i < num; ++i) {
2019     void *sh_data = arr[i].reduce_shar;
2020     void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2021     void (*f_comb)(void *, void *) =
2022         (void (*)(void *, void *))(arr[i].reduce_comb);
2023     if (!arr[i].flags.lazy_priv) {
2024       void *pr_data = arr[i].reduce_priv;
2025       size_t size = arr[i].reduce_size;
2026       for (int j = 0; j < nth; ++j) {
2027         void *priv_data = (char *)pr_data + j * size;
2028         f_comb(sh_data, priv_data); // combine results
2029         if (f_fini)
2030           f_fini(priv_data); // finalize if needed
2031       }
2032     } else {
2033       void **pr_data = (void **)(arr[i].reduce_priv);
2034       for (int j = 0; j < nth; ++j) {
2035         if (pr_data[j] != NULL) {
2036           f_comb(sh_data, pr_data[j]); // combine results
2037           if (f_fini)
2038             f_fini(pr_data[j]); // finalize if needed
2039           __kmp_free(pr_data[j]);
2040         }
2041       }
2042     }
2043     __kmp_free(arr[i].reduce_priv);
2044   }
2045   __kmp_thread_free(th, arr);
2046   tg->reduce_data = NULL;
2047   tg->reduce_num_data = 0;
2048 }
2049 #endif
2050 
2051 #if OMP_40_ENABLED
2052 // __kmpc_taskgroup: Start a new taskgroup
2053 void __kmpc_taskgroup(ident_t *loc, int gtid) {
2054   kmp_info_t *thread = __kmp_threads[gtid];
2055   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2056   kmp_taskgroup_t *tg_new =
2057       (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2058   KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2059   KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2060   KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2061   tg_new->parent = taskdata->td_taskgroup;
2062 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
2063 #if OMP_45_ENABLED
2064   tg_new->reduce_data = NULL;
2065   tg_new->reduce_num_data = 0;
2066 #endif
2067   taskdata->td_taskgroup = tg_new;
2068 
2069 #if OMPT_SUPPORT && OMPT_OPTIONAL
2070   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2071     void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2072     if (!codeptr)
2073       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2074     kmp_team_t *team = thread->th.th_team;
2075     ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2076     // FIXME: I think this is wrong for lwt!
2077     ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2078 
2079     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2080         ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2081         &(my_task_data), codeptr);
2082   }
2083 #endif
2084 }
2085 
2086 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2087 //                       and its descendants are complete
2088 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2089   kmp_info_t *thread = __kmp_threads[gtid];
2090   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2091   kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2092   int thread_finished = FALSE;
2093 
2094 #if OMPT_SUPPORT && OMPT_OPTIONAL
2095   kmp_team_t *team;
2096   ompt_data_t my_task_data;
2097   ompt_data_t my_parallel_data;
2098   void *codeptr;
2099   if (UNLIKELY(ompt_enabled.enabled)) {
2100     team = thread->th.th_team;
2101     my_task_data = taskdata->ompt_task_info.task_data;
2102     // FIXME: I think this is wrong for lwt!
2103     my_parallel_data = team->t.ompt_team_info.parallel_data;
2104     codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2105     if (!codeptr)
2106       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2107   }
2108 #endif
2109 
2110   KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2111   KMP_DEBUG_ASSERT(taskgroup != NULL);
2112   KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2113 
2114   if (__kmp_tasking_mode != tskm_immediate_exec) {
    // mark the task as waiting (not on a barrier)
2116     taskdata->td_taskwait_counter += 1;
2117     taskdata->td_taskwait_ident = loc;
2118     taskdata->td_taskwait_thread = gtid + 1;
2119 #if USE_ITT_BUILD
2120     // For ITT the taskgroup wait is similar to taskwait until we need to
2121     // distinguish them
2122     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
2123     if (itt_sync_obj != NULL)
2124       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
2125 #endif /* USE_ITT_BUILD */
2126 
2127 #if OMPT_SUPPORT && OMPT_OPTIONAL
2128     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2129       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2130           ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2131           &(my_task_data), codeptr);
2132     }
2133 #endif
2134 
2135 #if OMP_45_ENABLED
2136     if (!taskdata->td_flags.team_serial ||
2137         (thread->th.th_task_team != NULL &&
2138          thread->th.th_task_team->tt.tt_found_proxy_tasks))
2139 #else
2140     if (!taskdata->td_flags.team_serial)
2141 #endif
2142     {
2143       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)),
2144                        0U);
2145       while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
2146         flag.execute_tasks(thread, gtid, FALSE,
2147                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2148                            __kmp_task_stealing_constraint);
2149       }
2150     }
2151     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2152 
2153 #if OMPT_SUPPORT && OMPT_OPTIONAL
2154     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2155       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2156           ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2157           &(my_task_data), codeptr);
2158     }
2159 #endif
2160 
2161 #if USE_ITT_BUILD
2162     if (itt_sync_obj != NULL)
2163       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
2164 #endif /* USE_ITT_BUILD */
2165   }
2166   KMP_DEBUG_ASSERT(taskgroup->count == 0);
2167 
2168 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
2169 #if OMP_45_ENABLED
2170   if (taskgroup->reduce_data != NULL) // need to reduce?
2171     __kmp_task_reduction_fini(thread, taskgroup);
2172 #endif
2173   // Restore parent taskgroup for the current task
2174   taskdata->td_taskgroup = taskgroup->parent;
2175   __kmp_thread_free(thread, taskgroup);
2176 
2177   KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2178                 gtid, taskdata));
2179   ANNOTATE_HAPPENS_AFTER(taskdata);
2180 
2181 #if OMPT_SUPPORT && OMPT_OPTIONAL
2182   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2183     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2184         ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2185         &(my_task_data), codeptr);
2186   }
2187 #endif
2188 }
2189 #endif
2190 
2191 // __kmp_remove_my_task: remove a task from my own deque
2192 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2193                                         kmp_task_team_t *task_team,
2194                                         kmp_int32 is_constrained) {
2195   kmp_task_t *task;
2196   kmp_taskdata_t *taskdata;
2197   kmp_thread_data_t *thread_data;
2198   kmp_uint32 tail;
2199 
2200   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2201   KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2202                    NULL); // Caller should check this condition
2203 
2204   thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2205 
2206   KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2207                 gtid, thread_data->td.td_deque_ntasks,
2208                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2209 
2210   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2211     KA_TRACE(10,
2212              ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2213               "ntasks=%d head=%u tail=%u\n",
2214               gtid, thread_data->td.td_deque_ntasks,
2215               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2216     return NULL;
2217   }
2218 
2219   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2220 
2221   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2222     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2223     KA_TRACE(10,
2224              ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2225               "ntasks=%d head=%u tail=%u\n",
2226               gtid, thread_data->td.td_deque_ntasks,
2227               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2228     return NULL;
2229   }
2230 
2231   tail = (thread_data->td.td_deque_tail - 1) &
2232          TASK_DEQUE_MASK(thread_data->td); // Wrap index.
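  // E.g. with a deque of 256 slots the mask is 0xFF, so a tail of 0 wraps
  // around to 255 (the last slot); any other value simply decrements by one.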
2233   taskdata = thread_data->td.td_deque[tail];
2234 
2235   if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
    // we need to check if the candidate obeys the task scheduling constraint
    // (TSC): only a descendant of all deferred tied tasks can be scheduled;
    // checking the last one is enough, as it in turn is a descendant of all
    // the others
2239     kmp_taskdata_t *current = thread->th.th_current_task->td_last_tied;
2240     KMP_DEBUG_ASSERT(current != NULL);
2241     // check if last tied task is not suspended on barrier
2242     if (current->td_flags.tasktype == TASK_EXPLICIT ||
2243         current->td_taskwait_thread > 0) { // <= 0 on barrier
2244       kmp_int32 level = current->td_level;
2245       kmp_taskdata_t *parent = taskdata->td_parent;
2246       while (parent != current && parent->td_level > level) {
2247         parent = parent->td_parent; // check generation up to the level of the
2248         // current task
2249         KMP_DEBUG_ASSERT(parent != NULL);
2250       }
2251       if (parent != current) {
        // The TSC does not allow stealing the victim task
2253         __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2254         KA_TRACE(10, ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2255                       "ntasks=%d head=%u tail=%u\n",
2256                       gtid, thread_data->td.td_deque_ntasks,
2257                       thread_data->td.td_deque_head,
2258                       thread_data->td.td_deque_tail));
2259         return NULL;
2260       }
2261     }
2262   }
2263 
2264   thread_data->td.td_deque_tail = tail;
2265   TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2266 
2267   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2268 
2269   KA_TRACE(10, ("__kmp_remove_my_task(exit #2): T#%d task %p removed: "
2270                 "ntasks=%d head=%u tail=%u\n",
2271                 gtid, taskdata, thread_data->td.td_deque_ntasks,
2272                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2273 
2274   task = KMP_TASKDATA_TO_TASK(taskdata);
2275   return task;
2276 }
2277 
2278 // __kmp_steal_task: remove a task from another thread's deque
2279 // Assume that calling thread has already checked existence of
2280 // task_team thread_data before calling this routine.
2281 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
2282                                     kmp_task_team_t *task_team,
2283                                     std::atomic<kmp_int32> *unfinished_threads,
2284                                     int *thread_finished,
2285                                     kmp_int32 is_constrained) {
2286   kmp_task_t *task;
2287   kmp_taskdata_t *taskdata;
2288   kmp_taskdata_t *current;
2289   kmp_thread_data_t *victim_td, *threads_data;
2290   kmp_int32 level, target;
2291   kmp_int32 victim_tid;
2292 
2293   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2294 
2295   threads_data = task_team->tt.tt_threads_data;
2296   KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
2297 
2298   victim_tid = victim_thr->th.th_info.ds.ds_tid;
2299   victim_td = &threads_data[victim_tid];
2300 
2301   KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
2302                 "task_team=%p ntasks=%d head=%u tail=%u\n",
2303                 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2304                 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2305                 victim_td->td.td_deque_tail));
2306 
2307   if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2308     KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
2309                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2310                   gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2311                   victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2312                   victim_td->td.td_deque_tail));
2313     return NULL;
2314   }
2315 
2316   __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2317 
2318   int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2319   // Check again after we acquire the lock
2320   if (ntasks == 0) {
2321     __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2322     KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
2323                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2324                   gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2325                   victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2326     return NULL;
2327   }
2328 
2329   KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2330 
2331   taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2332   if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
    // we need to check if the candidate obeys the task scheduling constraint
    // (TSC): only a descendant of all deferred tied tasks can be scheduled;
    // checking the last one is enough, as it in turn is a descendant of all
    // the others
2336     current = __kmp_threads[gtid]->th.th_current_task->td_last_tied;
2337     KMP_DEBUG_ASSERT(current != NULL);
2338     // check if last tied task is not suspended on barrier
2339     if (current->td_flags.tasktype == TASK_EXPLICIT ||
2340         current->td_taskwait_thread > 0) { // <= 0 on barrier
2341       level = current->td_level;
2342       kmp_taskdata_t *parent = taskdata->td_parent;
2343       while (parent != current && parent->td_level > level) {
2344         parent = parent->td_parent; // check generation up to the level of the
2345         // current task
2346         KMP_DEBUG_ASSERT(parent != NULL);
2347       }
2348       if (parent != current) {
2349         if (!task_team->tt.tt_untied_task_encountered) {
          // The TSC does not allow stealing the victim task
2351           __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2352           KA_TRACE(10,
2353                    ("__kmp_steal_task(exit #3): T#%d could not steal from "
2354                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2355                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2356                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2357           return NULL;
2358         }
2359         taskdata = NULL; // will check other tasks in victim's deque
2360       }
2361     }
2362   }
2363   if (taskdata != NULL) {
2364     // Bump head pointer and Wrap.
2365     victim_td->td.td_deque_head =
2366         (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2367   } else {
2368     int i;
2369     // walk through victim's deque trying to steal any task
2370     target = victim_td->td.td_deque_head;
2371     for (i = 1; i < ntasks; ++i) {
2372       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2373       taskdata = victim_td->td.td_deque[target];
2374       if (taskdata->td_flags.tiedness == TASK_TIED) {
2375         // check if the candidate obeys the TSC
2376         kmp_taskdata_t *parent = taskdata->td_parent;
2377         // check generation up to the level of the current task
2378         while (parent != current && parent->td_level > level) {
2379           parent = parent->td_parent;
2380           KMP_DEBUG_ASSERT(parent != NULL);
2381         }
2382         if (parent != current) {
          // The TSC does not allow stealing the candidate
2384           taskdata = NULL;
2385           continue;
2386         } else {
2387           // found victim tied task
2388           break;
2389         }
2390       } else {
2391         // found victim untied task
2392         break;
2393       }
2394     }
2395     if (taskdata == NULL) {
2396       // No appropriate candidate to steal found
2397       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2398       KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2399                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2400                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2401                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2402       return NULL;
2403     }
2404     int prev = target;
2405     for (i = i + 1; i < ntasks; ++i) {
2406       // shift remaining tasks in the deque left by 1
2407       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2408       victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2409       prev = target;
2410     }
2411     KMP_DEBUG_ASSERT(
2412         victim_td->td.td_deque_tail ==
2413         (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
    victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
2415   }
2416   if (*thread_finished) {
2417     // We need to un-mark this victim as a finished victim.  This must be done
2418     // before releasing the lock, or else other threads (starting with the
2419     // master victim) might be prematurely released from the barrier!!!
2420     kmp_int32 count;
2421 
2422     count = KMP_ATOMIC_INC(unfinished_threads);
2423 
2424     KA_TRACE(
2425         20,
2426         ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2427          gtid, count + 1, task_team));
2428 
2429     *thread_finished = FALSE;
2430   }
2431   TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2432 
2433   __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2434 
2435   KMP_COUNT_BLOCK(TASK_stolen);
2436   KA_TRACE(10,
2437            ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2438             "task_team=%p ntasks=%d head=%u tail=%u\n",
2439             gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2440             ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2441 
2442   task = KMP_TASKDATA_TO_TASK(taskdata);
2443   return task;
2444 }
2445 
2446 // __kmp_execute_tasks_template: Choose and execute tasks until either the
// condition is satisfied (return true) or there are none left (return false).
2448 //
2449 // final_spin is TRUE if this is the spin at the release barrier.
2450 // thread_finished indicates whether the thread is finished executing all
2451 // the tasks it has on its deque, and is at the release barrier.
2452 // spinner is the location on which to spin.
2453 // spinner == NULL means only execute a single task and return.
2454 // checker is the value to check to terminate the spin.
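//
// In outline, each pass of the routine (1) pops from the calling thread's own
// deque, (2) otherwise tries to steal from the last successful victim or from
// a randomly chosen thread, (3) executes whatever task it found, and (4) on
// the final spin decrements the unfinished-thread count and re-checks the
// termination condition before giving up.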
2455 template <class C>
2456 static inline int __kmp_execute_tasks_template(
2457     kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2458     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2459     kmp_int32 is_constrained) {
2460   kmp_task_team_t *task_team = thread->th.th_task_team;
2461   kmp_thread_data_t *threads_data;
2462   kmp_task_t *task;
2463   kmp_info_t *other_thread;
2464   kmp_taskdata_t *current_task = thread->th.th_current_task;
2465   std::atomic<kmp_int32> *unfinished_threads;
2466   kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2467                       tid = thread->th.th_info.ds.ds_tid;
2468 
2469   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2470   KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
2471 
2472   if (task_team == NULL || current_task == NULL)
2473     return FALSE;
2474 
2475   KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2476                 "*thread_finished=%d\n",
2477                 gtid, final_spin, *thread_finished));
2478 
2479   thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2480   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2481   KMP_DEBUG_ASSERT(threads_data != NULL);
2482 
2483   nthreads = task_team->tt.tt_nproc;
2484   unfinished_threads = &(task_team->tt.tt_unfinished_threads);
2485 #if OMP_45_ENABLED
2486   KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
2487 #else
2488   KMP_DEBUG_ASSERT(nthreads > 1);
2489 #endif
2490   KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
2491 
2492   while (1) { // Outer loop keeps trying to find tasks in case of single thread
2493     // getting tasks from target constructs
2494     while (1) { // Inner loop to find a task and execute it
2495       task = NULL;
2496       if (use_own_tasks) { // check on own queue first
2497         task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2498       }
2499       if ((task == NULL) && (nthreads > 1)) { // Steal a task
2500         int asleep = 1;
2501         use_own_tasks = 0;
2502         // Try to steal from the last place I stole from successfully.
2503         if (victim_tid == -2) { // haven't stolen anything yet
2504           victim_tid = threads_data[tid].td.td_deque_last_stolen;
2505           if (victim_tid !=
2506               -1) // if we have a last stolen from victim, get the thread
2507             other_thread = threads_data[victim_tid].td.td_thr;
2508         }
2509         if (victim_tid != -1) { // found last victim
2510           asleep = 0;
2511         } else if (!new_victim) { // no recent steals and we haven't already
2512           // used a new victim; select a random thread
2513           do { // Find a different thread to steal work from.
2514             // Pick a random thread. Initial plan was to cycle through all the
2515             // threads, and only return if we tried to steal from every thread,
2516             // and failed.  Arch says that's not such a great idea.
2517             victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2518             if (victim_tid >= tid) {
2519               ++victim_tid; // Adjusts random distribution to exclude self
2520             }
2521             // Found a potential victim
2522             other_thread = threads_data[victim_tid].td.td_thr;
            // There is a slight chance that __kmp_enable_tasking() did not
            // wake up all threads waiting at the barrier. If the victim is
            // sleeping, wake it up. Since we were going to pay the cache-miss
            // penalty for referencing another thread's kmp_info_t struct
            // anyway, the check shouldn't cost too much performance at this
            // point. In extra barrier mode, tasks do not sleep at the separate
            // tasking barrier, so this isn't a problem.
2531             asleep = 0;
2532             if ((__kmp_tasking_mode == tskm_task_teams) &&
2533                 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2534                 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2535                  NULL)) {
2536               asleep = 1;
2537               __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2538                                         other_thread->th.th_sleep_loc);
              // A sleeping thread should not have any tasks on its queue.
2540               // There is a slight possibility that it resumes, steals a task
2541               // from another thread, which spawns more tasks, all in the time
2542               // that it takes this thread to check => don't write an assertion
2543               // that the victim's queue is empty.  Try stealing from a
2544               // different thread.
2545             }
2546           } while (asleep);
2547         }
2548 
2549         if (!asleep) {
2550           // We have a victim to try to steal from
2551           task = __kmp_steal_task(other_thread, gtid, task_team,
2552                                   unfinished_threads, thread_finished,
2553                                   is_constrained);
2554         }
2555         if (task != NULL) { // set last stolen to victim
2556           if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2557             threads_data[tid].td.td_deque_last_stolen = victim_tid;
            // The pre-refactored code did not try more than 1 successful new
            // victim, unless the last one generated more local tasks;
            // new_victim keeps track of this
2561             new_victim = 1;
2562           }
2563         } else { // No tasks found; unset last_stolen
2564           KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2565           victim_tid = -2; // no successful victim found
2566         }
2567       }
2568 
2569       if (task == NULL) // break out of tasking loop
2570         break;
2571 
2572 // Found a task; execute it
2573 #if USE_ITT_BUILD && USE_ITT_NOTIFY
2574       if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2575         if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2576           // get the object reliably
2577           itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2578         }
2579         __kmp_itt_task_starting(itt_sync_obj);
2580       }
2581 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
2582       __kmp_invoke_task(gtid, task, current_task);
2583 #if USE_ITT_BUILD
2584       if (itt_sync_obj != NULL)
2585         __kmp_itt_task_finished(itt_sync_obj);
2586 #endif /* USE_ITT_BUILD */
2587       // If this thread is only partway through the barrier and the condition is
2588       // met, then return now, so that the barrier gather/release pattern can
2589       // proceed. If this thread is in the last spin loop in the barrier,
2590       // waiting to be released, we know that the termination condition will not
      // be satisfied, so don't waste any cycles checking it.
2592       if (flag == NULL || (!final_spin && flag->done_check())) {
2593         KA_TRACE(
2594             15,
2595             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2596              gtid));
2597         return TRUE;
2598       }
2599       if (thread->th.th_task_team == NULL) {
2600         break;
2601       }
2602       // Yield before executing next task
2603       KMP_YIELD(__kmp_library == library_throughput);
2604       // If execution of a stolen task results in more tasks being placed on our
2605       // run queue, reset use_own_tasks
2606       if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
2607         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
2608                       "other tasks, restart\n",
2609                       gtid));
2610         use_own_tasks = 1;
2611         new_victim = 0;
2612       }
2613     }
2614 
// The task source has been exhausted. If in the final spin loop of the
// barrier, check whether the termination condition is satisfied.
2617 #if OMP_45_ENABLED
2618     // The work queue may be empty but there might be proxy tasks still
2619     // executing
2620     if (final_spin &&
2621         KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0)
2622 #else
2623     if (final_spin)
2624 #endif
2625     {
2626       // First, decrement the #unfinished threads, if that has not already been
2627       // done.  This decrement might be to the spin location, and result in the
2628       // termination condition being satisfied.
2629       if (!*thread_finished) {
2630         kmp_int32 count;
2631 
2632         count = KMP_ATOMIC_DEC(unfinished_threads) - 1;
2633         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
2634                       "unfinished_threads to %d task_team=%p\n",
2635                       gtid, count, task_team));
2636         *thread_finished = TRUE;
2637       }
2638 
2639       // It is now unsafe to reference thread->th.th_team !!!
2640       // Decrementing task_team->tt.tt_unfinished_threads can allow the master
2641       // thread to pass through the barrier, where it might reset each thread's
2642       // th.th_team field for the next parallel region. If we can steal more
2643       // work, we know that this has not happened yet.
2644       if (flag != NULL && flag->done_check()) {
2645         KA_TRACE(
2646             15,
2647             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2648              gtid));
2649         return TRUE;
2650       }
2651     }
2652 
2653     // If this thread's task team is NULL, master has recognized that there are
2654     // no more tasks; bail out
2655     if (thread->th.th_task_team == NULL) {
2656       KA_TRACE(15,
2657                ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
2658       return FALSE;
2659     }
2660 
2661 #if OMP_45_ENABLED
2662     // We could be getting tasks from target constructs; if this is the only
2663     // thread, keep trying to execute tasks from its own queue
2664     if (nthreads == 1)
2665       use_own_tasks = 1;
2666     else
2667 #endif
2668     {
2669       KA_TRACE(15,
2670                ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
2671       return FALSE;
2672     }
2673   }
2674 }
2675 
2676 int __kmp_execute_tasks_32(
2677     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin,
2678     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2679     kmp_int32 is_constrained) {
2680   return __kmp_execute_tasks_template(
2681       thread, gtid, flag, final_spin,
2682       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2683 }
2684 
2685 int __kmp_execute_tasks_64(
2686     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin,
2687     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2688     kmp_int32 is_constrained) {
2689   return __kmp_execute_tasks_template(
2690       thread, gtid, flag, final_spin,
2691       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2692 }
2693 
2694 int __kmp_execute_tasks_oncore(
2695     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
2696     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2697     kmp_int32 is_constrained) {
2698   return __kmp_execute_tasks_template(
2699       thread, gtid, flag, final_spin,
2700       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2701 }
2702 
2703 // __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
2704 // next barrier so they can assist in executing enqueued tasks.
2705 // First thread in allocates the task team atomically.
2706 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
2707                                  kmp_info_t *this_thr) {
2708   kmp_thread_data_t *threads_data;
2709   int nthreads, i, is_init_thread;
2710 
2711   KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
2712                 __kmp_gtid_from_thread(this_thr)));
2713 
2714   KMP_DEBUG_ASSERT(task_team != NULL);
2715   KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
2716 
2717   nthreads = task_team->tt.tt_nproc;
2718   KMP_DEBUG_ASSERT(nthreads > 0);
2719   KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
2720 
2721   // Allocate or increase the size of threads_data if necessary
2722   is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
2723 
2724   if (!is_init_thread) {
2725     // Some other thread already set up the array.
2726     KA_TRACE(
2727         20,
2728         ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
2729          __kmp_gtid_from_thread(this_thr)));
2730     return;
2731   }
2732   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2733   KMP_DEBUG_ASSERT(threads_data != NULL);
2734 
2735   if ((__kmp_tasking_mode == tskm_task_teams) &&
2736       (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
2737     // Release any threads sleeping at the barrier, so that they can steal
2738     // tasks and execute them.  In extra barrier mode, tasks do not sleep
2739     // at the separate tasking barrier, so this isn't a problem.
2740     for (i = 0; i < nthreads; i++) {
2741       volatile void *sleep_loc;
2742       kmp_info_t *thread = threads_data[i].td.td_thr;
2743 
2744       if (i == this_thr->th.th_info.ds.ds_tid) {
2745         continue;
2746       }
2747       // Since we haven't locked the thread's suspend mutex lock at this
2748       // point, there is a small window where a thread might be putting
2749       // itself to sleep, but hasn't set the th_sleep_loc field yet.
2750       // To work around this, __kmp_execute_tasks_template() periodically checks
2751       // to see if other threads are sleeping (using the same random mechanism that
2752       // is used for task stealing) and awakens them if they are.
2753       if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
2754           NULL) {
2755         KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
2756                       __kmp_gtid_from_thread(this_thr),
2757                       __kmp_gtid_from_thread(thread)));
2758         __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
2759       } else {
2760         KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
2761                       __kmp_gtid_from_thread(this_thr),
2762                       __kmp_gtid_from_thread(thread)));
2763       }
2764     }
2765   }
2766 
2767   KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
2768                 __kmp_gtid_from_thread(this_thr)));
2769 }
2770 
2771 /* // TODO: Check the comment consistency
2772  * Utility routines for "task teams".  A task team (kmp_task_team_t) is kind
2773  * of like a shadow of the kmp_team_t data struct, with a different lifetime.
2774  * After a child thread checks into a barrier and calls __kmp_release() from
2775  * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
2776  * longer assume that the kmp_team_t structure is intact (at any moment, the
2777  * master thread may exit the barrier code and free the team data structure,
2778  * and return the threads to the thread pool).
2779  *
2780  * This does not work with the tasking code, as the thread is still
2781  * expected to participate in the execution of any tasks that may have been
2782  * spawned by a member of the team, and the thread still needs access to each
2783  * of the other threads in the team, so that it can steal work from them.
2784  *
2785  * Enter the kmp_task_team_t struct.  It employs a reference counting
2786  * mechanism, and is allocated by the master thread before calling
2787  * __kmp_<barrier_kind>_release, and then is released by the last thread to
2788  * exit __kmp_<barrier_kind>_release at the next barrier.  I.e. the lifetimes
2789  * of the kmp_task_team_t structs for consecutive barriers can overlap
2790  * (and will, unless the master thread is the last thread to exit the barrier
2791  * release phase, which is not typical).
2792  *
2793  * The existence of such a struct is useful outside the context of tasking,
2794  * but for now, I'm trying to keep it specific to the OMP_30_ENABLED macro,
2795  * so that any performance differences show up when comparing the 2.5 vs. 3.0
2796  * libraries.
2797  *
2798  * We currently use the existence of the threads array as an indicator that
2799  * tasks were spawned since the last barrier.  If the structure is to be
2800  * useful outside the context of tasking, then this will have to change, but
2801  * not setting the field minimizes the performance impact of tasking on
2802  * barriers, when no explicit tasks were spawned (pushed, actually).
2803  */
2804 
2805 static kmp_task_team_t *__kmp_free_task_teams =
2806     NULL; // Free list for task_team data structures
2807 // Lock for task team data structures
2808 kmp_bootstrap_lock_t __kmp_task_team_lock =
2809     KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
2810 
2811 // __kmp_alloc_task_deque:
2812 // Allocates a task deque for a particular thread, and initializes the necessary
2813 // data structures relating to the deque.  This only happens once per thread
2814 // per task team since task teams are recycled. No lock is needed during
2815 // allocation since each thread allocates its own deque.
2816 static void __kmp_alloc_task_deque(kmp_info_t *thread,
2817                                    kmp_thread_data_t *thread_data) {
2818   __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
2819   KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
2820 
2821   // Initialize last stolen task field to "none"
2822   thread_data->td.td_deque_last_stolen = -1;
2823 
2824   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
2825   KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
2826   KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
2827 
2828   KE_TRACE(
2829       10,
2830       ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
2831        __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
2832   // Allocate space for task deque, and zero the deque
2833   // Cannot use __kmp_thread_calloc() because threads not around for
2834   // kmp_reap_task_team( ).
2835   thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
2836       INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
2837   thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
2838 }
2839 
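// A quick illustration of the deque geometry set up above (a sketch only, not
// part of the runtime logic): the deque size is kept a power of two so that
// head/tail indices can wrap with TASK_DEQUE_MASK() instead of a modulo.
// Assuming a hypothetical initial size of 4, successive pushes would advance
// the tail as
//   tail = (tail + 1) & TASK_DEQUE_MASK(td);   // 0, 1, 2, 3, 0, 1, ...
// which is why every resize below doubles td_deque_size rather than growing
// it by an arbitrary amount.
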
2840 // __kmp_realloc_task_deque:
2841 // Re-allocates a task deque for a particular thread, copies the content from
2842 // the old deque and adjusts the necessary data structures relating to the
2843 // deque. This operation must be done with the deque_lock held.
2844 static void __kmp_realloc_task_deque(kmp_info_t *thread,
2845                                      kmp_thread_data_t *thread_data) {
2846   kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
2847   kmp_int32 new_size = 2 * size;
2848 
2849   KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
2850                 "%d] for thread_data %p\n",
2851                 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
2852 
2853   kmp_taskdata_t **new_deque =
2854       (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
2855 
2856   int i, j;
2857   for (i = thread_data->td.td_deque_head, j = 0; j < size;
2858        i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
2859     new_deque[j] = thread_data->td.td_deque[i];
2860 
2861   __kmp_free(thread_data->td.td_deque);
2862 
2863   thread_data->td.td_deque_head = 0;
2864   thread_data->td.td_deque_tail = size;
2865   thread_data->td.td_deque = new_deque;
2866   thread_data->td.td_deque_size = new_size;
2867 }
2868 
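// Worked example for the copy loop above (illustrative numbers only): with
// size = 4, td_deque_head = 2 and a full deque, the live entries sit at
// indices 2, 3, 0, 1. The loop visits them in that circular order and stores
// them at new_deque[0..3], so after the swap the contents are linear again:
// head = 0, tail = 4 (the old size), and four more slots are free before the
// next doubling is required.
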
2869 // __kmp_free_task_deque:
2870 // Deallocates a task deque for a particular thread. Happens at library
2871 // deallocation, so there is no need to reset all thread data fields.
2872 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
2873   if (thread_data->td.td_deque != NULL) {
2874     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2875     TCW_4(thread_data->td.td_deque_ntasks, 0);
2876     __kmp_free(thread_data->td.td_deque);
2877     thread_data->td.td_deque = NULL;
2878     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2879   }
2880 
2881 #ifdef BUILD_TIED_TASK_STACK
2882   // GEH: Figure out what to do here for td_susp_tied_tasks
2883   if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
2884     __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
2885   }
2886 #endif // BUILD_TIED_TASK_STACK
2887 }
2888 
2889 // __kmp_realloc_task_threads_data:
2890 // Allocates a threads_data array for a task team, either by allocating an
2891 // initial array or enlarging an existing array.  Only the first thread to get
2892 // the lock allocs or enlarges the array and re-initializes the array elements.
2893 // That thread returns "TRUE", the rest return "FALSE".
2894 // Assumes that the new array size is given by task_team -> tt.tt_nproc.
2895 // The current size is given by task_team -> tt.tt_max_threads.
2896 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
2897                                            kmp_task_team_t *task_team) {
2898   kmp_thread_data_t **threads_data_p;
2899   kmp_int32 nthreads, maxthreads;
2900   int is_init_thread = FALSE;
2901 
2902   if (TCR_4(task_team->tt.tt_found_tasks)) {
2903     // Already reallocated and initialized.
2904     return FALSE;
2905   }
2906 
2907   threads_data_p = &task_team->tt.tt_threads_data;
2908   nthreads = task_team->tt.tt_nproc;
2909   maxthreads = task_team->tt.tt_max_threads;
2910 
2911   // All threads must lock when they encounter the first task of the implicit
2912   // task region to make sure threads_data fields are (re)initialized before
2913   // used.
2914   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
2915 
2916   if (!TCR_4(task_team->tt.tt_found_tasks)) {
2917     // first thread to enable tasking
2918     kmp_team_t *team = thread->th.th_team;
2919     int i;
2920 
2921     is_init_thread = TRUE;
2922     if (maxthreads < nthreads) {
2923 
2924       if (*threads_data_p != NULL) {
2925         kmp_thread_data_t *old_data = *threads_data_p;
2926         kmp_thread_data_t *new_data = NULL;
2927 
2928         KE_TRACE(
2929             10,
2930             ("__kmp_realloc_task_threads_data: T#%d reallocating "
2931              "threads data for task_team %p, new_size = %d, old_size = %d\n",
2932              __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
2933         // Reallocate threads_data to have more elements than current array
2934         // Cannot use __kmp_thread_realloc() because threads not around for
2935         // kmp_reap_task_team( ).  Note all new array entries are initialized
2936         // to zero by __kmp_allocate().
2937         new_data = (kmp_thread_data_t *)__kmp_allocate(
2938             nthreads * sizeof(kmp_thread_data_t));
2939         // copy old data to new data
2940         KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
2941                      (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
2942 
2943 #ifdef BUILD_TIED_TASK_STACK
2944         // GEH: Figure out if this is the right thing to do
2945         for (i = maxthreads; i < nthreads; i++) {
2946           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2947           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2948         }
2949 #endif // BUILD_TIED_TASK_STACK
2950         // Install the new data and free the old data
2951         (*threads_data_p) = new_data;
2952         __kmp_free(old_data);
2953       } else {
2954         KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
2955                       "threads data for task_team %p, size = %d\n",
2956                       __kmp_gtid_from_thread(thread), task_team, nthreads));
2957         // Make the initial allocate for threads_data array, and zero entries
2958         // Cannot use __kmp_thread_calloc() because threads not around for
2959         // kmp_reap_task_team( ).
2960         ANNOTATE_IGNORE_WRITES_BEGIN();
2961         *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
2962             nthreads * sizeof(kmp_thread_data_t));
2963         ANNOTATE_IGNORE_WRITES_END();
2964 #ifdef BUILD_TIED_TASK_STACK
2965         // GEH: Figure out if this is the right thing to do
2966         for (i = 0; i < nthreads; i++) {
2967           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2968           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2969         }
2970 #endif // BUILD_TIED_TASK_STACK
2971       }
2972       task_team->tt.tt_max_threads = nthreads;
2973     } else {
2974       // If array has (more than) enough elements, go ahead and use it
2975       KMP_DEBUG_ASSERT(*threads_data_p != NULL);
2976     }
2977 
2978     // initialize threads_data pointers back to thread_info structures
2979     for (i = 0; i < nthreads; i++) {
2980       kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2981       thread_data->td.td_thr = team->t.t_threads[i];
2982 
2983       if (thread_data->td.td_deque_last_stolen >= nthreads) {
2984         // The last stolen field survives across teams / barrier, and the number
2985         // of threads may have changed.  It's possible (likely?) that a new
2986         // parallel region will exhibit the same behavior as the previous one.
2987         thread_data->td.td_deque_last_stolen = -1;
2988       }
2989     }
2990 
2991     KMP_MB();
2992     TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
2993   }
2994 
2995   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
2996   return is_init_thread;
2997 }
2998 
2999 // __kmp_free_task_threads_data:
3000 // Deallocates a threads_data array for a task team, including any attached
3001 // tasking deques.  Only occurs at library shutdown.
3002 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3003   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3004   if (task_team->tt.tt_threads_data != NULL) {
3005     int i;
3006     for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3007       __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3008     }
3009     __kmp_free(task_team->tt.tt_threads_data);
3010     task_team->tt.tt_threads_data = NULL;
3011   }
3012   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3013 }
3014 
3015 // __kmp_allocate_task_team:
3016 // Allocates a task team associated with a specific team, taking it from
3017 // the global task team free list if possible.  Also initializes data
3018 // structures.
3019 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3020                                                  kmp_team_t *team) {
3021   kmp_task_team_t *task_team = NULL;
3022   int nthreads;
3023 
3024   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3025                 (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3026 
3027   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3028     // Take a task team from the task team pool
3029     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3030     if (__kmp_free_task_teams != NULL) {
3031       task_team = __kmp_free_task_teams;
3032       TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3033       task_team->tt.tt_next = NULL;
3034     }
3035     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3036   }
3037 
3038   if (task_team == NULL) {
3039     KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3040                   "task team for team %p\n",
3041                   __kmp_gtid_from_thread(thread), team));
3042     // Allocate a new task team if one is not available.
3043     // Cannot use __kmp_thread_malloc() because threads not around for
3044     // kmp_reap_task_team( ).
3045     task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3046     __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3047     // AC: __kmp_allocate zeroes returned memory
3048     // task_team -> tt.tt_threads_data = NULL;
3049     // task_team -> tt.tt_max_threads = 0;
3050     // task_team -> tt.tt_next = NULL;
3051   }
3052 
3053   TCW_4(task_team->tt.tt_found_tasks, FALSE);
3054 #if OMP_45_ENABLED
3055   TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3056 #endif
3057   task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3058 
3059   KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3060   TCW_4(task_team->tt.tt_active, TRUE);
3061 
3062   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
3063                 "unfinished_threads init'd to %d\n",
3064                 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
3065                 KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
3066   return task_team;
3067 }
3068 
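// Note on the pool fast path above (descriptive only): the unsynchronized
// TCR_PTR(__kmp_free_task_teams) != NULL test merely filters out the common
// empty-pool case; the list is re-checked under __kmp_task_team_lock before a
// task team is popped, so a stale read can at worst cost one unnecessary lock
// acquisition and can never hand the same struct to two threads.
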
3069 // __kmp_free_task_team:
3070 // Frees the task team associated with a specific thread, and adds it
3071 // to the global task team free list.
3072 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
3073   KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
3074                 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
3075 
3076   // Put task team back on free list
3077   __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3078 
3079   KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3080   task_team->tt.tt_next = __kmp_free_task_teams;
3081   TCW_PTR(__kmp_free_task_teams, task_team);
3082 
3083   __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3084 }
3085 
3086 // __kmp_reap_task_teams:
3087 // Free all the task teams on the task team free list.
3088 // Should only be done during library shutdown.
3089 // Cannot do anything that needs a thread structure or gtid since they are
3090 // already gone.
3091 void __kmp_reap_task_teams(void) {
3092   kmp_task_team_t *task_team;
3093 
3094   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3095     // Free all task_teams on the free list
3096     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3097     while ((task_team = __kmp_free_task_teams) != NULL) {
3098       __kmp_free_task_teams = task_team->tt.tt_next;
3099       task_team->tt.tt_next = NULL;
3100 
3101       // Free threads_data if necessary
3102       if (task_team->tt.tt_threads_data != NULL) {
3103         __kmp_free_task_threads_data(task_team);
3104       }
3105       __kmp_free(task_team);
3106     }
3107     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3108   }
3109 }
3110 
3111 // __kmp_wait_to_unref_task_teams:
3112 // Some threads could still be in the fork barrier release code, possibly
3113 // trying to steal tasks.  Wait for each thread to unreference its task team.
3114 void __kmp_wait_to_unref_task_teams(void) {
3115   kmp_info_t *thread;
3116   kmp_uint32 spins;
3117   int done;
3118 
3119   KMP_INIT_YIELD(spins);
3120 
3121   for (;;) {
3122     done = TRUE;
3123 
3124     // TODO: GEH - this may be wrong because some sync would be necessary
3125     // in case threads are added to the pool during the traversal. Need to
3126     // verify that lock for thread pool is held when calling this routine.
3127     for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
3128          thread = thread->th.th_next_pool) {
3129 #if KMP_OS_WINDOWS
3130       DWORD exit_val;
3131 #endif
3132       if (TCR_PTR(thread->th.th_task_team) == NULL) {
3133         KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3134                       __kmp_gtid_from_thread(thread)));
3135         continue;
3136       }
3137 #if KMP_OS_WINDOWS
3138       // TODO: GEH - add this check for Linux* OS / OS X* as well?
3139       if (!__kmp_is_thread_alive(thread, &exit_val)) {
3140         thread->th.th_task_team = NULL;
3141         continue;
3142       }
3143 #endif
3144 
3145       done = FALSE; // Because th_task_team pointer is not NULL for this thread
3146 
3147       KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3148                     "unreference task_team\n",
3149                     __kmp_gtid_from_thread(thread)));
3150 
3151       if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3152         volatile void *sleep_loc;
3153         // If the thread is sleeping, awaken it.
3154         if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3155             NULL) {
3156           KA_TRACE(
3157               10,
3158               ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3159                __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3160           __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3161         }
3162       }
3163     }
3164     if (done) {
3165       break;
3166     }
3167 
3168     // If we are oversubscribed, or have waited a bit (and library mode is
3169     // throughput), yield. Pause is in the following code.
3170     KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
3171     KMP_YIELD_SPIN(spins); // Yields only if KMP_LIBRARY=throughput
3172   }
3173 }
3174 
3175 // __kmp_task_team_setup:  Create a task_team for the current team, but use
3176 // an already created, unused one if it already exists.
3177 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
3178   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3179 
3180   // If this task_team hasn't been created yet, allocate it. It will be used in
3181   // the region after the next.
3182   // If it exists, it is the current task team and shouldn't be touched yet as
3183   // it may still be in use.
3184   if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
3185       (always || team->t.t_nproc > 1)) {
3186     team->t.t_task_team[this_thr->th.th_task_state] =
3187         __kmp_allocate_task_team(this_thr, team);
3188     KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p "
3189                   "for team %d at parity=%d\n",
3190                   __kmp_gtid_from_thread(this_thr),
3191                   team->t.t_task_team[this_thr->th.th_task_state],
3192                   ((team != NULL) ? team->t.t_id : -1),
3193                   this_thr->th.th_task_state));
3194   }
3195 
3196   // After threads exit the release, they will call sync, and then point to this
3197   // other task_team; make sure it is allocated and properly initialized. As
3198   // threads spin in the barrier release phase, they will continue to use the
3199   // previous task_team struct(above), until they receive the signal to stop
3200   // checking for tasks (they can't safely reference the kmp_team_t struct,
3201   // which could be reallocated by the master thread). No task teams are formed
3202   // for serialized teams.
3203   if (team->t.t_nproc > 1) {
3204     int other_team = 1 - this_thr->th.th_task_state;
3205     if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
3206       team->t.t_task_team[other_team] =
3207           __kmp_allocate_task_team(this_thr, team);
3208       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new "
3209                     "task_team %p for team %d at parity=%d\n",
3210                     __kmp_gtid_from_thread(this_thr),
3211                     team->t.t_task_team[other_team],
3212                     ((team != NULL) ? team->t.t_id : -1), other_team));
3213     } else { // Leave the old task team struct in place for the upcoming region;
3214       // adjust as needed
3215       kmp_task_team_t *task_team = team->t.t_task_team[other_team];
3216       if (!task_team->tt.tt_active ||
3217           team->t.t_nproc != task_team->tt.tt_nproc) {
3218         TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
3219         TCW_4(task_team->tt.tt_found_tasks, FALSE);
3220 #if OMP_45_ENABLED
3221         TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3222 #endif
3223         KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
3224                           team->t.t_nproc);
3225         TCW_4(task_team->tt.tt_active, TRUE);
3226       }
3227       // if team size has changed, the first thread to enable tasking will
3228       // realloc threads_data if necessary
3229       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team "
3230                     "%p for team %d at parity=%d\n",
3231                     __kmp_gtid_from_thread(this_thr),
3232                     team->t.t_task_team[other_team],
3233                     ((team != NULL) ? team->t.t_id : -1), other_team));
3234     }
3235   }
3236 }
3237 
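// Rough timeline of the two-slot scheme above (illustrative, assuming a
// thread whose th_task_state starts at 0):
//
//   region N   : threads work out of team->t.t_task_team[0]
//   barrier    : __kmp_task_team_sync() toggles th_task_state 0 -> 1
//   region N+1 : threads work out of team->t.t_task_team[1], while stragglers
//                from region N may still be draining tasks from slot 0
//
// Keeping both slots allocated lets late threads keep stealing from the old
// task team while the master prepares the other slot for the upcoming region.
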
3238 // __kmp_task_team_sync: Propagation of task team data from team to threads
3239 // which happens just after the release phase of a team barrier.  This may be
3240 // called by any thread, but only for teams with # threads > 1.
3241 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
3242   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3243 
3244   // Toggle the th_task_state field, to switch which task_team this thread
3245   // refers to
3246   this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
3247   // It is now safe to propagate the task team pointer from the team struct to
3248   // the current thread.
3249   TCW_PTR(this_thr->th.th_task_team,
3250           team->t.t_task_team[this_thr->th.th_task_state]);
3251   KA_TRACE(20,
3252            ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
3253             "%p from Team #%d (parity=%d)\n",
3254             __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
3255             ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
3256 }
3257 
3258 // __kmp_task_team_wait: Master thread waits for outstanding tasks after the
3259 // barrier gather phase. Only called by master thread if #threads in team > 1 or
3260 // if proxy tasks were created.
3261 //
3262 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
3263 // by passing in 0 optionally as the last argument. When wait is zero, master
3264 // thread does not wait for unfinished_threads to reach 0.
3265 void __kmp_task_team_wait(
3266     kmp_info_t *this_thr,
3267     kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
3268   kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
3269 
3270   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3271   KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
3272 
3273   if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
3274     if (wait) {
3275       KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
3276                     "(for unfinished_threads to reach 0) on task_team = %p\n",
3277                     __kmp_gtid_from_thread(this_thr), task_team));
3278       // Worker threads may have dropped through to release phase, but could
3279       // still be executing tasks. Wait here for tasks to complete. To avoid
3280       // memory contention, only master thread checks termination condition.
3281       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
3282                              &task_team->tt.tt_unfinished_threads),
3283                        0U);
3284       flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
3285     }
3286     // Deactivate the old task team, so that the worker threads will stop
3287     // referencing it while spinning.
3288     KA_TRACE(
3289         20,
3290         ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
3291          "setting active to false, setting local and team's pointer to NULL\n",
3292          __kmp_gtid_from_thread(this_thr), task_team));
3293 #if OMP_45_ENABLED
3294     KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
3295                      task_team->tt.tt_found_proxy_tasks == TRUE);
3296     TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3297 #else
3298     KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1);
3299 #endif
3300     KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
3301     TCW_SYNC_4(task_team->tt.tt_active, FALSE);
3302     KMP_MB();
3303 
3304     TCW_PTR(this_thr->th.th_task_team, NULL);
3305   }
3306 }
3307 
3308 // __kmp_tasking_barrier:
3309 // This routine may only be called when __kmp_tasking_mode == tskm_extra_barrier.
3310 // Internal function to execute all tasks prior to a regular barrier or a join
3311 // barrier. It is a full barrier itself, which unfortunately turns regular
3312 // barriers into double barriers and join barriers into 1 1/2 barriers.
3313 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
3314   std::atomic<kmp_uint32> *spin = RCAST(
3315       std::atomic<kmp_uint32> *,
3316       &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
3317   int flag = FALSE;
3318   KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
3319 
3320 #if USE_ITT_BUILD
3321   KMP_FSYNC_SPIN_INIT(spin, NULL);
3322 #endif /* USE_ITT_BUILD */
3323   kmp_flag_32 spin_flag(spin, 0U);
3324   while (!spin_flag.execute_tasks(thread, gtid, TRUE,
3325                                   &flag USE_ITT_BUILD_ARG(NULL), 0)) {
3326 #if USE_ITT_BUILD
3327     // TODO: What about itt_sync_obj??
3328     KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
3329 #endif /* USE_ITT_BUILD */
3330 
3331     if (TCR_4(__kmp_global.g.g_done)) {
3332       if (__kmp_global.g.g_abort)
3333         __kmp_abort_thread();
3334       break;
3335     }
3336     KMP_YIELD(TRUE); // GH: We always yield here
3337   }
3338 #if USE_ITT_BUILD
3339   KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
3340 #endif /* USE_ITT_BUILD */
3341 }
3342 
3343 #if OMP_45_ENABLED
3344 
3345 // __kmp_give_task puts a task into a given thread queue if:
3346 //  - the queue for that thread was created
3347 //  - there's space in that queue
3348 // Because of this, __kmp_push_task needs to check if there's space after
3349 // getting the lock
3350 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
3351                             kmp_int32 pass) {
3352   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3353   kmp_task_team_t *task_team = taskdata->td_task_team;
3354 
3355   KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
3356                 taskdata, tid));
3357 
3358   // If task_team is NULL, something has gone really wrong...
3359   KMP_DEBUG_ASSERT(task_team != NULL);
3360 
3361   bool result = false;
3362   kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3363 
3364   if (thread_data->td.td_deque == NULL) {
3365     // There's no queue in this thread, go find another one
3366     // We're guaranteed that at least one thread has a queue
3367     KA_TRACE(30,
3368              ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
3369               tid, taskdata));
3370     return result;
3371   }
3372 
3373   if (TCR_4(thread_data->td.td_deque_ntasks) >=
3374       TASK_DEQUE_SIZE(thread_data->td)) {
3375     KA_TRACE(
3376         30,
3377         ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
3378          taskdata, tid));
3379 
3380     // if this deque is bigger than the pass ratio give a chance to another
3381     // thread
3382     if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3383       return result;
3384 
3385     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3386     __kmp_realloc_task_deque(thread, thread_data);
3387 
3388   } else {
3389 
3390     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3391 
3392     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3393         TASK_DEQUE_SIZE(thread_data->td)) {
3394       KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
3395                     "thread %d.\n",
3396                     taskdata, tid));
3397 
3398       // if this deque is bigger than the pass ratio give a chance to another
3399       // thread
3400       if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3401         goto release_and_exit;
3402 
3403       __kmp_realloc_task_deque(thread, thread_data);
3404     }
3405   }
3406 
3407   // lock is held here, and there is space in the deque
3408 
3409   thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3410   // Wrap index.
3411   thread_data->td.td_deque_tail =
3412       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3413   TCW_4(thread_data->td.td_deque_ntasks,
3414         TCR_4(thread_data->td.td_deque_ntasks) + 1);
3415 
3416   result = true;
3417   KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
3418                 taskdata, tid));
3419 
3420 release_and_exit:
3421   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3422 
3423   return result;
3424 }
3425 
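// Illustrative walk-through of the "pass" back-off used above: callers (see
// __kmpc_proxy_task_completed_ooo below) start with pass = 1 and double it
// after every full sweep over the team. A full deque is grown only when
//   TASK_DEQUE_SIZE(td) / INITIAL_TASK_DEQUE_SIZE < pass
// so the first sweep skips every thread whose deque is still at its initial
// size, the second sweep allows one doubling, and so on; the growth is spread
// over the team instead of repeatedly enlarging the first full deque found.
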
3426 /* The finish of the proxy tasks is divided into two pieces:
3427     - the top half is the one that can be done from a thread outside the team
3428     - the bottom half must be run from a thread within the team
3429 
3430    In order to run the bottom half the task gets queued back into one of the
3431    threads of the team. Once the td_incomplete_child_tasks counter of the
3432    parent is decremented the threads can leave the barriers. So, the bottom
3433    half needs to be queued before the counter is decremented. The top half is
3434    therefore divided into two parts:
3435     - things that can be run before queuing the bottom half
3436     - things that must be run after queuing the bottom half
3437 
3438    This creates a second race as the bottom half can free the task before the
3439    second top half is executed. To avoid this we use the
3440    td_incomplete_child_tasks counter of the proxy task to synchronize the top
3441    and bottom half. */
3442 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3443   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3444   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3445   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3446   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3447 
3448   taskdata->td_flags.complete = 1; // mark the task as completed
3449 
3450   if (taskdata->td_taskgroup)
3451     KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
3452 
3453   // Create an imaginary child for this task so that the bottom half cannot
3454   // release the task before we have completed the second top half
3455   KMP_ATOMIC_INC(&taskdata->td_incomplete_child_tasks);
3456 }
3457 
3458 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3459   kmp_int32 children = 0;
3460 
3461   // Predecrement simulated by "- 1" calculation
3462   children =
3463       KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
3464   KMP_DEBUG_ASSERT(children >= 0);
3465 
3466   // Remove the imaginary child
3467   KMP_ATOMIC_DEC(&taskdata->td_incomplete_child_tasks);
3468 }
3469 
3470 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3471   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3472   kmp_info_t *thread = __kmp_threads[gtid];
3473 
3474   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3475   KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3476                    1); // top half must run before bottom half
3477 
3478   // We need to wait to make sure the top half is finished
3479   // Spinning here should be ok as this should happen quickly
3480   while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) > 0)
3481     ;
3482 
3483   __kmp_release_deps(gtid, taskdata);
3484   __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3485 }
3486 
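// Ordering recap for the three helpers above (descriptive only): an
// out-of-team completion performs
//   __kmp_first_top_half_finish_proxy(td);  // mark complete, add phantom child
//   __kmp_give_task(...);                   // queue the bottom half to the team
//   __kmp_second_top_half_finish_proxy(td); // release parent, drop phantom child
// while the queued bottom half spins on td_incomplete_child_tasks, so it can
// never free the task before the second top half has run.
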
3487 /*!
3488 @ingroup TASKING
3489 @param gtid Global Thread ID of encountering thread
3490 @param ptask Task which execution is completed
3491 
3492 Execute the completion of a proxy task from a thread that is part of the team.
3493 Run the top and bottom halves directly.
3494 */
3495 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
3496   KMP_DEBUG_ASSERT(ptask != NULL);
3497   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3498   KA_TRACE(
3499       10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
3500            gtid, taskdata));
3501 
3502   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3503 
3504   __kmp_first_top_half_finish_proxy(taskdata);
3505   __kmp_second_top_half_finish_proxy(taskdata);
3506   __kmp_bottom_half_finish_proxy(gtid, ptask);
3507 
3508   KA_TRACE(10,
3509            ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
3510             gtid, taskdata));
3511 }
3512 
3513 /*!
3514 @ingroup TASKING
3515 @param ptask Task which execution is completed
3516 
3517 Execute the completion of a proxy task from a thread that may not belong to
3518 the team.
3519 */
3520 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
3521   KMP_DEBUG_ASSERT(ptask != NULL);
3522   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3523 
3524   KA_TRACE(
3525       10,
3526       ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
3527        taskdata));
3528 
3529   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3530 
3531   __kmp_first_top_half_finish_proxy(taskdata);
3532 
3533   // Enqueue task to complete bottom half completion from a thread within the
3534   // corresponding team
3535   kmp_team_t *team = taskdata->td_team;
3536   kmp_int32 nthreads = team->t.t_nproc;
3537   kmp_info_t *thread;
3538 
3539   // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
3540   // but we cannot use __kmp_get_random here
3541   kmp_int32 start_k = 0;
3542   kmp_int32 pass = 1;
3543   kmp_int32 k = start_k;
3544 
3545   do {
3546     // For now we're just linearly trying to find a thread
3547     thread = team->t.t_threads[k];
3548     k = (k + 1) % nthreads;
3549 
3550     // we did a full pass through all the threads
3551     if (k == start_k)
3552       pass = pass << 1;
3553 
3554   } while (!__kmp_give_task(thread, k, ptask, pass));
3555 
3556   __kmp_second_top_half_finish_proxy(taskdata);
3557 
3558   KA_TRACE(
3559       10,
3560       ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
3561        taskdata));
3562 }
3563 
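// Minimal usage sketch (hypothetical, not part of this file): an offloading
// runtime that finishes asynchronous device work on one of its own helper
// threads could complete the associated proxy task like this, where
// `done_task` is the kmp_task_t * it stashed when the target task was made:
//
//   extern "C" void my_device_completion_handler(void *data) { // hypothetical
//     kmp_task_t *done_task = (kmp_task_t *)data;
//     __kmpc_proxy_task_completed_ooo(done_task); // callable from any thread
//   }
//
// The "_ooo" entry point is needed because the helper thread is generally not
// a member of the OpenMP team that created the task.
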
3564 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
3565 // for taskloop
3566 //
3567 // thread:   allocating thread
3568 // task_src: pointer to source task to be duplicated
3569 // returns:  a pointer to the allocated kmp_task_t structure (task).
3570 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
3571   kmp_task_t *task;
3572   kmp_taskdata_t *taskdata;
3573   kmp_taskdata_t *taskdata_src;
3574   kmp_taskdata_t *parent_task = thread->th.th_current_task;
3575   size_t shareds_offset;
3576   size_t task_size;
3577 
3578   KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
3579                 task_src));
3580   taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
3581   KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
3582                    TASK_FULL); // it should not be proxy task
3583   KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
3584   task_size = taskdata_src->td_size_alloc;
3585 
3586   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
3587   KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
3588                 task_size));
3589 #if USE_FAST_MEMORY
3590   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
3591 #else
3592   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
3593 #endif /* USE_FAST_MEMORY */
3594   KMP_MEMCPY(taskdata, taskdata_src, task_size);
3595 
3596   task = KMP_TASKDATA_TO_TASK(taskdata);
3597 
3598   // Initialize new task (only specific fields not affected by memcpy)
3599   taskdata->td_task_id = KMP_GEN_TASK_ID();
3600   if (task->shareds != NULL) { // need to set up the shareds pointer
3601     shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
3602     task->shareds = &((char *)taskdata)[shareds_offset];
3603     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
3604                      0);
3605   }
3606   taskdata->td_alloc_thread = thread;
3607   taskdata->td_parent = parent_task;
3608   taskdata->td_taskgroup =
3609       parent_task
3610           ->td_taskgroup; // task inherits the taskgroup from the parent task
3611 
3612   // Only need to keep track of child task counts if team parallel and tasking
3613   // not serialized
3614   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
3615     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
3616     if (parent_task->td_taskgroup)
3617       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
3618     // Only need to keep track of allocated child tasks for explicit tasks since
3619     // implicit not deallocated
3620     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
3621       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
3622   }
3623 
3624   KA_TRACE(20,
3625            ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
3626             thread, taskdata, taskdata->td_parent));
3627 #if OMPT_SUPPORT
3628   if (UNLIKELY(ompt_enabled.enabled))
3629     __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
3630 #endif
3631   return task;
3632 }
3633 
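// Illustrative note on the shareds fix-up above: the taskdata + task + shareds
// allocation is memcpy'd as a single block, so the copy's shareds live at the
// same byte offset as in the source. With made-up numbers: if
// task_src->shareds == (char *)taskdata_src + 96, then after the copy
// task->shareds must be (char *)taskdata + 96; only the base pointer changes,
// which is exactly what the shareds_offset arithmetic recomputes.
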
3634 // Routine optionally generated by the compiler for setting the lastprivate flag
3635 // and calling needed constructors for private/firstprivate objects
3636 // (used to form taskloop tasks from pattern task)
3637 // Parameters: dest task, src task, lastprivate flag.
3638 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
3639 
3640 KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
3641 
3642 // class to encapsulate manipulating loop bounds in a taskloop task.
3643 // this abstracts away the Intel vs GOMP taskloop interface for setting/getting
3644 // the loop bound variables.
3645 class kmp_taskloop_bounds_t {
3646   kmp_task_t *task;
3647   const kmp_taskdata_t *taskdata;
3648   size_t lower_offset;
3649   size_t upper_offset;
3650 
3651 public:
3652   kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
3653       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
3654         lower_offset((char *)lb - (char *)task),
3655         upper_offset((char *)ub - (char *)task) {
3656     KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
3657     KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
3658   }
3659   kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
3660       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
3661         lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
3662   size_t get_lower_offset() const { return lower_offset; }
3663   size_t get_upper_offset() const { return upper_offset; }
3664   kmp_uint64 get_lb() const {
3665     kmp_int64 retval;
3666 #if defined(KMP_GOMP_COMPAT)
3667     // Intel task just returns the lower bound normally
3668     if (!taskdata->td_flags.native) {
3669       retval = *(kmp_int64 *)((char *)task + lower_offset);
3670     } else {
3671       // GOMP task has to take into account the sizeof(long)
3672       if (taskdata->td_size_loop_bounds == 4) {
3673         kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
3674         retval = (kmp_int64)*lb;
3675       } else {
3676         kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
3677         retval = (kmp_int64)*lb;
3678       }
3679     }
3680 #else
3681     retval = *(kmp_int64 *)((char *)task + lower_offset);
3682 #endif // defined(KMP_GOMP_COMPAT)
3683     return retval;
3684   }
3685   kmp_uint64 get_ub() const {
3686     kmp_int64 retval;
3687 #if defined(KMP_GOMP_COMPAT)
3688     // Intel task just returns the upper bound normally
3689     if (!taskdata->td_flags.native) {
3690       retval = *(kmp_int64 *)((char *)task + upper_offset);
3691     } else {
3692       // GOMP task has to take into account the sizeof(long)
3693       if (taskdata->td_size_loop_bounds == 4) {
3694         kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
3695         retval = (kmp_int64)*ub;
3696       } else {
3697         kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
3698         retval = (kmp_int64)*ub;
3699       }
3700     }
3701 #else
3702     retval = *(kmp_int64 *)((char *)task + upper_offset);
3703 #endif // defined(KMP_GOMP_COMPAT)
3704     return retval;
3705   }
3706   void set_lb(kmp_uint64 lb) {
3707 #if defined(KMP_GOMP_COMPAT)
3708     // Intel task just sets the lower bound normally
3709     if (!taskdata->td_flags.native) {
3710       *(kmp_uint64 *)((char *)task + lower_offset) = lb;
3711     } else {
3712       // GOMP task has to take into account the sizeof(long)
3713       if (taskdata->td_size_loop_bounds == 4) {
3714         kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
3715         *lower = (kmp_uint32)lb;
3716       } else {
3717         kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
3718         *lower = (kmp_uint64)lb;
3719       }
3720     }
3721 #else
3722     *(kmp_uint64 *)((char *)task + lower_offset) = lb;
3723 #endif // defined(KMP_GOMP_COMPAT)
3724   }
3725   void set_ub(kmp_uint64 ub) {
3726 #if defined(KMP_GOMP_COMPAT)
3727     // Intel task just sets the upper bound normally
3728     if (!taskdata->td_flags.native) {
3729       *(kmp_uint64 *)((char *)task + upper_offset) = ub;
3730     } else {
3731       // GOMP task has to take into account the sizeof(long)
3732       if (taskdata->td_size_loop_bounds == 4) {
3733         kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
3734         *upper = (kmp_uint32)ub;
3735       } else {
3736         kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
3737         *upper = (kmp_uint64)ub;
3738       }
3739     }
3740 #else
3741     *(kmp_uint64 *)((char *)task + upper_offset) = ub;
3742 #endif // defined(KMP_GOMP_COMPAT)
3743   }
3744 };
3745 
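// Layout summary for the accessors above (descriptive only):
//  - Intel-style tasks keep lb/ub as two 64-bit fields inside the kmp_task_t
//    itself, at lower_offset/upper_offset from the task pointer.
//  - GOMP-compatible tasks (td_flags.native) keep lb/ub as the first two
//    `long` slots of task->shareds, so the accessors must honor
//    td_size_loop_bounds (4 or 8 bytes) when reading or writing them.
// Typical use, as in __kmp_taskloop_linear below:
//   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
//   kmp_uint64 lower = task_bounds.get_lb(), upper = task_bounds.get_ub();
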
3746 // __kmp_taskloop_linear: Start tasks of the taskloop linearly
3747 //
3748 // loc        Source location information
3749 // gtid       Global thread ID
3750 // task       Pattern task, exposes the loop iteration range
3751 // lb         Pointer to loop lower bound in task structure
3752 // ub         Pointer to loop upper bound in task structure
3753 // st         Loop stride
3754 // ub_glob    Global upper bound (used for lastprivate check)
3755 // num_tasks  Number of tasks to execute
3756 // grainsize  Number of loop iterations per task
3757 // extras     Number of chunks with grainsize+1 iterations
3758 // tc         Iterations count
3759 // task_dup   Tasks duplication routine
3760 // codeptr_ra Return address for OMPT events
3761 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
3762                            kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
3763                            kmp_uint64 ub_glob, kmp_uint64 num_tasks,
3764                            kmp_uint64 grainsize, kmp_uint64 extras,
3765                            kmp_uint64 tc,
3766 #if OMPT_SUPPORT
3767                            void *codeptr_ra,
3768 #endif
3769                            void *task_dup) {
3770   KMP_COUNT_BLOCK(OMP_TASKLOOP);
3771   KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
3772   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3773   // compiler provides global bounds here
3774   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
3775   kmp_uint64 lower = task_bounds.get_lb();
3776   kmp_uint64 upper = task_bounds.get_ub();
3777   kmp_uint64 i;
3778   kmp_info_t *thread = __kmp_threads[gtid];
3779   kmp_taskdata_t *current_task = thread->th.th_current_task;
3780   kmp_task_t *next_task;
3781   kmp_int32 lastpriv = 0;
3782 
3783   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
3784   KMP_DEBUG_ASSERT(num_tasks > extras);
3785   KMP_DEBUG_ASSERT(num_tasks > 0);
3786   KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
3787                 "extras %lld, i=%lld,%lld(%d)%lld, dup %p\n",
3788                 gtid, num_tasks, grainsize, extras, lower, upper, ub_glob, st,
3789                 task_dup));
3790 
3791   // Launch num_tasks tasks, assign grainsize iterations each task
3792   for (i = 0; i < num_tasks; ++i) {
3793     kmp_uint64 chunk_minus_1;
3794     if (extras == 0) {
3795       chunk_minus_1 = grainsize - 1;
3796     } else {
3797       chunk_minus_1 = grainsize;
3798       --extras; // first extras iterations get bigger chunk (grainsize+1)
3799     }
3800     upper = lower + st * chunk_minus_1;
3801     if (i == num_tasks - 1) {
3802       // schedule the last task, set lastprivate flag if needed
3803       if (st == 1) { // most common case
3804         KMP_DEBUG_ASSERT(upper == *ub);
3805         if (upper == ub_glob)
3806           lastpriv = 1;
3807       } else if (st > 0) { // positive loop stride
3808         KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
3809         if ((kmp_uint64)st > ub_glob - upper)
3810           lastpriv = 1;
3811       } else { // negative loop stride
3812         KMP_DEBUG_ASSERT(upper + st < *ub);
3813         if (upper - ub_glob < (kmp_uint64)(-st))
3814           lastpriv = 1;
3815       }
3816     }
3817     next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
3818     kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
3819     kmp_taskloop_bounds_t next_task_bounds =
3820         kmp_taskloop_bounds_t(next_task, task_bounds);
3821 
3822     // adjust task-specific bounds
3823     next_task_bounds.set_lb(lower);
3824     if (next_taskdata->td_flags.native) {
3825       next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
3826     } else {
3827       next_task_bounds.set_ub(upper);
3828     }
3829     if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates, etc.
3830       ptask_dup(next_task, task, lastpriv);
3831     KA_TRACE(40,
3832              ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
3833               "upper %lld stride %lld, (offsets %p %p)\n",
3834               gtid, i, next_task, lower, upper, st,
3835               next_task_bounds.get_lower_offset(),
3836               next_task_bounds.get_upper_offset()));
3837 #if OMPT_SUPPORT
3838     __kmp_omp_taskloop_task(NULL, gtid, next_task,
3839                            codeptr_ra); // schedule new task
3840 #else
3841     __kmp_omp_task(gtid, next_task, true); // schedule new task
3842 #endif
3843     lower = upper + st; // adjust lower bound for the next iteration
3844   }
3845   // free the pattern task and exit
3846   __kmp_task_start(gtid, task, current_task); // make internal bookkeeping
3847   // do not execute the pattern task, just do internal bookkeeping
3848   __kmp_task_finish<false>(gtid, task, current_task);
3849 }
3850 
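// Worked example for the chunking above (illustrative numbers): with tc = 10
// iterations, num_tasks = 3 and grainsize = 3, extras must be 1 because
// 10 == 3 * 3 + 1. The first task then receives grainsize + 1 = 4 iterations
// and the remaining two receive 3 each, which is exactly the invariant
// checked by KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras) above.
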
3851 // Structure to keep taskloop parameters for auxiliary task
3852 // kept in the shareds of the task structure.
3853 typedef struct __taskloop_params {
3854   kmp_task_t *task;
3855   kmp_uint64 *lb;
3856   kmp_uint64 *ub;
3857   void *task_dup;
3858   kmp_int64 st;
3859   kmp_uint64 ub_glob;
3860   kmp_uint64 num_tasks;
3861   kmp_uint64 grainsize;
3862   kmp_uint64 extras;
3863   kmp_uint64 tc;
3864   kmp_uint64 num_t_min;
3865 #if OMPT_SUPPORT
3866   void *codeptr_ra;
3867 #endif
3868 } __taskloop_params_t;
3869 
3870 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
3871                           kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
3872                           kmp_uint64, kmp_uint64, kmp_uint64, kmp_uint64,
3873 #if OMPT_SUPPORT
3874                           void *,
3875 #endif
3876                           void *);
3877 
3878 // Execute part of the taskloop submitted as a task.
3879 int __kmp_taskloop_task(int gtid, void *ptask) {
3880   __taskloop_params_t *p =
3881       (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
3882   kmp_task_t *task = p->task;
3883   kmp_uint64 *lb = p->lb;
3884   kmp_uint64 *ub = p->ub;
3885   void *task_dup = p->task_dup;
3886   //  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3887   kmp_int64 st = p->st;
3888   kmp_uint64 ub_glob = p->ub_glob;
3889   kmp_uint64 num_tasks = p->num_tasks;
3890   kmp_uint64 grainsize = p->grainsize;
3891   kmp_uint64 extras = p->extras;
3892   kmp_uint64 tc = p->tc;
3893   kmp_uint64 num_t_min = p->num_t_min;
3894 #if OMPT_SUPPORT
3895   void *codeptr_ra = p->codeptr_ra;
3896 #endif
3897 #if KMP_DEBUG
3898   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3899   KMP_DEBUG_ASSERT(task != NULL);
3900   KA_TRACE(20, ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
3901                 " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
3902                 gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
3903                 task_dup));
3904 #endif
3905   KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
3906   if (num_tasks > num_t_min)
3907     __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3908                          grainsize, extras, tc, num_t_min,
3909 #if OMPT_SUPPORT
3910                          codeptr_ra,
3911 #endif
3912                          task_dup);
3913   else
3914     __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3915                           grainsize, extras, tc,
3916 #if OMPT_SUPPORT
3917                           codeptr_ra,
3918 #endif
3919                           task_dup);
3920 
3921   KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
3922   return 0;
3923 }
3924 
// Schedule part of the taskloop as a task,
// execute the rest of the taskloop.
//
// loc        Source location information
// gtid       Global thread ID
// task       Pattern task, exposes the loop iteration range
// lb         Pointer to loop lower bound in task structure
// ub         Pointer to loop upper bound in task structure
// st         Loop stride
// ub_glob    Global upper bound (used for lastprivate check)
// num_tasks  Number of tasks to execute
// grainsize  Number of loop iterations per task
// extras     Number of chunks with grainsize+1 iterations
// tc         Iterations count
// num_t_min  Threshold to launch tasks recursively
// task_dup   Task duplication routine
// codeptr_ra Return address for OMPT events
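//
// Each invocation splits its subrange roughly in half: the second half is
// packaged into an auxiliary task (executed via __kmp_taskloop_task) and the
// first half is processed by the caller, recursing while the per-call task
// count stays above num_t_min and falling back to __kmp_taskloop_linear
// otherwise. The recursion depth is therefore about
// log2(num_tasks / num_t_min).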
void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
                          kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
                          kmp_uint64 ub_glob, kmp_uint64 num_tasks,
                          kmp_uint64 grainsize, kmp_uint64 extras,
                          kmp_uint64 tc, kmp_uint64 num_t_min,
#if OMPT_SUPPORT
                          void *codeptr_ra,
#endif
                          void *task_dup) {
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);
  KMP_DEBUG_ASSERT(num_tasks > num_t_min);
  KA_TRACE(20, ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
                " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
                gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
                task_dup));
#endif
  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
  kmp_uint64 lower = *lb;
  kmp_info_t *thread = __kmp_threads[gtid];
  //  kmp_taskdata_t *current_task = thread->th.th_current_task;
  kmp_task_t *next_task;
  size_t lower_offset =
      (char *)lb - (char *)task; // remember offset of lb in the task structure
  size_t upper_offset =
      (char *)ub - (char *)task; // remember offset of ub in the task structure

  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
  KMP_DEBUG_ASSERT(num_tasks > extras);
  KMP_DEBUG_ASSERT(num_tasks > 0);

  // split the loop in two halves
  kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
  kmp_uint64 gr_size0 = grainsize;
  kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
  kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
  if (n_tsk0 <= extras) {
    gr_size0++; // integrate extras into grainsize
    ext0 = 0; // no extra iters in 1st half
    ext1 = extras - n_tsk0; // remaining extras
    tc0 = gr_size0 * n_tsk0;
    tc1 = tc - tc0;
  } else { // n_tsk0 > extras
    ext1 = 0; // no extra iters in 2nd half
    ext0 = extras;
    tc1 = grainsize * n_tsk1;
    tc0 = tc - tc1;
  }
  ub0 = lower + st * (tc0 - 1);
  lb1 = ub0 + st;
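  // Worked example (for illustration): num_tasks = 5, grainsize = 3,
  // extras = 2 (tc = 17), st = 1 gives n_tsk0 = 2 and n_tsk1 = 3; since
  // n_tsk0 <= extras, gr_size0 becomes 4, ext0 = ext1 = 0, tc0 = 8, tc1 = 9,
  // so the first half covers [lower, lower + 7] and the second half starts
  // at lb1 = lower + 8. Both halves keep tc == num_tasks * grainsize + extras.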

  // create pattern task for 2nd half of the loop
  next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
  // adjust lower bound (upper bound is not changed) for the 2nd half
  *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
  if (ptask_dup != NULL) // construct firstprivates, etc.
    ptask_dup(next_task, task, 0);
  *ub = ub0; // adjust upper bound for the 1st half

  // create auxiliary task for 2nd half of the loop
  kmp_task_t *new_task =
      __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
                            sizeof(__taskloop_params_t), &__kmp_taskloop_task);
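  // The auxiliary task runs no user loop body of its own: 3 * sizeof(void *)
  // is enough for the basic kmp_task_t fields, its shareds area holds the
  // __taskloop_params_t filled in below, flags = 1 requests a tied task, and
  // __kmp_taskloop_task is the task entry routine.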
  __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
  p->task = next_task;
  p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
  p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
  p->task_dup = task_dup;
  p->st = st;
  p->ub_glob = ub_glob;
  p->num_tasks = n_tsk1;
  p->grainsize = grainsize;
  p->extras = ext1;
  p->tc = tc1;
  p->num_t_min = num_t_min;
#if OMPT_SUPPORT
  p->codeptr_ra = codeptr_ra;
#endif

#if OMPT_SUPPORT
  // schedule new task with correct return address for OMPT events
  __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
#else
  __kmp_omp_task(gtid, new_task, true); // schedule new task
#endif

  // execute the 1st half of current subrange
  if (n_tsk0 > num_t_min)
    __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
                         ext0, tc0, num_t_min,
#if OMPT_SUPPORT
                         codeptr_ra,
#endif
                         task_dup);
  else
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
                          gr_size0, ext0, tc0,
#if OMPT_SUPPORT
                          codeptr_ra,
#endif
                          task_dup);

  KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
}

/*!
@ingroup TASKING
@param loc       Source location information
@param gtid      Global thread ID
@param task      Task structure
@param if_val    Value of the if clause
@param lb        Pointer to loop lower bound in task structure
@param ub        Pointer to loop upper bound in task structure
@param st        Loop stride
@param nogroup   Flag, 1 if nogroup clause specified, 0 otherwise
@param sched     Schedule specified 0/1/2 for none/grainsize/num_tasks
@param grainsize Schedule value if specified
@param task_dup  Task duplication routine

Execute the taskloop construct.
*/
void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
                     kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
                     int sched, kmp_uint64 grainsize, void *task_dup) {
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
  KMP_DEBUG_ASSERT(task != NULL);

  if (nogroup == 0) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmpc_taskgroup(loc, gtid);
  }

  // =========================================================================
  // calculate loop parameters
  kmp_taskloop_bounds_t task_bounds(task, lb, ub);
  kmp_uint64 tc;
  // compiler provides global bounds here
  kmp_uint64 lower = task_bounds.get_lb();
  kmp_uint64 upper = task_bounds.get_ub();
  kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
  kmp_uint64 num_tasks = 0, extras = 0;
  kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
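  // __kmp_taskloop_min_tasks is the threshold for switching to the recursive
  // scheme; it is presumably configured via the KMP_TASKLOOP_MIN_TASKS
  // environment variable, and a value of 0 means "pick a heuristic default"
  // (handled below).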
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

  KA_TRACE(20, ("__kmpc_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
                "grain %llu(%d), dup %p\n",
                gtid, taskdata, lower, upper, st, grainsize, sched, task_dup));

  // compute trip count
  if (st == 1) { // most common case
    tc = upper - lower + 1;
  } else if (st < 0) {
    tc = (lower - upper) / (-st) + 1;
  } else { // st > 0
    tc = (upper - lower) / st + 1;
  }
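  // For example: lower = 0, upper = 9, st = 2 gives tc = (9 - 0) / 2 + 1 = 5
  // (iterations 0, 2, 4, 6, 8); lower = 9, upper = 1, st = -2 gives
  // tc = (9 - 1) / 2 + 1 = 5 (iterations 9, 7, 5, 3, 1).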
  if (tc == 0) {
    KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
    // free the pattern task and exit
    __kmp_task_start(gtid, task, current_task);
    // do not execute anything for zero-trip loop
    __kmp_task_finish<false>(gtid, task, current_task);
    return;
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
  ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif

  if (num_tasks_min == 0)
    // TODO: can we choose a better default heuristic?
    num_tasks_min =
        KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);

  // compute num_tasks/grainsize based on the input provided
  switch (sched) {
  case 0: // no schedule clause specified, we can choose the default
    // let's try to schedule (team_size*10) tasks
    grainsize = thread->th.th_team_nproc * 10;
    // fall through: the value above is treated as num_tasks in case 2
  case 2: // num_tasks provided
    if (grainsize > tc) {
      num_tasks = tc; // too big num_tasks requested, adjust values
      grainsize = 1;
      extras = 0;
    } else {
      num_tasks = grainsize;
      grainsize = tc / num_tasks;
      extras = tc % num_tasks;
    }
    break;
  case 1: // grainsize provided
    if (grainsize > tc) {
      num_tasks = 1; // too big grainsize requested, adjust values
      grainsize = tc;
      extras = 0;
    } else {
      num_tasks = tc / grainsize;
      // adjust grainsize for balanced distribution of iterations
      grainsize = tc / num_tasks;
      extras = tc % num_tasks;
    }
    break;
  default:
    KMP_ASSERT2(0, "unknown scheduling of taskloop");
  }
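  // Worked examples (for illustration): tc = 10 with grainsize(3) (sched == 1)
  // gives num_tasks = 3, rebalanced grainsize = 3 and extras = 1, i.e. one
  // task of 4 iterations and two of 3. tc = 10 with num_tasks(4) (sched == 2)
  // gives grainsize = 2 and extras = 2, i.e. two tasks of 3 iterations and
  // two of 2. Either way tc == num_tasks * grainsize + extras, as asserted.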
  KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
  KMP_DEBUG_ASSERT(num_tasks > extras);
  KMP_DEBUG_ASSERT(num_tasks > 0);
  // =========================================================================

  // check the value of the if clause first
  // Also require GOMP_taskloop to reduce to linear (taskdata->td_flags.native)
  if (if_val == 0) { // if(0) specified, mark task as serial
    taskdata->td_flags.task_serial = 1;
    taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
    // always start serial tasks linearly
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, tc,
#if OMPT_SUPPORT
                          OMPT_GET_RETURN_ADDRESS(0),
#endif
                          task_dup);
    // !taskdata->td_flags.native => currently force linear spawning of tasks
    // for GOMP_taskloop
  } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
    KA_TRACE(20, ("__kmpc_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
                  "(%lld), grain %llu, extras %llu\n",
                  gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
    __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                         grainsize, extras, tc, num_tasks_min,
#if OMPT_SUPPORT
                         OMPT_GET_RETURN_ADDRESS(0),
#endif
                         task_dup);
  } else {
    KA_TRACE(20, ("__kmpc_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
                  "(%lld), grain %llu, extras %llu\n",
                  gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
                          grainsize, extras, tc,
#if OMPT_SUPPORT
                          OMPT_GET_RETURN_ADDRESS(0),
#endif
                          task_dup);
  }

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
        &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
  }
#endif

  if (nogroup == 0) {
#if OMPT_SUPPORT && OMPT_OPTIONAL
    OMPT_STORE_RETURN_ADDRESS(gtid);
#endif
    __kmpc_end_taskgroup(loc, gtid);
  }
  KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
}

#endif