1 /*
2  * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_i18n.h"
15 #include "kmp_itt.h"
16 #include "kmp_stats.h"
17 #include "kmp_wait_release.h"
18 #include "kmp_taskdeps.h"
19 
20 #if OMPT_SUPPORT
21 #include "ompt-specific.h"
22 #endif
23 
24 #include "tsan_annotations.h"
25 
26 /* forward declaration */
27 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
28                                  kmp_info_t *this_thr);
29 static void __kmp_alloc_task_deque(kmp_info_t *thread,
30                                    kmp_thread_data_t *thread_data);
31 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
32                                            kmp_task_team_t *task_team);
33 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
34 
35 #ifdef BUILD_TIED_TASK_STACK
36 
37 //  __kmp_trace_task_stack: print the tied tasks from the task stack in order
38 //  from top to bottom
39 //
40 //  gtid: global thread identifier for thread containing stack
41 //  thread_data: thread data for task team thread containing stack
42 //  threshold: value above which the trace statement triggers
43 //  location: string identifying call site of this function (for trace)
44 static void __kmp_trace_task_stack(kmp_int32 gtid,
45                                    kmp_thread_data_t *thread_data,
46                                    int threshold, char *location) {
47   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
48   kmp_taskdata_t **stack_top = task_stack->ts_top;
49   kmp_int32 entries = task_stack->ts_entries;
50   kmp_taskdata_t *tied_task;
51 
52   KA_TRACE(
53       threshold,
54       ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
55        "first_block = %p, stack_top = %p \n",
56        location, gtid, entries, &task_stack->ts_first_block, stack_top));
57 
58   KMP_DEBUG_ASSERT(stack_top != NULL);
59   KMP_DEBUG_ASSERT(entries > 0);
60 
61   while (entries != 0) {
62     KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
63     // fix up ts_top if we need to pop from previous block
64     if ((entries & TASK_STACK_INDEX_MASK) == 0) {
65       kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
66 
67       stack_block = stack_block->sb_prev;
68       stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
69     }
70 
71     // finish bookkeeping
72     stack_top--;
73     entries--;
74 
75     tied_task = *stack_top;
76 
77     KMP_DEBUG_ASSERT(tied_task != NULL);
78     KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
79 
80     KA_TRACE(threshold,
81              ("__kmp_trace_task_stack(%s):             gtid=%d, entry=%d, "
82               "stack_top=%p, tied_task=%p\n",
83               location, gtid, entries, stack_top, tied_task));
84   }
85   KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
86 
87   KA_TRACE(threshold,
88            ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
89             location, gtid));
90 }
91 
92 //  __kmp_init_task_stack: initialize the task stack for the first time
93 //  after a thread_data structure is created.
94 //  It should not be necessary to do this again (assuming the stack works).
95 //
96 //  gtid: global thread identifier of calling thread
97 //  thread_data: thread data for task team thread containing stack
98 static void __kmp_init_task_stack(kmp_int32 gtid,
99                                   kmp_thread_data_t *thread_data) {
100   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
101   kmp_stack_block_t *first_block;
102 
103   // set up the first block of the stack
104   first_block = &task_stack->ts_first_block;
105   task_stack->ts_top = (kmp_taskdata_t **)first_block;
106   memset((void *)first_block, '\0',
107          TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
108 
109   // initialize the stack to be empty
110   task_stack->ts_entries = TASK_STACK_EMPTY;
111   first_block->sb_next = NULL;
112   first_block->sb_prev = NULL;
113 }
114 
115 //  __kmp_free_task_stack: free the task stack when thread_data is destroyed.
116 //
117 //  gtid: global thread identifier for calling thread
118 //  thread_data: thread info for thread containing stack
119 static void __kmp_free_task_stack(kmp_int32 gtid,
120                                   kmp_thread_data_t *thread_data) {
121   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
122   kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
123 
124   KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
125   // free from the second block of the stack
126   while (stack_block != NULL) {
127     kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
128 
129     stack_block->sb_next = NULL;
130     stack_block->sb_prev = NULL;
131     if (stack_block != &task_stack->ts_first_block) {
132       __kmp_thread_free(__kmp_threads[gtid],
133                         stack_block); // free the block, if not the first
134     }
135     stack_block = next_block;
136   }
137   // initialize the stack to be empty
138   task_stack->ts_entries = 0;
139   task_stack->ts_top = NULL;
140 }
141 
142 //  __kmp_push_task_stack: Push the tied task onto the task stack.
143 //     Grow the stack if necessary by allocating another block.
144 //
145 //  gtid: global thread identifier for calling thread
146 //  thread: thread info for thread containing stack
147 //  tied_task: the task to push on the stack
148 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
149                                   kmp_taskdata_t *tied_task) {
150   // GEH - need to consider what to do if tt_threads_data not allocated yet
151   kmp_thread_data_t *thread_data =
152       &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
153   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
154 
155   if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
156     return; // Don't push anything on stack if team or team tasks are serialized
157   }
158 
159   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
160   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
161 
162   KA_TRACE(20,
163            ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
164             gtid, thread, tied_task));
165   // Store entry
166   *(task_stack->ts_top) = tied_task;
167 
168   // Do bookkeeping for next push
169   task_stack->ts_top++;
170   task_stack->ts_entries++;
171 
172   if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
173     // Find beginning of this task block
174     kmp_stack_block_t *stack_block =
175         (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
176 
177     // Check if we already have a block
178     if (stack_block->sb_next !=
179         NULL) { // reset ts_top to beginning of next block
180       task_stack->ts_top = &stack_block->sb_next->sb_block[0];
181     } else { // Alloc new block and link it up
182       kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
183           thread, sizeof(kmp_stack_block_t));
184 
185       task_stack->ts_top = &new_block->sb_block[0];
186       stack_block->sb_next = new_block;
187       new_block->sb_prev = stack_block;
188       new_block->sb_next = NULL;
189 
190       KA_TRACE(
191           30,
192           ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
193            gtid, tied_task, new_block));
194     }
195   }
196   KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
197                 tied_task));
198 }
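// Illustrative sketch (not part of the runtime): the tied-task stack grows in
// fixed-size blocks of TASK_STACK_BLOCK_SIZE entries chained via sb_prev and
// sb_next. Assuming TASK_STACK_BLOCK_SIZE is a power of two and
// TASK_STACK_INDEX_MASK == TASK_STACK_BLOCK_SIZE - 1, the boundary test used
// above behaves like a modulo check:
//
//   // entries == 0, BLOCK_SIZE, 2*BLOCK_SIZE, ... all land on a block edge
//   bool at_block_boundary = ((entries & TASK_STACK_INDEX_MASK) == 0);
//   // equivalent to: entries % TASK_STACK_BLOCK_SIZE == 0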
199 
200 //  __kmp_pop_task_stack: Pop the tied task from the task stack.  Don't return
201 //  the task, just check to make sure it matches the ending task passed in.
202 //
203 //  gtid: global thread identifier for the calling thread
204 //  thread: thread info structure containing stack
205 //  ending_task: the task that is ending (should match the task popped off
206 //  the stack)
207 static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
208                                  kmp_taskdata_t *ending_task) {
209   // GEH - need to consider what to do if tt_threads_data not allocated yet
210   kmp_thread_data_t *thread_data =
211       &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
212   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
213   kmp_taskdata_t *tied_task;
214 
215   if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
216     // Don't pop anything from stack if team or team tasks are serialized
217     return;
218   }
219 
220   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
221   KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
222 
223   KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
224                 thread));
225 
226   // fix up ts_top if we need to pop from previous block
227   if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
228     kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
229 
230     stack_block = stack_block->sb_prev;
231     task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
232   }
233 
234   // finish bookkeeping
235   task_stack->ts_top--;
236   task_stack->ts_entries--;
237 
238   tied_task = *(task_stack->ts_top);
239 
240   KMP_DEBUG_ASSERT(tied_task != NULL);
241   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
242   KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
243 
244   KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
245                 tied_task));
246   return;
247 }
248 #endif /* BUILD_TIED_TASK_STACK */
249 
250 // returns 1 if new task is allowed to execute, 0 otherwise
251 // checks Task Scheduling constraint (if requested) and
252 // mutexinoutset dependencies if any
253 static bool __kmp_task_is_allowed(int gtid, const kmp_int32 is_constrained,
254                                   const kmp_taskdata_t *tasknew,
255                                   const kmp_taskdata_t *taskcurr) {
256   if (is_constrained && (tasknew->td_flags.tiedness == TASK_TIED)) {
257     // Check if the candidate obeys the Task Scheduling Constraints (TSC)
258     // only descendant of all deferred tied tasks can be scheduled, checking
259     // the last one is enough, as it in turn is the descendant of all others
260     kmp_taskdata_t *current = taskcurr->td_last_tied;
261     KMP_DEBUG_ASSERT(current != NULL);
262     // check if the task is not suspended on barrier
263     if (current->td_flags.tasktype == TASK_EXPLICIT ||
264         current->td_taskwait_thread > 0) { // <= 0 on barrier
265       kmp_int32 level = current->td_level;
266       kmp_taskdata_t *parent = tasknew->td_parent;
267       while (parent != current && parent->td_level > level) {
268         // check generation up to the level of the current task
269         parent = parent->td_parent;
270         KMP_DEBUG_ASSERT(parent != NULL);
271       }
272       if (parent != current)
273         return false;
274     }
275   }
276   // Check mutexinoutset dependencies, acquire locks
277   kmp_depnode_t *node = tasknew->td_depnode;
278   if (node && (node->dn.mtx_num_locks > 0)) {
279     for (int i = 0; i < node->dn.mtx_num_locks; ++i) {
280       KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
281       if (__kmp_test_lock(node->dn.mtx_locks[i], gtid))
282         continue;
283       // could not get the lock, release previous locks
284       for (int j = i - 1; j >= 0; --j)
285         __kmp_release_lock(node->dn.mtx_locks[j], gtid);
286       return false;
287     }
288     // negative num_locks means all locks acquired successfully
289     node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
290   }
291   return true;
292 }
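// Illustrative sketch (not part of the runtime): the TSC check above accepts a
// new tied task only if the last deferred tied task is one of its ancestors.
// A standalone model of that ancestor walk, using only the td_parent/td_level
// fields referenced above, could look like:
//
//   static bool __is_descendant_of(const kmp_taskdata_t *candidate,
//                                  const kmp_taskdata_t *last_tied) {
//     const kmp_taskdata_t *p = candidate->td_parent;
//     while (p != last_tied && p->td_level > last_tied->td_level)
//       p = p->td_parent; // climb one generation per iteration
//     return p == last_tied;
//   }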
293 
294 // __kmp_realloc_task_deque:
295 // Re-allocates a task deque for a particular thread, copies the content from
296 // the old deque and adjusts the necessary data structures relating to the
297 // deque. This operation must be done with the deque_lock being held
298 static void __kmp_realloc_task_deque(kmp_info_t *thread,
299                                      kmp_thread_data_t *thread_data) {
300   kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
301   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == size);
302   kmp_int32 new_size = 2 * size;
303 
304   KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
305                 "%d] for thread_data %p\n",
306                 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
307 
308   kmp_taskdata_t **new_deque =
309       (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
310 
311   int i, j;
312   for (i = thread_data->td.td_deque_head, j = 0; j < size;
313        i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
314     new_deque[j] = thread_data->td.td_deque[i];
315 
316   __kmp_free(thread_data->td.td_deque);
317 
318   thread_data->td.td_deque_head = 0;
319   thread_data->td.td_deque_tail = size;
320   thread_data->td.td_deque = new_deque;
321   thread_data->td.td_deque_size = new_size;
322 }
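// Illustrative sketch (not part of the runtime): the copy loop above
// re-linearizes the circular deque so the live entries start at index 0 of the
// new (doubled) array. For example, with size = 4 and head == tail == 3 (full):
//
//   // old slots visited: 3, 0, 1, 2  ->  new slots filled: 0, 1, 2, 3
//   // afterwards: head = 0, tail = 4, so the next push lands in new[4]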
323 
324 //  __kmp_push_task: Add a task to the thread's deque
325 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
326   kmp_info_t *thread = __kmp_threads[gtid];
327   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
328   kmp_task_team_t *task_team = thread->th.th_task_team;
329   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
330   kmp_thread_data_t *thread_data;
331 
332   KA_TRACE(20,
333            ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
334 
335   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
336     // untied task needs to increment counter so that the task structure is not
337     // freed prematurely
338     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
339     KMP_DEBUG_USE_VAR(counter);
340     KA_TRACE(
341         20,
342         ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
343          gtid, counter, taskdata));
344   }
345 
346   // The first check avoids building task_team thread data if serialized
347   if (taskdata->td_flags.task_serial) {
348     KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
349                   "TASK_NOT_PUSHED for task %p\n",
350                   gtid, taskdata));
351     return TASK_NOT_PUSHED;
352   }
353 
354   // Now that serialized tasks have returned, we can assume that we are not in
355   // immediate exec mode
356   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
357   if (!KMP_TASKING_ENABLED(task_team)) {
358     __kmp_enable_tasking(task_team, thread);
359   }
360   KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
361   KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
362 
363   // Find tasking deque specific to encountering thread
364   thread_data = &task_team->tt.tt_threads_data[tid];
365 
366   // No lock needed since only owner can allocate
367   if (thread_data->td.td_deque == NULL) {
368     __kmp_alloc_task_deque(thread, thread_data);
369   }
370 
371   int locked = 0;
372   // Check if deque is full
373   if (TCR_4(thread_data->td.td_deque_ntasks) >=
374       TASK_DEQUE_SIZE(thread_data->td)) {
375     if (__kmp_enable_task_throttling &&
376         __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
377                               thread->th.th_current_task)) {
378       KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
379                     "TASK_NOT_PUSHED for task %p\n",
380                     gtid, taskdata));
381       return TASK_NOT_PUSHED;
382     } else {
383       __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
384       locked = 1;
385       if (TCR_4(thread_data->td.td_deque_ntasks) >=
386           TASK_DEQUE_SIZE(thread_data->td)) {
387         // expand deque to push the task which is not allowed to execute
388         __kmp_realloc_task_deque(thread, thread_data);
389       }
390     }
391   }
392   // Lock the deque for the task push operation
393   if (!locked) {
394     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
395     // Need to recheck as we can get a proxy task from thread outside of OpenMP
396     if (TCR_4(thread_data->td.td_deque_ntasks) >=
397         TASK_DEQUE_SIZE(thread_data->td)) {
398       if (__kmp_enable_task_throttling &&
399           __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
400                                 thread->th.th_current_task)) {
401         __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
402         KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; "
403                       "returning TASK_NOT_PUSHED for task %p\n",
404                       gtid, taskdata));
405         return TASK_NOT_PUSHED;
406       } else {
407         // expand deque to push the task which is not allowed to execute
408         __kmp_realloc_task_deque(thread, thread_data);
409       }
410     }
411   }
412   // Must have room since only the calling thread can add tasks to this deque
413   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
414                    TASK_DEQUE_SIZE(thread_data->td));
415 
416   thread_data->td.td_deque[thread_data->td.td_deque_tail] =
417       taskdata; // Push taskdata
418   // Wrap index.
419   thread_data->td.td_deque_tail =
420       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
421   TCW_4(thread_data->td.td_deque_ntasks,
422         TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
423 
424   KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
425                 "task=%p ntasks=%d head=%u tail=%u\n",
426                 gtid, taskdata, thread_data->td.td_deque_ntasks,
427                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
428 
429   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
430 
431   return TASK_SUCCESSFULLY_PUSHED;
432 }
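// Illustrative sketch (not part of the runtime): because the deque capacity is
// kept a power of two, wrapping the tail with TASK_DEQUE_MASK (assumed to be
// capacity - 1) is equivalent to a modulo, e.g. with a capacity of 256:
//
//   kmp_uint32 tail = 255;
//   tail = (tail + 1) & 255; // == (255 + 1) % 256 == 0, index wraps to front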
433 
434 // __kmp_pop_current_task_from_thread: set up current task from called thread
435 // when team ends
436 //
437 // this_thr: thread structure to set current_task in.
438 void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
439   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
440                 "this_thread=%p, curtask=%p, "
441                 "curtask_parent=%p\n",
442                 0, this_thr, this_thr->th.th_current_task,
443                 this_thr->th.th_current_task->td_parent));
444 
445   this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
446 
447   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
448                 "this_thread=%p, curtask=%p, "
449                 "curtask_parent=%p\n",
450                 0, this_thr, this_thr->th.th_current_task,
451                 this_thr->th.th_current_task->td_parent));
452 }
453 
454 // __kmp_push_current_task_to_thread: set up current task in called thread for a
455 // new team
456 //
457 // this_thr: thread structure to set up
458 // team: team for implicit task data
459 // tid: thread within team to set up
460 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
461                                        int tid) {
462   // the current task of this thread is the parent of the just-created
463   // implicit tasks of the new team
464   KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
465                 "curtask=%p "
466                 "parent_task=%p\n",
467                 tid, this_thr, this_thr->th.th_current_task,
468                 team->t.t_implicit_task_taskdata[tid].td_parent));
469 
470   KMP_DEBUG_ASSERT(this_thr != NULL);
471 
472   if (tid == 0) {
473     if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
474       team->t.t_implicit_task_taskdata[0].td_parent =
475           this_thr->th.th_current_task;
476       this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
477     }
478   } else {
479     team->t.t_implicit_task_taskdata[tid].td_parent =
480         team->t.t_implicit_task_taskdata[0].td_parent;
481     this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
482   }
483 
484   KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
485                 "curtask=%p "
486                 "parent_task=%p\n",
487                 tid, this_thr, this_thr->th.th_current_task,
488                 team->t.t_implicit_task_taskdata[tid].td_parent));
489 }
490 
491 // __kmp_task_start: bookkeeping for a task starting execution
492 //
493 // GTID: global thread id of calling thread
494 // task: task starting execution
495 // current_task: task suspending
496 static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
497                              kmp_taskdata_t *current_task) {
498   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
499   kmp_info_t *thread = __kmp_threads[gtid];
500 
501   KA_TRACE(10,
502            ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
503             gtid, taskdata, current_task));
504 
505   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
506 
507   // mark currently executing task as suspended
508   // TODO: GEH - make sure root team implicit task is initialized properly.
509   // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
510   current_task->td_flags.executing = 0;
511 
512 // Add task to stack if tied
513 #ifdef BUILD_TIED_TASK_STACK
514   if (taskdata->td_flags.tiedness == TASK_TIED) {
515     __kmp_push_task_stack(gtid, thread, taskdata);
516   }
517 #endif /* BUILD_TIED_TASK_STACK */
518 
519   // mark starting task as executing and as current task
520   thread->th.th_current_task = taskdata;
521 
522   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
523                    taskdata->td_flags.tiedness == TASK_UNTIED);
524   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
525                    taskdata->td_flags.tiedness == TASK_UNTIED);
526   taskdata->td_flags.started = 1;
527   taskdata->td_flags.executing = 1;
528   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
529   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
530 
531   // GEH TODO: shouldn't we pass some sort of location identifier here?
532   // APT: yes, we will pass location here.
533   // need to store current thread state (in a thread or taskdata structure)
534   // before setting work_state, otherwise wrong state is set after end of task
535 
536   KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
537 
538   return;
539 }
540 
541 #if OMPT_SUPPORT
542 //------------------------------------------------------------------------------
543 // __ompt_task_init:
544 //   Initialize OMPT fields maintained by a task. This will only be called after
545 //   ompt_start_tool, so we already know whether ompt is enabled or not.
546 
547 static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
548   // The calls to __ompt_task_init already have the ompt_enabled condition.
549   task->ompt_task_info.task_data.value = 0;
550   task->ompt_task_info.frame.exit_frame = ompt_data_none;
551   task->ompt_task_info.frame.enter_frame = ompt_data_none;
552   task->ompt_task_info.frame.exit_frame_flags = ompt_frame_runtime | ompt_frame_framepointer;
553   task->ompt_task_info.frame.enter_frame_flags = ompt_frame_runtime | ompt_frame_framepointer;
554   task->ompt_task_info.ndeps = 0;
555   task->ompt_task_info.deps = NULL;
556 }
557 
558 // __ompt_task_start:
559 //   Build and trigger task-begin event
560 static inline void __ompt_task_start(kmp_task_t *task,
561                                      kmp_taskdata_t *current_task,
562                                      kmp_int32 gtid) {
563   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
564   ompt_task_status_t status = ompt_task_switch;
565   if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
566     status = ompt_task_yield;
567     __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
568   }
569   /* let OMPT know that we're about to run this task */
570   if (ompt_enabled.ompt_callback_task_schedule) {
571     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
572         &(current_task->ompt_task_info.task_data), status,
573         &(taskdata->ompt_task_info.task_data));
574   }
575   taskdata->ompt_task_info.scheduling_parent = current_task;
576 }
577 
578 // __ompt_task_finish:
579 //   Build and trigger final task-schedule event
580 static inline void
581 __ompt_task_finish(kmp_task_t *task, kmp_taskdata_t *resumed_task,
582                    ompt_task_status_t status = ompt_task_complete) {
583   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
584   if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
585       taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
586     status = ompt_task_cancel;
587   }
588 
589   /* let OMPT know that we're returning to the callee task */
590   if (ompt_enabled.ompt_callback_task_schedule) {
591     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
592         &(taskdata->ompt_task_info.task_data), status,
593         &((resumed_task ? resumed_task
594                         : (taskdata->ompt_task_info.scheduling_parent
595                                ? taskdata->ompt_task_info.scheduling_parent
596                                : taskdata->td_parent))
597               ->ompt_task_info.task_data));
598   }
599 }
600 #endif
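// Illustrative sketch (not part of the runtime): a first-party tool observing
// the task-schedule events triggered above would register a handler with the
// signature below (the handler name is hypothetical; ompt_set_callback is the
// entry point a tool obtains through the OMPT lookup mechanism during
// ompt_start_tool initialization):
//
//   static void on_task_schedule(ompt_data_t *prior_task_data,
//                                ompt_task_status_t prior_task_status,
//                                ompt_data_t *next_task_data) {
//     // prior_task_status is ompt_task_switch, ompt_task_yield,
//     // ompt_task_complete or ompt_task_cancel, as set in the code above
//   }
//   ompt_set_callback(ompt_callback_task_schedule,
//                     (ompt_callback_t)&on_task_schedule);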
601 
602 template <bool ompt>
603 static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
604                                                kmp_task_t *task,
605                                                void *frame_address,
606                                                void *return_address) {
607   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
608   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
609 
610   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
611                 "current_task=%p\n",
612                 gtid, loc_ref, taskdata, current_task));
613 
614   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
615     // untied task needs to increment counter so that the task structure is not
616     // freed prematurely
617     kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
618     KMP_DEBUG_USE_VAR(counter);
619     KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
620                   "incremented for task %p\n",
621                   gtid, counter, taskdata));
622   }
623 
624   taskdata->td_flags.task_serial =
625       1; // Execute this task immediately, not deferred.
626   __kmp_task_start(gtid, task, current_task);
627 
628 #if OMPT_SUPPORT
629   if (ompt) {
630     if (current_task->ompt_task_info.frame.enter_frame.ptr == NULL) {
631       current_task->ompt_task_info.frame.enter_frame.ptr =
632           taskdata->ompt_task_info.frame.exit_frame.ptr = frame_address;
633       current_task->ompt_task_info.frame.enter_frame_flags =
634           taskdata->ompt_task_info.frame.exit_frame_flags = ompt_frame_application | ompt_frame_framepointer;
635     }
636     if (ompt_enabled.ompt_callback_task_create) {
637       ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
638       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
639           &(parent_info->task_data), &(parent_info->frame),
640           &(taskdata->ompt_task_info.task_data),
641           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
642           return_address);
643     }
644     __ompt_task_start(task, current_task, gtid);
645   }
646 #endif // OMPT_SUPPORT
647 
648   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
649                 loc_ref, taskdata));
650 }
651 
652 #if OMPT_SUPPORT
653 OMPT_NOINLINE
654 static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
655                                            kmp_task_t *task,
656                                            void *frame_address,
657                                            void *return_address) {
658   __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
659                                            return_address);
660 }
661 #endif // OMPT_SUPPORT
662 
663 // __kmpc_omp_task_begin_if0: report that a given serialized task has started
664 // execution
665 //
666 // loc_ref: source location information; points to beginning of task block.
667 // gtid: global thread number.
668 // task: task thunk for the started task.
669 void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
670                                kmp_task_t *task) {
671 #if OMPT_SUPPORT
672   if (UNLIKELY(ompt_enabled.enabled)) {
673     OMPT_STORE_RETURN_ADDRESS(gtid);
674     __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
675                                    OMPT_GET_FRAME_ADDRESS(1),
676                                    OMPT_LOAD_RETURN_ADDRESS(gtid));
677     return;
678   }
679 #endif
680   __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
681 }
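// Illustrative sketch (not part of the runtime): for a task whose if-clause
// evaluates to false, the compiler is expected to execute the task body
// immediately, bracketed by the begin_if0/complete_if0 calls; roughly (the
// outlined routine name and the shareds setup are hypothetical):
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags, sizeof_kmp_task_t,
//                                         sizeof_shareds, &outlined_entry);
//   /* ... initialize t->shareds and firstprivates ... */
//   __kmpc_omp_task_begin_if0(loc, gtid, t);
//   outlined_entry(gtid, t);                // undeferred execution of the body
//   __kmpc_omp_task_complete_if0(loc, gtid, t);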
682 
683 #ifdef TASK_UNUSED
684 // __kmpc_omp_task_begin: report that a given task has started execution
685 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
686 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
687   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
688 
689   KA_TRACE(
690       10,
691       ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
692        gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
693 
694   __kmp_task_start(gtid, task, current_task);
695 
696   KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
697                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
698   return;
699 }
700 #endif // TASK_UNUSED
701 
702 // __kmp_free_task: free the current task space and the space for shareds
703 //
704 // gtid: Global thread ID of calling thread
705 // taskdata: task to free
706 // thread: thread data structure of caller
707 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
708                             kmp_info_t *thread) {
709   KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
710                 taskdata));
711 
712   // Check to make sure all flags and counters have the correct values
713   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
714   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
715   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
716   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
717   KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
718                    taskdata->td_flags.task_serial == 1);
719   KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
720 
721   taskdata->td_flags.freed = 1;
722   ANNOTATE_HAPPENS_BEFORE(taskdata);
723 // deallocate the taskdata and shared variable blocks associated with this task
724 #if USE_FAST_MEMORY
725   __kmp_fast_free(thread, taskdata);
726 #else /* ! USE_FAST_MEMORY */
727   __kmp_thread_free(thread, taskdata);
728 #endif
729 
730   KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
731 }
732 
733 // __kmp_free_task_and_ancestors: free the current task and ancestors without
734 // children
735 //
736 // gtid: Global thread ID of calling thread
737 // taskdata: task to free
738 // thread: thread data structure of caller
739 static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
740                                           kmp_taskdata_t *taskdata,
741                                           kmp_info_t *thread) {
742   // Proxy tasks must always be allowed to free their parents
743   // because they can be run in background even in serial mode.
744   kmp_int32 team_serial =
745       (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
746       !taskdata->td_flags.proxy;
747   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
748 
749   kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
750   KMP_DEBUG_ASSERT(children >= 0);
751 
752   // Now, go up the ancestor tree to see if any ancestors can now be freed.
753   while (children == 0) {
754     kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
755 
756     KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
757                   "and freeing itself\n",
758                   gtid, taskdata));
759 
760     // --- Deallocate my ancestor task ---
761     __kmp_free_task(gtid, taskdata, thread);
762 
763     taskdata = parent_taskdata;
764 
765     if (team_serial)
766       return;
767     // Stop checking ancestors at implicit task instead of walking up ancestor
768     // tree to avoid premature deallocation of ancestors.
769     if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
770       if (taskdata->td_dephash) { // do we need to cleanup dephash?
771         int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
772         kmp_tasking_flags_t flags_old = taskdata->td_flags;
773         if (children == 0 && flags_old.complete == 1) {
774           kmp_tasking_flags_t flags_new = flags_old;
775           flags_new.complete = 0;
776           if (KMP_COMPARE_AND_STORE_ACQ32(
777                   RCAST(kmp_int32 *, &taskdata->td_flags),
778                   *RCAST(kmp_int32 *, &flags_old),
779                   *RCAST(kmp_int32 *, &flags_new))) {
780             KA_TRACE(100, ("__kmp_free_task_and_ancestors: T#%d cleans "
781                            "dephash of implicit task %p\n",
782                            gtid, taskdata));
783             // cleanup dephash of finished implicit task
784             __kmp_dephash_free_entries(thread, taskdata->td_dephash);
785           }
786         }
787       }
788       return;
789     }
790     // Predecrement simulated by "- 1" calculation
791     children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
792     KMP_DEBUG_ASSERT(children >= 0);
793   }
794 
795   KA_TRACE(
796       20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
797            "not freeing it yet\n",
798            gtid, taskdata, children));
799 }
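// Illustrative note (not part of the runtime): td_allocated_child_tasks counts
// the task itself plus its allocated explicit children, so a parent with one
// outstanding child sits at 2. A finishing child decrements its parent's
// counter; the loop above frees a task only once its own count (after the
// simulated predecrement) reaches 0, then repeats the test one level up.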
800 
801 // __kmp_task_finish: bookkeeping to do when a task finishes execution
802 //
803 // gtid: global thread ID for calling thread
804 // task: task to be finished
805 // resumed_task: task to be resumed.  (may be NULL if task is serialized)
806 template <bool ompt>
807 static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
808                               kmp_taskdata_t *resumed_task) {
809   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
810   kmp_info_t *thread = __kmp_threads[gtid];
811   kmp_task_team_t *task_team =
812       thread->th.th_task_team; // might be NULL for serial teams...
813   kmp_int32 children = 0;
814 
815   KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
816                 "task %p\n",
817                 gtid, taskdata, resumed_task));
818 
819   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
820 
821 // Pop task from stack if tied
822 #ifdef BUILD_TIED_TASK_STACK
823   if (taskdata->td_flags.tiedness == TASK_TIED) {
824     __kmp_pop_task_stack(gtid, thread, taskdata);
825   }
826 #endif /* BUILD_TIED_TASK_STACK */
827 
828   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
829     // untied task needs to check the counter so that the task structure is not
830     // freed prematurely
831     kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
832     KA_TRACE(
833         20,
834         ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
835          gtid, counter, taskdata));
836     if (counter > 0) {
837       // untied task is not done, to be continued possibly by other thread, do
838       // not free it now
839       if (resumed_task == NULL) {
840         KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
841         resumed_task = taskdata->td_parent; // In a serialized task, the resumed
842         // task is the parent
843       }
844       thread->th.th_current_task = resumed_task; // restore current_task
845       resumed_task->td_flags.executing = 1; // resume previous task
846       KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
847                     "resuming task %p\n",
848                     gtid, taskdata, resumed_task));
849       return;
850     }
851   }
852 #if OMPT_SUPPORT
853   if (ompt)
854     __ompt_task_finish(task, resumed_task);
855 #endif
856 
857   // Check mutexinoutset dependencies, release locks
858   kmp_depnode_t *node = taskdata->td_depnode;
859   if (node && (node->dn.mtx_num_locks < 0)) {
860     // negative num_locks means all locks were acquired
861     node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
862     for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
863       KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
864       __kmp_release_lock(node->dn.mtx_locks[i], gtid);
865     }
866   }
867 
868   // bookkeeping for resuming task:
869   // GEH - note tasking_ser => task_serial
870   KMP_DEBUG_ASSERT(
871       (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
872       taskdata->td_flags.task_serial);
873   if (taskdata->td_flags.task_serial) {
874     if (resumed_task == NULL) {
875       resumed_task = taskdata->td_parent; // In a serialized task, the resumed
876       // task is the parent
877     }
878   } else {
879     KMP_DEBUG_ASSERT(resumed_task !=
880                      NULL); // verify that resumed task is passed as argument
881   }
882 
883   /* If the task's destructor thunk flag has been set, we need to invoke the
884      destructor thunk that has been generated by the compiler. The code is
885      placed here, since at this point other tasks might have been released
886      hence overlapping the destructor invocations with some other work in the
887      released tasks.  The OpenMP spec is not specific on when the destructors
888      are invoked, so we should be free to choose. */
889   if (taskdata->td_flags.destructors_thunk) {
890     kmp_routine_entry_t destr_thunk = task->data1.destructors;
891     KMP_ASSERT(destr_thunk);
892     destr_thunk(gtid, task);
893   }
894 
895   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
896   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
897   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
898 
899   bool detach = false;
900   if (taskdata->td_flags.detachable == TASK_DETACHABLE) {
901     if (taskdata->td_allow_completion_event.type ==
902         KMP_EVENT_ALLOW_COMPLETION) {
903       // event hasn't been fulfilled yet. Try to detach task.
904       __kmp_acquire_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
905       if (taskdata->td_allow_completion_event.type ==
906           KMP_EVENT_ALLOW_COMPLETION) {
907         // task finished execution
908         KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
909         taskdata->td_flags.executing = 0; // suspend the finishing task
910         // no access to taskdata after this point!
911         // __kmp_fulfill_event might free taskdata at any time from now
912         taskdata->td_flags.proxy = TASK_PROXY; // proxify!
913         detach = true;
914       }
915       __kmp_release_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
916     }
917   }
918 
919   if (!detach) {
920     taskdata->td_flags.complete = 1; // mark the task as completed
921 
922     // Only need to keep track of count if team parallel and tasking not
923     // serialized, or task is detachable and event has already been fulfilled
924     if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) ||
925         taskdata->td_flags.detachable == TASK_DETACHABLE) {
926       // Predecrement simulated by "- 1" calculation
927       children =
928           KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
929       KMP_DEBUG_ASSERT(children >= 0);
930       if (taskdata->td_taskgroup)
931         KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
932       __kmp_release_deps(gtid, taskdata);
933     } else if (task_team && task_team->tt.tt_found_proxy_tasks) {
934       // if we found proxy tasks there could exist a dependency chain
935       // with the proxy task as origin
936       __kmp_release_deps(gtid, taskdata);
937     }
938     // td_flags.executing must be marked as 0 after __kmp_release_deps has been
939     // called. Otherwise, if a task is executed immediately from the
940     // release_deps code, the flag will be reset to 1 again by this same
941     // function
942     KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
943     taskdata->td_flags.executing = 0; // suspend the finishing task
944   }
945 
947   KA_TRACE(
948       20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
949            gtid, taskdata, children));
950 
951   // Free this task and then ancestor tasks if they have no children.
952   // Restore th_current_task first as suggested by John:
953   // johnmc: if an asynchronous inquiry peers into the runtime system
954   // it doesn't see the freed task as the current task.
955   thread->th.th_current_task = resumed_task;
956   if (!detach)
957     __kmp_free_task_and_ancestors(gtid, taskdata, thread);
958 
959   // TODO: GEH - make sure root team implicit task is initialized properly.
960   // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
961   resumed_task->td_flags.executing = 1; // resume previous task
962 
963   KA_TRACE(
964       10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
965            gtid, taskdata, resumed_task));
966 
967   return;
968 }
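// Illustrative sketch (not part of the runtime): the detachable-task path above
// corresponds to the OpenMP 5.0 detach clause. User code typically lets the
// task body return before the real work completes and fulfills the event later
// (start_async_io and the completion handler are hypothetical user routines):
//
//   omp_event_handle_t ev;
//   #pragma omp task detach(ev)
//   { start_async_io(ev); }    // body returns; the task is proxified above
//   /* later, from the asynchronous completion handler: */
//   omp_fulfill_event(ev);     // only now can the task finish and be freed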
969 
970 template <bool ompt>
971 static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
972                                                   kmp_int32 gtid,
973                                                   kmp_task_t *task) {
974   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
975                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
976   // this routine will provide task to resume
977   __kmp_task_finish<ompt>(gtid, task, NULL);
978 
979   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
980                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
981 
982 #if OMPT_SUPPORT
983   if (ompt) {
984     ompt_frame_t *ompt_frame;
985     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
986     ompt_frame->enter_frame = ompt_data_none;
987     ompt_frame->enter_frame_flags = ompt_frame_runtime | ompt_frame_framepointer;
988   }
989 #endif
990 
991   return;
992 }
993 
994 #if OMPT_SUPPORT
995 OMPT_NOINLINE
996 void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
997                                        kmp_task_t *task) {
998   __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
999 }
1000 #endif // OMPT_SUPPORT
1001 
1002 // __kmpc_omp_task_complete_if0: report that a task has completed execution
1003 //
1004 // loc_ref: source location information; points to end of task block.
1005 // gtid: global thread number.
1006 // task: task thunk for the completed task.
1007 void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
1008                                   kmp_task_t *task) {
1009 #if OMPT_SUPPORT
1010   if (UNLIKELY(ompt_enabled.enabled)) {
1011     __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
1012     return;
1013   }
1014 #endif
1015   __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
1016 }
1017 
1018 #ifdef TASK_UNUSED
1019 // __kmpc_omp_task_complete: report that a task has completed execution
1020 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
1021 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
1022                               kmp_task_t *task) {
1023   KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
1024                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1025 
1026   __kmp_task_finish<false>(gtid, task,
1027                            NULL); // Not sure how to find task to resume
1028 
1029   KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
1030                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1031   return;
1032 }
1033 #endif // TASK_UNUSED
1034 
1035 // __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
1036 // task for a given thread
1037 //
1038 // loc_ref:  reference to source location of parallel region
1039 // this_thr:  thread data structure corresponding to implicit task
1040 // team: team for this_thr
1041 // tid: thread id of given thread within team
1042 // set_curr_task: TRUE if need to push current task to thread
1043 // NOTE: Routine does not set up the implicit task ICVS.  This is assumed to
1044 // have already been done elsewhere.
1045 // TODO: Get better loc_ref.  Value passed in may be NULL
1046 void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
1047                               kmp_team_t *team, int tid, int set_curr_task) {
1048   kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
1049 
1050   KF_TRACE(
1051       10,
1052       ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
1053        tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
1054 
1055   task->td_task_id = KMP_GEN_TASK_ID();
1056   task->td_team = team;
1057   //    task->td_parent   = NULL;  // fix for CQ230101 (broken parent task info
1058   //    in debugger)
1059   task->td_ident = loc_ref;
1060   task->td_taskwait_ident = NULL;
1061   task->td_taskwait_counter = 0;
1062   task->td_taskwait_thread = 0;
1063 
1064   task->td_flags.tiedness = TASK_TIED;
1065   task->td_flags.tasktype = TASK_IMPLICIT;
1066   task->td_flags.proxy = TASK_FULL;
1067 
1068   // All implicit tasks are executed immediately, not deferred
1069   task->td_flags.task_serial = 1;
1070   task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1071   task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1072 
1073   task->td_flags.started = 1;
1074   task->td_flags.executing = 1;
1075   task->td_flags.complete = 0;
1076   task->td_flags.freed = 0;
1077 
1078   task->td_depnode = NULL;
1079   task->td_last_tied = task;
1080   task->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1081 
1082   if (set_curr_task) { // only do this init first time thread is created
1083     KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
1084     // Not used: don't need to deallocate implicit task
1085     KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
1086     task->td_taskgroup = NULL; // An implicit task does not have taskgroup
1087     task->td_dephash = NULL;
1088     __kmp_push_current_task_to_thread(this_thr, team, tid);
1089   } else {
1090     KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
1091     KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
1092   }
1093 
1094 #if OMPT_SUPPORT
1095   if (UNLIKELY(ompt_enabled.enabled))
1096     __ompt_task_init(task, tid);
1097 #endif
1098 
1099   KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
1100                 team, task));
1101 }
1102 
1103 // __kmp_finish_implicit_task: Release resources associated to implicit tasks
1104 // at the end of parallel regions. Some resources are kept for reuse in the next
1105 // parallel region.
1106 //
1107 // thread:  thread data structure corresponding to implicit task
1108 void __kmp_finish_implicit_task(kmp_info_t *thread) {
1109   kmp_taskdata_t *task = thread->th.th_current_task;
1110   if (task->td_dephash) {
1111     int children;
1112     task->td_flags.complete = 1;
1113     children = KMP_ATOMIC_LD_ACQ(&task->td_incomplete_child_tasks);
1114     kmp_tasking_flags_t flags_old = task->td_flags;
1115     if (children == 0 && flags_old.complete == 1) {
1116       kmp_tasking_flags_t flags_new = flags_old;
1117       flags_new.complete = 0;
1118       if (KMP_COMPARE_AND_STORE_ACQ32(RCAST(kmp_int32 *, &task->td_flags),
1119                                       *RCAST(kmp_int32 *, &flags_old),
1120                                       *RCAST(kmp_int32 *, &flags_new))) {
1121         KA_TRACE(100, ("__kmp_finish_implicit_task: T#%d cleans "
1122                        "dephash of implicit task %p\n",
1123                        thread->th.th_info.ds.ds_gtid, task));
1124         __kmp_dephash_free_entries(thread, task->td_dephash);
1125       }
1126     }
1127   }
1128 }
1129 
1130 // __kmp_free_implicit_task: Release resources associated to implicit tasks
1131 // when these tasks are destroyed
1132 //
1133 // thread:  thread data structure corresponding to implicit task
1134 void __kmp_free_implicit_task(kmp_info_t *thread) {
1135   kmp_taskdata_t *task = thread->th.th_current_task;
1136   if (task && task->td_dephash) {
1137     __kmp_dephash_free(thread, task->td_dephash);
1138     task->td_dephash = NULL;
1139   }
1140 }
1141 
1142 // Round up a size to a multiple of val (a power of two): Used to insert
1143 // padding between structures co-allocated using a single malloc() call
1144 static size_t __kmp_round_up_to_val(size_t size, size_t val) {
1145   if (size & (val - 1)) {
1146     size &= ~(val - 1);
1147     if (size <= KMP_SIZE_T_MAX - val) {
1148       size += val; // Round up if there is no overflow.
1149     }
1150   }
1151   return size;
1152 } // __kmp_round_up_to_val
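// Illustrative sketch (not part of the runtime): with val a power of two such
// as sizeof(void *) == 8, a size that is not already a multiple of 8 is
// truncated and then bumped to the next multiple, e.g.:
//
//   __kmp_round_up_to_val(13, 8); // 13 & 7 != 0 -> (13 & ~7) + 8 == 16
//   __kmp_round_up_to_val(16, 8); // already aligned, returned unchanged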
1153 
1154 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1155 //
1156 // loc_ref: source location information
1157 // gtid: global thread number.
1158 // flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1159 // task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1160 // sizeof_kmp_task_t:  Size in bytes of kmp_task_t data structure including
1161 // private vars accessed in task.
1162 // sizeof_shareds:  Size in bytes of array of pointers to shared vars accessed
1163 // in task.
1164 // task_entry: Pointer to task code entry point generated by compiler.
1165 // returns: a pointer to the allocated kmp_task_t structure (task).
1166 kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1167                              kmp_tasking_flags_t *flags,
1168                              size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1169                              kmp_routine_entry_t task_entry) {
1170   kmp_task_t *task;
1171   kmp_taskdata_t *taskdata;
1172   kmp_info_t *thread = __kmp_threads[gtid];
1173   kmp_team_t *team = thread->th.th_team;
1174   kmp_taskdata_t *parent_task = thread->th.th_current_task;
1175   size_t shareds_offset;
1176 
1177   if (!TCR_4(__kmp_init_middle))
1178     __kmp_middle_initialize();
1179 
1180   KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1181                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1182                 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1183                 sizeof_shareds, task_entry));
1184 
1185   if (parent_task->td_flags.final) {
1186     if (flags->merged_if0) {
1187     }
1188     flags->final = 1;
1189   }
1190   if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
1191     // Untied task encountered causes the TSC algorithm to check entire deque of
1192     // the victim thread. If no untied task encountered, then checking the head
1193     // of the deque should be enough.
1194     KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1195   }
1196 
1197   // Detachable tasks are not proxy tasks yet but could be in the future.
1198   // Doing the tasking setup when that happens is too late, so the task team
1199   // is prepared here for both proxy and detachable tasks.
1200   if (flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE) {
1201     if (flags->proxy == TASK_PROXY) {
1202       flags->tiedness = TASK_UNTIED;
1203       flags->merged_if0 = 1;
1204     }
1205     /* are we running in a serialized parallel region or in
1206        tskm_immediate_exec mode... we need tasking support enabled */
1207     if ((thread->th.th_task_team) == NULL) {
1208       /* This should only happen if the team is serialized;
1209          set up a task team and propagate it to the thread */
1210       KMP_DEBUG_ASSERT(team->t.t_serialized);
1211       KA_TRACE(30,
1212                ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1213                 gtid));
1214       __kmp_task_team_setup(
1215           thread, team,
1216           1); // 1 indicates setup the current team regardless of nthreads
1217       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1218     }
1219     kmp_task_team_t *task_team = thread->th.th_task_team;
1220 
1221     /* tasking must be enabled now as the task might not be pushed */
1222     if (!KMP_TASKING_ENABLED(task_team)) {
1223       KA_TRACE(
1224           30,
1225           ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1226       __kmp_enable_tasking(task_team, thread);
1227       kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1228       kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1229       // No lock needed since only owner can allocate
1230       if (thread_data->td.td_deque == NULL) {
1231         __kmp_alloc_task_deque(thread, thread_data);
1232       }
1233     }
1234 
1235     if (task_team->tt.tt_found_proxy_tasks == FALSE)
1236       TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1237   }
1238 
1239   // Calculate shared structure offset including padding after kmp_task_t struct
1240   // to align pointers in shared struct
1241   shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1242   shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
1243 
1244   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1245   KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1246                 shareds_offset));
1247   KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1248                 sizeof_shareds));
1249 
1250 // Avoid double allocation here by combining shareds with taskdata
1251 #if USE_FAST_MEMORY
1252   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1253                                                                sizeof_shareds);
1254 #else /* ! USE_FAST_MEMORY */
1255   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1256                                                                sizeof_shareds);
1257 #endif /* USE_FAST_MEMORY */
1258   ANNOTATE_HAPPENS_AFTER(taskdata);
1259 
1260   task = KMP_TASKDATA_TO_TASK(taskdata);
1261 
1262 // Make sure task & taskdata are aligned appropriately
1263 #if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
1264   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1265   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1266 #else
1267   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1268   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1269 #endif
1270   if (sizeof_shareds > 0) {
1271     // Avoid double allocation here by combining shareds with taskdata
1272     task->shareds = &((char *)taskdata)[shareds_offset];
1273     // Make sure shareds struct is aligned to pointer size
1274     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1275                      0);
1276   } else {
1277     task->shareds = NULL;
1278   }
1279   task->routine = task_entry;
1280   task->part_id = 0; // AC: Always start with 0 part id
1281 
1282   taskdata->td_task_id = KMP_GEN_TASK_ID();
1283   taskdata->td_team = team;
1284   taskdata->td_alloc_thread = thread;
1285   taskdata->td_parent = parent_task;
1286   taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1287   KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1288   taskdata->td_ident = loc_ref;
1289   taskdata->td_taskwait_ident = NULL;
1290   taskdata->td_taskwait_counter = 0;
1291   taskdata->td_taskwait_thread = 0;
1292   KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1293   // avoid copying icvs for proxy tasks
1294   if (flags->proxy == TASK_FULL)
1295     copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1296 
1297   taskdata->td_flags.tiedness = flags->tiedness;
1298   taskdata->td_flags.final = flags->final;
1299   taskdata->td_flags.merged_if0 = flags->merged_if0;
1300   taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
1301   taskdata->td_flags.proxy = flags->proxy;
1302   taskdata->td_flags.detachable = flags->detachable;
1303   taskdata->td_task_team = thread->th.th_task_team;
1304   taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1305   taskdata->td_flags.tasktype = TASK_EXPLICIT;
1306 
1307   // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1308   taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1309 
1310   // GEH - TODO: fix this to copy parent task's value of team_serial flag
1311   taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1312 
  // GEH - Note: we serialize the task if the team is serialized to make sure
  // implicit parallel region tasks are not left to execute until program
  // termination. Executing immediately also helps locality.
1316 
1317   taskdata->td_flags.task_serial =
1318       (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1319        taskdata->td_flags.tasking_ser);
1320 
1321   taskdata->td_flags.started = 0;
1322   taskdata->td_flags.executing = 0;
1323   taskdata->td_flags.complete = 0;
1324   taskdata->td_flags.freed = 0;
1325 
1326   taskdata->td_flags.native = flags->native;
1327 
1328   KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
  // start at one because the count includes the current task and its children
1330   KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1331   taskdata->td_taskgroup =
1332       parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1333   taskdata->td_dephash = NULL;
1334   taskdata->td_depnode = NULL;
1335   if (flags->tiedness == TASK_UNTIED)
1336     taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1337   else
1338     taskdata->td_last_tied = taskdata;
1339   taskdata->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1340 #if OMPT_SUPPORT
1341   if (UNLIKELY(ompt_enabled.enabled))
1342     __ompt_task_init(taskdata, gtid);
1343 #endif
  // Only need to keep track of child task counts if team parallel and tasking
  // not serialized or if it is a proxy or detachable task
  if (flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE ||
      !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
1350     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1351     if (parent_task->td_taskgroup)
1352       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks
    // since implicit tasks are not deallocated
1355     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1356       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1357     }
1358   }
1359 
1360   KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1361                 gtid, taskdata, taskdata->td_parent));
1362   ANNOTATE_HAPPENS_BEFORE(task);
1363 
1364   return task;
1365 }
1366 
1367 kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1368                                   kmp_int32 flags, size_t sizeof_kmp_task_t,
1369                                   size_t sizeof_shareds,
1370                                   kmp_routine_entry_t task_entry) {
1371   kmp_task_t *retval;
1372   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1373 
1374   input_flags->native = FALSE;
1375 // __kmp_task_alloc() sets up all other runtime flags
1376 
1377   KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s %s) "
1378                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1379                 gtid, loc_ref, input_flags->tiedness ? "tied  " : "untied",
1380                 input_flags->proxy ? "proxy" : "",
1381                 input_flags->detachable ? "detachable" : "", sizeof_kmp_task_t,
1382                 sizeof_shareds, task_entry));
1383 
1384   retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1385                             sizeof_shareds, task_entry);
1386 
1387   KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1388 
1389   return retval;
1390 }
1391 
1392 kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1393                                          kmp_int32 flags,
1394                                          size_t sizeof_kmp_task_t,
1395                                          size_t sizeof_shareds,
1396                                          kmp_routine_entry_t task_entry,
1397                                          kmp_int64 device_id) {
1398   return __kmpc_omp_task_alloc(loc_ref, gtid, flags, sizeof_kmp_task_t,
1399                                sizeof_shareds, task_entry);
1400 }
1401 
1402 /*!
1403 @ingroup TASKING
1404 @param loc_ref location of the original task directive
1405 @param gtid Global Thread ID of encountering thread
1406 @param new_task task thunk allocated by __kmpc_omp_task_alloc() for the ''new
1407 task''
1408 @param naffins Number of affinity items
1409 @param affin_list List of affinity items
@return Returns non-zero if registering affinity information was not successful.
Returns 0 if registration was successful.
1412 This entry registers the affinity information attached to a task with the task
1413 thunk structure kmp_taskdata_t.
1414 */
1415 kmp_int32
1416 __kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid,
1417                                   kmp_task_t *new_task, kmp_int32 naffins,
1418                                   kmp_task_affinity_info_t *affin_list) {
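  // Currently a no-op: the affinity hints are accepted but not used by the
  // runtime, so just report successful registration.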
1419   return 0;
1420 }
1421 
1422 //  __kmp_invoke_task: invoke the specified task
1423 //
1424 // gtid: global thread ID of caller
1425 // task: the task to invoke
1426 // current_task: the task to resume after task invocation
1427 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1428                               kmp_taskdata_t *current_task) {
1429   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1430   kmp_info_t *thread;
1431   int discard = 0 /* false */;
1432   KA_TRACE(
1433       30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1434            gtid, taskdata, current_task));
1435   KMP_DEBUG_ASSERT(task);
1436   if (taskdata->td_flags.proxy == TASK_PROXY &&
1437       taskdata->td_flags.complete == 1) {
1438     // This is a proxy task that was already completed but it needs to run
1439     // its bottom-half finish
1440     KA_TRACE(
1441         30,
1442         ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1443          gtid, taskdata));
1444 
1445     __kmp_bottom_half_finish_proxy(gtid, task);
1446 
1447     KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1448                   "proxy task %p, resuming task %p\n",
1449                   gtid, taskdata, current_task));
1450 
1451     return;
1452   }
1453 
1454 #if OMPT_SUPPORT
1455   // For untied tasks, the first task executed only calls __kmpc_omp_task and
1456   // does not execute code.
1457   ompt_thread_info_t oldInfo;
1458   if (UNLIKELY(ompt_enabled.enabled)) {
    // Store the thread's OMPT state and restore it after the task
1460     thread = __kmp_threads[gtid];
1461     oldInfo = thread->th.ompt_thread_info;
1462     thread->th.ompt_thread_info.wait_id = 0;
1463     thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1464                                             ? ompt_state_work_serial
1465                                             : ompt_state_work_parallel;
1466     taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1467   }
1468 #endif
1469 
1470   // Proxy tasks are not handled by the runtime
1471   if (taskdata->td_flags.proxy != TASK_PROXY) {
1472     ANNOTATE_HAPPENS_AFTER(task);
1473     __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1474   }
1475 
1476   // TODO: cancel tasks if the parallel region has also been cancelled
1477   // TODO: check if this sequence can be hoisted above __kmp_task_start
1478   // if cancellation has been enabled for this run ...
1479   if (__kmp_omp_cancellation) {
1480     thread = __kmp_threads[gtid];
1481     kmp_team_t *this_team = thread->th.th_team;
1482     kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1483     if ((taskgroup && taskgroup->cancel_request) ||
1484         (this_team->t.t_cancel_request == cancel_parallel)) {
1485 #if OMPT_SUPPORT && OMPT_OPTIONAL
1486       ompt_data_t *task_data;
1487       if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1488         __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1489         ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1490             task_data,
1491             ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1492                                                       : ompt_cancel_parallel) |
1493                 ompt_cancel_discarded_task,
1494             NULL);
1495       }
1496 #endif
1497       KMP_COUNT_BLOCK(TASK_cancelled);
      // the task's taskgroup or the enclosing parallel region has been
      // cancelled, so discard the task
1499       discard = 1 /* true */;
1500     }
1501   }
1502 
1503   // Invoke the task routine and pass in relevant data.
1504   // Thunks generated by gcc take a different argument list.
1505   if (!discard) {
1506     if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1507       taskdata->td_last_tied = current_task->td_last_tied;
1508       KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1509     }
1510 #if KMP_STATS_ENABLED
1511     KMP_COUNT_BLOCK(TASK_executed);
1512     switch (KMP_GET_THREAD_STATE()) {
1513     case FORK_JOIN_BARRIER:
1514       KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1515       break;
1516     case PLAIN_BARRIER:
1517       KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1518       break;
1519     case TASKYIELD:
1520       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1521       break;
1522     case TASKWAIT:
1523       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1524       break;
1525     case TASKGROUP:
1526       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1527       break;
1528     default:
1529       KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1530       break;
1531     }
1532 #endif // KMP_STATS_ENABLED
1533 
1534 // OMPT task begin
1535 #if OMPT_SUPPORT
1536     if (UNLIKELY(ompt_enabled.enabled))
1537       __ompt_task_start(task, current_task, gtid);
1538 #endif
1539 
1540 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1541     kmp_uint64 cur_time;
1542     kmp_int32 kmp_itt_count_task =
1543         __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1544         current_task->td_flags.tasktype == TASK_IMPLICIT;
1545     if (kmp_itt_count_task) {
1546       thread = __kmp_threads[gtid];
1547       // Time outer level explicit task on barrier for adjusting imbalance time
1548       if (thread->th.th_bar_arrive_time)
1549         cur_time = __itt_get_timestamp();
1550       else
1551         kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1552     }
1553 #endif
1554 
1555 #ifdef KMP_GOMP_COMPAT
1556     if (taskdata->td_flags.native) {
1557       ((void (*)(void *))(*(task->routine)))(task->shareds);
1558     } else
1559 #endif /* KMP_GOMP_COMPAT */
1560     {
1561       (*(task->routine))(gtid, task);
1562     }
1563     KMP_POP_PARTITIONED_TIMER();
1564 
1565 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1566     if (kmp_itt_count_task) {
1567       // Barrier imbalance - adjust arrive time with the task duration
1568       thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1569     }
1570 #endif
1571 
1572   }
1573 
1574 
1575   // Proxy tasks are not handled by the runtime
1576   if (taskdata->td_flags.proxy != TASK_PROXY) {
1577     ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
1578 #if OMPT_SUPPORT
1579     if (UNLIKELY(ompt_enabled.enabled)) {
1580       thread->th.ompt_thread_info = oldInfo;
1581       if (taskdata->td_flags.tiedness == TASK_TIED) {
1582         taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1583       }
1584       __kmp_task_finish<true>(gtid, task, current_task);
1585     } else
1586 #endif
1587       __kmp_task_finish<false>(gtid, task, current_task);
1588   }
1589 
1590   KA_TRACE(
1591       30,
1592       ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1593        gtid, taskdata, current_task));
1594   return;
1595 }
1596 
1597 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1598 //
1599 // loc_ref: location of original task pragma (ignored)
1600 // gtid: Global Thread ID of encountering thread
1601 // new_task: task thunk allocated by __kmp_omp_task_alloc() for the ''new task''
1602 // Returns:
1603 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1604 //    be resumed later.
1605 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1606 //    resumed later.
1607 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1608                                 kmp_task_t *new_task) {
1609   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1610 
1611   KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1612                 loc_ref, new_taskdata));
1613 
1614 #if OMPT_SUPPORT
1615   kmp_taskdata_t *parent;
1616   if (UNLIKELY(ompt_enabled.enabled)) {
1617     parent = new_taskdata->td_parent;
1618     if (ompt_enabled.ompt_callback_task_create) {
1619       ompt_data_t task_data = ompt_data_none;
1620       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1621           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1622           parent ? &(parent->ompt_task_info.frame) : NULL,
1623           &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1624           OMPT_GET_RETURN_ADDRESS(0));
1625     }
1626   }
1627 #endif
1628 
1629   /* Should we execute the new task or queue it? For now, let's just always try
1630      to queue it.  If the queue fills up, then we'll execute it.  */
1631 
1632   if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1633   { // Execute this task immediately
1634     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1635     new_taskdata->td_flags.task_serial = 1;
1636     __kmp_invoke_task(gtid, new_task, current_task);
1637   }
1638 
1639   KA_TRACE(
1640       10,
1641       ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1642        "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
1643        gtid, loc_ref, new_taskdata));
1644 
1645   ANNOTATE_HAPPENS_BEFORE(new_task);
1646 #if OMPT_SUPPORT
1647   if (UNLIKELY(ompt_enabled.enabled)) {
1648     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1649   }
1650 #endif
1651   return TASK_CURRENT_NOT_QUEUED;
1652 }
1653 
1654 // __kmp_omp_task: Schedule a non-thread-switchable task for execution
1655 //
1656 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmp_omp_task_alloc()
1658 // serialize_immediate: if TRUE then if the task is executed immediately its
1659 // execution will be serialized
1660 // Returns:
1661 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1662 //    be resumed later.
1663 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1664 //    resumed later.
1665 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1666                          bool serialize_immediate) {
1667   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1668 
1669   /* Should we execute the new task or queue it? For now, let's just always try
1670      to queue it.  If the queue fills up, then we'll execute it.  */
1671   if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1672       __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1673   { // Execute this task immediately
1674     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1675     if (serialize_immediate)
1676       new_taskdata->td_flags.task_serial = 1;
1677     __kmp_invoke_task(gtid, new_task, current_task);
1678   }
1679 
1680   ANNOTATE_HAPPENS_BEFORE(new_task);
1681   return TASK_CURRENT_NOT_QUEUED;
1682 }
1683 
1684 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1685 // non-thread-switchable task from the parent thread only!
1686 //
1687 // loc_ref: location of original task pragma (ignored)
1688 // gtid: Global Thread ID of encountering thread
1689 // new_task: non-thread-switchable task thunk allocated by
1690 // __kmp_omp_task_alloc()
1691 // Returns:
1692 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1693 //    be resumed later.
1694 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1695 //    resumed later.
1696 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1697                           kmp_task_t *new_task) {
1698   kmp_int32 res;
1699   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1700 
1701 #if KMP_DEBUG || OMPT_SUPPORT
1702   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1703 #endif
1704   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1705                 new_taskdata));
1706 
1707 #if OMPT_SUPPORT
1708   kmp_taskdata_t *parent = NULL;
1709   if (UNLIKELY(ompt_enabled.enabled)) {
1710     if (!new_taskdata->td_flags.started) {
1711       OMPT_STORE_RETURN_ADDRESS(gtid);
1712       parent = new_taskdata->td_parent;
1713       if (!parent->ompt_task_info.frame.enter_frame.ptr) {
1714         parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1715       }
1716       if (ompt_enabled.ompt_callback_task_create) {
1717         ompt_data_t task_data = ompt_data_none;
1718         ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1719             parent ? &(parent->ompt_task_info.task_data) : &task_data,
1720             parent ? &(parent->ompt_task_info.frame) : NULL,
1721             &(new_taskdata->ompt_task_info.task_data),
1722             ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1723             OMPT_LOAD_RETURN_ADDRESS(gtid));
1724       }
1725     } else {
1726       // We are scheduling the continuation of an UNTIED task.
1727       // Scheduling back to the parent task.
1728       __ompt_task_finish(new_task,
1729                          new_taskdata->ompt_task_info.scheduling_parent,
1730                          ompt_task_switch);
1731       new_taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1732     }
1733   }
1734 #endif
1735 
1736   res = __kmp_omp_task(gtid, new_task, true);
1737 
1738   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1739                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1740                 gtid, loc_ref, new_taskdata));
1741 #if OMPT_SUPPORT
1742   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1743     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1744   }
1745 #endif
1746   return res;
1747 }
1748 
1749 // __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
1750 // a taskloop task with the correct OMPT return address
1751 //
1752 // loc_ref: location of original task pragma (ignored)
1753 // gtid: Global Thread ID of encountering thread
1754 // new_task: non-thread-switchable task thunk allocated by
1755 // __kmp_omp_task_alloc()
1756 // codeptr_ra: return address for OMPT callback
1757 // Returns:
1758 //    TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1759 //    be resumed later.
1760 //    TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1761 //    resumed later.
1762 kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
1763                                   kmp_task_t *new_task, void *codeptr_ra) {
1764   kmp_int32 res;
1765   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1766 
1767 #if KMP_DEBUG || OMPT_SUPPORT
1768   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1769 #endif
1770   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1771                 new_taskdata));
1772 
1773 #if OMPT_SUPPORT
1774   kmp_taskdata_t *parent = NULL;
1775   if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
1776     parent = new_taskdata->td_parent;
1777     if (!parent->ompt_task_info.frame.enter_frame.ptr)
1778       parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1779     if (ompt_enabled.ompt_callback_task_create) {
1780       ompt_data_t task_data = ompt_data_none;
1781       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1782           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1783           parent ? &(parent->ompt_task_info.frame) : NULL,
1784           &(new_taskdata->ompt_task_info.task_data),
1785           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1786           codeptr_ra);
1787     }
1788   }
1789 #endif
1790 
1791   res = __kmp_omp_task(gtid, new_task, true);
1792 
1793   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1794                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1795                 gtid, loc_ref, new_taskdata));
1796 #if OMPT_SUPPORT
1797   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1798     parent->ompt_task_info.frame.enter_frame = ompt_data_none;
1799   }
1800 #endif
1801   return res;
1802 }
1803 
1804 template <bool ompt>
1805 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1806                                               void *frame_address,
1807                                               void *return_address) {
1808   kmp_taskdata_t *taskdata;
1809   kmp_info_t *thread;
1810   int thread_finished = FALSE;
1811   KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
1812 
1813   KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
1814 
1815   if (__kmp_tasking_mode != tskm_immediate_exec) {
1816     thread = __kmp_threads[gtid];
1817     taskdata = thread->th.th_current_task;
1818 
1819 #if OMPT_SUPPORT && OMPT_OPTIONAL
1820     ompt_data_t *my_task_data;
1821     ompt_data_t *my_parallel_data;
1822 
1823     if (ompt) {
1824       my_task_data = &(taskdata->ompt_task_info.task_data);
1825       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1826 
1827       taskdata->ompt_task_info.frame.enter_frame.ptr = frame_address;
1828 
1829       if (ompt_enabled.ompt_callback_sync_region) {
1830         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1831             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1832             my_task_data, return_address);
1833       }
1834 
1835       if (ompt_enabled.ompt_callback_sync_region_wait) {
1836         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1837             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1838             my_task_data, return_address);
1839       }
1840     }
1841 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1842 
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1845 #if USE_ITT_BUILD
1846 // Note: These values are used by ITT events as well.
1847 #endif /* USE_ITT_BUILD */
1848     taskdata->td_taskwait_counter += 1;
1849     taskdata->td_taskwait_ident = loc_ref;
1850     taskdata->td_taskwait_thread = gtid + 1;
1851 
1852 #if USE_ITT_BUILD
1853     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1854     if (itt_sync_obj != NULL)
1855       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1856 #endif /* USE_ITT_BUILD */
1857 
1858     bool must_wait =
1859         !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
1860 
1861     must_wait = must_wait || (thread->th.th_task_team != NULL &&
1862                               thread->th.th_task_team->tt.tt_found_proxy_tasks);
1863     if (must_wait) {
1864       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
1865                              &(taskdata->td_incomplete_child_tasks)),
1866                        0U);
1867       while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
1868         flag.execute_tasks(thread, gtid, FALSE,
1869                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1870                            __kmp_task_stealing_constraint);
1871       }
1872     }
1873 #if USE_ITT_BUILD
1874     if (itt_sync_obj != NULL)
1875       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1876 #endif /* USE_ITT_BUILD */
1877 
1878     // Debugger:  The taskwait is completed. Location remains, but thread is
1879     // negated.
1880     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1881 
1882 #if OMPT_SUPPORT && OMPT_OPTIONAL
1883     if (ompt) {
1884       if (ompt_enabled.ompt_callback_sync_region_wait) {
1885         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1886             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1887             my_task_data, return_address);
1888       }
1889       if (ompt_enabled.ompt_callback_sync_region) {
1890         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1891             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1892             my_task_data, return_address);
1893       }
1894       taskdata->ompt_task_info.frame.enter_frame = ompt_data_none;
1895     }
1896 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1897 
1898     ANNOTATE_HAPPENS_AFTER(taskdata);
1899   }
1900 
1901   KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1902                 "returning TASK_CURRENT_NOT_QUEUED\n",
1903                 gtid, taskdata));
1904 
1905   return TASK_CURRENT_NOT_QUEUED;
1906 }
1907 
1908 #if OMPT_SUPPORT && OMPT_OPTIONAL
1909 OMPT_NOINLINE
1910 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1911                                           void *frame_address,
1912                                           void *return_address) {
1913   return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1914                                             return_address);
1915 }
1916 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1917 
1918 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1919 // complete
1920 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1921 #if OMPT_SUPPORT && OMPT_OPTIONAL
1922   if (UNLIKELY(ompt_enabled.enabled)) {
1923     OMPT_STORE_RETURN_ADDRESS(gtid);
1924     return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(0),
1925                                     OMPT_LOAD_RETURN_ADDRESS(gtid));
1926   }
1927 #endif
1928   return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
1929 }
1930 
1931 // __kmpc_omp_taskyield: switch to a different task
1932 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
1933   kmp_taskdata_t *taskdata;
1934   kmp_info_t *thread;
1935   int thread_finished = FALSE;
1936 
1937   KMP_COUNT_BLOCK(OMP_TASKYIELD);
1938   KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
1939 
1940   KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
1941                 gtid, loc_ref, end_part));
1942 
1943   if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
1944     thread = __kmp_threads[gtid];
1945     taskdata = thread->th.th_current_task;
1946 // Should we model this as a task wait or not?
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1949 #if USE_ITT_BUILD
1950 // Note: These values are used by ITT events as well.
1951 #endif /* USE_ITT_BUILD */
1952     taskdata->td_taskwait_counter += 1;
1953     taskdata->td_taskwait_ident = loc_ref;
1954     taskdata->td_taskwait_thread = gtid + 1;
1955 
1956 #if USE_ITT_BUILD
1957     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1958     if (itt_sync_obj != NULL)
1959       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1960 #endif /* USE_ITT_BUILD */
1961     if (!taskdata->td_flags.team_serial) {
1962       kmp_task_team_t *task_team = thread->th.th_task_team;
1963       if (task_team != NULL) {
1964         if (KMP_TASKING_ENABLED(task_team)) {
1965 #if OMPT_SUPPORT
1966           if (UNLIKELY(ompt_enabled.enabled))
1967             thread->th.ompt_thread_info.ompt_task_yielded = 1;
1968 #endif
1969           __kmp_execute_tasks_32(
1970               thread, gtid, NULL, FALSE,
1971               &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1972               __kmp_task_stealing_constraint);
1973 #if OMPT_SUPPORT
1974           if (UNLIKELY(ompt_enabled.enabled))
1975             thread->th.ompt_thread_info.ompt_task_yielded = 0;
1976 #endif
1977         }
1978       }
1979     }
1980 #if USE_ITT_BUILD
1981     if (itt_sync_obj != NULL)
1982       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1983 #endif /* USE_ITT_BUILD */
1984 
1985     // Debugger:  The taskwait is completed. Location remains, but thread is
1986     // negated.
1987     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1988   }
1989 
1990   KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
1991                 "returning TASK_CURRENT_NOT_QUEUED\n",
1992                 gtid, taskdata));
1993 
1994   return TASK_CURRENT_NOT_QUEUED;
1995 }
1996 
1997 // Task Reduction implementation
1998 //
// Note: the initial implementation did not account for the possibility of
// specifying omp_orig for the initializer of a UDR (user-defined reduction).
// The corrected implementation takes the omp_orig object into account.
// The compiler is free to use the old implementation if omp_orig is not
// specified.
2003 
2004 /*!
2005 @ingroup BASIC_TYPES
2006 @{
2007 */
2008 
2009 /*!
2010 Flags for special info per task reduction item.
2011 */
2012 typedef struct kmp_taskred_flags {
2013   /*! 1 - use lazy alloc/init (e.g. big objects, #tasks < #threads) */
2014   unsigned lazy_priv : 1;
2015   unsigned reserved31 : 31;
2016 } kmp_taskred_flags_t;
2017 
2018 /*!
2019 Internal struct for reduction data item related info set up by compiler.
2020 */
2021 typedef struct kmp_task_red_input {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2023   size_t reduce_size; /**< size of data item in bytes */
2024   // three compiler-generated routines (init, fini are optional):
2025   void *reduce_init; /**< data initialization routine (single parameter) */
2026   void *reduce_fini; /**< data finalization routine */
2027   void *reduce_comb; /**< data combiner routine */
2028   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2029 } kmp_task_red_input_t;
2030 
2031 /*!
2032 Internal struct for reduction data item related info saved by the library.
2033 */
2034 typedef struct kmp_taskred_data {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2036   size_t reduce_size; /**< size of data item */
2037   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2038   void *reduce_priv; /**< array of thread specific items */
2039   void *reduce_pend; /**< end of private data for faster comparison op */
2040   // three compiler-generated routines (init, fini are optional):
2041   void *reduce_comb; /**< data combiner routine */
2042   void *reduce_init; /**< data initialization routine (two parameters) */
2043   void *reduce_fini; /**< data finalization routine */
2044   void *reduce_orig; /**< original item (can be used in UDR initializer) */
2045 } kmp_taskred_data_t;
2046 
2047 /*!
2048 Internal struct for reduction data item related info set up by compiler.
2049 
2050 New interface: added reduce_orig field to provide omp_orig for UDR initializer.
2051 */
2052 typedef struct kmp_taskred_input {
  void *reduce_shar; /**< item shared between tasks to reduce into */
2054   void *reduce_orig; /**< original reduction item used for initialization */
2055   size_t reduce_size; /**< size of data item */
2056   // three compiler-generated routines (init, fini are optional):
2057   void *reduce_init; /**< data initialization routine (two parameters) */
2058   void *reduce_fini; /**< data finalization routine */
2059   void *reduce_comb; /**< data combiner routine */
2060   kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2061 } kmp_taskred_input_t;
2062 /*!
2063 @}
2064 */
2065 
2066 template <typename T> void __kmp_assign_orig(kmp_taskred_data_t &item, T &src);
2067 template <>
2068 void __kmp_assign_orig<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2069                                              kmp_task_red_input_t &src) {
2070   item.reduce_orig = NULL;
2071 }
2072 template <>
2073 void __kmp_assign_orig<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2074                                             kmp_taskred_input_t &src) {
2075   if (src.reduce_orig != NULL) {
2076     item.reduce_orig = src.reduce_orig;
2077   } else {
2078     item.reduce_orig = src.reduce_shar;
2079   } // non-NULL reduce_orig means new interface used
2080 }
2081 
2082 template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, int j);
2083 template <>
2084 void __kmp_call_init<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2085                                            int offset) {
2086   ((void (*)(void *))item.reduce_init)((char *)(item.reduce_priv) + offset);
2087 }
2088 template <>
2089 void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2090                                           int offset) {
2091   ((void (*)(void *, void *))item.reduce_init)(
2092       (char *)(item.reduce_priv) + offset, item.reduce_orig);
2093 }
2094 
2095 template <typename T>
2096 void *__kmp_task_reduction_init(int gtid, int num, T *data) {
2097   kmp_info_t *thread = __kmp_threads[gtid];
2098   kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
2099   kmp_int32 nth = thread->th.th_team_nproc;
2100   kmp_taskred_data_t *arr;
2101 
2102   // check input data just in case
2103   KMP_ASSERT(tg != NULL);
2104   KMP_ASSERT(data != NULL);
2105   KMP_ASSERT(num > 0);
2106   if (nth == 1) {
2107     KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
2108                   gtid, tg));
2109     return (void *)tg;
2110   }
2111   KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
2112                 gtid, tg, num));
2113   arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2114       thread, num * sizeof(kmp_taskred_data_t));
2115   for (int i = 0; i < num; ++i) {
2116     size_t size = data[i].reduce_size - 1;
2117     // round the size up to cache line per thread-specific item
2118     size += CACHE_LINE - size % CACHE_LINE;
2119     KMP_ASSERT(data[i].reduce_comb != NULL); // combiner is mandatory
2120     arr[i].reduce_shar = data[i].reduce_shar;
2121     arr[i].reduce_size = size;
2122     arr[i].flags = data[i].flags;
2123     arr[i].reduce_comb = data[i].reduce_comb;
2124     arr[i].reduce_init = data[i].reduce_init;
2125     arr[i].reduce_fini = data[i].reduce_fini;
2126     __kmp_assign_orig<T>(arr[i], data[i]);
2127     if (!arr[i].flags.lazy_priv) {
2128       // allocate cache-line aligned block and fill it with zeros
2129       arr[i].reduce_priv = __kmp_allocate(nth * size);
2130       arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
2131       if (arr[i].reduce_init != NULL) {
2132         // initialize all thread-specific items
2133         for (int j = 0; j < nth; ++j) {
2134           __kmp_call_init<T>(arr[i], j * size);
2135         }
2136       }
2137     } else {
2138       // only allocate space for pointers now,
2139       // objects will be lazily allocated/initialized if/when requested
2140       // note that __kmp_allocate zeroes the allocated memory
2141       arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
2142     }
2143   }
2144   tg->reduce_data = (void *)arr;
2145   tg->reduce_num_data = num;
2146   return (void *)tg;
2147 }
2148 
2149 /*!
2150 @ingroup TASKING
2151 @param gtid      Global thread ID
2152 @param num       Number of data items to reduce
2153 @param data      Array of data for reduction
2154 @return The taskgroup identifier
2155 
2156 Initialize task reduction for the taskgroup.
2157 
Note: this entry assumes that the optional compiler-generated initializer
routine takes a single parameter, a pointer to the object to be initialized.
That means the reduction either does not use the omp_orig object, or omp_orig
is accessible without help from the runtime library.
2162 */
2163 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
2164   return __kmp_task_reduction_init(gtid, num, (kmp_task_red_input_t *)data);
2165 }
2166 
2167 /*!
2168 @ingroup TASKING
2169 @param gtid      Global thread ID
2170 @param num       Number of data items to reduce
2171 @param data      Array of data for reduction
2172 @return The taskgroup identifier
2173 
2174 Initialize task reduction for the taskgroup.
2175 
Note: this entry assumes that the optional compiler-generated initializer
routine takes two parameters: a pointer to the object to be initialized and a
pointer to omp_orig.
2178 */
2179 void *__kmpc_taskred_init(int gtid, int num, void *data) {
2180   return __kmp_task_reduction_init(gtid, num, (kmp_taskred_input_t *)data);
2181 }
2182 
2183 // Copy task reduction data (except for shared pointers).
2184 template <typename T>
2185 void __kmp_task_reduction_init_copy(kmp_info_t *thr, int num, T *data,
2186                                     kmp_taskgroup_t *tg, void *reduce_data) {
2187   kmp_taskred_data_t *arr;
2188   KA_TRACE(20, ("__kmp_task_reduction_init_copy: Th %p, init taskgroup %p,"
2189                 " from data %p\n",
2190                 thr, tg, reduce_data));
2191   arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2192       thr, num * sizeof(kmp_taskred_data_t));
2193   // threads will share private copies, thunk routines, sizes, flags, etc.:
2194   KMP_MEMCPY(arr, reduce_data, num * sizeof(kmp_taskred_data_t));
2195   for (int i = 0; i < num; ++i) {
2196     arr[i].reduce_shar = data[i].reduce_shar; // init unique shared pointers
2197   }
2198   tg->reduce_data = (void *)arr;
2199   tg->reduce_num_data = num;
2200 }
2201 
2202 /*!
2203 @ingroup TASKING
2204 @param gtid    Global thread ID
2205 @param tskgrp  The taskgroup ID (optional)
2206 @param data    Shared location of the item
2207 @return The pointer to per-thread data
2208 
2209 Get thread-specific location of data item
2210 */
2211 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
2212   kmp_info_t *thread = __kmp_threads[gtid];
2213   kmp_int32 nth = thread->th.th_team_nproc;
2214   if (nth == 1)
2215     return data; // nothing to do
2216 
2217   kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
2218   if (tg == NULL)
2219     tg = thread->th.th_current_task->td_taskgroup;
2220   KMP_ASSERT(tg != NULL);
2221   kmp_taskred_data_t *arr = (kmp_taskred_data_t *)(tg->reduce_data);
2222   kmp_int32 num = tg->reduce_num_data;
2223   kmp_int32 tid = thread->th.th_info.ds.ds_tid;
2224 
2225   KMP_ASSERT(data != NULL);
2226   while (tg != NULL) {
2227     for (int i = 0; i < num; ++i) {
2228       if (!arr[i].flags.lazy_priv) {
2229         if (data == arr[i].reduce_shar ||
2230             (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
2231           return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
2232       } else {
2233         // check shared location first
2234         void **p_priv = (void **)(arr[i].reduce_priv);
2235         if (data == arr[i].reduce_shar)
2236           goto found;
2237         // check if we get some thread specific location as parameter
2238         for (int j = 0; j < nth; ++j)
2239           if (data == p_priv[j])
2240             goto found;
2241         continue; // not found, continue search
2242       found:
2243         if (p_priv[tid] == NULL) {
2244           // allocate thread specific object lazily
2245           p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
2246           if (arr[i].reduce_init != NULL) {
2247             if (arr[i].reduce_orig != NULL) { // new interface
2248               ((void (*)(void *, void *))arr[i].reduce_init)(
2249                   p_priv[tid], arr[i].reduce_orig);
2250             } else { // old interface (single parameter)
2251               ((void (*)(void *))arr[i].reduce_init)(p_priv[tid]);
2252             }
2253           }
2254         }
2255         return p_priv[tid];
2256       }
2257     }
2258     tg = tg->parent;
2259     arr = (kmp_taskred_data_t *)(tg->reduce_data);
2260     num = tg->reduce_num_data;
2261   }
2262   KMP_ASSERT2(0, "Unknown task reduction item");
2263   return NULL; // ERROR, this line never executed
2264 }
2265 
2266 // Finalize task reduction.
2267 // Called from __kmpc_end_taskgroup()
2268 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2269   kmp_int32 nth = th->th.th_team_nproc;
2270   KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
2271   kmp_taskred_data_t *arr = (kmp_taskred_data_t *)tg->reduce_data;
2272   kmp_int32 num = tg->reduce_num_data;
2273   for (int i = 0; i < num; ++i) {
2274     void *sh_data = arr[i].reduce_shar;
2275     void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2276     void (*f_comb)(void *, void *) =
2277         (void (*)(void *, void *))(arr[i].reduce_comb);
2278     if (!arr[i].flags.lazy_priv) {
2279       void *pr_data = arr[i].reduce_priv;
2280       size_t size = arr[i].reduce_size;
2281       for (int j = 0; j < nth; ++j) {
2282         void *priv_data = (char *)pr_data + j * size;
2283         f_comb(sh_data, priv_data); // combine results
2284         if (f_fini)
2285           f_fini(priv_data); // finalize if needed
2286       }
2287     } else {
2288       void **pr_data = (void **)(arr[i].reduce_priv);
2289       for (int j = 0; j < nth; ++j) {
2290         if (pr_data[j] != NULL) {
2291           f_comb(sh_data, pr_data[j]); // combine results
2292           if (f_fini)
2293             f_fini(pr_data[j]); // finalize if needed
2294           __kmp_free(pr_data[j]);
2295         }
2296       }
2297     }
2298     __kmp_free(arr[i].reduce_priv);
2299   }
2300   __kmp_thread_free(th, arr);
2301   tg->reduce_data = NULL;
2302   tg->reduce_num_data = 0;
2303 }
2304 
// Clean up task reduction data for a parallel or worksharing construct;
// do not touch task-private data that other threads are still working with.
2307 // Called from __kmpc_end_taskgroup()
2308 static void __kmp_task_reduction_clean(kmp_info_t *th, kmp_taskgroup_t *tg) {
2309   __kmp_thread_free(th, tg->reduce_data);
2310   tg->reduce_data = NULL;
2311   tg->reduce_num_data = 0;
2312 }
2313 
2314 template <typename T>
2315 void *__kmp_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2316                                          int num, T *data) {
2317   kmp_info_t *thr = __kmp_threads[gtid];
2318   kmp_int32 nth = thr->th.th_team_nproc;
2319   __kmpc_taskgroup(loc, gtid); // form new taskgroup first
2320   if (nth == 1) {
2321     KA_TRACE(10,
2322              ("__kmpc_reduction_modifier_init: T#%d, tg %p, exiting nth=1\n",
2323               gtid, thr->th.th_current_task->td_taskgroup));
2324     return (void *)thr->th.th_current_task->td_taskgroup;
2325   }
2326   kmp_team_t *team = thr->th.th_team;
2327   void *reduce_data;
2328   kmp_taskgroup_t *tg;
2329   reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]);
2330   if (reduce_data == NULL &&
2331       __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
2332                                  (void *)1)) {
2333     // single thread enters this block to initialize common reduction data
2334     KMP_DEBUG_ASSERT(reduce_data == NULL);
2335     // first initialize own data, then make a copy other threads can use
2336     tg = (kmp_taskgroup_t *)__kmp_task_reduction_init<T>(gtid, num, data);
2337     reduce_data = __kmp_thread_malloc(thr, num * sizeof(kmp_taskred_data_t));
2338     KMP_MEMCPY(reduce_data, tg->reduce_data, num * sizeof(kmp_taskred_data_t));
2339     // fini counters should be 0 at this point
2340     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[0]) == 0);
2341     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[1]) == 0);
2342     KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data);
2343   } else {
2344     while (
2345         (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) ==
2346         (void *)1) { // wait for task reduction initialization
2347       KMP_CPU_PAUSE();
2348     }
2349     KMP_DEBUG_ASSERT(reduce_data > (void *)1); // should be valid pointer here
2350     tg = thr->th.th_current_task->td_taskgroup;
2351     __kmp_task_reduction_init_copy<T>(thr, num, data, tg, reduce_data);
2352   }
2353   return tg;
2354 }
2355 
2356 /*!
2357 @ingroup TASKING
2358 @param loc       Source location info
2359 @param gtid      Global thread ID
2360 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2361 @param num       Number of data items to reduce
2362 @param data      Array of data for reduction
2363 @return The taskgroup identifier
2364 
Initialize task reduction for a parallel or worksharing construct.
2366 
Note: this entry assumes that the optional compiler-generated initializer
routine takes a single parameter, a pointer to the object to be initialized.
That means the reduction either does not use the omp_orig object, or omp_orig
is accessible without help from the runtime library.
2371 */
2372 void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2373                                           int num, void *data) {
2374   return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2375                                             (kmp_task_red_input_t *)data);
2376 }
2377 
2378 /*!
2379 @ingroup TASKING
2380 @param loc       Source location info
2381 @param gtid      Global thread ID
2382 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2383 @param num       Number of data items to reduce
2384 @param data      Array of data for reduction
2385 @return The taskgroup identifier
2386 
Initialize task reduction for a parallel or worksharing construct.
2388 
Note: this entry assumes that the optional compiler-generated initializer
routine takes two parameters: a pointer to the object to be initialized and a
pointer to omp_orig.
2391 */
2392 void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num,
2393                                    void *data) {
2394   return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2395                                             (kmp_taskred_input_t *)data);
2396 }
2397 
2398 /*!
2399 @ingroup TASKING
2400 @param loc       Source location info
2401 @param gtid      Global thread ID
2402 @param is_ws     Is 1 if the reduction is for worksharing, 0 otherwise
2403 
Finalize task reduction for a parallel or worksharing construct.
2405 */
2406 void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws) {
2407   __kmpc_end_taskgroup(loc, gtid);
2408 }
2409 
2410 // __kmpc_taskgroup: Start a new taskgroup
2411 void __kmpc_taskgroup(ident_t *loc, int gtid) {
2412   kmp_info_t *thread = __kmp_threads[gtid];
2413   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2414   kmp_taskgroup_t *tg_new =
2415       (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2416   KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2417   KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2418   KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2419   tg_new->parent = taskdata->td_taskgroup;
2420   tg_new->reduce_data = NULL;
2421   tg_new->reduce_num_data = 0;
2422   taskdata->td_taskgroup = tg_new;
2423 
2424 #if OMPT_SUPPORT && OMPT_OPTIONAL
2425   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2426     void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2427     if (!codeptr)
2428       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2429     kmp_team_t *team = thread->th.th_team;
2430     ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2431     // FIXME: I think this is wrong for lwt!
2432     ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2433 
2434     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2435         ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2436         &(my_task_data), codeptr);
2437   }
2438 #endif
2439 }
2440 
2441 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2442 //                       and its descendants are complete
2443 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2444   kmp_info_t *thread = __kmp_threads[gtid];
2445   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2446   kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2447   int thread_finished = FALSE;
2448 
2449 #if OMPT_SUPPORT && OMPT_OPTIONAL
2450   kmp_team_t *team;
2451   ompt_data_t my_task_data;
2452   ompt_data_t my_parallel_data;
2453   void *codeptr;
2454   if (UNLIKELY(ompt_enabled.enabled)) {
2455     team = thread->th.th_team;
2456     my_task_data = taskdata->ompt_task_info.task_data;
2457     // FIXME: I think this is wrong for lwt!
2458     my_parallel_data = team->t.ompt_team_info.parallel_data;
2459     codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2460     if (!codeptr)
2461       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2462   }
2463 #endif
2464 
2465   KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2466   KMP_DEBUG_ASSERT(taskgroup != NULL);
2467   KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2468 
2469   if (__kmp_tasking_mode != tskm_immediate_exec) {
    // mark the task as waiting (not on a barrier)
2471     taskdata->td_taskwait_counter += 1;
2472     taskdata->td_taskwait_ident = loc;
2473     taskdata->td_taskwait_thread = gtid + 1;
2474 #if USE_ITT_BUILD
2475     // For ITT the taskgroup wait is similar to taskwait until we need to
2476     // distinguish them
2477     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
2478     if (itt_sync_obj != NULL)
2479       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
2480 #endif /* USE_ITT_BUILD */
2481 
2482 #if OMPT_SUPPORT && OMPT_OPTIONAL
2483     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2484       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2485           ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2486           &(my_task_data), codeptr);
2487     }
2488 #endif
2489 
2490     if (!taskdata->td_flags.team_serial ||
2491         (thread->th.th_task_team != NULL &&
2492          thread->th.th_task_team->tt.tt_found_proxy_tasks)) {
2493       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)),
2494                        0U);
2495       while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
2496         flag.execute_tasks(thread, gtid, FALSE,
2497                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2498                            __kmp_task_stealing_constraint);
2499       }
2500     }
2501     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2502 
2503 #if OMPT_SUPPORT && OMPT_OPTIONAL
2504     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2505       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2506           ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2507           &(my_task_data), codeptr);
2508     }
2509 #endif
2510 
2511 #if USE_ITT_BUILD
2512     if (itt_sync_obj != NULL)
2513       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
2514 #endif /* USE_ITT_BUILD */
2515   }
2516   KMP_DEBUG_ASSERT(taskgroup->count == 0);
2517 
2518   if (taskgroup->reduce_data != NULL) { // need to reduce?
2519     int cnt;
2520     void *reduce_data;
2521     kmp_team_t *t = thread->th.th_team;
2522     kmp_taskred_data_t *arr = (kmp_taskred_data_t *)taskgroup->reduce_data;
    // check if the <priv> data of the first reduction variable is shared for
    // the team
2524     void *priv0 = arr[0].reduce_priv;
2525     if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[0])) != NULL &&
2526         ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2527       // finishing task reduction on parallel
2528       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[0]);
2529       if (cnt == thread->th.th_team_nproc - 1) {
2530         // we are the last thread passing __kmpc_reduction_modifier_fini()
2531         // finalize task reduction:
2532         __kmp_task_reduction_fini(thread, taskgroup);
2533         // cleanup fields in the team structure:
2534         // TODO: is relaxed store enough here (whole barrier should follow)?
2535         __kmp_thread_free(thread, reduce_data);
2536         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[0], NULL);
2537         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[0], 0);
2538       } else {
2539         // we are not the last thread passing __kmpc_reduction_modifier_fini(),
2540         // so do not finalize reduction, just clean own copy of the data
2541         __kmp_task_reduction_clean(thread, taskgroup);
2542       }
2543     } else if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[1])) !=
2544                    NULL &&
2545                ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2546       // finishing task reduction on worksharing
2547       cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[1]);
2548       if (cnt == thread->th.th_team_nproc - 1) {
2549         // we are the last thread passing __kmpc_reduction_modifier_fini()
2550         __kmp_task_reduction_fini(thread, taskgroup);
2551         // cleanup fields in team structure:
2552         // TODO: is relaxed store enough here (whole barrier should follow)?
2553         __kmp_thread_free(thread, reduce_data);
2554         KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[1], NULL);
2555         KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[1], 0);
2556       } else {
2557         // we are not the last thread passing __kmpc_reduction_modifier_fini(),
2558         // so do not finalize reduction, just clean own copy of the data
2559         __kmp_task_reduction_clean(thread, taskgroup);
2560       }
2561     } else {
2562       // finishing task reduction on taskgroup
2563       __kmp_task_reduction_fini(thread, taskgroup);
2564     }
2565   }
2566   // Restore parent taskgroup for the current task
2567   taskdata->td_taskgroup = taskgroup->parent;
2568   __kmp_thread_free(thread, taskgroup);
2569 
2570   KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2571                 gtid, taskdata));
2572   ANNOTATE_HAPPENS_AFTER(taskdata);
2573 
2574 #if OMPT_SUPPORT && OMPT_OPTIONAL
2575   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2576     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2577         ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2578         &(my_task_data), codeptr);
2579   }
2580 #endif
2581 }
2582 
2583 // __kmp_remove_my_task: remove a task from my own deque
2584 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2585                                         kmp_task_team_t *task_team,
2586                                         kmp_int32 is_constrained) {
2587   kmp_task_t *task;
2588   kmp_taskdata_t *taskdata;
2589   kmp_thread_data_t *thread_data;
2590   kmp_uint32 tail;
2591 
2592   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2593   KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2594                    NULL); // Caller should check this condition
2595 
2596   thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2597 
2598   KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2599                 gtid, thread_data->td.td_deque_ntasks,
2600                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2601 
2602   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2603     KA_TRACE(10,
2604              ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2605               "ntasks=%d head=%u tail=%u\n",
2606               gtid, thread_data->td.td_deque_ntasks,
2607               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2608     return NULL;
2609   }
2610 
2611   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2612 
2613   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2614     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2615     KA_TRACE(10,
2616              ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2617               "ntasks=%d head=%u tail=%u\n",
2618               gtid, thread_data->td.td_deque_ntasks,
2619               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2620     return NULL;
2621   }
2622 
2623   tail = (thread_data->td.td_deque_tail - 1) &
2624          TASK_DEQUE_MASK(thread_data->td); // Wrap index.
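  // e.g. with td_deque_size == 256 the mask is 0xFF, so a tail of 0 wraps to
  // 255; deque sizes are powers of two, which makes '& mask' equivalent to a
  // modulo by the size.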
2625   taskdata = thread_data->td.td_deque[tail];
2626 
2627   if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata,
2628                              thread->th.th_current_task)) {
    // The TSC does not allow removal of the tail task
2630     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2631     KA_TRACE(10,
2632              ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: "
2633               "ntasks=%d head=%u tail=%u\n",
2634               gtid, thread_data->td.td_deque_ntasks,
2635               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2636     return NULL;
2637   }
2638 
2639   thread_data->td.td_deque_tail = tail;
2640   TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2641 
2642   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2643 
2644   KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
2645                 "ntasks=%d head=%u tail=%u\n",
2646                 gtid, taskdata, thread_data->td.td_deque_ntasks,
2647                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2648 
2649   task = KMP_TASKDATA_TO_TASK(taskdata);
2650   return task;
2651 }
2652 
2653 // __kmp_steal_task: remove a task from another thread's deque
2654 // Assume that calling thread has already checked existence of
2655 // task_team thread_data before calling this routine.
2656 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
2657                                     kmp_task_team_t *task_team,
2658                                     std::atomic<kmp_int32> *unfinished_threads,
2659                                     int *thread_finished,
2660                                     kmp_int32 is_constrained) {
2661   kmp_task_t *task;
2662   kmp_taskdata_t *taskdata;
2663   kmp_taskdata_t *current;
2664   kmp_thread_data_t *victim_td, *threads_data;
2665   kmp_int32 target;
2666   kmp_int32 victim_tid;
2667 
2668   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2669 
2670   threads_data = task_team->tt.tt_threads_data;
2671   KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
2672 
2673   victim_tid = victim_thr->th.th_info.ds.ds_tid;
2674   victim_td = &threads_data[victim_tid];
2675 
2676   KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
2677                 "task_team=%p ntasks=%d head=%u tail=%u\n",
2678                 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2679                 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2680                 victim_td->td.td_deque_tail));
2681 
2682   if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2683     KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
2684                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2685                   gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2686                   victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2687                   victim_td->td.td_deque_tail));
2688     return NULL;
2689   }
2690 
2691   __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2692 
2693   int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2694   // Check again after we acquire the lock
2695   if (ntasks == 0) {
2696     __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2697     KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
2698                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2699                   gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2700                   victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2701     return NULL;
2702   }
2703 
2704   KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2705   current = __kmp_threads[gtid]->th.th_current_task;
2706   taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2707   if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
2708     // Bump head pointer and Wrap.
2709     victim_td->td.td_deque_head =
2710         (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2711   } else {
2712     if (!task_team->tt.tt_untied_task_encountered) {
      // The TSC does not allow stealing the victim's task
2714       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2715       KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d could not steal from "
2716                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2717                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2718                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2719       return NULL;
2720     }
2721     int i;
2722     // walk through victim's deque trying to steal any task
2723     target = victim_td->td.td_deque_head;
2724     taskdata = NULL;
2725     for (i = 1; i < ntasks; ++i) {
2726       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2727       taskdata = victim_td->td.td_deque[target];
2728       if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
2729         break; // found victim task
2730       } else {
2731         taskdata = NULL;
2732       }
2733     }
2734     if (taskdata == NULL) {
2735       // No appropriate candidate to steal found
2736       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2737       KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2738                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2739                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2740                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2741       return NULL;
2742     }
2743     int prev = target;
2744     for (i = i + 1; i < ntasks; ++i) {
2745       // shift remaining tasks in the deque left by 1
2746       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2747       victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2748       prev = target;
2749     }
2750     KMP_DEBUG_ASSERT(
2751         victim_td->td.td_deque_tail ==
2752         (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
    victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
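    // Example of the compaction above: with deque size 8, head == 6,
    // ntasks == 4 (slots 6,7,0,1) and the stolen task at slot 7, the entries
    // at slots 0 and 1 shift into slots 7 and 0, and the tail moves back from
    // 2 to 1, keeping the remaining tasks contiguous.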
2754   }
2755   if (*thread_finished) {
    // We need to un-mark this thread as a finished thread.  This must be done
    // before releasing the lock, or else other threads (starting with the
    // master thread) might be prematurely released from the barrier!!!
2759     kmp_int32 count;
2760 
2761     count = KMP_ATOMIC_INC(unfinished_threads);
2762 
2763     KA_TRACE(
2764         20,
2765         ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2766          gtid, count + 1, task_team));
2767 
2768     *thread_finished = FALSE;
2769   }
2770   TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2771 
2772   __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2773 
2774   KMP_COUNT_BLOCK(TASK_stolen);
2775   KA_TRACE(10,
2776            ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2777             "task_team=%p ntasks=%d head=%u tail=%u\n",
2778             gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2779             ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2780 
2781   task = KMP_TASKDATA_TO_TASK(taskdata);
2782   return task;
2783 }
2784 
2785 // __kmp_execute_tasks_template: Choose and execute tasks until either the
// condition is satisfied (return true) or there are none left (return false).
2787 //
2788 // final_spin is TRUE if this is the spin at the release barrier.
2789 // thread_finished indicates whether the thread is finished executing all
2790 // the tasks it has on its deque, and is at the release barrier.
// spinner is the location on which to spin; together with checker, the value
// to check to terminate the spin, it is encapsulated in the flag object.
// flag == NULL means only execute a single task and return.
2794 template <class C>
2795 static inline int __kmp_execute_tasks_template(
2796     kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2797     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2798     kmp_int32 is_constrained) {
2799   kmp_task_team_t *task_team = thread->th.th_task_team;
2800   kmp_thread_data_t *threads_data;
2801   kmp_task_t *task;
2802   kmp_info_t *other_thread;
2803   kmp_taskdata_t *current_task = thread->th.th_current_task;
2804   std::atomic<kmp_int32> *unfinished_threads;
2805   kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2806                       tid = thread->th.th_info.ds.ds_tid;
2807 
2808   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2809   KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
2810 
2811   if (task_team == NULL || current_task == NULL)
2812     return FALSE;
2813 
2814   KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2815                 "*thread_finished=%d\n",
2816                 gtid, final_spin, *thread_finished));
2817 
2818   thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2819   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2820   KMP_DEBUG_ASSERT(threads_data != NULL);
2821 
2822   nthreads = task_team->tt.tt_nproc;
2823   unfinished_threads = &(task_team->tt.tt_unfinished_threads);
2824   KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
2825   KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
2826 
2827   while (1) { // Outer loop keeps trying to find tasks in case of single thread
2828     // getting tasks from target constructs
2829     while (1) { // Inner loop to find a task and execute it
2830       task = NULL;
2831       if (use_own_tasks) { // check on own queue first
2832         task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2833       }
2834       if ((task == NULL) && (nthreads > 1)) { // Steal a task
2835         int asleep = 1;
2836         use_own_tasks = 0;
2837         // Try to steal from the last place I stole from successfully.
2838         if (victim_tid == -2) { // haven't stolen anything yet
2839           victim_tid = threads_data[tid].td.td_deque_last_stolen;
2840           if (victim_tid !=
              -1) // if we have a last-stolen-from victim, get the thread
2842             other_thread = threads_data[victim_tid].td.td_thr;
2843         }
2844         if (victim_tid != -1) { // found last victim
2845           asleep = 0;
2846         } else if (!new_victim) { // no recent steals and we haven't already
2847           // used a new victim; select a random thread
2848           do { // Find a different thread to steal work from.
2849             // Pick a random thread. Initial plan was to cycle through all the
2850             // threads, and only return if we tried to steal from every thread,
2851             // and failed.  Arch says that's not such a great idea.
2852             victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2853             if (victim_tid >= tid) {
2854               ++victim_tid; // Adjusts random distribution to exclude self
2855             }
2856             // Found a potential victim
2857             other_thread = threads_data[victim_tid].td.td_thr;
            // There is a slight chance that __kmp_enable_tasking() did not
            // wake up all threads waiting at the barrier.  If the victim is
            // sleeping, then wake it up.  Since we were going to pay the cache
            // miss penalty for referencing another thread's kmp_info_t struct
            // anyway, the check shouldn't cost too much performance at this
            // point.  In extra barrier mode, threads do not sleep at the
            // separate tasking barrier, so this isn't a problem.
2866             asleep = 0;
2867             if ((__kmp_tasking_mode == tskm_task_teams) &&
2868                 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2869                 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2870                  NULL)) {
2871               asleep = 1;
2872               __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2873                                         other_thread->th.th_sleep_loc);
              // A sleeping thread should not have any tasks on its queue.
2875               // There is a slight possibility that it resumes, steals a task
2876               // from another thread, which spawns more tasks, all in the time
2877               // that it takes this thread to check => don't write an assertion
2878               // that the victim's queue is empty.  Try stealing from a
2879               // different thread.
2880             }
2881           } while (asleep);
2882         }
2883 
2884         if (!asleep) {
2885           // We have a victim to try to steal from
2886           task = __kmp_steal_task(other_thread, gtid, task_team,
2887                                   unfinished_threads, thread_finished,
2888                                   is_constrained);
2889         }
2890         if (task != NULL) { // set last stolen to victim
2891           if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2892             threads_data[tid].td.td_deque_last_stolen = victim_tid;
2893             // The pre-refactored code did not try more than 1 successful new
            // victim, unless the last one generated more local tasks;
2895             // new_victim keeps track of this
2896             new_victim = 1;
2897           }
2898         } else { // No tasks found; unset last_stolen
2899           KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2900           victim_tid = -2; // no successful victim found
2901         }
2902       }
2903 
2904       if (task == NULL) // break out of tasking loop
2905         break;
2906 
2907 // Found a task; execute it
2908 #if USE_ITT_BUILD && USE_ITT_NOTIFY
2909       if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2910         if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2911           // get the object reliably
2912           itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2913         }
2914         __kmp_itt_task_starting(itt_sync_obj);
2915       }
2916 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
2917       __kmp_invoke_task(gtid, task, current_task);
2918 #if USE_ITT_BUILD
2919       if (itt_sync_obj != NULL)
2920         __kmp_itt_task_finished(itt_sync_obj);
2921 #endif /* USE_ITT_BUILD */
2922       // If this thread is only partway through the barrier and the condition is
2923       // met, then return now, so that the barrier gather/release pattern can
2924       // proceed. If this thread is in the last spin loop in the barrier,
2925       // waiting to be released, we know that the termination condition will not
2926       // be satisfied, so don't waste any cycles checking it.
2927       if (flag == NULL || (!final_spin && flag->done_check())) {
2928         KA_TRACE(
2929             15,
2930             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2931              gtid));
2932         return TRUE;
2933       }
2934       if (thread->th.th_task_team == NULL) {
2935         break;
2936       }
2937       KMP_YIELD(__kmp_library == library_throughput); // Yield before next task
2938       // If execution of a stolen task results in more tasks being placed on our
2939       // run queue, reset use_own_tasks
2940       if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
2941         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
2942                       "other tasks, restart\n",
2943                       gtid));
2944         use_own_tasks = 1;
2945         new_victim = 0;
2946       }
2947     }
2948 
2949     // The task source has been exhausted. If in final spin loop of barrier,
2950     // check if termination condition is satisfied. The work queue may be empty
2951     // but there might be proxy tasks still executing.
2952     if (final_spin &&
2953         KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0) {
2954       // First, decrement the #unfinished threads, if that has not already been
2955       // done.  This decrement might be to the spin location, and result in the
2956       // termination condition being satisfied.
2957       if (!*thread_finished) {
2958         kmp_int32 count;
2959 
2960         count = KMP_ATOMIC_DEC(unfinished_threads) - 1;
2961         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
2962                       "unfinished_threads to %d task_team=%p\n",
2963                       gtid, count, task_team));
2964         *thread_finished = TRUE;
2965       }
2966 
2967       // It is now unsafe to reference thread->th.th_team !!!
2968       // Decrementing task_team->tt.tt_unfinished_threads can allow the master
2969       // thread to pass through the barrier, where it might reset each thread's
2970       // th.th_team field for the next parallel region. If we can steal more
2971       // work, we know that this has not happened yet.
2972       if (flag != NULL && flag->done_check()) {
2973         KA_TRACE(
2974             15,
2975             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2976              gtid));
2977         return TRUE;
2978       }
2979     }
2980 
2981     // If this thread's task team is NULL, master has recognized that there are
2982     // no more tasks; bail out
2983     if (thread->th.th_task_team == NULL) {
2984       KA_TRACE(15,
2985                ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
2986       return FALSE;
2987     }
2988 
2989     // We could be getting tasks from target constructs; if this is the only
2990     // thread, keep trying to execute tasks from own queue
2991     if (nthreads == 1)
2992       use_own_tasks = 1;
2993     else {
2994       KA_TRACE(15,
2995                ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
2996       return FALSE;
2997     }
2998   }
2999 }
3000 
3001 int __kmp_execute_tasks_32(
3002     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin,
3003     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3004     kmp_int32 is_constrained) {
3005   return __kmp_execute_tasks_template(
3006       thread, gtid, flag, final_spin,
3007       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3008 }
3009 
3010 int __kmp_execute_tasks_64(
3011     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin,
3012     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3013     kmp_int32 is_constrained) {
3014   return __kmp_execute_tasks_template(
3015       thread, gtid, flag, final_spin,
3016       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3017 }
3018 
3019 int __kmp_execute_tasks_oncore(
3020     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
3021     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3022     kmp_int32 is_constrained) {
3023   return __kmp_execute_tasks_template(
3024       thread, gtid, flag, final_spin,
3025       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3026 }
3027 
3028 // __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
3029 // next barrier so they can assist in executing enqueued tasks.
// The first thread in allocates the task team atomically.
3031 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
3032                                  kmp_info_t *this_thr) {
3033   kmp_thread_data_t *threads_data;
3034   int nthreads, i, is_init_thread;
3035 
3036   KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
3037                 __kmp_gtid_from_thread(this_thr)));
3038 
3039   KMP_DEBUG_ASSERT(task_team != NULL);
3040   KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
3041 
3042   nthreads = task_team->tt.tt_nproc;
3043   KMP_DEBUG_ASSERT(nthreads > 0);
3044   KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
3045 
3046   // Allocate or increase the size of threads_data if necessary
3047   is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
3048 
3049   if (!is_init_thread) {
3050     // Some other thread already set up the array.
3051     KA_TRACE(
3052         20,
3053         ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
3054          __kmp_gtid_from_thread(this_thr)));
3055     return;
3056   }
3057   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3058   KMP_DEBUG_ASSERT(threads_data != NULL);
3059 
3060   if (__kmp_tasking_mode == tskm_task_teams &&
3061       (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
3062     // Release any threads sleeping at the barrier, so that they can steal
    // tasks and execute them.  In extra barrier mode, threads do not sleep
3064     // at the separate tasking barrier, so this isn't a problem.
3065     for (i = 0; i < nthreads; i++) {
3066       volatile void *sleep_loc;
3067       kmp_info_t *thread = threads_data[i].td.td_thr;
3068 
3069       if (i == this_thr->th.th_info.ds.ds_tid) {
3070         continue;
3071       }
3072       // Since we haven't locked the thread's suspend mutex lock at this
3073       // point, there is a small window where a thread might be putting
3074       // itself to sleep, but hasn't set the th_sleep_loc field yet.
      // To work around this, __kmp_execute_tasks_template() periodically
      // checks to see if other threads are sleeping (using the same random
      // mechanism that is used for task stealing) and awakens them if they
      // are.
3078       if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3079           NULL) {
3080         KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
3081                       __kmp_gtid_from_thread(this_thr),
3082                       __kmp_gtid_from_thread(thread)));
3083         __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3084       } else {
3085         KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
3086                       __kmp_gtid_from_thread(this_thr),
3087                       __kmp_gtid_from_thread(thread)));
3088       }
3089     }
3090   }
3091 
3092   KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
3093                 __kmp_gtid_from_thread(this_thr)));
3094 }
3095 
/* // TODO: Check the comment consistency
 * Utility routines for "task teams".  A task team (kmp_task_team_t) is kind
 * of like a shadow of the kmp_team_t data struct, with a different lifetime.
 * After a child thread checks into a barrier and calls __kmp_release() from
 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
 * longer assume that the kmp_team_t structure is intact (at any moment, the
 * master thread may exit the barrier code and free the team data structure,
 * and return the threads to the thread pool).
 *
 * This does not work with the tasking code, as the thread is still
 * expected to participate in the execution of any tasks that may have been
 * spawned by a member of the team, and the thread still needs access to
 * each thread in the team, so that it can steal work from it.
 *
 * Enter the existence of the kmp_task_team_t struct.  It employs a reference
 * counting mechanism, and is allocated by the master thread before calling
 * __kmp_<barrier_kind>_release, and then is released by the last thread to
 * exit __kmp_<barrier_kind>_release at the next barrier.  That is, the
 * lifetimes of the kmp_task_team_t structs for consecutive barriers can
 * overlap (and will, unless the master thread is the last thread to exit the
 * barrier release phase, which is not typical).  Such a struct could also be
 * useful outside the context of tasking.
 *
 * We currently use the existence of the threads array as an indicator that
 * tasks were spawned since the last barrier.  If the structure is to be
 * useful outside the context of tasking, then this will have to change, but
 * not setting the field minimizes the performance impact of tasking on
 * barriers, when no explicit tasks were spawned (pushed, actually).
 */
3125 
3126 static kmp_task_team_t *__kmp_free_task_teams =
3127     NULL; // Free list for task_team data structures
3128 // Lock for task team data structures
3129 kmp_bootstrap_lock_t __kmp_task_team_lock =
3130     KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
3131 
3132 // __kmp_alloc_task_deque:
// Allocates a task deque for a particular thread, and initializes the necessary
3134 // data structures relating to the deque.  This only happens once per thread
3135 // per task team since task teams are recycled. No lock is needed during
3136 // allocation since each thread allocates its own deque.
3137 static void __kmp_alloc_task_deque(kmp_info_t *thread,
3138                                    kmp_thread_data_t *thread_data) {
3139   __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3140   KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3141 
3142   // Initialize last stolen task field to "none"
3143   thread_data->td.td_deque_last_stolen = -1;
3144 
3145   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3146   KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3147   KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3148 
3149   KE_TRACE(
3150       10,
3151       ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3152        __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3153   // Allocate space for task deque, and zero the deque
3154   // Cannot use __kmp_thread_calloc() because threads not around for
3155   // kmp_reap_task_team( ).
3156   thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3157       INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
3158   thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
3159 }
3160 
3161 // __kmp_free_task_deque:
3162 // Deallocates a task deque for a particular thread. Happens at library
// deallocation, so there is no need to reset all thread data fields.
3164 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
3165   if (thread_data->td.td_deque != NULL) {
3166     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3167     TCW_4(thread_data->td.td_deque_ntasks, 0);
3168     __kmp_free(thread_data->td.td_deque);
3169     thread_data->td.td_deque = NULL;
3170     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3171   }
3172 
3173 #ifdef BUILD_TIED_TASK_STACK
3174   // GEH: Figure out what to do here for td_susp_tied_tasks
3175   if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
3176     __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
3177   }
3178 #endif // BUILD_TIED_TASK_STACK
3179 }
3180 
3181 // __kmp_realloc_task_threads_data:
3182 // Allocates a threads_data array for a task team, either by allocating an
3183 // initial array or enlarging an existing array.  Only the first thread to get
3184 // the lock allocs or enlarges the array and re-initializes the array elements.
3185 // That thread returns "TRUE", the rest return "FALSE".
3186 // Assumes that the new array size is given by task_team -> tt.tt_nproc.
3187 // The current size is given by task_team -> tt.tt_max_threads.
3188 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
3189                                            kmp_task_team_t *task_team) {
3190   kmp_thread_data_t **threads_data_p;
3191   kmp_int32 nthreads, maxthreads;
3192   int is_init_thread = FALSE;
3193 
3194   if (TCR_4(task_team->tt.tt_found_tasks)) {
3195     // Already reallocated and initialized.
3196     return FALSE;
3197   }
3198 
3199   threads_data_p = &task_team->tt.tt_threads_data;
3200   nthreads = task_team->tt.tt_nproc;
3201   maxthreads = task_team->tt.tt_max_threads;
3202 
3203   // All threads must lock when they encounter the first task of the implicit
3204   // task region to make sure threads_data fields are (re)initialized before
3205   // used.
3206   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3207 
3208   if (!TCR_4(task_team->tt.tt_found_tasks)) {
3209     // first thread to enable tasking
3210     kmp_team_t *team = thread->th.th_team;
3211     int i;
3212 
3213     is_init_thread = TRUE;
3214     if (maxthreads < nthreads) {
3215 
3216       if (*threads_data_p != NULL) {
3217         kmp_thread_data_t *old_data = *threads_data_p;
3218         kmp_thread_data_t *new_data = NULL;
3219 
3220         KE_TRACE(
3221             10,
3222             ("__kmp_realloc_task_threads_data: T#%d reallocating "
3223              "threads data for task_team %p, new_size = %d, old_size = %d\n",
3224              __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
3225         // Reallocate threads_data to have more elements than current array
3226         // Cannot use __kmp_thread_realloc() because threads not around for
3227         // kmp_reap_task_team( ).  Note all new array entries are initialized
3228         // to zero by __kmp_allocate().
3229         new_data = (kmp_thread_data_t *)__kmp_allocate(
3230             nthreads * sizeof(kmp_thread_data_t));
3231         // copy old data to new data
3232         KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
3233                      (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
3234 
3235 #ifdef BUILD_TIED_TASK_STACK
3236         // GEH: Figure out if this is the right thing to do
3237         for (i = maxthreads; i < nthreads; i++) {
3238           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3239           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3240         }
3241 #endif // BUILD_TIED_TASK_STACK
3242         // Install the new data and free the old data
3243         (*threads_data_p) = new_data;
3244         __kmp_free(old_data);
3245       } else {
3246         KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
3247                       "threads data for task_team %p, size = %d\n",
3248                       __kmp_gtid_from_thread(thread), task_team, nthreads));
3249         // Make the initial allocate for threads_data array, and zero entries
3250         // Cannot use __kmp_thread_calloc() because threads not around for
3251         // kmp_reap_task_team( ).
3252         ANNOTATE_IGNORE_WRITES_BEGIN();
3253         *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
3254             nthreads * sizeof(kmp_thread_data_t));
3255         ANNOTATE_IGNORE_WRITES_END();
3256 #ifdef BUILD_TIED_TASK_STACK
3257         // GEH: Figure out if this is the right thing to do
3258         for (i = 0; i < nthreads; i++) {
3259           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3260           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3261         }
3262 #endif // BUILD_TIED_TASK_STACK
3263       }
3264       task_team->tt.tt_max_threads = nthreads;
3265     } else {
3266       // If array has (more than) enough elements, go ahead and use it
3267       KMP_DEBUG_ASSERT(*threads_data_p != NULL);
3268     }
3269 
3270     // initialize threads_data pointers back to thread_info structures
3271     for (i = 0; i < nthreads; i++) {
3272       kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3273       thread_data->td.td_thr = team->t.t_threads[i];
3274 
3275       if (thread_data->td.td_deque_last_stolen >= nthreads) {
        // The last stolen field survives across teams / barrier, and the
        // number of threads may have changed.  It's possible (likely?) that a
        // new parallel region will exhibit the same behavior as the previous
        // region.
3279         thread_data->td.td_deque_last_stolen = -1;
3280       }
3281     }
3282 
3283     KMP_MB();
3284     TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
3285   }
3286 
3287   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3288   return is_init_thread;
3289 }
3290 
3291 // __kmp_free_task_threads_data:
3292 // Deallocates a threads_data array for a task team, including any attached
3293 // tasking deques.  Only occurs at library shutdown.
3294 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
3295   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3296   if (task_team->tt.tt_threads_data != NULL) {
3297     int i;
3298     for (i = 0; i < task_team->tt.tt_max_threads; i++) {
3299       __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
3300     }
3301     __kmp_free(task_team->tt.tt_threads_data);
3302     task_team->tt.tt_threads_data = NULL;
3303   }
3304   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3305 }
3306 
3307 // __kmp_allocate_task_team:
3308 // Allocates a task team associated with a specific team, taking it from
3309 // the global task team free list if possible.  Also initializes data
3310 // structures.
3311 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3312                                                  kmp_team_t *team) {
3313   kmp_task_team_t *task_team = NULL;
3314   int nthreads;
3315 
3316   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3317                 (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3318 
3319   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3320     // Take a task team from the task team pool
3321     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3322     if (__kmp_free_task_teams != NULL) {
3323       task_team = __kmp_free_task_teams;
3324       TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
3325       task_team->tt.tt_next = NULL;
3326     }
3327     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3328   }
3329 
3330   if (task_team == NULL) {
3331     KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3332                   "task team for team %p\n",
3333                   __kmp_gtid_from_thread(thread), team));
3334     // Allocate a new task team if one is not available.
3335     // Cannot use __kmp_thread_malloc() because threads not around for
3336     // kmp_reap_task_team( ).
3337     task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
3338     __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
3339     // AC: __kmp_allocate zeroes returned memory
3340     // task_team -> tt.tt_threads_data = NULL;
3341     // task_team -> tt.tt_max_threads = 0;
3342     // task_team -> tt.tt_next = NULL;
3343   }
3344 
3345   TCW_4(task_team->tt.tt_found_tasks, FALSE);
3346   TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3347   task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3348 
3349   KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
3350   TCW_4(task_team->tt.tt_active, TRUE);
3351 
3352   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
3353                 "unfinished_threads init'd to %d\n",
3354                 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
3355                 KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
3356   return task_team;
3357 }
3358 
3359 // __kmp_free_task_team:
3360 // Frees the task team associated with a specific thread, and adds it
3361 // to the global task team free list.
3362 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
3363   KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
3364                 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
3365 
3366   // Put task team back on free list
3367   __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3368 
3369   KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3370   task_team->tt.tt_next = __kmp_free_task_teams;
3371   TCW_PTR(__kmp_free_task_teams, task_team);
3372 
3373   __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3374 }
3375 
3376 // __kmp_reap_task_teams:
3377 // Free all the task teams on the task team free list.
3378 // Should only be done during library shutdown.
3379 // Cannot do anything that needs a thread structure or gtid since they are
3380 // already gone.
3381 void __kmp_reap_task_teams(void) {
3382   kmp_task_team_t *task_team;
3383 
3384   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3385     // Free all task_teams on the free list
3386     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3387     while ((task_team = __kmp_free_task_teams) != NULL) {
3388       __kmp_free_task_teams = task_team->tt.tt_next;
3389       task_team->tt.tt_next = NULL;
3390 
3391       // Free threads_data if necessary
3392       if (task_team->tt.tt_threads_data != NULL) {
3393         __kmp_free_task_threads_data(task_team);
3394       }
3395       __kmp_free(task_team);
3396     }
3397     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3398   }
3399 }
3400 
3401 // __kmp_wait_to_unref_task_teams:
3402 // Some threads could still be in the fork barrier release code, possibly
3403 // trying to steal tasks.  Wait for each thread to unreference its task team.
3404 void __kmp_wait_to_unref_task_teams(void) {
3405   kmp_info_t *thread;
3406   kmp_uint32 spins;
3407   int done;
3408 
3409   KMP_INIT_YIELD(spins);
3410 
3411   for (;;) {
3412     done = TRUE;
3413 
    // TODO: GEH - this may be wrong because some sync would be necessary
3415     // in case threads are added to the pool during the traversal. Need to
3416     // verify that lock for thread pool is held when calling this routine.
3417     for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
3418          thread = thread->th.th_next_pool) {
3419 #if KMP_OS_WINDOWS
3420       DWORD exit_val;
3421 #endif
3422       if (TCR_PTR(thread->th.th_task_team) == NULL) {
3423         KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3424                       __kmp_gtid_from_thread(thread)));
3425         continue;
3426       }
3427 #if KMP_OS_WINDOWS
3428       // TODO: GEH - add this check for Linux* OS / OS X* as well?
3429       if (!__kmp_is_thread_alive(thread, &exit_val)) {
3430         thread->th.th_task_team = NULL;
3431         continue;
3432       }
3433 #endif
3434 
3435       done = FALSE; // Because th_task_team pointer is not NULL for this thread
3436 
3437       KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3438                     "unreference task_team\n",
3439                     __kmp_gtid_from_thread(thread)));
3440 
3441       if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3442         volatile void *sleep_loc;
3443         // If the thread is sleeping, awaken it.
3444         if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3445             NULL) {
3446           KA_TRACE(
3447               10,
3448               ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3449                __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3450           __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3451         }
3452       }
3453     }
3454     if (done) {
3455       break;
3456     }
3457 
3458     // If oversubscribed or have waited a bit, yield.
3459     KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
3460   }
3461 }
3462 
3463 // __kmp_task_team_setup:  Create a task_team for the current team, but use
3464 // an already created, unused one if it already exists.
3465 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
3466   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3467 
3468   // If this task_team hasn't been created yet, allocate it. It will be used in
3469   // the region after the next.
3470   // If it exists, it is the current task team and shouldn't be touched yet as
3471   // it may still be in use.
3472   if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
3473       (always || team->t.t_nproc > 1)) {
3474     team->t.t_task_team[this_thr->th.th_task_state] =
3475         __kmp_allocate_task_team(this_thr, team);
3476     KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p "
3477                   "for team %d at parity=%d\n",
3478                   __kmp_gtid_from_thread(this_thr),
3479                   team->t.t_task_team[this_thr->th.th_task_state],
3480                   ((team != NULL) ? team->t.t_id : -1),
3481                   this_thr->th.th_task_state));
3482   }
3483 
3484   // After threads exit the release, they will call sync, and then point to this
3485   // other task_team; make sure it is allocated and properly initialized. As
3486   // threads spin in the barrier release phase, they will continue to use the
3487   // previous task_team struct(above), until they receive the signal to stop
3488   // checking for tasks (they can't safely reference the kmp_team_t struct,
3489   // which could be reallocated by the master thread). No task teams are formed
3490   // for serialized teams.
3491   if (team->t.t_nproc > 1) {
3492     int other_team = 1 - this_thr->th.th_task_state;
3493     if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
3494       team->t.t_task_team[other_team] =
3495           __kmp_allocate_task_team(this_thr, team);
3496       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new "
3497                     "task_team %p for team %d at parity=%d\n",
3498                     __kmp_gtid_from_thread(this_thr),
3499                     team->t.t_task_team[other_team],
3500                     ((team != NULL) ? team->t.t_id : -1), other_team));
3501     } else { // Leave the old task team struct in place for the upcoming region;
3502       // adjust as needed
3503       kmp_task_team_t *task_team = team->t.t_task_team[other_team];
3504       if (!task_team->tt.tt_active ||
3505           team->t.t_nproc != task_team->tt.tt_nproc) {
3506         TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
3507         TCW_4(task_team->tt.tt_found_tasks, FALSE);
3508         TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3509         KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
3510                           team->t.t_nproc);
3511         TCW_4(task_team->tt.tt_active, TRUE);
3512       }
3513       // if team size has changed, the first thread to enable tasking will
3514       // realloc threads_data if necessary
3515       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team "
3516                     "%p for team %d at parity=%d\n",
3517                     __kmp_gtid_from_thread(this_thr),
3518                     team->t.t_task_team[other_team],
3519                     ((team != NULL) ? team->t.t_id : -1), other_team));
3520     }
3521   }
3522 }
3523 
3524 // __kmp_task_team_sync: Propagation of task team data from team to threads
3525 // which happens just after the release phase of a team barrier.  This may be
3526 // called by any thread, but only for teams with # threads > 1.
3527 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
3528   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3529 
3530   // Toggle the th_task_state field, to switch which task_team this thread
3531   // refers to
3532   this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
3533   // It is now safe to propagate the task team pointer from the team struct to
3534   // the current thread.
3535   TCW_PTR(this_thr->th.th_task_team,
3536           team->t.t_task_team[this_thr->th.th_task_state]);
3537   KA_TRACE(20,
3538            ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
3539             "%p from Team #%d (parity=%d)\n",
3540             __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
3541             ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
3542 }
3543 
3544 // __kmp_task_team_wait: Master thread waits for outstanding tasks after the
3545 // barrier gather phase. Only called by master thread if #threads in team > 1 or
3546 // if proxy tasks were created.
3547 //
3548 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
3549 // by passing in 0 optionally as the last argument. When wait is zero, master
3550 // thread does not wait for unfinished_threads to reach 0.
3551 void __kmp_task_team_wait(
3552     kmp_info_t *this_thr,
3553     kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
3554   kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
3555 
3556   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3557   KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
3558 
3559   if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
3560     if (wait) {
3561       KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
3562                     "(for unfinished_threads to reach 0) on task_team = %p\n",
3563                     __kmp_gtid_from_thread(this_thr), task_team));
3564       // Worker threads may have dropped through to release phase, but could
3565       // still be executing tasks. Wait here for tasks to complete. To avoid
3566       // memory contention, only master thread checks termination condition.
3567       kmp_flag_32 flag(RCAST(std::atomic<kmp_uint32> *,
3568                              &task_team->tt.tt_unfinished_threads),
3569                        0U);
3570       flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
3571     }
3572     // Deactivate the old task team, so that the worker threads will stop
3573     // referencing it while spinning.
3574     KA_TRACE(
3575         20,
3576         ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
3577          "setting active to false, setting local and team's pointer to NULL\n",
3578          __kmp_gtid_from_thread(this_thr), task_team));
3579     KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
3580                      task_team->tt.tt_found_proxy_tasks == TRUE);
3581     TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3582     KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
3583     TCW_SYNC_4(task_team->tt.tt_active, FALSE);
3584     KMP_MB();
3585 
3586     TCW_PTR(this_thr->th.th_task_team, NULL);
3587   }
3588 }
3589 
3590 // __kmp_tasking_barrier:
// This routine may only be called when __kmp_tasking_mode ==
// tskm_extra_barrier.
3592 // Internal function to execute all tasks prior to a regular barrier or a join
3593 // barrier. It is a full barrier itself, which unfortunately turns regular
3594 // barriers into double barriers and join barriers into 1 1/2 barriers.
3595 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
3596   std::atomic<kmp_uint32> *spin = RCAST(
3597       std::atomic<kmp_uint32> *,
3598       &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
3599   int flag = FALSE;
3600   KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
3601 
3602 #if USE_ITT_BUILD
3603   KMP_FSYNC_SPIN_INIT(spin, NULL);
3604 #endif /* USE_ITT_BUILD */
3605   kmp_flag_32 spin_flag(spin, 0U);
3606   while (!spin_flag.execute_tasks(thread, gtid, TRUE,
3607                                   &flag USE_ITT_BUILD_ARG(NULL), 0)) {
3608 #if USE_ITT_BUILD
3609     // TODO: What about itt_sync_obj??
3610     KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
3611 #endif /* USE_ITT_BUILD */
3612 
3613     if (TCR_4(__kmp_global.g.g_done)) {
3614       if (__kmp_global.g.g_abort)
3615         __kmp_abort_thread();
3616       break;
3617     }
3618     KMP_YIELD(TRUE);
3619   }
3620 #if USE_ITT_BUILD
3621   KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
3622 #endif /* USE_ITT_BUILD */
3623 }
3624 
3625 // __kmp_give_task puts a task into a given thread queue if:
3626 //  - the queue for that thread was created
3627 //  - there's space in that queue
3628 // Because of this, __kmp_push_task needs to check if there's space after
3629 // getting the lock
3630 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
3631                             kmp_int32 pass) {
3632   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3633   kmp_task_team_t *task_team = taskdata->td_task_team;
3634 
3635   KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
3636                 taskdata, tid));
3637 
3638   // If task_team is NULL something went really bad...
3639   KMP_DEBUG_ASSERT(task_team != NULL);
3640 
3641   bool result = false;
3642   kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3643 
3644   if (thread_data->td.td_deque == NULL) {
3645     // There's no queue in this thread, go find another one
3646     // We're guaranteed that at least one thread has a queue
3647     KA_TRACE(30,
3648              ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
3649               tid, taskdata));
3650     return result;
3651   }
3652 
3653   if (TCR_4(thread_data->td.td_deque_ntasks) >=
3654       TASK_DEQUE_SIZE(thread_data->td)) {
3655     KA_TRACE(
3656         30,
3657         ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
3658          taskdata, tid));
3659 
    // if this deque has already grown to at least 'pass' times the initial
    // size, give a chance to another thread
3662     if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3663       return result;
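    // e.g. with an initial deque size of 256, a deque already grown to 512
    // is only expanded further once pass > 2, i.e. after the caller has
    // swept the whole team more than once without finding room (see
    // __kmpc_proxy_task_completed_ooo).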
3664 
3665     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3666     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3667         TASK_DEQUE_SIZE(thread_data->td)) {
3668       // expand deque to push the task which is not allowed to execute
3669       __kmp_realloc_task_deque(thread, thread_data);
3670     }
3671 
3672   } else {
3673 
3674     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3675 
3676     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3677         TASK_DEQUE_SIZE(thread_data->td)) {
3678       KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
3679                     "thread %d.\n",
3680                     taskdata, tid));
3681 
      // if this deque has already grown to at least 'pass' times the initial
      // size, give a chance to another thread
3684       if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3685         goto release_and_exit;
3686 
3687       __kmp_realloc_task_deque(thread, thread_data);
3688     }
3689   }
3690 
3691   // lock is held here, and there is space in the deque
3692 
3693   thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3694   // Wrap index.
3695   thread_data->td.td_deque_tail =
3696       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3697   TCW_4(thread_data->td.td_deque_ntasks,
3698         TCR_4(thread_data->td.td_deque_ntasks) + 1);
3699 
3700   result = true;
3701   KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
3702                 taskdata, tid));
3703 
3704 release_and_exit:
3705   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3706 
3707   return result;
3708 }
3709 
3710 /* The finish of the proxy tasks is divided in two pieces:
3711     - the top half is the one that can be done from a thread outside the team
3712     - the bottom half must be run from a thread within the team
3713 
3714    In order to run the bottom half the task gets queued back into one of the
3715    threads of the team. Once the td_incomplete_child_task counter of the parent
3716    is decremented the threads can leave the barriers. So, the bottom half needs
3717    to be queued before the counter is decremented. The top half is therefore
3718    divided in two parts:
3719     - things that can be run before queuing the bottom half
3720     - things that must be run after queuing the bottom half
3721 
3722    This creates a second race as the bottom half can free the task before the
3723    second top half is executed. To avoid this we use the
3724    td_incomplete_child_task of the proxy task to synchronize the top and bottom
3725    half. */
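
/* Sketch of the resulting call order for an out-of-team completion
   (simplified; see __kmpc_proxy_task_completed_ooo() below):

     __kmp_first_top_half_finish_proxy(taskdata);  // pre-queuing work
     __kmp_give_task(thread, k, ptask, pass);      // queue the bottom half
     __kmp_second_top_half_finish_proxy(taskdata); // post-queuing work
     ...
     __kmp_bottom_half_finish_proxy(gtid, ptask);  // later, from a thread
                                                   // inside the team

   The extra incomplete-child count taken in the first top half keeps the
   bottom half from freeing the task before the second top half has run. */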
3726 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3727   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3728   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3729   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3730   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3731 
3732   taskdata->td_flags.complete = 1; // mark the task as completed
3733 
3734   if (taskdata->td_taskgroup)
3735     KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
3736 
  // Create an imaginary child for this task so the bottom half cannot
3738   // release the task before we have completed the second top half
3739   KMP_ATOMIC_INC(&taskdata->td_incomplete_child_tasks);
3740 }
3741 
3742 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3743   kmp_int32 children = 0;
3744 
3745   // Predecrement simulated by "- 1" calculation
3746   children =
3747       KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
3748   KMP_DEBUG_ASSERT(children >= 0);
3749 
  // Remove the imaginary child
3751   KMP_ATOMIC_DEC(&taskdata->td_incomplete_child_tasks);
3752 }
3753 
3754 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3755   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3756   kmp_info_t *thread = __kmp_threads[gtid];
3757 
3758   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3759   KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3760                    1); // top half must run before bottom half
3761 
3762   // We need to wait to make sure the top half is finished
3763   // Spinning here should be ok as this should happen quickly
3764   while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) > 0)
3765     ;
3766 
3767   __kmp_release_deps(gtid, taskdata);
3768   __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3769 }
3770 
3771 /*!
3772 @ingroup TASKING
3773 @param gtid Global Thread ID of encountering thread
3774 @param ptask Task which execution is completed
3775 
Execute the completion of a proxy task from a thread that is part of the
team. Run the top halves and the bottom half directly.
3778 */
3779 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
3780   KMP_DEBUG_ASSERT(ptask != NULL);
3781   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3782   KA_TRACE(
3783       10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
3784            gtid, taskdata));
3785 
3786   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3787 
3788   __kmp_first_top_half_finish_proxy(taskdata);
3789   __kmp_second_top_half_finish_proxy(taskdata);
3790   __kmp_bottom_half_finish_proxy(gtid, ptask);
3791 
3792   KA_TRACE(10,
3793            ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
3794             gtid, taskdata));
3795 }
3796 
3797 /*!
3798 @ingroup TASKING
3799 @param ptask Task which execution is completed
3800 
Execute the completion of a proxy task from a thread that may not belong to
the team.
3803 */
3804 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
3805   KMP_DEBUG_ASSERT(ptask != NULL);
3806   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3807 
3808   KA_TRACE(
3809       10,
3810       ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
3811        taskdata));
3812 
3813   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3814 
3815   __kmp_first_top_half_finish_proxy(taskdata);
3816 
3817   // Enqueue task to complete bottom half completion from a thread within the
3818   // corresponding team
3819   kmp_team_t *team = taskdata->td_team;
3820   kmp_int32 nthreads = team->t.t_nproc;
3821   kmp_info_t *thread;
3822 
3823   // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
3824   // but we cannot use __kmp_get_random here
3825   kmp_int32 start_k = 0;
3826   kmp_int32 pass = 1;
3827   kmp_int32 k = start_k;
3828 
3829   do {
3830     // For now we're just linearly trying to find a thread
3831     thread = team->t.t_threads[k];
3832     k = (k + 1) % nthreads;
3833 
3834     // we did a full pass through all the threads
3835     if (k == start_k)
3836       pass = pass << 1;
3837 
3838   } while (!__kmp_give_task(thread, k, ptask, pass));
3839 
3840   __kmp_second_top_half_finish_proxy(taskdata);
3841 
3842   KA_TRACE(
3843       10,
3844       ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
3845        taskdata));
3846 }
3847 
3848 kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid,
3849                                                 kmp_task_t *task) {
3850   kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
3851   if (td->td_allow_completion_event.type == KMP_EVENT_UNINITIALIZED) {
3852     td->td_allow_completion_event.type = KMP_EVENT_ALLOW_COMPLETION;
3853     td->td_allow_completion_event.ed.task = task;
3854     __kmp_init_tas_lock(&td->td_allow_completion_event.lock);
3855   }
3856   return &td->td_allow_completion_event;
3857 }
3858 
3859 void __kmp_fulfill_event(kmp_event_t *event) {
3860   if (event->type == KMP_EVENT_ALLOW_COMPLETION) {
3861     kmp_task_t *ptask = event->ed.task;
3862     kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3863     bool detached = false;
3864     int gtid = __kmp_get_gtid();
3865 
3866     // The associated task might have completed or could be completing at this
3867     // point.
3868     // We need to take the lock to avoid races
3869     __kmp_acquire_tas_lock(&event->lock, gtid);
3870     if (taskdata->td_flags.proxy == TASK_PROXY)
3871       detached = true;
3872     event->type = KMP_EVENT_UNINITIALIZED;
3873     __kmp_release_tas_lock(&event->lock, gtid);
3874 
3875     if (detached) {
      // If the task detached, complete the proxy task
3877       if (gtid >= 0) {
3878         kmp_team_t *team = taskdata->td_team;
3879         kmp_info_t *thread = __kmp_get_thread();
3880         if (thread->th.th_team == team) {
3881           __kmpc_proxy_task_completed(gtid, ptask);
3882           return;
3883         }
3884       }
3885 
3886       // fallback
3887       __kmpc_proxy_task_completed_ooo(ptask);
3888     }
3889   }
3890 }
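
// Illustrative sketch only: roughly how the OpenMP 5.0 detach clause is
// expected to exercise the two routines above. The names in the user code
// (evt, start_async_work) are invented for the example; the runtime entry
// points are the ones defined in this file.
//
//   omp_event_handle_t evt;
//   #pragma omp task detach(evt)        // compiler obtains the event via
//   { start_async_work(evt); }          // __kmpc_task_allow_completion_event
//   ...
//   omp_fulfill_event(evt);             // eventually reaches
//                                       // __kmp_fulfill_event, possibly from
//                                       // a thread outside the team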
3891 
3892 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
3893 // for taskloop
3894 //
3895 // thread:   allocating thread
3896 // task_src: pointer to source task to be duplicated
3897 // returns:  a pointer to the allocated kmp_task_t structure (task).
3898 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
3899   kmp_task_t *task;
3900   kmp_taskdata_t *taskdata;
3901   kmp_taskdata_t *taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
3902   kmp_taskdata_t *parent_task = taskdata_src->td_parent; // same parent task
3903   size_t shareds_offset;
3904   size_t task_size;
3905 
3906   KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
3907                 task_src));
3908   KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
3909                    TASK_FULL); // it should not be proxy task
3910   KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
3911   task_size = taskdata_src->td_size_alloc;
3912 
3913   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
3914   KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
3915                 task_size));
3916 #if USE_FAST_MEMORY
3917   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
3918 #else
3919   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
3920 #endif /* USE_FAST_MEMORY */
3921   KMP_MEMCPY(taskdata, taskdata_src, task_size);
3922 
3923   task = KMP_TASKDATA_TO_TASK(taskdata);
3924 
3925   // Initialize new task (only specific fields not affected by memcpy)
3926   taskdata->td_task_id = KMP_GEN_TASK_ID();
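  // The shareds block, if any, lives inside the same allocation as the
  // taskdata and therefore sits at a fixed byte offset from its start. As an
  // illustration (numbers invented): if task_src->shareds was 128 bytes past
  // taskdata_src, the duplicated task's shareds must point 128 bytes past the
  // new taskdata, which is exactly the offset fixup performed below.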
  if (task->shareds != NULL) { // need to set up shareds pointer
3928     shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
3929     task->shareds = &((char *)taskdata)[shareds_offset];
3930     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
3931                      0);
3932   }
3933   taskdata->td_alloc_thread = thread;
3934   taskdata->td_parent = parent_task;
3935   // task inherits the taskgroup from the parent task
3936   taskdata->td_taskgroup = parent_task->td_taskgroup;
3937   // tied task needs to initialize the td_last_tied at creation,
3938   // untied one does this when it is scheduled for execution
3939   if (taskdata->td_flags.tiedness == TASK_TIED)
3940     taskdata->td_last_tied = taskdata;
3941 
3942   // Only need to keep track of child task counts if team parallel and tasking
3943   // not serialized
3944   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
3945     KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
3946     if (parent_task->td_taskgroup)
3947       KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks,
    // since implicit tasks are not deallocated
3950     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
3951       KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
3952   }
3953 
3954   KA_TRACE(20,
3955            ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
3956             thread, taskdata, taskdata->td_parent));
3957 #if OMPT_SUPPORT
3958   if (UNLIKELY(ompt_enabled.enabled))
3959     __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
3960 #endif
3961   return task;
3962 }
3963 
3964 // Routine optionally generated by the compiler for setting the lastprivate flag
3965 // and calling needed constructors for private/firstprivate objects
3966 // (used to form taskloop tasks from pattern task)
3967 // Parameters: dest task, src task, lastprivate flag.
3968 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
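
// As a rough illustration (the struct layout and names below are invented,
// not what any particular compiler emits), a generated duplication routine
// could look like:
//
//   struct my_privs_t { long fp; kmp_int32 is_last; }; // hypothetical layout
//   void my_task_dup(kmp_task_t *dest, kmp_task_t *src, kmp_int32 lastpriv) {
//     my_privs_t *d = (my_privs_t *)dest->shareds;
//     my_privs_t *s = (my_privs_t *)src->shareds;
//     d->fp = s->fp;          // re-initialize a firstprivate value
//     d->is_last = lastpriv;  // remember the lastprivate flag for this chunk
//   }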
3969 
3970 KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
3971 
3972 // class to encapsulate manipulating loop bounds in a taskloop task.
3973 // this abstracts away the Intel vs GOMP taskloop interface for setting/getting
3974 // the loop bound variables.
3975 class kmp_taskloop_bounds_t {
3976   kmp_task_t *task;
3977   const kmp_taskdata_t *taskdata;
3978   size_t lower_offset;
3979   size_t upper_offset;
3980 
3981 public:
3982   kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
3983       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
3984         lower_offset((char *)lb - (char *)task),
3985         upper_offset((char *)ub - (char *)task) {
3986     KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
3987     KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
3988   }
3989   kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
3990       : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
3991         lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
3992   size_t get_lower_offset() const { return lower_offset; }
3993   size_t get_upper_offset() const { return upper_offset; }
3994   kmp_uint64 get_lb() const {
3995     kmp_int64 retval;
3996 #if defined(KMP_GOMP_COMPAT)
3997     // Intel task just returns the lower bound normally
3998     if (!taskdata->td_flags.native) {
3999       retval = *(kmp_int64 *)((char *)task + lower_offset);
4000     } else {
4001       // GOMP task has to take into account the sizeof(long)
4002       if (taskdata->td_size_loop_bounds == 4) {
4003         kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
4004         retval = (kmp_int64)*lb;
4005       } else {
4006         kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
4007         retval = (kmp_int64)*lb;
4008       }
4009     }
4010 #else
4011     retval = *(kmp_int64 *)((char *)task + lower_offset);
4012 #endif // defined(KMP_GOMP_COMPAT)
4013     return retval;
4014   }
4015   kmp_uint64 get_ub() const {
4016     kmp_int64 retval;
4017 #if defined(KMP_GOMP_COMPAT)
4018     // Intel task just returns the upper bound normally
4019     if (!taskdata->td_flags.native) {
4020       retval = *(kmp_int64 *)((char *)task + upper_offset);
4021     } else {
4022       // GOMP task has to take into account the sizeof(long)
4023       if (taskdata->td_size_loop_bounds == 4) {
4024         kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
4025         retval = (kmp_int64)*ub;
4026       } else {
4027         kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
4028         retval = (kmp_int64)*ub;
4029       }
4030     }
4031 #else
4032     retval = *(kmp_int64 *)((char *)task + upper_offset);
4033 #endif // defined(KMP_GOMP_COMPAT)
4034     return retval;
4035   }
4036   void set_lb(kmp_uint64 lb) {
4037 #if defined(KMP_GOMP_COMPAT)
4038     // Intel task just sets the lower bound normally
4039     if (!taskdata->td_flags.native) {
4040       *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4041     } else {
4042       // GOMP task has to take into account the sizeof(long)
4043       if (taskdata->td_size_loop_bounds == 4) {
4044         kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
4045         *lower = (kmp_uint32)lb;
4046       } else {
4047         kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
4048         *lower = (kmp_uint64)lb;
4049       }
4050     }
4051 #else
4052     *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4053 #endif // defined(KMP_GOMP_COMPAT)
4054   }
4055   void set_ub(kmp_uint64 ub) {
4056 #if defined(KMP_GOMP_COMPAT)
4057     // Intel task just sets the upper bound normally
4058     if (!taskdata->td_flags.native) {
4059       *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4060     } else {
4061       // GOMP task has to take into account the sizeof(long)
4062       if (taskdata->td_size_loop_bounds == 4) {
4063         kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
4064         *upper = (kmp_uint32)ub;
4065       } else {
4066         kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
4067         *upper = (kmp_uint64)ub;
4068       }
4069     }
4070 #else
4071     *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4072 #endif // defined(KMP_GOMP_COMPAT)
4073   }
4074 };
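
// A short usage sketch of the accessor class above (values illustrative):
//
//   kmp_taskloop_bounds_t bounds(task, lb, ub); // lb/ub point into *task
//   kmp_uint64 lo = bounds.get_lb();            // read the current lower bound
//   bounds.set_ub(lo + 99);                     // store a new upper bound,
//                                               // honoring the GOMP long size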
4075 
4076 // __kmp_taskloop_linear: Start tasks of the taskloop linearly
4077 //
4078 // loc        Source location information
4079 // gtid       Global thread ID
4080 // task       Pattern task, exposes the loop iteration range
4081 // lb         Pointer to loop lower bound in task structure
4082 // ub         Pointer to loop upper bound in task structure
4083 // st         Loop stride
4084 // ub_glob    Global upper bound (used for lastprivate check)
4085 // num_tasks  Number of tasks to execute
4086 // grainsize  Number of loop iterations per task
4087 // extras     Number of chunks with grainsize+1 iterations
4088 // tc         Iterations count
4089 // task_dup   Tasks duplication routine
4090 // codeptr_ra Return address for OMPT events
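//
// For example (illustrative numbers): with tc = 10 iterations and
// num_tasks = 3, grainsize = tc / num_tasks = 3 and extras = tc % num_tasks = 1,
// so the chunk sizes are 4, 3, 3 (3 * 3 + 1 == 10), satisfying the assertion
// tc == num_tasks * grainsize + extras checked in the body.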
4091 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
4092                            kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4093                            kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4094                            kmp_uint64 grainsize, kmp_uint64 extras,
4095                            kmp_uint64 tc,
4096 #if OMPT_SUPPORT
4097                            void *codeptr_ra,
4098 #endif
4099                            void *task_dup) {
4100   KMP_COUNT_BLOCK(OMP_TASKLOOP);
4101   KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
4102   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4103   // compiler provides global bounds here
4104   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4105   kmp_uint64 lower = task_bounds.get_lb();
4106   kmp_uint64 upper = task_bounds.get_ub();
4107   kmp_uint64 i;
4108   kmp_info_t *thread = __kmp_threads[gtid];
4109   kmp_taskdata_t *current_task = thread->th.th_current_task;
4110   kmp_task_t *next_task;
4111   kmp_int32 lastpriv = 0;
4112 
4113   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4114   KMP_DEBUG_ASSERT(num_tasks > extras);
4115   KMP_DEBUG_ASSERT(num_tasks > 0);
4116   KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
                "extras %lld, i=%lld,%lld(%lld)%lld, dup %p\n",
4118                 gtid, num_tasks, grainsize, extras, lower, upper, ub_glob, st,
4119                 task_dup));
4120 
  // Launch num_tasks tasks, assigning grainsize iterations to each task
4122   for (i = 0; i < num_tasks; ++i) {
4123     kmp_uint64 chunk_minus_1;
4124     if (extras == 0) {
4125       chunk_minus_1 = grainsize - 1;
4126     } else {
4127       chunk_minus_1 = grainsize;
      --extras; // first 'extras' tasks get a bigger chunk (grainsize+1)
4129     }
4130     upper = lower + st * chunk_minus_1;
4131     if (i == num_tasks - 1) {
4132       // schedule the last task, set lastprivate flag if needed
4133       if (st == 1) { // most common case
4134         KMP_DEBUG_ASSERT(upper == *ub);
4135         if (upper == ub_glob)
4136           lastpriv = 1;
4137       } else if (st > 0) { // positive loop stride
4138         KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
4139         if ((kmp_uint64)st > ub_glob - upper)
4140           lastpriv = 1;
4141       } else { // negative loop stride
4142         KMP_DEBUG_ASSERT(upper + st < *ub);
4143         if (upper - ub_glob < (kmp_uint64)(-st))
4144           lastpriv = 1;
4145       }
4146     }
4147     next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
4148     kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
4149     kmp_taskloop_bounds_t next_task_bounds =
4150         kmp_taskloop_bounds_t(next_task, task_bounds);
4151 
4152     // adjust task-specific bounds
4153     next_task_bounds.set_lb(lower);
4154     if (next_taskdata->td_flags.native) {
4155       next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
4156     } else {
4157       next_task_bounds.set_ub(upper);
4158     }
4159     if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates,
4160                            // etc.
4161       ptask_dup(next_task, task, lastpriv);
4162     KA_TRACE(40,
4163              ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
4164               "upper %lld stride %lld, (offsets %p %p)\n",
4165               gtid, i, next_task, lower, upper, st,
4166               next_task_bounds.get_lower_offset(),
4167               next_task_bounds.get_upper_offset()));
4168 #if OMPT_SUPPORT
4169     __kmp_omp_taskloop_task(NULL, gtid, next_task,
4170                            codeptr_ra); // schedule new task
4171 #else
4172     __kmp_omp_task(gtid, next_task, true); // schedule new task
4173 #endif
4174     lower = upper + st; // adjust lower bound for the next iteration
4175   }
4176   // free the pattern task and exit
  __kmp_task_start(gtid, task, current_task); // internal bookkeeping only
4178   // do not execute the pattern task, just do internal bookkeeping
4179   __kmp_task_finish<false>(gtid, task, current_task);
4180 }
4181 
4182 // Structure to keep taskloop parameters for auxiliary task
4183 // kept in the shareds of the task structure.
4184 typedef struct __taskloop_params {
4185   kmp_task_t *task;
4186   kmp_uint64 *lb;
4187   kmp_uint64 *ub;
4188   void *task_dup;
4189   kmp_int64 st;
4190   kmp_uint64 ub_glob;
4191   kmp_uint64 num_tasks;
4192   kmp_uint64 grainsize;
4193   kmp_uint64 extras;
4194   kmp_uint64 tc;
4195   kmp_uint64 num_t_min;
4196 #if OMPT_SUPPORT
4197   void *codeptr_ra;
4198 #endif
4199 } __taskloop_params_t;
4200 
4201 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
4202                           kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
4203                           kmp_uint64, kmp_uint64, kmp_uint64, kmp_uint64,
4204 #if OMPT_SUPPORT
4205                           void *,
4206 #endif
4207                           void *);
4208 
4209 // Execute part of the taskloop submitted as a task.
4210 int __kmp_taskloop_task(int gtid, void *ptask) {
4211   __taskloop_params_t *p =
4212       (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
4213   kmp_task_t *task = p->task;
4214   kmp_uint64 *lb = p->lb;
4215   kmp_uint64 *ub = p->ub;
4216   void *task_dup = p->task_dup;
4217   //  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4218   kmp_int64 st = p->st;
4219   kmp_uint64 ub_glob = p->ub_glob;
4220   kmp_uint64 num_tasks = p->num_tasks;
4221   kmp_uint64 grainsize = p->grainsize;
4222   kmp_uint64 extras = p->extras;
4223   kmp_uint64 tc = p->tc;
4224   kmp_uint64 num_t_min = p->num_t_min;
4225 #if OMPT_SUPPORT
4226   void *codeptr_ra = p->codeptr_ra;
4227 #endif
4228 #if KMP_DEBUG
4229   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4230   KMP_DEBUG_ASSERT(task != NULL);
4231   KA_TRACE(20, ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
                " %lld, extras %lld, i=%lld,%lld(%lld), dup %p\n",
4233                 gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
4234                 task_dup));
4235 #endif
4236   KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
4237   if (num_tasks > num_t_min)
4238     __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
4239                          grainsize, extras, tc, num_t_min,
4240 #if OMPT_SUPPORT
4241                          codeptr_ra,
4242 #endif
4243                          task_dup);
4244   else
4245     __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
4246                           grainsize, extras, tc,
4247 #if OMPT_SUPPORT
4248                           codeptr_ra,
4249 #endif
4250                           task_dup);
4251 
4252   KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
4253   return 0;
4254 }
4255 
4256 // Schedule part of the taskloop as a task,
4257 // execute the rest of the taskloop.
4258 //
4259 // loc        Source location information
4260 // gtid       Global thread ID
4261 // task       Pattern task, exposes the loop iteration range
4262 // lb         Pointer to loop lower bound in task structure
4263 // ub         Pointer to loop upper bound in task structure
4264 // st         Loop stride
4265 // ub_glob    Global upper bound (used for lastprivate check)
4266 // num_tasks  Number of tasks to execute
4267 // grainsize  Number of loop iterations per task
4268 // extras     Number of chunks with grainsize+1 iterations
4269 // tc         Iterations count
4270 // num_t_min  Threshold to launch tasks recursively
4271 // task_dup   Tasks duplication routine
4272 // codeptr_ra Return address for OMPT events
4273 void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
4274                           kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4275                           kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4276                           kmp_uint64 grainsize, kmp_uint64 extras,
4277                           kmp_uint64 tc, kmp_uint64 num_t_min,
4278 #if OMPT_SUPPORT
4279                           void *codeptr_ra,
4280 #endif
4281                           void *task_dup) {
4282   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4283   KMP_DEBUG_ASSERT(task != NULL);
4284   KMP_DEBUG_ASSERT(num_tasks > num_t_min);
4285   KA_TRACE(20, ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
                " %lld, extras %lld, i=%lld,%lld(%lld), dup %p\n",
4287                 gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
4288                 task_dup));
4289   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4290   kmp_uint64 lower = *lb;
4291   kmp_info_t *thread = __kmp_threads[gtid];
4292   //  kmp_taskdata_t *current_task = thread->th.th_current_task;
4293   kmp_task_t *next_task;
4294   size_t lower_offset =
4295       (char *)lb - (char *)task; // remember offset of lb in the task structure
4296   size_t upper_offset =
4297       (char *)ub - (char *)task; // remember offset of ub in the task structure
4298 
4299   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4300   KMP_DEBUG_ASSERT(num_tasks > extras);
4301   KMP_DEBUG_ASSERT(num_tasks > 0);
4302 
  // split the loop into two halves
4304   kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
4305   kmp_uint64 gr_size0 = grainsize;
4306   kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
4307   kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
4308   if (n_tsk0 <= extras) {
4309     gr_size0++; // integrate extras into grainsize
4310     ext0 = 0; // no extra iters in 1st half
4311     ext1 = extras - n_tsk0; // remaining extras
4312     tc0 = gr_size0 * n_tsk0;
4313     tc1 = tc - tc0;
4314   } else { // n_tsk0 > extras
4315     ext1 = 0; // no extra iters in 2nd half
4316     ext0 = extras;
4317     tc1 = grainsize * n_tsk1;
4318     tc0 = tc - tc1;
4319   }
4320   ub0 = lower + st * (tc0 - 1);
4321   lb1 = ub0 + st;
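
  // Worked example of the split above (illustrative numbers): num_tasks = 5,
  // grainsize = 3, extras = 2, so tc = 17. Then n_tsk0 = 2 and n_tsk1 = 3;
  // since n_tsk0 <= extras, the first half absorbs the extras: gr_size0 = 4,
  // ext0 = 0, ext1 = 0, tc0 = 8, tc1 = 9, and tc0 + tc1 == tc as required.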
4322 
4323   // create pattern task for 2nd half of the loop
4324   next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
4325   // adjust lower bound (upper bound is not changed) for the 2nd half
4326   *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
4327   if (ptask_dup != NULL) // construct firstprivates, etc.
4328     ptask_dup(next_task, task, 0);
4329   *ub = ub0; // adjust upper bound for the 1st half
4330 
4331   // create auxiliary task for 2nd half of the loop
4332   // make sure new task has same parent task as the pattern task
4333   kmp_taskdata_t *current_task = thread->th.th_current_task;
4334   thread->th.th_current_task = taskdata->td_parent;
4335   kmp_task_t *new_task =
4336       __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
4337                             sizeof(__taskloop_params_t), &__kmp_taskloop_task);
4338   // restore current task
4339   thread->th.th_current_task = current_task;
4340   __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
4341   p->task = next_task;
4342   p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
4343   p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
4344   p->task_dup = task_dup;
4345   p->st = st;
4346   p->ub_glob = ub_glob;
4347   p->num_tasks = n_tsk1;
4348   p->grainsize = grainsize;
4349   p->extras = ext1;
4350   p->tc = tc1;
4351   p->num_t_min = num_t_min;
4352 #if OMPT_SUPPORT
4353   p->codeptr_ra = codeptr_ra;
4354 #endif
4355 
4356 #if OMPT_SUPPORT
4357   // schedule new task with correct return address for OMPT events
4358   __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
4359 #else
4360   __kmp_omp_task(gtid, new_task, true); // schedule new task
4361 #endif
4362 
4363   // execute the 1st half of current subrange
4364   if (n_tsk0 > num_t_min)
4365     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
4366                          ext0, tc0, num_t_min,
4367 #if OMPT_SUPPORT
4368                          codeptr_ra,
4369 #endif
4370                          task_dup);
4371   else
4372     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
4373                           gr_size0, ext0, tc0,
4374 #if OMPT_SUPPORT
4375                           codeptr_ra,
4376 #endif
4377                           task_dup);
4378 
  KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
4380 }
4381 
4382 /*!
4383 @ingroup TASKING
4384 @param loc       Source location information
4385 @param gtid      Global thread ID
4386 @param task      Task structure
4387 @param if_val    Value of the if clause
4388 @param lb        Pointer to loop lower bound in task structure
4389 @param ub        Pointer to loop upper bound in task structure
4390 @param st        Loop stride
4391 @param nogroup   Flag, 1 if no taskgroup needs to be added, 0 otherwise
4392 @param sched     Schedule specified 0/1/2 for none/grainsize/num_tasks
4393 @param grainsize Schedule value if specified
4394 @param task_dup  Tasks duplication routine
4395 
4396 Execute the taskloop construct.
4397 */
4398 void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
4399                      kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
4400                      int sched, kmp_uint64 grainsize, void *task_dup) {
4401   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4402   KMP_DEBUG_ASSERT(task != NULL);
4403 
4404   if (nogroup == 0) {
4405 #if OMPT_SUPPORT && OMPT_OPTIONAL
4406     OMPT_STORE_RETURN_ADDRESS(gtid);
4407 #endif
4408     __kmpc_taskgroup(loc, gtid);
4409   }
4410 
4411   // =========================================================================
4412   // calculate loop parameters
4413   kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4414   kmp_uint64 tc;
4415   // compiler provides global bounds here
4416   kmp_uint64 lower = task_bounds.get_lb();
4417   kmp_uint64 upper = task_bounds.get_ub();
4418   kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
4419   kmp_uint64 num_tasks = 0, extras = 0;
4420   kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
4421   kmp_info_t *thread = __kmp_threads[gtid];
4422   kmp_taskdata_t *current_task = thread->th.th_current_task;
4423 
4424   KA_TRACE(20, ("__kmpc_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
4425                 "grain %llu(%d), dup %p\n",
4426                 gtid, taskdata, lower, upper, st, grainsize, sched, task_dup));
4427 
4428   // compute trip count
4429   if (st == 1) { // most common case
4430     tc = upper - lower + 1;
4431   } else if (st < 0) {
4432     tc = (lower - upper) / (-st) + 1;
4433   } else { // st > 0
4434     tc = (upper - lower) / st + 1;
4435   }
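  // E.g. (illustrative): lower = 0, upper = 9, st = 2 gives
  // tc = (9 - 0) / 2 + 1 = 5; for lower = 9, upper = 1, st = -2,
  // tc = (9 - 1) / 2 + 1 = 5 as well (iterations 9, 7, 5, 3, 1).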
4436   if (tc == 0) {
4437     KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
4438     // free the pattern task and exit
4439     __kmp_task_start(gtid, task, current_task);
4440     // do not execute anything for zero-trip loop
4441     __kmp_task_finish<false>(gtid, task, current_task);
4442     return;
4443   }
4444 
4445 #if OMPT_SUPPORT && OMPT_OPTIONAL
4446   ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
4447   ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
4448   if (ompt_enabled.ompt_callback_work) {
4449     ompt_callbacks.ompt_callback(ompt_callback_work)(
4450         ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
4451         &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4452   }
4453 #endif
4454 
4455   if (num_tasks_min == 0)
    // TODO: can we choose a better default heuristic?
4457     num_tasks_min =
4458         KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
4459 
4460   // compute num_tasks/grainsize based on the input provided
4461   switch (sched) {
4462   case 0: // no schedule clause specified, we can choose the default
4463     // let's try to schedule (team_size*10) tasks
4464     grainsize = thread->th.th_team_nproc * 10;
4465     KMP_FALLTHROUGH();
4466   case 2: // num_tasks provided
4467     if (grainsize > tc) {
4468       num_tasks = tc; // too big num_tasks requested, adjust values
4469       grainsize = 1;
4470       extras = 0;
4471     } else {
4472       num_tasks = grainsize;
4473       grainsize = tc / num_tasks;
4474       extras = tc % num_tasks;
4475     }
4476     break;
4477   case 1: // grainsize provided
4478     if (grainsize > tc) {
4479       num_tasks = 1; // too big grainsize requested, adjust values
4480       grainsize = tc;
4481       extras = 0;
4482     } else {
4483       num_tasks = tc / grainsize;
4484       // adjust grainsize for balanced distribution of iterations
4485       grainsize = tc / num_tasks;
4486       extras = tc % num_tasks;
4487     }
4488     break;
4489   default:
4490     KMP_ASSERT2(0, "unknown scheduling of taskloop");
4491   }
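  // E.g. (illustrative): for tc = 10 with sched = 1 and grainsize = 4, the
  // case above computes num_tasks = 10 / 4 = 2, then rebalances to
  // grainsize = 10 / 2 = 5, extras = 0. For sched = 2 with num_tasks = 3
  // requested (passed in through 'grainsize'), it computes grainsize = 3 and
  // extras = 1.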
4492   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
4493   KMP_DEBUG_ASSERT(num_tasks > extras);
4494   KMP_DEBUG_ASSERT(num_tasks > 0);
4495   // =========================================================================
4496 
  // check the value of the if clause first
4498   // Also require GOMP_taskloop to reduce to linear (taskdata->td_flags.native)
4499   if (if_val == 0) { // if(0) specified, mark task as serial
4500     taskdata->td_flags.task_serial = 1;
4501     taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
4502     // always start serial tasks linearly
4503     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4504                           grainsize, extras, tc,
4505 #if OMPT_SUPPORT
4506                           OMPT_GET_RETURN_ADDRESS(0),
4507 #endif
4508                           task_dup);
4509     // !taskdata->td_flags.native => currently force linear spawning of tasks
4510     // for GOMP_taskloop
4511   } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
4512     KA_TRACE(20, ("__kmpc_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
4513                   "(%lld), grain %llu, extras %llu\n",
4514                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4515     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4516                          grainsize, extras, tc, num_tasks_min,
4517 #if OMPT_SUPPORT
4518                          OMPT_GET_RETURN_ADDRESS(0),
4519 #endif
4520                          task_dup);
4521   } else {
4522     KA_TRACE(20, ("__kmpc_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
4523                   "(%lld), grain %llu, extras %llu\n",
4524                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
4525     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
4526                           grainsize, extras, tc,
4527 #if OMPT_SUPPORT
4528                           OMPT_GET_RETURN_ADDRESS(0),
4529 #endif
4530                           task_dup);
4531   }
4532 
4533 #if OMPT_SUPPORT && OMPT_OPTIONAL
4534   if (ompt_enabled.ompt_callback_work) {
4535     ompt_callbacks.ompt_callback(ompt_callback_work)(
4536         ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
4537         &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
4538   }
4539 #endif
4540 
4541   if (nogroup == 0) {
4542 #if OMPT_SUPPORT && OMPT_OPTIONAL
4543     OMPT_STORE_RETURN_ADDRESS(gtid);
4544 #endif
4545     __kmpc_end_taskgroup(loc, gtid);
4546   }
4547   KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
4548 }
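
// A rough lowering sketch (assumptions: the exact arguments and the helper
// names are illustrative, not actual compiler output). A directive such as
//
//   #pragma omp taskloop grainsize(4)
//   for (long i = 0; i < n; ++i) body(i);
//
// might reach the entry point above roughly as follows: the compiler allocates
// a pattern task with __kmpc_omp_task_alloc, stores the loop bounds and stride
// into the task, and then calls
//
//   __kmpc_taskloop(loc, gtid, task, /*if_val=*/1, &lb, &ub, /*st=*/1,
//                   /*nogroup=*/0, /*sched=*/1, /*grainsize=*/4, task_dup);
//
// where sched = 1 selects the grainsize interpretation documented above.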
4549