1 /*
2  * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 //                     The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "kmp.h"
15 #include "kmp_i18n.h"
16 #include "kmp_itt.h"
17 #include "kmp_stats.h"
18 #include "kmp_wait_release.h"
19 
20 #if OMPT_SUPPORT
21 #include "ompt-specific.h"
22 #endif
23 
24 #include "tsan_annotations.h"
25 
26 /* forward declaration */
27 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
28                                  kmp_info_t *this_thr);
29 static void __kmp_alloc_task_deque(kmp_info_t *thread,
30                                    kmp_thread_data_t *thread_data);
31 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
32                                            kmp_task_team_t *task_team);
33 
#if OMP_45_ENABLED
35 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
36 #endif
37 
38 #ifdef BUILD_TIED_TASK_STACK
39 
//  __kmp_trace_task_stack: print the tied tasks from the task stack in order
//  from top to bottom
42 //
43 //  gtid: global thread identifier for thread containing stack
44 //  thread_data: thread data for task team thread containing stack
45 //  threshold: value above which the trace statement triggers
46 //  location: string identifying call site of this function (for trace)
47 static void __kmp_trace_task_stack(kmp_int32 gtid,
48                                    kmp_thread_data_t *thread_data,
49                                    int threshold, char *location) {
50   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
51   kmp_taskdata_t **stack_top = task_stack->ts_top;
52   kmp_int32 entries = task_stack->ts_entries;
53   kmp_taskdata_t *tied_task;
54 
55   KA_TRACE(
56       threshold,
57       ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
58        "first_block = %p, stack_top = %p \n",
59        location, gtid, entries, task_stack->ts_first_block, stack_top));
60 
61   KMP_DEBUG_ASSERT(stack_top != NULL);
62   KMP_DEBUG_ASSERT(entries > 0);
63 
64   while (entries != 0) {
65     KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
    // fix up ts_top if we need to pop from the previous block
    if ((entries & TASK_STACK_INDEX_MASK) == 0) {
68       kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
69 
70       stack_block = stack_block->sb_prev;
71       stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
72     }
73 
74     // finish bookkeeping
75     stack_top--;
76     entries--;
77 
78     tied_task = *stack_top;
79 
80     KMP_DEBUG_ASSERT(tied_task != NULL);
81     KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
82 
83     KA_TRACE(threshold,
84              ("__kmp_trace_task_stack(%s):             gtid=%d, entry=%d, "
85               "stack_top=%p, tied_task=%p\n",
86               location, gtid, entries, stack_top, tied_task));
87   }
88   KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
89 
90   KA_TRACE(threshold,
91            ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
92             location, gtid));
93 }
94 
95 //  __kmp_init_task_stack: initialize the task stack for the first time
96 //  after a thread_data structure is created.
97 //  It should not be necessary to do this again (assuming the stack works).
98 //
99 //  gtid: global thread identifier of calling thread
100 //  thread_data: thread data for task team thread containing stack
101 static void __kmp_init_task_stack(kmp_int32 gtid,
102                                   kmp_thread_data_t *thread_data) {
103   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
104   kmp_stack_block_t *first_block;
105 
106   // set up the first block of the stack
107   first_block = &task_stack->ts_first_block;
108   task_stack->ts_top = (kmp_taskdata_t **)first_block;
109   memset((void *)first_block, '\0',
110          TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
111 
112   // initialize the stack to be empty
113   task_stack->ts_entries = TASK_STACK_EMPTY;
114   first_block->sb_next = NULL;
115   first_block->sb_prev = NULL;
116 }
117 
118 //  __kmp_free_task_stack: free the task stack when thread_data is destroyed.
119 //
120 //  gtid: global thread identifier for calling thread
121 //  thread_data: thread info for thread containing stack
122 static void __kmp_free_task_stack(kmp_int32 gtid,
123                                   kmp_thread_data_t *thread_data) {
124   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
125   kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
126 
127   KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
  // walk the list, freeing every block after the embedded first one
  while (stack_block != NULL) {
    kmp_stack_block_t *next_block = stack_block->sb_next;
131 
132     stack_block->sb_next = NULL;
133     stack_block->sb_prev = NULL;
134     if (stack_block != &task_stack->ts_first_block) {
      __kmp_thread_free(__kmp_threads[gtid],
                        stack_block); // free the block, if not the first
137     }
138     stack_block = next_block;
139   }
  // leave the stack in a deallocated state
141   task_stack->ts_entries = 0;
142   task_stack->ts_top = NULL;
143 }
144 
145 //  __kmp_push_task_stack: Push the tied task onto the task stack.
146 //     Grow the stack if necessary by allocating another block.
147 //
148 //  gtid: global thread identifier for calling thread
149 //  thread: thread info for thread containing stack
150 //  tied_task: the task to push on the stack
151 static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
152                                   kmp_taskdata_t *tied_task) {
153   // GEH - need to consider what to do if tt_threads_data not allocated yet
154   kmp_thread_data_t *thread_data =
155       &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
156   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
157 
158   if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
159     return; // Don't push anything on stack if team or team tasks are serialized
160   }
161 
162   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
163   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
164 
165   KA_TRACE(20,
166            ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
167             gtid, thread, tied_task));
168   // Store entry
169   *(task_stack->ts_top) = tied_task;
170 
171   // Do bookkeeping for next push
172   task_stack->ts_top++;
173   task_stack->ts_entries++;
174 
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
176     // Find beginning of this task block
177     kmp_stack_block_t *stack_block =
178         (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
179 
180     // Check if we already have a block
181     if (stack_block->sb_next !=
182         NULL) { // reset ts_top to beginning of next block
183       task_stack->ts_top = &stack_block->sb_next->sb_block[0];
184     } else { // Alloc new block and link it up
185       kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
186           thread, sizeof(kmp_stack_block_t));
187 
188       task_stack->ts_top = &new_block->sb_block[0];
189       stack_block->sb_next = new_block;
190       new_block->sb_prev = stack_block;
191       new_block->sb_next = NULL;
192 
193       KA_TRACE(
194           30,
195           ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
196            gtid, tied_task, new_block));
197     }
198   }
199   KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
200                 tied_task));
201 }
202 
203 //  __kmp_pop_task_stack: Pop the tied task from the task stack.  Don't return
204 //  the task, just check to make sure it matches the ending task passed in.
205 //
206 //  gtid: global thread identifier for the calling thread
207 //  thread: thread info structure containing stack
//  ending_task: the task that is ending (should match the task popped off
//  the stack)
210 static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
211                                  kmp_taskdata_t *ending_task) {
212   // GEH - need to consider what to do if tt_threads_data not allocated yet
  kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
215   kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
216   kmp_taskdata_t *tied_task;
217 
218   if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
219     // Don't pop anything from stack if team or team tasks are serialized
220     return;
221   }
222 
223   KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
224   KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
225 
226   KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
227                 thread));
228 
229   // fix up ts_top if we need to pop from previous block
  if ((task_stack->ts_entries & TASK_STACK_INDEX_MASK) == 0) {
231     kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
232 
233     stack_block = stack_block->sb_prev;
234     task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
235   }
236 
237   // finish bookkeeping
238   task_stack->ts_top--;
239   task_stack->ts_entries--;
240 
241   tied_task = *(task_stack->ts_top);
242 
243   KMP_DEBUG_ASSERT(tied_task != NULL);
244   KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
245   KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
246 
247   KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
248                 tied_task));
249   return;
250 }
251 #endif /* BUILD_TIED_TASK_STACK */
252 
253 //  __kmp_push_task: Add a task to the thread's deque
254 static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
255   kmp_info_t *thread = __kmp_threads[gtid];
256   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
257   kmp_task_team_t *task_team = thread->th.th_task_team;
258   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
259   kmp_thread_data_t *thread_data;
260 
261   KA_TRACE(20,
262            ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
263 
264   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
265     // untied task needs to increment counter so that the task structure is not
266     // freed prematurely
267     kmp_int32 counter = 1 + KMP_TEST_THEN_INC32(&taskdata->td_untied_count);
268     KA_TRACE(
269         20,
270         ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
271          gtid, counter, taskdata));
272   }
273 
274   // The first check avoids building task_team thread data if serialized
275   if (taskdata->td_flags.task_serial) {
276     KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
277                   "TASK_NOT_PUSHED for task %p\n",
278                   gtid, taskdata));
279     return TASK_NOT_PUSHED;
280   }
281 
282   // Now that serialized tasks have returned, we can assume that we are not in
283   // immediate exec mode
284   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
285   if (!KMP_TASKING_ENABLED(task_team)) {
286     __kmp_enable_tasking(task_team, thread);
287   }
288   KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
289   KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
290 
291   // Find tasking deque specific to encountering thread
292   thread_data = &task_team->tt.tt_threads_data[tid];
293 
294   // No lock needed since only owner can allocate
295   if (thread_data->td.td_deque == NULL) {
296     __kmp_alloc_task_deque(thread, thread_data);
297   }
298 
299   // Check if deque is full
300   if (TCR_4(thread_data->td.td_deque_ntasks) >=
301       TASK_DEQUE_SIZE(thread_data->td)) {
302     KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
303                   "TASK_NOT_PUSHED for task %p\n",
304                   gtid, taskdata));
305     return TASK_NOT_PUSHED;
306   }
307 
308   // Lock the deque for the task push operation
309   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
310 
311 #if OMP_45_ENABLED
312   // Need to recheck as we can get a proxy task from a thread outside of OpenMP
313   if (TCR_4(thread_data->td.td_deque_ntasks) >=
314       TASK_DEQUE_SIZE(thread_data->td)) {
315     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
316     KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; returning "
317                   "TASK_NOT_PUSHED for task %p\n",
318                   gtid, taskdata));
319     return TASK_NOT_PUSHED;
320   }
321 #else
  // Must have room since no thread other than the calling thread can add tasks
323   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
324                    TASK_DEQUE_SIZE(thread_data->td));
325 #endif
326 
327   thread_data->td.td_deque[thread_data->td.td_deque_tail] =
328       taskdata; // Push taskdata
329   // Wrap index.
330   thread_data->td.td_deque_tail =
331       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
332   TCW_4(thread_data->td.td_deque_ntasks,
333         TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
334 
335   KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
336                 "task=%p ntasks=%d head=%u tail=%u\n",
337                 gtid, taskdata, thread_data->td.td_deque_ntasks,
338                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
339 
340   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
341 
342   return TASK_SUCCESSFULLY_PUSHED;
343 }
344 
// __kmp_pop_current_task_from_thread: restore the given thread's current task
// to its parent when a team ends
347 //
348 // this_thr: thread structure to set current_task in.
349 void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
350   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
351                 "this_thread=%p, curtask=%p, "
352                 "curtask_parent=%p\n",
353                 0, this_thr, this_thr->th.th_current_task,
354                 this_thr->th.th_current_task->td_parent));
355 
356   this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
357 
358   KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
359                 "this_thread=%p, curtask=%p, "
360                 "curtask_parent=%p\n",
361                 0, this_thr, this_thr->th.th_current_task,
362                 this_thr->th.th_current_task->td_parent));
363 }
364 
// __kmp_push_current_task_to_thread: set up the current task in the given
// thread for a new team
367 //
368 // this_thr: thread structure to set up
369 // team: team for implicit task data
370 // tid: thread within team to set up
371 void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
372                                        int tid) {
  // the thread's current task becomes the parent of the newly created implicit
  // tasks of the new team
375   KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
376                 "curtask=%p "
377                 "parent_task=%p\n",
378                 tid, this_thr, this_thr->th.th_current_task,
379                 team->t.t_implicit_task_taskdata[tid].td_parent));
380 
381   KMP_DEBUG_ASSERT(this_thr != NULL);
382 
383   if (tid == 0) {
384     if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
385       team->t.t_implicit_task_taskdata[0].td_parent =
386           this_thr->th.th_current_task;
387       this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
388     }
389   } else {
390     team->t.t_implicit_task_taskdata[tid].td_parent =
391         team->t.t_implicit_task_taskdata[0].td_parent;
392     this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
393   }
394 
395   KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
396                 "curtask=%p "
397                 "parent_task=%p\n",
398                 tid, this_thr, this_thr->th.th_current_task,
399                 team->t.t_implicit_task_taskdata[tid].td_parent));
400 }
401 
402 // __kmp_task_start: bookkeeping for a task starting execution
403 //
// gtid: global thread id of calling thread
405 // task: task starting execution
406 // current_task: task suspending
407 static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
408                              kmp_taskdata_t *current_task) {
409   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
410   kmp_info_t *thread = __kmp_threads[gtid];
411 
412   KA_TRACE(10,
413            ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
414             gtid, taskdata, current_task));
415 
416   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
417 
418   // mark currently executing task as suspended
419   // TODO: GEH - make sure root team implicit task is initialized properly.
420   // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
421   current_task->td_flags.executing = 0;
422 
423 // Add task to stack if tied
424 #ifdef BUILD_TIED_TASK_STACK
425   if (taskdata->td_flags.tiedness == TASK_TIED) {
426     __kmp_push_task_stack(gtid, thread, taskdata);
427   }
428 #endif /* BUILD_TIED_TASK_STACK */
429 
430   // mark starting task as executing and as current task
431   thread->th.th_current_task = taskdata;
432 
433   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
434                    taskdata->td_flags.tiedness == TASK_UNTIED);
435   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
436                    taskdata->td_flags.tiedness == TASK_UNTIED);
437   taskdata->td_flags.started = 1;
438   taskdata->td_flags.executing = 1;
439   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
440   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
441 
442   // GEH TODO: shouldn't we pass some sort of location identifier here?
443   // APT: yes, we will pass location here.
444   // need to store current thread state (in a thread or taskdata structure)
445   // before setting work_state, otherwise wrong state is set after end of task
446 
447   KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
448 
449   return;
450 }
451 
452 #if OMPT_SUPPORT
453 //------------------------------------------------------------------------------
454 // __ompt_task_init:
455 //   Initialize OMPT fields maintained by a task. This will only be called after
456 //   ompt_start_tool, so we already know whether ompt is enabled or not.
457 
458 static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
459   // The calls to __ompt_task_init already have the ompt_enabled condition.
460   task->ompt_task_info.task_data.value = 0;
461   task->ompt_task_info.frame.exit_frame = NULL;
462   task->ompt_task_info.frame.enter_frame = NULL;
463 #if OMP_40_ENABLED
464   task->ompt_task_info.ndeps = 0;
465   task->ompt_task_info.deps = NULL;
466 #endif /* OMP_40_ENABLED */
467 }
468 
469 // __ompt_task_start:
470 //   Build and trigger task-begin event
471 static inline void __ompt_task_start(kmp_task_t *task,
472                                      kmp_taskdata_t *current_task,
473                                      kmp_int32 gtid) {
474   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
475   ompt_task_status_t status = ompt_task_others;
476   if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
477     status = ompt_task_yield;
478     __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
479   }
480   /* let OMPT know that we're about to run this task */
481   if (ompt_enabled.ompt_callback_task_schedule) {
482     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
483         &(current_task->ompt_task_info.task_data), status,
484         &(taskdata->ompt_task_info.task_data));
485   }
486   taskdata->ompt_task_info.scheduling_parent = current_task;
487 }
488 
489 // __ompt_task_finish:
490 //   Build and trigger final task-schedule event
491 static inline void __ompt_task_finish(kmp_task_t *task,
492                                       kmp_taskdata_t *resumed_task) {
493   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
494   ompt_task_status_t status = ompt_task_complete;
495   if (taskdata->td_flags.tiedness == TASK_UNTIED &&
496       KMP_TEST_THEN_ADD32(&(taskdata->td_untied_count), 0) > 1)
497     status = ompt_task_others;
498   if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
499       taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
500     status = ompt_task_cancel;
501   }
502 
  /* let OMPT know that we're returning to the caller task */
504   if (ompt_enabled.ompt_callback_task_schedule) {
505     ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
506         &(taskdata->ompt_task_info.task_data), status,
507         &((resumed_task ? resumed_task
508                         : (taskdata->ompt_task_info.scheduling_parent
509                                ? taskdata->ompt_task_info.scheduling_parent
510                                : taskdata->td_parent))
511               ->ompt_task_info.task_data));
512   }
513 }
514 #endif
515 
516 template <bool ompt>
517 static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
518                                                kmp_task_t *task,
519                                                void *frame_address,
520                                                void *return_address) {
521   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
522   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
523 
524   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
525                 "current_task=%p\n",
526                 gtid, loc_ref, taskdata, current_task));
527 
528   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
529     // untied task needs to increment counter so that the task structure is not
530     // freed prematurely
531     kmp_int32 counter = 1 + KMP_TEST_THEN_INC32(&taskdata->td_untied_count);
532     KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
533                   "incremented for task %p\n",
534                   gtid, counter, taskdata));
535   }
536 
537   taskdata->td_flags.task_serial =
538       1; // Execute this task immediately, not deferred.
539   __kmp_task_start(gtid, task, current_task);
540 
541 #if OMPT_SUPPORT
542   if (ompt) {
543     if (current_task->ompt_task_info.frame.enter_frame == NULL) {
544       current_task->ompt_task_info.frame.enter_frame =
545           taskdata->ompt_task_info.frame.exit_frame = frame_address;
546     }
547     if (ompt_enabled.ompt_callback_task_create) {
548       ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
549       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
550           &(parent_info->task_data), &(parent_info->frame),
551           &(taskdata->ompt_task_info.task_data),
552           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
553           return_address);
554     }
555     __ompt_task_start(task, current_task, gtid);
556   }
557 #endif // OMPT_SUPPORT
558 
559   KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
560                 loc_ref, taskdata));
561 }
562 
563 #if OMPT_SUPPORT
564 OMPT_NOINLINE
565 static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
566                                            kmp_task_t *task,
567                                            void *frame_address,
568                                            void *return_address) {
569   __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
570                                            return_address);
571 }
572 #endif // OMPT_SUPPORT
573 
574 // __kmpc_omp_task_begin_if0: report that a given serialized task has started
575 // execution
576 //
577 // loc_ref: source location information; points to beginning of task block.
578 // gtid: global thread number.
579 // task: task thunk for the started task.
580 void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
581                                kmp_task_t *task) {
582 #if OMPT_SUPPORT
583   if (UNLIKELY(ompt_enabled.enabled)) {
584     OMPT_STORE_RETURN_ADDRESS(gtid);
585     __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
586                                    OMPT_GET_FRAME_ADDRESS(1),
587                                    OMPT_LOAD_RETURN_ADDRESS(gtid));
588     return;
589   }
590 #endif
591   __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
592 }
593 
594 #ifdef TASK_UNUSED
595 // __kmpc_omp_task_begin: report that a given task has started execution
596 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
597 void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
598   kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
599 
600   KA_TRACE(
601       10,
602       ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
603        gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
604 
605   __kmp_task_start(gtid, task, current_task);
606 
607   KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
608                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
609   return;
610 }
611 #endif // TASK_UNUSED
612 
613 // __kmp_free_task: free the current task space and the space for shareds
614 //
615 // gtid: Global thread ID of calling thread
616 // taskdata: task to free
617 // thread: thread data structure of caller
618 static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
619                             kmp_info_t *thread) {
620   KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
621                 taskdata));
622 
623   // Check to make sure all flags and counters have the correct values
624   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
625   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
626   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
627   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
628   KMP_DEBUG_ASSERT(TCR_4(taskdata->td_allocated_child_tasks) == 0 ||
629                    taskdata->td_flags.task_serial == 1);
630   KMP_DEBUG_ASSERT(TCR_4(taskdata->td_incomplete_child_tasks) == 0);
631 
632   taskdata->td_flags.freed = 1;
633   ANNOTATE_HAPPENS_BEFORE(taskdata);
634 // deallocate the taskdata and shared variable blocks associated with this task
635 #if USE_FAST_MEMORY
636   __kmp_fast_free(thread, taskdata);
637 #else /* ! USE_FAST_MEMORY */
638   __kmp_thread_free(thread, taskdata);
639 #endif
640 
641   KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
642 }
643 
644 // __kmp_free_task_and_ancestors: free the current task and ancestors without
645 // children
646 //
647 // gtid: Global thread ID of calling thread
648 // taskdata: task to free
649 // thread: thread data structure of caller
650 static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
651                                           kmp_taskdata_t *taskdata,
652                                           kmp_info_t *thread) {
653 #if OMP_45_ENABLED
654   // Proxy tasks must always be allowed to free their parents
655   // because they can be run in background even in serial mode.
656   kmp_int32 team_serial =
657       (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
658       !taskdata->td_flags.proxy;
659 #else
660   kmp_int32 team_serial =
661       taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser;
662 #endif
663   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
664 
665   kmp_int32 children =
666       KMP_TEST_THEN_DEC32(&taskdata->td_allocated_child_tasks) - 1;
667   KMP_DEBUG_ASSERT(children >= 0);
668 
669   // Now, go up the ancestor tree to see if any ancestors can now be freed.
670   while (children == 0) {
671     kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
672 
673     KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
674                   "and freeing itself\n",
675                   gtid, taskdata));
676 
677     // --- Deallocate my ancestor task ---
678     __kmp_free_task(gtid, taskdata, thread);
679 
680     taskdata = parent_taskdata;
681 
682     // Stop checking ancestors at implicit task instead of walking up ancestor
683     // tree to avoid premature deallocation of ancestors.
684     if (team_serial || taskdata->td_flags.tasktype == TASK_IMPLICIT)
685       return;
686 
687     // Predecrement simulated by "- 1" calculation
688     children = KMP_TEST_THEN_DEC32(&taskdata->td_allocated_child_tasks) - 1;
689     KMP_DEBUG_ASSERT(children >= 0);
690   }
691 
692   KA_TRACE(
693       20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
694            "not freeing it yet\n",
695            gtid, taskdata, children));
696 }
697 
698 // __kmp_task_finish: bookkeeping to do when a task finishes execution
699 //
700 // gtid: global thread ID for calling thread
701 // task: task to be finished
702 // resumed_task: task to be resumed.  (may be NULL if task is serialized)
703 static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
704                               kmp_taskdata_t *resumed_task) {
705   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
706   kmp_info_t *thread = __kmp_threads[gtid];
707   kmp_task_team_t *task_team =
708       thread->th.th_task_team; // might be NULL for serial teams...
709   kmp_int32 children = 0;
710 
711   KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
712                 "task %p\n",
713                 gtid, taskdata, resumed_task));
714 
715   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
716 
717 // Pop task from stack if tied
718 #ifdef BUILD_TIED_TASK_STACK
719   if (taskdata->td_flags.tiedness == TASK_TIED) {
720     __kmp_pop_task_stack(gtid, thread, taskdata);
721   }
722 #endif /* BUILD_TIED_TASK_STACK */
723 
724   if (taskdata->td_flags.tiedness == TASK_UNTIED) {
725     // untied task needs to check the counter so that the task structure is not
726     // freed prematurely
727     kmp_int32 counter = KMP_TEST_THEN_DEC32(&taskdata->td_untied_count) - 1;
728     KA_TRACE(
729         20,
730         ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
731          gtid, counter, taskdata));
732     if (counter > 0) {
      // the untied task is not done yet and may be continued by another
      // thread; do not free it now
735       if (resumed_task == NULL) {
736         KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
737         resumed_task = taskdata->td_parent; // In a serialized task, the resumed
738         // task is the parent
739       }
740       thread->th.th_current_task = resumed_task; // restore current_task
741       resumed_task->td_flags.executing = 1; // resume previous task
742       KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
743                     "resuming task %p\n",
744                     gtid, taskdata, resumed_task));
745       return;
746     }
747   }
748 
749   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
750   taskdata->td_flags.complete = 1; // mark the task as completed
751   KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
752   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
753 
754   // Only need to keep track of count if team parallel and tasking not
755   // serialized
756   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
757     // Predecrement simulated by "- 1" calculation
758     children =
759         KMP_TEST_THEN_DEC32(&taskdata->td_parent->td_incomplete_child_tasks) -
760         1;
761     KMP_DEBUG_ASSERT(children >= 0);
762 #if OMP_40_ENABLED
763     if (taskdata->td_taskgroup)
764       KMP_TEST_THEN_DEC32((kmp_int32 *)(&taskdata->td_taskgroup->count));
765 #if OMP_45_ENABLED
766   }
767   // if we found proxy tasks there could exist a dependency chain
768   // with the proxy task as origin
769   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) ||
770       (task_team && task_team->tt.tt_found_proxy_tasks)) {
771 #endif
772     __kmp_release_deps(gtid, taskdata);
773 #endif
774   }
775 
  // td_flags.executing must be marked as 0 after __kmp_release_deps has been
  // called. Otherwise, if a task is executed immediately from the release_deps
  // code, the flag will be reset to 1 again by this same function
779   KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
780   taskdata->td_flags.executing = 0; // suspend the finishing task
781 
782   KA_TRACE(
783       20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
784            gtid, taskdata, children));
785 
786 #if OMP_40_ENABLED
  /* If the task's destructor thunk flag has been set, we need to invoke the
     destructor thunk that has been generated by the compiler. The code is
     placed here, since at this point other tasks might have been released
     hence overlapping the destructor invocations with some other work in the
     released tasks.  The OpenMP spec is not specific on when the destructors
     are invoked, so we should be free to choose. */
793   if (taskdata->td_flags.destructors_thunk) {
794     kmp_routine_entry_t destr_thunk = task->data1.destructors;
795     KMP_ASSERT(destr_thunk);
796     destr_thunk(gtid, task);
797   }
798 #endif // OMP_40_ENABLED
799 
800   // bookkeeping for resuming task:
801   // GEH - note tasking_ser => task_serial
802   KMP_DEBUG_ASSERT(
803       (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
804       taskdata->td_flags.task_serial);
805   if (taskdata->td_flags.task_serial) {
806     if (resumed_task == NULL) {
807       resumed_task = taskdata->td_parent; // In a serialized task, the resumed
808       // task is the parent
809     }
810   } else {
    KMP_DEBUG_ASSERT(resumed_task !=
                     NULL); // verify that resumed task is passed as argument
813   }
814 
815   // Free this task and then ancestor tasks if they have no children.
816   // Restore th_current_task first as suggested by John:
817   // johnmc: if an asynchronous inquiry peers into the runtime system
818   // it doesn't see the freed task as the current task.
819   thread->th.th_current_task = resumed_task;
820   __kmp_free_task_and_ancestors(gtid, taskdata, thread);
821 
822   // TODO: GEH - make sure root team implicit task is initialized properly.
823   // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
824   resumed_task->td_flags.executing = 1; // resume previous task
825 
826   KA_TRACE(
827       10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
828            gtid, taskdata, resumed_task));
829 
830   return;
831 }
832 
833 template <bool ompt>
834 static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
835                                                   kmp_int32 gtid,
836                                                   kmp_task_t *task) {
837   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
838                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
  // passing NULL lets __kmp_task_finish determine the task to resume
840   __kmp_task_finish(gtid, task, NULL);
841 
842   KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
843                 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
844 
845 #if OMPT_SUPPORT
846   if (ompt) {
847     __ompt_task_finish(task, NULL);
848     ompt_frame_t *ompt_frame;
849     __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
850     ompt_frame->enter_frame = NULL;
851   }
852 #endif
853 
854   return;
855 }
856 
857 #if OMPT_SUPPORT
858 OMPT_NOINLINE
859 void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
860                                        kmp_task_t *task) {
861   __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
862 }
863 #endif // OMPT_SUPPORT
864 
865 // __kmpc_omp_task_complete_if0: report that a task has completed execution
866 //
867 // loc_ref: source location information; points to end of task block.
868 // gtid: global thread number.
869 // task: task thunk for the completed task.
870 void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
871                                   kmp_task_t *task) {
872 #if OMPT_SUPPORT
873   if (UNLIKELY(ompt_enabled.enabled)) {
874     __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
875     return;
876   }
877 #endif
878   __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
879 }
880 
881 #ifdef TASK_UNUSED
882 // __kmpc_omp_task_complete: report that a task has completed execution
883 // NEVER GENERATED BY COMPILER, DEPRECATED!!!
884 void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
885                               kmp_task_t *task) {
886   KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
887                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
888 
889   __kmp_task_finish(gtid, task, NULL); // Not sure how to find task to resume
890 
891   KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
892                 loc_ref, KMP_TASK_TO_TASKDATA(task)));
893   return;
894 }
895 #endif // TASK_UNUSED
896 
897 // __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
898 // task for a given thread
899 //
900 // loc_ref:  reference to source location of parallel region
901 // this_thr:  thread data structure corresponding to implicit task
902 // team: team for this_thr
903 // tid: thread id of given thread within team
904 // set_curr_task: TRUE if need to push current task to thread
// NOTE: Routine does not set up the implicit task ICVs.  This is assumed to
// have already been done elsewhere.
907 // TODO: Get better loc_ref.  Value passed in may be NULL
908 void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
909                               kmp_team_t *team, int tid, int set_curr_task) {
910   kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
911 
912   KF_TRACE(
913       10,
914       ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
915        tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
916 
917   task->td_task_id = KMP_GEN_TASK_ID();
918   task->td_team = team;
919   //    task->td_parent   = NULL;  // fix for CQ230101 (broken parent task info
920   //    in debugger)
921   task->td_ident = loc_ref;
922   task->td_taskwait_ident = NULL;
923   task->td_taskwait_counter = 0;
924   task->td_taskwait_thread = 0;
925 
926   task->td_flags.tiedness = TASK_TIED;
927   task->td_flags.tasktype = TASK_IMPLICIT;
928 #if OMP_45_ENABLED
929   task->td_flags.proxy = TASK_FULL;
930 #endif
931 
932   // All implicit tasks are executed immediately, not deferred
933   task->td_flags.task_serial = 1;
934   task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
935   task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
936 
937   task->td_flags.started = 1;
938   task->td_flags.executing = 1;
939   task->td_flags.complete = 0;
940   task->td_flags.freed = 0;
941 
942 #if OMP_40_ENABLED
943   task->td_depnode = NULL;
944 #endif
945   task->td_last_tied = task;
946 
947   if (set_curr_task) { // only do this init first time thread is created
948     task->td_incomplete_child_tasks = 0;
949     // Not used: don't need to deallocate implicit task
950     task->td_allocated_child_tasks = 0;
951 #if OMP_40_ENABLED
952     task->td_taskgroup = NULL; // An implicit task does not have taskgroup
953     task->td_dephash = NULL;
954 #endif
955     __kmp_push_current_task_to_thread(this_thr, team, tid);
956   } else {
957     KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
958     KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
959   }
960 
961 #if OMPT_SUPPORT
962   if (UNLIKELY(ompt_enabled.enabled))
963     __ompt_task_init(task, tid);
964 #endif
965 
966   KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
967                 team, task));
968 }
969 
// __kmp_finish_implicit_task: Release resources associated with implicit tasks
971 // at the end of parallel regions. Some resources are kept for reuse in the next
972 // parallel region.
973 //
974 // thread:  thread data structure corresponding to implicit task
975 void __kmp_finish_implicit_task(kmp_info_t *thread) {
976   kmp_taskdata_t *task = thread->th.th_current_task;
977   if (task->td_dephash)
978     __kmp_dephash_free_entries(thread, task->td_dephash);
979 }
980 
// __kmp_free_implicit_task: Release resources associated with implicit tasks
// when these tasks are destroyed
983 //
984 // thread:  thread data structure corresponding to implicit task
985 void __kmp_free_implicit_task(kmp_info_t *thread) {
986   kmp_taskdata_t *task = thread->th.th_current_task;
987   if (task->td_dephash)
988     __kmp_dephash_free(thread, task->td_dephash);
989   task->td_dephash = NULL;
990 }
991 
// Round up a size to a multiple of val, where val is a power of two: Used to
// insert padding between structures co-allocated using a single malloc() call
994 static size_t __kmp_round_up_to_val(size_t size, size_t val) {
995   if (size & (val - 1)) {
996     size &= ~(val - 1);
997     if (size <= KMP_SIZE_T_MAX - val) {
998       size += val; // Round up if there is no overflow.
999     }
1000   }
1001   return size;
} // __kmp_round_up_to_val
1003 
1004 // __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1005 //
1006 // loc_ref: source location information
1007 // gtid: global thread number.
// flags: include tiedness & task type (explicit vs. implicit) of the 'new'
// task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1010 // sizeof_kmp_task_t:  Size in bytes of kmp_task_t data structure including
1011 // private vars accessed in task.
1012 // sizeof_shareds:  Size in bytes of array of pointers to shared vars accessed
1013 // in task.
1014 // task_entry: Pointer to task code entry point generated by compiler.
1015 // returns: a pointer to the allocated kmp_task_t structure (task).
1016 kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1017                              kmp_tasking_flags_t *flags,
1018                              size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1019                              kmp_routine_entry_t task_entry) {
1020   kmp_task_t *task;
1021   kmp_taskdata_t *taskdata;
1022   kmp_info_t *thread = __kmp_threads[gtid];
1023   kmp_team_t *team = thread->th.th_team;
1024   kmp_taskdata_t *parent_task = thread->th.th_current_task;
1025   size_t shareds_offset;
1026 
1027   KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1028                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1029                 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1030                 sizeof_shareds, task_entry));
1031 
  if (parent_task->td_flags.final) {
    flags->final = 1;
  }
1037   if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
    // An untied task encountered causes the task scheduling constraint (TSC)
    // algorithm to check the entire deque of the victim thread. If no untied
    // task is encountered, then checking the head of the deque should be
    // enough.
1041     KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1042   }
1043 
1044 #if OMP_45_ENABLED
1045   if (flags->proxy == TASK_PROXY) {
1046     flags->tiedness = TASK_UNTIED;
1047     flags->merged_if0 = 1;
1048 
    /* are we running in a serialized parallel region or in tskm_immediate_exec
       mode? either way we need tasking support enabled */
1051     if ((thread->th.th_task_team) == NULL) {
      /* This should only happen if the team is serialized;
         set up a task team and propagate it to the thread */
1054       KMP_DEBUG_ASSERT(team->t.t_serialized);
1055       KA_TRACE(30,
1056                ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1057                 gtid));
1058       __kmp_task_team_setup(
1059           thread, team,
1060           1); // 1 indicates setup the current team regardless of nthreads
1061       thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1062     }
1063     kmp_task_team_t *task_team = thread->th.th_task_team;
1064 
1065     /* tasking must be enabled now as the task might not be pushed */
1066     if (!KMP_TASKING_ENABLED(task_team)) {
1067       KA_TRACE(
1068           30,
1069           ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1070       __kmp_enable_tasking(task_team, thread);
1071       kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1072       kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1073       // No lock needed since only owner can allocate
1074       if (thread_data->td.td_deque == NULL) {
1075         __kmp_alloc_task_deque(thread, thread_data);
1076       }
1077     }
1078 
1079     if (task_team->tt.tt_found_proxy_tasks == FALSE)
1080       TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1081   }
1082 #endif
1083 
1084   // Calculate shared structure offset including padding after kmp_task_t struct
1085   // to align pointers in shared struct
1086   shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1087   shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
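
  // Resulting single-allocation layout (illustrative sketch; assumes that
  // KMP_TASKDATA_TO_TASK yields the address just past the kmp_taskdata_t
  // header):
  //
  //   [ kmp_taskdata_t | kmp_task_t + privates | pad | shareds ]
  //   ^ taskdata        ^ task                       ^ task->shareds
  //   |<------------- shareds_offset --------------->|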
1088 
1089   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1090   KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1091                 shareds_offset));
1092   KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1093                 sizeof_shareds));
1094 
1095 // Avoid double allocation here by combining shareds with taskdata
1096 #if USE_FAST_MEMORY
1097   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1098                                                                sizeof_shareds);
1099 #else /* ! USE_FAST_MEMORY */
1100   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1101                                                                sizeof_shareds);
1102 #endif /* USE_FAST_MEMORY */
1103   ANNOTATE_HAPPENS_AFTER(taskdata);
1104 
1105   task = KMP_TASKDATA_TO_TASK(taskdata);
1106 
1107 // Make sure task & taskdata are aligned appropriately
1108 #if KMP_ARCH_X86 || KMP_ARCH_PPC64 || !KMP_HAVE_QUAD
1109   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1110   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1111 #else
1112   KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1113   KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1114 #endif
1115   if (sizeof_shareds > 0) {
1116     // Avoid double allocation here by combining shareds with taskdata
1117     task->shareds = &((char *)taskdata)[shareds_offset];
1118     // Make sure shareds struct is aligned to pointer size
1119     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1120                      0);
1121   } else {
1122     task->shareds = NULL;
1123   }
1124   task->routine = task_entry;
1125   task->part_id = 0; // AC: Always start with 0 part id
1126 
1127   taskdata->td_task_id = KMP_GEN_TASK_ID();
1128   taskdata->td_team = team;
1129   taskdata->td_alloc_thread = thread;
1130   taskdata->td_parent = parent_task;
1131   taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1132   taskdata->td_untied_count = 0;
1133   taskdata->td_ident = loc_ref;
1134   taskdata->td_taskwait_ident = NULL;
1135   taskdata->td_taskwait_counter = 0;
1136   taskdata->td_taskwait_thread = 0;
1137   KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1138 #if OMP_45_ENABLED
1139   // avoid copying icvs for proxy tasks
1140   if (flags->proxy == TASK_FULL)
1141 #endif
1142     copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1143 
1144   taskdata->td_flags.tiedness = flags->tiedness;
1145   taskdata->td_flags.final = flags->final;
1146   taskdata->td_flags.merged_if0 = flags->merged_if0;
1147 #if OMP_40_ENABLED
1148   taskdata->td_flags.destructors_thunk = flags->destructors_thunk;
1149 #endif // OMP_40_ENABLED
1150 #if OMP_45_ENABLED
1151   taskdata->td_flags.proxy = flags->proxy;
1152   taskdata->td_task_team = thread->th.th_task_team;
1153   taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1154 #endif
1155   taskdata->td_flags.tasktype = TASK_EXPLICIT;
1156 
1157   // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1158   taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1159 
1160   // GEH - TODO: fix this to copy parent task's value of team_serial flag
1161   taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1162 
1163   // GEH - Note we serialize the task if the team is serialized to make sure
1164   // implicit parallel region tasks are not left until program termination to
1165   // execute. Also, it helps locality to execute immediately.
1166 
1167   taskdata->td_flags.task_serial =
1168       (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1169        taskdata->td_flags.tasking_ser);
1170 
1171   taskdata->td_flags.started = 0;
1172   taskdata->td_flags.executing = 0;
1173   taskdata->td_flags.complete = 0;
1174   taskdata->td_flags.freed = 0;
1175 
1176   taskdata->td_flags.native = flags->native;
1177 
1178   taskdata->td_incomplete_child_tasks = 0;
1179   taskdata->td_allocated_child_tasks = 1; // start at one because counts current
1180 // task and children
1181 #if OMP_40_ENABLED
1182   taskdata->td_taskgroup =
1183       parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1184   taskdata->td_dephash = NULL;
1185   taskdata->td_depnode = NULL;
1186 #endif
1187   if (flags->tiedness == TASK_UNTIED)
1188     taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1189   else
1190     taskdata->td_last_tied = taskdata;
1191 
1192 // Only need to keep track of child task counts if team parallel and tasking not
1193 // serialized or if it is a proxy task
1194 #if OMP_45_ENABLED
1195   if (flags->proxy == TASK_PROXY ||
1196       !(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1197 #else
1198   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser))
1199 #endif
1200   {
1201     KMP_TEST_THEN_INC32(&parent_task->td_incomplete_child_tasks);
1202 #if OMP_40_ENABLED
1203     if (parent_task->td_taskgroup)
1204       KMP_TEST_THEN_INC32((kmp_int32 *)(&parent_task->td_taskgroup->count));
1205 #endif
    // Only need to keep track of allocated child tasks for explicit tasks
    // since implicit tasks are not deallocated
1208     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1209       KMP_TEST_THEN_INC32(&taskdata->td_parent->td_allocated_child_tasks);
1210     }
1211   }
1212 
1213   KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1214                 gtid, taskdata, taskdata->td_parent));
1215   ANNOTATE_HAPPENS_BEFORE(task);
1216 
1217 #if OMPT_SUPPORT
1218   if (UNLIKELY(ompt_enabled.enabled))
1219     __ompt_task_init(taskdata, gtid);
1220 #endif
1221 
1222   return task;
1223 }
1224 
1225 kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1226                                   kmp_int32 flags, size_t sizeof_kmp_task_t,
1227                                   size_t sizeof_shareds,
1228                                   kmp_routine_entry_t task_entry) {
1229   kmp_task_t *retval;
1230   kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1231 
1232   input_flags->native = FALSE;
1233 // __kmp_task_alloc() sets up all other runtime flags
1234 
1235 #if OMP_45_ENABLED
1236   KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s) "
1237                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1238                 gtid, loc_ref, input_flags->tiedness ? "tied  " : "untied",
1239                 input_flags->proxy ? "proxy" : "", sizeof_kmp_task_t,
1240                 sizeof_shareds, task_entry));
1241 #else
1242   KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s) "
1243                 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1244                 gtid, loc_ref, input_flags->tiedness ? "tied  " : "untied",
1245                 sizeof_kmp_task_t, sizeof_shareds, task_entry));
1246 #endif
1247 
1248   retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1249                             sizeof_shareds, task_entry);
1250 
1251   KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1252 
1253   return retval;
1254 }
1255 
1256 //  __kmp_invoke_task: invoke the specified task
1257 //
1258 // gtid: global thread ID of caller
1259 // task: the task to invoke
// current_task: the task to resume after task invocation
1261 static void __kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1262                               kmp_taskdata_t *current_task) {
1263   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1264   kmp_uint64 cur_time;
1265 #if OMP_40_ENABLED
1266   int discard = 0 /* false */;
1267 #endif
1268   KA_TRACE(
1269       30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1270            gtid, taskdata, current_task));
1271   KMP_DEBUG_ASSERT(task);
1272 #if OMP_45_ENABLED
1273   if (taskdata->td_flags.proxy == TASK_PROXY &&
1274       taskdata->td_flags.complete == 1) {
1275     // This is a proxy task that was already completed but it needs to run
1276     // its bottom-half finish
1277     KA_TRACE(
1278         30,
1279         ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1280          gtid, taskdata));
1281 
1282     __kmp_bottom_half_finish_proxy(gtid, task);
1283 
1284     KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1285                   "proxy task %p, resuming task %p\n",
1286                   gtid, taskdata, current_task));
1287 
1288     return;
1289   }
1290 #endif
1291 
1292 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1293   if (__kmp_forkjoin_frames_mode == 3) {
1294     // Get the current time stamp to measure task execution time to correct
1295     // barrier imbalance time
1296     cur_time = __itt_get_timestamp();
1297   }
1298 #endif
1299 
1300 #if OMP_45_ENABLED
1301   // Proxy tasks are not handled by the runtime
1302   if (taskdata->td_flags.proxy != TASK_PROXY) {
1303 #endif
1304     ANNOTATE_HAPPENS_AFTER(task);
1305     __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1306 #if OMP_45_ENABLED
1307   }
1308 #endif
1309 
1310 #if OMPT_SUPPORT
1311   ompt_thread_info_t oldInfo;
1312   kmp_info_t *thread;
1313   if (UNLIKELY(ompt_enabled.enabled)) {
    // Store the thread's state and restore it after the task
1315     thread = __kmp_threads[gtid];
1316     oldInfo = thread->th.ompt_thread_info;
1317     thread->th.ompt_thread_info.wait_id = 0;
1318     thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1319                                             ? omp_state_work_serial
1320                                             : omp_state_work_parallel;
1321     taskdata->ompt_task_info.frame.exit_frame = OMPT_GET_FRAME_ADDRESS(0);
1322   }
1323 #endif
1324 
1325 #if OMP_40_ENABLED
1326   // TODO: cancel tasks if the parallel region has also been cancelled
1327   // TODO: check if this sequence can be hoisted above __kmp_task_start
1328   // if cancellation has been enabled for this run ...
1329   if (__kmp_omp_cancellation) {
1330     kmp_info_t *this_thr = __kmp_threads[gtid];
1331     kmp_team_t *this_team = this_thr->th.th_team;
1332     kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1333     if ((taskgroup && taskgroup->cancel_request) ||
1334         (this_team->t.t_cancel_request == cancel_parallel)) {
1335 #if OMPT_SUPPORT && OMPT_OPTIONAL
1336       ompt_data_t *task_data;
1337       if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1338         __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1339         ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1340             task_data,
1341             ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1342                                                       : ompt_cancel_parallel) |
1343                 ompt_cancel_discarded_task,
1344             NULL);
1345       }
1346 #endif
1347       KMP_COUNT_BLOCK(TASK_cancelled);
      // the task's taskgroup or the enclosing parallel region has been
      // cancelled, so discard this task
1349       discard = 1 /* true */;
1350     }
1351   }
1352 
1353   // Invoke the task routine and pass in relevant data.
1354   // Thunks generated by gcc take a different argument list.
1355   if (!discard) {
1356     if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1357       taskdata->td_last_tied = current_task->td_last_tied;
1358       KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1359     }
1360 #if KMP_STATS_ENABLED
1361     KMP_COUNT_BLOCK(TASK_executed);
1362     switch (KMP_GET_THREAD_STATE()) {
1363     case FORK_JOIN_BARRIER:
1364       KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1365       break;
1366     case PLAIN_BARRIER:
1367       KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1368       break;
1369     case TASKYIELD:
1370       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1371       break;
1372     case TASKWAIT:
1373       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1374       break;
1375     case TASKGROUP:
1376       KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1377       break;
1378     default:
1379       KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1380       break;
1381     }
1382 #endif // KMP_STATS_ENABLED
1383 #endif // OMP_40_ENABLED
1384 
1385 // OMPT task begin
1386 #if OMPT_SUPPORT
1387     if (UNLIKELY(ompt_enabled.enabled))
1388       __ompt_task_start(task, current_task, gtid);
1389 #endif
1390 
1391 #ifdef KMP_GOMP_COMPAT
1392     if (taskdata->td_flags.native) {
1393       ((void (*)(void *))(*(task->routine)))(task->shareds);
1394     } else
1395 #endif /* KMP_GOMP_COMPAT */
1396     {
1397       (*(task->routine))(gtid, task);
1398     }
1399     KMP_POP_PARTITIONED_TIMER();
1400 
1401 #if OMPT_SUPPORT
1402     if (UNLIKELY(ompt_enabled.enabled))
1403       __ompt_task_finish(task, current_task);
1404 #endif
1405 #if OMP_40_ENABLED
1406   }
1407 #endif // OMP_40_ENABLED
1408 
1409 #if OMPT_SUPPORT
1410   if (UNLIKELY(ompt_enabled.enabled)) {
1411     thread->th.ompt_thread_info = oldInfo;
1412     taskdata->ompt_task_info.frame.exit_frame = NULL;
1413   }
1414 #endif
1415 
1416 #if OMP_45_ENABLED
1417   // Proxy tasks are not handled by the runtime
1418   if (taskdata->td_flags.proxy != TASK_PROXY) {
1419 #endif
1420     ANNOTATE_HAPPENS_BEFORE(taskdata->td_parent);
1421     __kmp_task_finish(gtid, task, current_task); // OMPT only if not discarded
1422 #if OMP_45_ENABLED
1423   }
1424 #endif
1425 
1426 #if USE_ITT_BUILD && USE_ITT_NOTIFY
1427   // Barrier imbalance - correct arrive time after the task finished
1428   if (__kmp_forkjoin_frames_mode == 3) {
1429     kmp_info_t *this_thr = __kmp_threads[gtid];
1430     if (this_thr->th.th_bar_arrive_time) {
1431       this_thr->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1432     }
1433   }
1434 #endif
1435   KA_TRACE(
1436       30,
1437       ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1438        gtid, taskdata, current_task));
1439   return;
1440 }
1441 
1442 // __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1443 //
1444 // loc_ref: location of original task pragma (ignored)
1445 // gtid: Global Thread ID of encountering thread
// new_task: task thunk allocated by __kmpc_omp_task_alloc() for the
// "new task"
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
//    queued to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
//    be resumed later.
1452 kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1453                                 kmp_task_t *new_task) {
1454   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1455 
1456   KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1457                 loc_ref, new_taskdata));
1458 
1459 #if OMPT_SUPPORT
1460   kmp_taskdata_t *parent;
1461   if (UNLIKELY(ompt_enabled.enabled)) {
1462     parent = new_taskdata->td_parent;
1463     if (ompt_enabled.ompt_callback_task_create) {
1464       ompt_data_t task_data = ompt_data_none;
1465       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1466           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1467           parent ? &(parent->ompt_task_info.frame) : NULL,
1468           &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1469           OMPT_GET_RETURN_ADDRESS(0));
1470     }
1471   }
1472 #endif
1473 
1474   /* Should we execute the new task or queue it? For now, let's just always try
1475      to queue it.  If the queue fills up, then we'll execute it.  */
1476 
1477   if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1478   { // Execute this task immediately
1479     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1480     new_taskdata->td_flags.task_serial = 1;
1481     __kmp_invoke_task(gtid, new_task, current_task);
1482   }
1483 
  KA_TRACE(10,
           ("__kmpc_omp_task_parts(exit): T#%d returning "
            "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
            gtid, loc_ref, new_taskdata));
1489 
1490   ANNOTATE_HAPPENS_BEFORE(new_task);
1491 #if OMPT_SUPPORT
1492   if (UNLIKELY(ompt_enabled.enabled)) {
1493     parent->ompt_task_info.frame.enter_frame = NULL;
1494   }
1495 #endif
1496   return TASK_CURRENT_NOT_QUEUED;
1497 }
1498 
1499 // __kmp_omp_task: Schedule a non-thread-switchable task for execution
1500 //
1501 // gtid: Global Thread ID of encountering thread
// new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
1503 // serialize_immediate: if TRUE then if the task is executed immediately its
1504 // execution will be serialized
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
//    queued to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
//    be resumed later.
1510 kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
1511                          bool serialize_immediate) {
1512   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1513 
1514 /* Should we execute the new task or queue it? For now, let's just always try to
1515    queue it.  If the queue fills up, then we'll execute it.  */
1516 #if OMP_45_ENABLED
1517   if (new_taskdata->td_flags.proxy == TASK_PROXY ||
1518       __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1519 #else
1520   if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1521 #endif
1522   { // Execute this task immediately
1523     kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1524     if (serialize_immediate)
1525       new_taskdata->td_flags.task_serial = 1;
1526     __kmp_invoke_task(gtid, new_task, current_task);
1527   }
1528 
1529   ANNOTATE_HAPPENS_BEFORE(new_task);
1530   return TASK_CURRENT_NOT_QUEUED;
1531 }
1532 
1533 // __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
1534 // non-thread-switchable task from the parent thread only!
1535 //
1536 // loc_ref: location of original task pragma (ignored)
1537 // gtid: Global Thread ID of encountering thread
1538 // new_task: non-thread-switchable task thunk allocated by
// __kmpc_omp_task_alloc()
// Returns:
//    TASK_CURRENT_NOT_QUEUED (0) if the current task was not suspended and
//    queued to be resumed later.
//    TASK_CURRENT_QUEUED (1) if the current task was suspended and queued to
//    be resumed later.
1545 kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
1546                           kmp_task_t *new_task) {
1547   kmp_int32 res;
1548   KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
1549 
1550 #if KMP_DEBUG || OMPT_SUPPORT
1551   kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1552 #endif
1553   KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
1554                 new_taskdata));
1555 
1556 #if OMPT_SUPPORT
1557   kmp_taskdata_t *parent = NULL;
1558   if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
1559     OMPT_STORE_RETURN_ADDRESS(gtid);
1560     parent = new_taskdata->td_parent;
1561     if (!parent->ompt_task_info.frame.enter_frame)
1562       parent->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1563     if (ompt_enabled.ompt_callback_task_create) {
1564       ompt_data_t task_data = ompt_data_none;
1565       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1566           parent ? &(parent->ompt_task_info.task_data) : &task_data,
1567           parent ? &(parent->ompt_task_info.frame) : NULL,
1568           &(new_taskdata->ompt_task_info.task_data),
1569           ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
1570           OMPT_LOAD_RETURN_ADDRESS(gtid));
1571     }
1572   }
1573 #endif
1574 
1575   res = __kmp_omp_task(gtid, new_task, true);
1576 
1577   KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
1578                 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
1579                 gtid, loc_ref, new_taskdata));
1580 #if OMPT_SUPPORT
1581   if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
1582     parent->ompt_task_info.frame.enter_frame = NULL;
1583   }
1584 #endif
1585   return res;
1586 }
1587 
1588 template <bool ompt>
1589 static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
1590                                               void *frame_address,
1591                                               void *return_address) {
1592   kmp_taskdata_t *taskdata;
1593   kmp_info_t *thread;
1594   int thread_finished = FALSE;
1595   KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
1596 
1597   KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
1598 
1599   if (__kmp_tasking_mode != tskm_immediate_exec) {
1600     thread = __kmp_threads[gtid];
1601     taskdata = thread->th.th_current_task;
1602 
1603 #if OMPT_SUPPORT && OMPT_OPTIONAL
1604     ompt_data_t *my_task_data;
1605     ompt_data_t *my_parallel_data;
1606 
1607     if (ompt) {
1608       my_task_data = &(taskdata->ompt_task_info.task_data);
1609       my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
1610 
1611       taskdata->ompt_task_info.frame.enter_frame = frame_address;
1612 
1613       if (ompt_enabled.ompt_callback_sync_region) {
1614         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1615             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1616             my_task_data, return_address);
1617       }
1618 
1619       if (ompt_enabled.ompt_callback_sync_region_wait) {
1620         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1621             ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
1622             my_task_data, return_address);
1623       }
1624     }
1625 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1626 
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1629 #if USE_ITT_BUILD
1630 // Note: These values are used by ITT events as well.
1631 #endif /* USE_ITT_BUILD */
1632     taskdata->td_taskwait_counter += 1;
1633     taskdata->td_taskwait_ident = loc_ref;
1634     taskdata->td_taskwait_thread = gtid + 1;
1635 
1636 #if USE_ITT_BUILD
1637     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1638     if (itt_sync_obj != NULL)
1639       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1640 #endif /* USE_ITT_BUILD */
1641 
1642     bool must_wait =
1643         !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
1644 
1645 #if OMP_45_ENABLED
1646     must_wait = must_wait || (thread->th.th_task_team != NULL &&
1647                               thread->th.th_task_team->tt.tt_found_proxy_tasks);
1648 #endif
1649     if (must_wait) {
1650       kmp_flag_32 flag(
1651           RCAST(volatile kmp_uint32 *, &taskdata->td_incomplete_child_tasks),
1652           0U);
1653       while (TCR_4(taskdata->td_incomplete_child_tasks) != 0) {
1654         flag.execute_tasks(thread, gtid, FALSE,
1655                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1656                            __kmp_task_stealing_constraint);
1657       }
1658     }
1659 #if USE_ITT_BUILD
1660     if (itt_sync_obj != NULL)
1661       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1662 #endif /* USE_ITT_BUILD */
1663 
1664     // Debugger:  The taskwait is completed. Location remains, but thread is
1665     // negated.
1666     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1667 
1668 #if OMPT_SUPPORT && OMPT_OPTIONAL
1669     if (ompt) {
1670       if (ompt_enabled.ompt_callback_sync_region_wait) {
1671         ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
1672             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1673             my_task_data, return_address);
1674       }
1675       if (ompt_enabled.ompt_callback_sync_region) {
1676         ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
1677             ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
1678             my_task_data, return_address);
1679       }
1680       taskdata->ompt_task_info.frame.enter_frame = NULL;
1681     }
1682 #endif // OMPT_SUPPORT && OMPT_OPTIONAL
1683 
1684     ANNOTATE_HAPPENS_AFTER(taskdata);
1685   }
1686 
1687   KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
1688                 "returning TASK_CURRENT_NOT_QUEUED\n",
1689                 gtid, taskdata));
1690 
1691   return TASK_CURRENT_NOT_QUEUED;
1692 }
1693 
1694 #if OMPT_SUPPORT
1695 OMPT_NOINLINE
1696 static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
1697                                           void *frame_address,
1698                                           void *return_address) {
1699   return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
1700                                             return_address);
1701 }
1702 #endif // OMPT_SUPPORT
1703 
1704 // __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
1705 // complete
1706 kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
1707 #if OMPT_SUPPORT && OMPT_OPTIONAL
1708   if (UNLIKELY(ompt_enabled.enabled)) {
1709     OMPT_STORE_RETURN_ADDRESS(gtid);
1710     return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(1),
1711                                     OMPT_LOAD_RETURN_ADDRESS(gtid));
1712   }
1713 #endif
1714   return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
1715 }
1716 
1717 // __kmpc_omp_taskyield: switch to a different task
1718 kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
1719   kmp_taskdata_t *taskdata;
1720   kmp_info_t *thread;
1721   int thread_finished = FALSE;
1722 
1723   KMP_COUNT_BLOCK(OMP_TASKYIELD);
1724   KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
1725 
1726   KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
1727                 gtid, loc_ref, end_part));
1728 
1729   if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
1730     thread = __kmp_threads[gtid];
1731     taskdata = thread->th.th_current_task;
1732 // Should we model this as a task wait or not?
// Debugger: The taskwait is active. Store the location and the thread that
// encountered the taskwait.
1735 #if USE_ITT_BUILD
1736 // Note: These values are used by ITT events as well.
1737 #endif /* USE_ITT_BUILD */
1738     taskdata->td_taskwait_counter += 1;
1739     taskdata->td_taskwait_ident = loc_ref;
1740     taskdata->td_taskwait_thread = gtid + 1;
1741 
1742 #if USE_ITT_BUILD
1743     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
1744     if (itt_sync_obj != NULL)
1745       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
1746 #endif /* USE_ITT_BUILD */
1747     if (!taskdata->td_flags.team_serial) {
1748       kmp_task_team_t *task_team = thread->th.th_task_team;
1749       if (task_team != NULL) {
1750         if (KMP_TASKING_ENABLED(task_team)) {
1751 #if OMPT_SUPPORT
1752           if (UNLIKELY(ompt_enabled.enabled))
1753             thread->th.ompt_thread_info.ompt_task_yielded = 1;
1754 #endif
1755           __kmp_execute_tasks_32(
1756               thread, gtid, NULL, FALSE,
1757               &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
1758               __kmp_task_stealing_constraint);
1759 #if OMPT_SUPPORT
1760           if (UNLIKELY(ompt_enabled.enabled))
1761             thread->th.ompt_thread_info.ompt_task_yielded = 0;
1762 #endif
1763         }
1764       }
1765     }
1766 #if USE_ITT_BUILD
1767     if (itt_sync_obj != NULL)
1768       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
1769 #endif /* USE_ITT_BUILD */
1770 
1771     // Debugger:  The taskwait is completed. Location remains, but thread is
1772     // negated.
1773     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
1774   }
1775 
1776   KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
1777                 "returning TASK_CURRENT_NOT_QUEUED\n",
1778                 gtid, taskdata));
1779 
1780   return TASK_CURRENT_NOT_QUEUED;
1781 }
1782 
1783 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
1784 #if OMP_45_ENABLED
1785 // Task Reduction implementation
1786 
1787 typedef struct kmp_task_red_flags {
1788   unsigned lazy_priv : 1; // hint: (1) use lazy allocation (big objects)
1789   unsigned reserved31 : 31;
1790 } kmp_task_red_flags_t;
1791 
1792 // internal structure for reduction data item related info
1793 typedef struct kmp_task_red_data {
1794   void *reduce_shar; // shared reduction item
1795   size_t reduce_size; // size of data item
1796   void *reduce_priv; // thread specific data
1797   void *reduce_pend; // end of private data for comparison op
1798   void *reduce_init; // data initialization routine
1799   void *reduce_fini; // data finalization routine
1800   void *reduce_comb; // data combiner routine
1801   kmp_task_red_flags_t flags; // flags for additional info from compiler
1802 } kmp_task_red_data_t;
1803 
// structure sent to us by the compiler - one per reduction item
1805 typedef struct kmp_task_red_input {
1806   void *reduce_shar; // shared reduction item
1807   size_t reduce_size; // size of data item
1808   void *reduce_init; // data initialization routine
1809   void *reduce_fini; // data finalization routine
1810   void *reduce_comb; // data combiner routine
1811   kmp_task_red_flags_t flags; // flags for additional info from compiler
1812 } kmp_task_red_input_t;
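
// Illustrative sketch of the descriptor a compiler might build for a single
// "task_reduction(+ : x)" item before calling __kmpc_task_reduction_init()
// (x_init and x_comb are hypothetical helper names, not runtime symbols):
//
//   kmp_task_red_input_t red[1];
//   red[0].reduce_shar = &x;
//   red[0].reduce_size = sizeof(x);
//   red[0].reduce_init = (void *)&x_init; // void x_init(void *p)
//   red[0].reduce_fini = NULL;            // nothing to destroy for PODs
//   red[0].reduce_comb = (void *)&x_comb; // void x_comb(void *l, void *r)
//   red[0].flags.lazy_priv = 0;           // eager per-thread allocation
//   void *tg = __kmpc_task_reduction_init(gtid, 1, red);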
1813 
1814 /*!
1815 @ingroup TASKING
1816 @param gtid      Global thread ID
1817 @param num       Number of data items to reduce
1818 @param data      Array of data for reduction
1819 @return The taskgroup identifier
1820 
1821 Initialize task reduction for the taskgroup.
1822 */
1823 void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
1824   kmp_info_t *thread = __kmp_threads[gtid];
1825   kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
1826   kmp_int32 nth = thread->th.th_team_nproc;
1827   kmp_task_red_input_t *input = (kmp_task_red_input_t *)data;
1828   kmp_task_red_data_t *arr;
1829 
1830   // check input data just in case
1831   KMP_ASSERT(tg != NULL);
1832   KMP_ASSERT(data != NULL);
1833   KMP_ASSERT(num > 0);
1834   if (nth == 1) {
1835     KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
1836                   gtid, tg));
1837     return (void *)tg;
1838   }
1839   KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
1840                 gtid, tg, num));
1841   arr = (kmp_task_red_data_t *)__kmp_thread_malloc(
1842       thread, num * sizeof(kmp_task_red_data_t));
1843   for (int i = 0; i < num; ++i) {
1844     void (*f_init)(void *) = (void (*)(void *))(input[i].reduce_init);
1845     size_t size = input[i].reduce_size - 1;
1846     // round the size up to cache line per thread-specific item
1847     size += CACHE_LINE - size % CACHE_LINE;
1848     KMP_ASSERT(input[i].reduce_comb != NULL); // combiner is mandatory
1849     arr[i].reduce_shar = input[i].reduce_shar;
1850     arr[i].reduce_size = size;
1851     arr[i].reduce_init = input[i].reduce_init;
1852     arr[i].reduce_fini = input[i].reduce_fini;
1853     arr[i].reduce_comb = input[i].reduce_comb;
1854     arr[i].flags = input[i].flags;
1855     if (!input[i].flags.lazy_priv) {
1856       // allocate cache-line aligned block and fill it with zeros
1857       arr[i].reduce_priv = __kmp_allocate(nth * size);
1858       arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
1859       if (f_init != NULL) {
1860         // initialize thread-specific items
1861         for (int j = 0; j < nth; ++j) {
1862           f_init((char *)(arr[i].reduce_priv) + j * size);
1863         }
1864       }
1865     } else {
1866       // only allocate space for pointers now,
1867       // objects will be lazily allocated/initialized once requested
1868       arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
1869     }
1870   }
1871   tg->reduce_data = (void *)arr;
1872   tg->reduce_num_data = num;
1873   return (void *)tg;
1874 }
1875 
1876 /*!
1877 @ingroup TASKING
1878 @param gtid    Global thread ID
1879 @param tskgrp  The taskgroup ID (optional)
1880 @param data    Shared location of the item
1881 @return The pointer to per-thread data
1882 
1883 Get thread-specific location of data item
1884 */
1885 void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
1886   kmp_info_t *thread = __kmp_threads[gtid];
1887   kmp_int32 nth = thread->th.th_team_nproc;
1888   if (nth == 1)
1889     return data; // nothing to do
1890 
1891   kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
1892   if (tg == NULL)
1893     tg = thread->th.th_current_task->td_taskgroup;
1894   KMP_ASSERT(tg != NULL);
1895   kmp_task_red_data_t *arr = (kmp_task_red_data_t *)(tg->reduce_data);
1896   kmp_int32 num = tg->reduce_num_data;
1897   kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1898 
1899   KMP_ASSERT(data != NULL);
1900   while (tg != NULL) {
1901     for (int i = 0; i < num; ++i) {
1902       if (!arr[i].flags.lazy_priv) {
1903         if (data == arr[i].reduce_shar ||
1904             (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
1905           return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
1906       } else {
1907         // check shared location first
1908         void **p_priv = (void **)(arr[i].reduce_priv);
1909         if (data == arr[i].reduce_shar)
1910           goto found;
1911         // check if we get some thread specific location as parameter
1912         for (int j = 0; j < nth; ++j)
1913           if (data == p_priv[j])
1914             goto found;
1915         continue; // not found, continue search
1916       found:
1917         if (p_priv[tid] == NULL) {
1918           // allocate thread specific object lazily
1919           void (*f_init)(void *) = (void (*)(void *))(arr[i].reduce_init);
1920           p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
1921           if (f_init != NULL) {
1922             f_init(p_priv[tid]);
1923           }
1924         }
1925         return p_priv[tid];
1926       }
1927     }
    tg = tg->parent;
    if (tg == NULL)
      break; // no enclosing taskgroup left; fall through to the assert
    arr = (kmp_task_red_data_t *)(tg->reduce_data);
    num = tg->reduce_num_data;
1931   }
1932   KMP_ASSERT2(0, "Unknown task reduction item");
1933   return NULL; // ERROR, this line never executed
1934 }
1935 
1936 // Finalize task reduction.
1937 // Called from __kmpc_end_taskgroup()
1938 static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
1939   kmp_int32 nth = th->th.th_team_nproc;
1940   KMP_DEBUG_ASSERT(nth > 1); // should not be called if nth == 1
1941   kmp_task_red_data_t *arr = (kmp_task_red_data_t *)tg->reduce_data;
1942   kmp_int32 num = tg->reduce_num_data;
1943   for (int i = 0; i < num; ++i) {
1944     void *sh_data = arr[i].reduce_shar;
1945     void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
1946     void (*f_comb)(void *, void *) =
1947         (void (*)(void *, void *))(arr[i].reduce_comb);
1948     if (!arr[i].flags.lazy_priv) {
1949       void *pr_data = arr[i].reduce_priv;
1950       size_t size = arr[i].reduce_size;
1951       for (int j = 0; j < nth; ++j) {
1952         void *priv_data = (char *)pr_data + j * size;
1953         f_comb(sh_data, priv_data); // combine results
1954         if (f_fini)
1955           f_fini(priv_data); // finalize if needed
1956       }
1957     } else {
1958       void **pr_data = (void **)(arr[i].reduce_priv);
1959       for (int j = 0; j < nth; ++j) {
1960         if (pr_data[j] != NULL) {
1961           f_comb(sh_data, pr_data[j]); // combine results
1962           if (f_fini)
1963             f_fini(pr_data[j]); // finalize if needed
1964           __kmp_free(pr_data[j]);
1965         }
1966       }
1967     }
1968     __kmp_free(arr[i].reduce_priv);
1969   }
1970   __kmp_thread_free(th, arr);
1971   tg->reduce_data = NULL;
1972   tg->reduce_num_data = 0;
1973 }
1974 #endif
1975 
1976 #if OMP_40_ENABLED
1977 // __kmpc_taskgroup: Start a new taskgroup
1978 void __kmpc_taskgroup(ident_t *loc, int gtid) {
1979   kmp_info_t *thread = __kmp_threads[gtid];
1980   kmp_taskdata_t *taskdata = thread->th.th_current_task;
1981   kmp_taskgroup_t *tg_new =
1982       (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
1983   KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
1984   tg_new->count = 0;
1985   tg_new->cancel_request = cancel_noreq;
1986   tg_new->parent = taskdata->td_taskgroup;
1987 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
1988 #if OMP_45_ENABLED
1989   tg_new->reduce_data = NULL;
1990   tg_new->reduce_num_data = 0;
1991 #endif
1992   taskdata->td_taskgroup = tg_new;
1993 
1994 #if OMPT_SUPPORT && OMPT_OPTIONAL
1995   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
1996     void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
1997     if (!codeptr)
1998       codeptr = OMPT_GET_RETURN_ADDRESS(0);
1999     kmp_team_t *team = thread->th.th_team;
2000     ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2001     // FIXME: I think this is wrong for lwt!
2002     ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2003 
2004     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2005         ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2006         &(my_task_data), codeptr);
2007   }
2008 #endif
2009 }
2010 
2011 // __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2012 //                       and its descendants are complete
2013 void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2014   kmp_info_t *thread = __kmp_threads[gtid];
2015   kmp_taskdata_t *taskdata = thread->th.th_current_task;
2016   kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2017   int thread_finished = FALSE;
2018 
2019 #if OMPT_SUPPORT && OMPT_OPTIONAL
2020   kmp_team_t *team;
2021   ompt_data_t my_task_data;
2022   ompt_data_t my_parallel_data;
2023   void *codeptr;
2024   if (UNLIKELY(ompt_enabled.enabled)) {
2025     team = thread->th.th_team;
2026     my_task_data = taskdata->ompt_task_info.task_data;
2027     // FIXME: I think this is wrong for lwt!
2028     my_parallel_data = team->t.ompt_team_info.parallel_data;
2029     codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2030     if (!codeptr)
2031       codeptr = OMPT_GET_RETURN_ADDRESS(0);
2032   }
2033 #endif
2034 
2035   KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2036   KMP_DEBUG_ASSERT(taskgroup != NULL);
2037   KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2038 
2039   if (__kmp_tasking_mode != tskm_immediate_exec) {
    // mark the task as waiting (not on a barrier)
2041     taskdata->td_taskwait_counter += 1;
2042     taskdata->td_taskwait_ident = loc;
2043     taskdata->td_taskwait_thread = gtid + 1;
2044 #if USE_ITT_BUILD
2045     // For ITT the taskgroup wait is similar to taskwait until we need to
2046     // distinguish them
2047     void *itt_sync_obj = __kmp_itt_taskwait_object(gtid);
2048     if (itt_sync_obj != NULL)
2049       __kmp_itt_taskwait_starting(gtid, itt_sync_obj);
2050 #endif /* USE_ITT_BUILD */
2051 
2052 #if OMPT_SUPPORT && OMPT_OPTIONAL
2053     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2054       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2055           ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2056           &(my_task_data), codeptr);
2057     }
2058 #endif
2059 
2060 #if OMP_45_ENABLED
2061     if (!taskdata->td_flags.team_serial ||
2062         (thread->th.th_task_team != NULL &&
2063          thread->th.th_task_team->tt.tt_found_proxy_tasks))
2064 #else
2065     if (!taskdata->td_flags.team_serial)
2066 #endif
2067     {
2068       kmp_flag_32 flag(RCAST(kmp_uint32 *, &taskgroup->count), 0U);
2069       while (TCR_4(taskgroup->count) != 0) {
2070         flag.execute_tasks(thread, gtid, FALSE,
2071                            &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2072                            __kmp_task_stealing_constraint);
2073       }
2074     }
2075     taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2076 
2077 #if OMPT_SUPPORT && OMPT_OPTIONAL
2078     if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2079       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2080           ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2081           &(my_task_data), codeptr);
2082     }
2083 #endif
2084 
2085 #if USE_ITT_BUILD
2086     if (itt_sync_obj != NULL)
2087       __kmp_itt_taskwait_finished(gtid, itt_sync_obj);
2088 #endif /* USE_ITT_BUILD */
2089   }
2090   KMP_DEBUG_ASSERT(taskgroup->count == 0);
2091 
2092 // TODO: change to OMP_50_ENABLED, need to change build tools for this to work
2093 #if OMP_45_ENABLED
2094   if (taskgroup->reduce_data != NULL) // need to reduce?
2095     __kmp_task_reduction_fini(thread, taskgroup);
2096 #endif
2097   // Restore parent taskgroup for the current task
2098   taskdata->td_taskgroup = taskgroup->parent;
2099   __kmp_thread_free(thread, taskgroup);
2100 
2101   KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
2102                 gtid, taskdata));
2103   ANNOTATE_HAPPENS_AFTER(taskdata);
2104 
2105 #if OMPT_SUPPORT && OMPT_OPTIONAL
2106   if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2107     ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2108         ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2109         &(my_task_data), codeptr);
2110   }
2111 #endif
2112 }
2113 #endif
2114 
2115 // __kmp_remove_my_task: remove a task from my own deque
2116 static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
2117                                         kmp_task_team_t *task_team,
2118                                         kmp_int32 is_constrained) {
2119   kmp_task_t *task;
2120   kmp_taskdata_t *taskdata;
2121   kmp_thread_data_t *thread_data;
2122   kmp_uint32 tail;
2123 
2124   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2125   KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
2126                    NULL); // Caller should check this condition
2127 
2128   thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
2129 
2130   KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
2131                 gtid, thread_data->td.td_deque_ntasks,
2132                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2133 
2134   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2135     KA_TRACE(10,
2136              ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
2137               "ntasks=%d head=%u tail=%u\n",
2138               gtid, thread_data->td.td_deque_ntasks,
2139               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2140     return NULL;
2141   }
2142 
2143   __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2144 
2145   if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
2146     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2147     KA_TRACE(10,
2148              ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
2149               "ntasks=%d head=%u tail=%u\n",
2150               gtid, thread_data->td.td_deque_ntasks,
2151               thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2152     return NULL;
2153   }
2154 
2155   tail = (thread_data->td.td_deque_tail - 1) &
2156          TASK_DEQUE_MASK(thread_data->td); // Wrap index.
2157   taskdata = thread_data->td.td_deque[tail];
2158 
2159   if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
    // we need to check if the candidate obeys the task scheduling constraint
    // (TSC): only a descendant of all deferred tied tasks can be scheduled.
    // Checking the last one suffices, since it is in turn a descendant of
    // all the others.
2163     kmp_taskdata_t *current = thread->th.th_current_task->td_last_tied;
2164     KMP_DEBUG_ASSERT(current != NULL);
2165     // check if last tied task is not suspended on barrier
2166     if (current->td_flags.tasktype == TASK_EXPLICIT ||
2167         current->td_taskwait_thread > 0) { // <= 0 on barrier
2168       kmp_int32 level = current->td_level;
2169       kmp_taskdata_t *parent = taskdata->td_parent;
2170       while (parent != current && parent->td_level > level) {
        // walk up generations to the level of the current task
        parent = parent->td_parent;
2173         KMP_DEBUG_ASSERT(parent != NULL);
2174       }
2175       if (parent != current) {
        // The TSC does not allow stealing this task
        __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
        KA_TRACE(10, ("__kmp_remove_my_task(exit #3): T#%d No tasks to remove: "
2179                       "ntasks=%d head=%u tail=%u\n",
2180                       gtid, thread_data->td.td_deque_ntasks,
2181                       thread_data->td.td_deque_head,
2182                       thread_data->td.td_deque_tail));
2183         return NULL;
2184       }
2185     }
2186   }
2187 
2188   thread_data->td.td_deque_tail = tail;
2189   TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
2190 
2191   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2192 
  KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
2194                 "ntasks=%d head=%u tail=%u\n",
2195                 gtid, taskdata, thread_data->td.td_deque_ntasks,
2196                 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
2197 
2198   task = KMP_TASKDATA_TO_TASK(taskdata);
2199   return task;
2200 }
2201 
2202 // __kmp_steal_task: remove a task from another thread's deque
// Assumes that the calling thread has already verified that the task_team
// thread_data exists before calling this routine.
2205 static kmp_task_t *__kmp_steal_task(kmp_info_t *victim_thr, kmp_int32 gtid,
2206                                     kmp_task_team_t *task_team,
2207                                     volatile kmp_int32 *unfinished_threads,
2208                                     int *thread_finished,
2209                                     kmp_int32 is_constrained) {
2210   kmp_task_t *task;
2211   kmp_taskdata_t *taskdata;
2212   kmp_taskdata_t *current;
2213   kmp_thread_data_t *victim_td, *threads_data;
2214   kmp_int32 level, target;
2215   kmp_int32 victim_tid;
2216 
2217   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2218 
2219   threads_data = task_team->tt.tt_threads_data;
2220   KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
2221 
2222   victim_tid = victim_thr->th.th_info.ds.ds_tid;
2223   victim_td = &threads_data[victim_tid];
2224 
2225   KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
2226                 "task_team=%p ntasks=%d head=%u tail=%u\n",
2227                 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2228                 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2229                 victim_td->td.td_deque_tail));
2230 
2231   if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
2232     KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
2233                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2234                   gtid, __kmp_gtid_from_thread(victim_thr), task_team,
2235                   victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
2236                   victim_td->td.td_deque_tail));
2237     return NULL;
2238   }
2239 
2240   __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
2241 
2242   int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
2243   // Check again after we acquire the lock
2244   if (ntasks == 0) {
2245     __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2246     KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
2247                   "task_team=%p ntasks=%d head=%u tail=%u\n",
2248                   gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2249                   victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2250     return NULL;
2251   }
2252 
2253   KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
2254 
2255   taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
2256   if (is_constrained && (taskdata->td_flags.tiedness == TASK_TIED)) {
    // we need to check if the candidate obeys the task scheduling constraint
    // (TSC): only a descendant of all deferred tied tasks can be scheduled.
    // Checking the last one suffices, since it is in turn a descendant of
    // all the others.
2260     current = __kmp_threads[gtid]->th.th_current_task->td_last_tied;
2261     KMP_DEBUG_ASSERT(current != NULL);
2262     // check if last tied task is not suspended on barrier
2263     if (current->td_flags.tasktype == TASK_EXPLICIT ||
2264         current->td_taskwait_thread > 0) { // <= 0 on barrier
2265       level = current->td_level;
2266       kmp_taskdata_t *parent = taskdata->td_parent;
2267       while (parent != current && parent->td_level > level) {
        // walk up generations to the level of the current task
        parent = parent->td_parent;
2270         KMP_DEBUG_ASSERT(parent != NULL);
2271       }
2272       if (parent != current) {
2273         if (!task_team->tt.tt_untied_task_encountered) {
          // The TSC does not allow stealing the victim task
2275           __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2276           KA_TRACE(10,
2277                    ("__kmp_steal_task(exit #3): T#%d could not steal from "
2278                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2279                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2280                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2281           return NULL;
2282         }
2283         taskdata = NULL; // will check other tasks in victim's deque
2284       }
2285     }
2286   }
2287   if (taskdata != NULL) {
    // Bump the head pointer and wrap.
2289     victim_td->td.td_deque_head =
2290         (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
2291   } else {
2292     int i;
2293     // walk through victim's deque trying to steal any task
2294     target = victim_td->td.td_deque_head;
2295     for (i = 1; i < ntasks; ++i) {
2296       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2297       taskdata = victim_td->td.td_deque[target];
2298       if (taskdata->td_flags.tiedness == TASK_TIED) {
2299         // check if the candidate obeys the TSC
2300         kmp_taskdata_t *parent = taskdata->td_parent;
2301         // check generation up to the level of the current task
2302         while (parent != current && parent->td_level > level) {
2303           parent = parent->td_parent;
2304           KMP_DEBUG_ASSERT(parent != NULL);
2305         }
2306         if (parent != current) {
          // The TSC does not allow stealing the candidate
2308           taskdata = NULL;
2309           continue;
2310         } else {
2311           // found victim tied task
2312           break;
2313         }
2314       } else {
2315         // found victim untied task
2316         break;
2317       }
2318     }
2319     if (taskdata == NULL) {
2320       // No appropriate candidate to steal found
2321       __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2322       KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
2323                     "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
2324                     gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
2325                     victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2326       return NULL;
2327     }
2328     int prev = target;
2329     for (i = i + 1; i < ntasks; ++i) {
2330       // shift remaining tasks in the deque left by 1
2331       target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
2332       victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
2333       prev = target;
2334     }
2335     KMP_DEBUG_ASSERT(victim_td->td.td_deque_tail ==
2336                      ((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
    victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
2338   }
2339   if (*thread_finished) {
2340     // We need to un-mark this victim as a finished victim.  This must be done
2341     // before releasing the lock, or else other threads (starting with the
2342     // master victim) might be prematurely released from the barrier!!!
2343     kmp_int32 count;
2344 
2345     count = KMP_TEST_THEN_INC32(unfinished_threads);
2346 
2347     KA_TRACE(
2348         20,
2349         ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
2350          gtid, count + 1, task_team));
2351 
2352     *thread_finished = FALSE;
2353   }
2354   TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
2355 
2356   __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
2357 
2358   KMP_COUNT_BLOCK(TASK_stolen);
2359   KA_TRACE(10,
2360            ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
2361             "task_team=%p ntasks=%d head=%u tail=%u\n",
2362             gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
2363             ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
2364 
2365   task = KMP_TASKDATA_TO_TASK(taskdata);
2366   return task;
2367 }
2368 
2369 // __kmp_execute_tasks_template: Choose and execute tasks until either the
// condition is satisfied (return true) or there are none left (return false).
2371 //
2372 // final_spin is TRUE if this is the spin at the release barrier.
2373 // thread_finished indicates whether the thread is finished executing all
2374 // the tasks it has on its deque, and is at the release barrier.
2375 // spinner is the location on which to spin.
2376 // spinner == NULL means only execute a single task and return.
2377 // checker is the value to check to terminate the spin.
2378 template <class C>
2379 static inline int __kmp_execute_tasks_template(
2380     kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
2381     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2382     kmp_int32 is_constrained) {
2383   kmp_task_team_t *task_team = thread->th.th_task_team;
2384   kmp_thread_data_t *threads_data;
2385   kmp_task_t *task;
2386   kmp_info_t *other_thread;
2387   kmp_taskdata_t *current_task = thread->th.th_current_task;
2388   volatile kmp_int32 *unfinished_threads;
2389   kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
2390                       tid = thread->th.th_info.ds.ds_tid;
2391 
2392   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
2393   KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
2394 
2395   if (task_team == NULL)
2396     return FALSE;
2397 
2398   KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
2399                 "*thread_finished=%d\n",
2400                 gtid, final_spin, *thread_finished));
2401 
2402   thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
2403   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2404   KMP_DEBUG_ASSERT(threads_data != NULL);
2405 
2406   nthreads = task_team->tt.tt_nproc;
2407   unfinished_threads = &(task_team->tt.tt_unfinished_threads);
2408 #if OMP_45_ENABLED
2409   KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks);
2410 #else
2411   KMP_DEBUG_ASSERT(nthreads > 1);
2412 #endif
2413   KMP_DEBUG_ASSERT(TCR_4(*unfinished_threads) >= 0);
2414 
2415   while (1) { // Outer loop keeps trying to find tasks in case of single thread
2416     // getting tasks from target constructs
2417     while (1) { // Inner loop to find a task and execute it
2418       task = NULL;
2419       if (use_own_tasks) { // check on own queue first
2420         task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
2421       }
2422       if ((task == NULL) && (nthreads > 1)) { // Steal a task
2423         int asleep = 1;
2424         use_own_tasks = 0;
2425         // Try to steal from the last place I stole from successfully.
2426         if (victim_tid == -2) { // haven't stolen anything yet
2427           victim_tid = threads_data[tid].td.td_deque_last_stolen;
2428           if (victim_tid !=
2429               -1) // if we have a last stolen from victim, get the thread
2430             other_thread = threads_data[victim_tid].td.td_thr;
2431         }
2432         if (victim_tid != -1) { // found last victim
2433           asleep = 0;
2434         } else if (!new_victim) { // no recent steals and we haven't already
2435           // used a new victim; select a random thread
2436           do { // Find a different thread to steal work from.
2437             // Pick a random thread. Initial plan was to cycle through all the
2438             // threads, and only return if we tried to steal from every thread,
2439             // and failed.  Arch says that's not such a great idea.
2440             victim_tid = __kmp_get_random(thread) % (nthreads - 1);
2441             if (victim_tid >= tid) {
2442               ++victim_tid; // Adjusts random distribution to exclude self
2443             }
2444             // Found a potential victim
2445             other_thread = threads_data[victim_tid].td.td_thr;
            // There is a slight chance that __kmp_enable_tasking() did not
            // wake up all threads waiting at the barrier.  If the victim is
            // sleeping, then wake it up.  Since we were going to pay the cache
            // miss penalty for referencing another thread's kmp_info_t struct
            // anyway, the check shouldn't cost too much performance at this
            // point.  In extra barrier mode, tasks do not sleep at the
            // separate tasking barrier, so this isn't a problem.
2454             asleep = 0;
2455             if ((__kmp_tasking_mode == tskm_task_teams) &&
2456                 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
2457                 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
2458                  NULL)) {
2459               asleep = 1;
2460               __kmp_null_resume_wrapper(__kmp_gtid_from_thread(other_thread),
2461                                         other_thread->th.th_sleep_loc);
              // A sleeping thread should not have any tasks on its queue.
2463               // There is a slight possibility that it resumes, steals a task
2464               // from another thread, which spawns more tasks, all in the time
2465               // that it takes this thread to check => don't write an assertion
2466               // that the victim's queue is empty.  Try stealing from a
2467               // different thread.
2468             }
2469           } while (asleep);
2470         }
2471 
2472         if (!asleep) {
2473           // We have a victim to try to steal from
2474           task = __kmp_steal_task(other_thread, gtid, task_team,
2475                                   unfinished_threads, thread_finished,
2476                                   is_constrained);
2477         }
2478         if (task != NULL) { // set last stolen to victim
2479           if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
2480             threads_data[tid].td.td_deque_last_stolen = victim_tid;
2481             // The pre-refactored code did not try more than 1 successful new
            // victim, unless the last one generated more local tasks;
2483             // new_victim keeps track of this
2484             new_victim = 1;
2485           }
2486         } else { // No tasks found; unset last_stolen
2487           KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
2488           victim_tid = -2; // no successful victim found
2489         }
2490       }
2491 
2492       if (task == NULL) // break out of tasking loop
2493         break;
2494 
2495 // Found a task; execute it
2496 #if USE_ITT_BUILD && USE_ITT_NOTIFY
2497       if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
2498         if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
2499           // get the object reliably
2500           itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
2501         }
2502         __kmp_itt_task_starting(itt_sync_obj);
2503       }
2504 #endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
2505       __kmp_invoke_task(gtid, task, current_task);
2506 #if USE_ITT_BUILD
2507       if (itt_sync_obj != NULL)
2508         __kmp_itt_task_finished(itt_sync_obj);
2509 #endif /* USE_ITT_BUILD */
2510       // If this thread is only partway through the barrier and the condition is
2511       // met, then return now, so that the barrier gather/release pattern can
2512       // proceed. If this thread is in the last spin loop in the barrier,
2513       // waiting to be released, we know that the termination condition will not
      // be satisfied, so don't waste any cycles checking it.
2515       if (flag == NULL || (!final_spin && flag->done_check())) {
2516         KA_TRACE(
2517             15,
2518             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2519              gtid));
2520         return TRUE;
2521       }
2522       if (thread->th.th_task_team == NULL) {
2523         break;
2524       }
2525       // Yield before executing next task
2526       KMP_YIELD(__kmp_library == library_throughput);
2527       // If execution of a stolen task results in more tasks being placed on our
2528       // run queue, reset use_own_tasks
2529       if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
2530         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
2531                       "other tasks, restart\n",
2532                       gtid));
2533         use_own_tasks = 1;
2534         new_victim = 0;
2535       }
2536     }
2537 
2538 // The task source has been exhausted. If in final spin loop of barrier, check
2539 // if termination condition is satisfied.
2540 #if OMP_45_ENABLED
2541     // The work queue may be empty but there might be proxy tasks still
2542     // executing
2543     if (final_spin && TCR_4(current_task->td_incomplete_child_tasks) == 0)
2544 #else
2545     if (final_spin)
2546 #endif
2547     {
2548       // First, decrement the #unfinished threads, if that has not already been
2549       // done.  This decrement might be to the spin location, and result in the
2550       // termination condition being satisfied.
2551       if (!*thread_finished) {
2552         kmp_int32 count;
2553 
2554         count = KMP_TEST_THEN_DEC32(unfinished_threads) - 1;
2555         KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
2556                       "unfinished_threads to %d task_team=%p\n",
2557                       gtid, count, task_team));
2558         *thread_finished = TRUE;
2559       }
2560 
2561       // It is now unsafe to reference thread->th.th_team !!!
2562       // Decrementing task_team->tt.tt_unfinished_threads can allow the master
2563       // thread to pass through the barrier, where it might reset each thread's
2564       // th.th_team field for the next parallel region. If we can steal more
2565       // work, we know that this has not happened yet.
2566       if (flag != NULL && flag->done_check()) {
2567         KA_TRACE(
2568             15,
2569             ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
2570              gtid));
2571         return TRUE;
2572       }
2573     }
2574 
2575     // If this thread's task team is NULL, master has recognized that there are
2576     // no more tasks; bail out
2577     if (thread->th.th_task_team == NULL) {
2578       KA_TRACE(15,
2579                ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
2580       return FALSE;
2581     }
2582 
2583 #if OMP_45_ENABLED
2584     // We could be getting tasks from target constructs; if this is the only
2585     // thread, keep trying to execute tasks from own queue
2586     if (nthreads == 1)
2587       use_own_tasks = 1;
2588     else
2589 #endif
2590     {
2591       KA_TRACE(15,
2592                ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
2593       return FALSE;
2594     }
2595   }
2596 }
2597 
2598 int __kmp_execute_tasks_32(
2599     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32 *flag, int final_spin,
2600     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2601     kmp_int32 is_constrained) {
2602   return __kmp_execute_tasks_template(
2603       thread, gtid, flag, final_spin,
2604       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2605 }
2606 
2607 int __kmp_execute_tasks_64(
2608     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64 *flag, int final_spin,
2609     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2610     kmp_int32 is_constrained) {
2611   return __kmp_execute_tasks_template(
2612       thread, gtid, flag, final_spin,
2613       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2614 }
2615 
2616 int __kmp_execute_tasks_oncore(
2617     kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
2618     int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
2619     kmp_int32 is_constrained) {
2620   return __kmp_execute_tasks_template(
2621       thread, gtid, flag, final_spin,
2622       thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
2623 }
2624 
2625 // __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
2626 // next barrier so they can assist in executing enqueued tasks.
2627 // First thread in allocates the task team atomically.
2628 static void __kmp_enable_tasking(kmp_task_team_t *task_team,
2629                                  kmp_info_t *this_thr) {
2630   kmp_thread_data_t *threads_data;
2631   int nthreads, i, is_init_thread;
2632 
2633   KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
2634                 __kmp_gtid_from_thread(this_thr)));
2635 
2636   KMP_DEBUG_ASSERT(task_team != NULL);
2637   KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
2638 
2639   nthreads = task_team->tt.tt_nproc;
2640   KMP_DEBUG_ASSERT(nthreads > 0);
2641   KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
2642 
2643   // Allocate or increase the size of threads_data if necessary
2644   is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
2645 
2646   if (!is_init_thread) {
2647     // Some other thread already set up the array.
2648     KA_TRACE(
2649         20,
2650         ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
2651          __kmp_gtid_from_thread(this_thr)));
2652     return;
2653   }
2654   threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
2655   KMP_DEBUG_ASSERT(threads_data != NULL);
2656 
2657   if ((__kmp_tasking_mode == tskm_task_teams) &&
2658       (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
2659     // Release any threads sleeping at the barrier, so that they can steal
2660     // tasks and execute them.  In extra barrier mode, tasks do not sleep
2661     // at the separate tasking barrier, so this isn't a problem.
2662     for (i = 0; i < nthreads; i++) {
2663       volatile void *sleep_loc;
2664       kmp_info_t *thread = threads_data[i].td.td_thr;
2665 
2666       if (i == this_thr->th.th_info.ds.ds_tid) {
2667         continue;
2668       }
2669       // Since we haven't locked the thread's suspend mutex lock at this
2670       // point, there is a small window where a thread might be putting
2671       // itself to sleep, but hasn't set the th_sleep_loc field yet.
      // To work around this, __kmp_execute_tasks_template() periodically checks
      // to see if other threads are sleeping (using the same random mechanism
      // that is used for task stealing) and awakens them if they are.
2675       if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
2676           NULL) {
2677         KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
2678                       __kmp_gtid_from_thread(this_thr),
2679                       __kmp_gtid_from_thread(thread)));
2680         __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
2681       } else {
2682         KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
2683                       __kmp_gtid_from_thread(this_thr),
2684                       __kmp_gtid_from_thread(thread)));
2685       }
2686     }
2687   }
2688 
2689   KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
2690                 __kmp_gtid_from_thread(this_thr)));
2691 }
2692 
/*
 * Utility routines for "task teams".  A task team (kmp_task_team_t) is kind
 * of like a shadow of the kmp_team_t data struct, with a different lifetime.
 * After a child thread checks into a barrier and calls __kmp_release() from
 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
 * longer assume that the kmp_team_t structure is intact (at any moment, the
 * master thread may exit the barrier code, free the team data structure,
 * and return the threads to the thread pool).
 *
 * This does not work with the tasking code, as the thread is still
 * expected to participate in the execution of any tasks that may have been
 * spawned by a member of the team, and the thread still needs access to
 * each of the other threads in the team, so that it can steal work from them.
 *
 * Enter the kmp_task_team_t struct.  It employs a reference counting
 * mechanism, is allocated by the master thread before calling
 * __kmp_<barrier_kind>_release, and is released by the last thread to
 * exit __kmp_<barrier_kind>_release at the next barrier.  I.e. the lifetimes
 * of the kmp_task_team_t structs for consecutive barriers can overlap
 * (and will, unless the master thread is the last thread to exit the barrier
 * release phase, which is not typical).
 *
 * The existence of such a struct is useful outside the context of tasking,
 * but for now, I'm trying to keep it specific to the OMP_30_ENABLED macro,
 * so that any performance differences show up when comparing the 2.5 vs. 3.0
 * libraries.
 *
 * We currently use the existence of the threads array as an indicator that
 * tasks were spawned since the last barrier.  If the structure is to be
 * useful outside the context of tasking, then this will have to change, but
 * not setting the field minimizes the performance impact of tasking on
 * barriers, when no explicit tasks were spawned (pushed, actually).
 */
2726 
2727 static kmp_task_team_t *__kmp_free_task_teams =
2728     NULL; // Free list for task_team data structures
2729 // Lock for task team data structures
2730 static kmp_bootstrap_lock_t __kmp_task_team_lock =
2731     KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
2732 
// __kmp_alloc_task_deque:
// Allocates a task deque for a particular thread, and initializes the
// necessary data structures relating to the deque.  This only happens once
// per thread per task team since task teams are recycled. No lock is needed
// during allocation since each thread allocates its own deque.
2738 static void __kmp_alloc_task_deque(kmp_info_t *thread,
2739                                    kmp_thread_data_t *thread_data) {
2740   __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
2741   KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
2742 
2743   // Initialize last stolen task field to "none"
2744   thread_data->td.td_deque_last_stolen = -1;
2745 
2746   KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
2747   KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
2748   KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
2749 
2750   KE_TRACE(
2751       10,
2752       ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
2753        __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
2754   // Allocate space for task deque, and zero the deque
  // Cannot use __kmp_thread_calloc() because the threads are no longer around
  // when __kmp_reap_task_teams() frees this memory.
2757   thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
2758       INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
2759   thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
2760 }
2761 
// __kmp_realloc_task_deque:
// Re-allocates a task deque for a particular thread, copies the content from
// the old deque and adjusts the necessary data structures relating to the
// deque. This operation must be done with the deque_lock held.
2766 static void __kmp_realloc_task_deque(kmp_info_t *thread,
2767                                      kmp_thread_data_t *thread_data) {
2768   kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
2769   kmp_int32 new_size = 2 * size;
2770 
2771   KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
2772                 "%d] for thread_data %p\n",
2773                 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
2774 
2775   kmp_taskdata_t **new_deque =
2776       (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
2777 
2778   int i, j;
2779   for (i = thread_data->td.td_deque_head, j = 0; j < size;
2780        i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
2781     new_deque[j] = thread_data->td.td_deque[i];
2782 
2783   __kmp_free(thread_data->td.td_deque);
2784 
2785   thread_data->td.td_deque_head = 0;
2786   thread_data->td.td_deque_tail = size;
2787   thread_data->td.td_deque = new_deque;
2788   thread_data->td.td_deque_size = new_size;
2789 }
2790 
2791 // __kmp_free_task_deque:
2792 // Deallocates a task deque for a particular thread. Happens at library
2793 // deallocation so don't need to reset all thread data fields.
2794 static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
2795   if (thread_data->td.td_deque != NULL) {
2796     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
2797     TCW_4(thread_data->td.td_deque_ntasks, 0);
2798     __kmp_free(thread_data->td.td_deque);
2799     thread_data->td.td_deque = NULL;
2800     __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
2801   }
2802 
2803 #ifdef BUILD_TIED_TASK_STACK
2804   // GEH: Figure out what to do here for td_susp_tied_tasks
  if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
    // No gtid parameter is available in this routine, so query it from TLS.
    __kmp_free_task_stack(__kmp_get_gtid(), thread_data);
  }
2808 #endif // BUILD_TIED_TASK_STACK
2809 }
2810 
// __kmp_realloc_task_threads_data:
// Allocates a threads_data array for a task team, either by allocating an
// initial array or enlarging an existing array.  Only the first thread to get
// the lock allocs or enlarges the array and re-initializes the array elements.
// That thread returns "TRUE", the rest return "FALSE".
// Assumes that the new array size is given by task_team -> tt.tt_nproc.
// The current size is given by task_team -> tt.tt_max_threads.
2818 static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
2819                                            kmp_task_team_t *task_team) {
2820   kmp_thread_data_t **threads_data_p;
2821   kmp_int32 nthreads, maxthreads;
2822   int is_init_thread = FALSE;
2823 
2824   if (TCR_4(task_team->tt.tt_found_tasks)) {
2825     // Already reallocated and initialized.
2826     return FALSE;
2827   }
2828 
2829   threads_data_p = &task_team->tt.tt_threads_data;
2830   nthreads = task_team->tt.tt_nproc;
2831   maxthreads = task_team->tt.tt_max_threads;
2832 
  // All threads must lock when they encounter the first task of the implicit
  // task region to make sure threads_data fields are (re)initialized before
  // they are used.
2836   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
2837 
2838   if (!TCR_4(task_team->tt.tt_found_tasks)) {
2839     // first thread to enable tasking
2840     kmp_team_t *team = thread->th.th_team;
2841     int i;
2842 
2843     is_init_thread = TRUE;
2844     if (maxthreads < nthreads) {
2845 
2846       if (*threads_data_p != NULL) {
2847         kmp_thread_data_t *old_data = *threads_data_p;
2848         kmp_thread_data_t *new_data = NULL;
2849 
2850         KE_TRACE(
2851             10,
2852             ("__kmp_realloc_task_threads_data: T#%d reallocating "
2853              "threads data for task_team %p, new_size = %d, old_size = %d\n",
2854              __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
        // Reallocate threads_data to have more elements than current array
        // Cannot use __kmp_thread_realloc() because the threads are no longer
        // around when __kmp_reap_task_teams() frees this memory.  Note all new
        // array entries are initialized to zero by __kmp_allocate().
2859         new_data = (kmp_thread_data_t *)__kmp_allocate(
2860             nthreads * sizeof(kmp_thread_data_t));
2861         // copy old data to new data
2862         KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
2863                      (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
2864 
2865 #ifdef BUILD_TIED_TASK_STACK
2866         // GEH: Figure out if this is the right thing to do
2867         for (i = maxthreads; i < nthreads; i++) {
2868           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2869           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2870         }
2871 #endif // BUILD_TIED_TASK_STACK
2872         // Install the new data and free the old data
2873         (*threads_data_p) = new_data;
2874         __kmp_free(old_data);
2875       } else {
2876         KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
2877                       "threads data for task_team %p, size = %d\n",
2878                       __kmp_gtid_from_thread(thread), task_team, nthreads));
        // Make the initial allocation for the threads_data array, and zero
        // entries.  Cannot use __kmp_thread_calloc() because the threads are
        // no longer around when __kmp_reap_task_teams() frees this memory.
2882         ANNOTATE_IGNORE_WRITES_BEGIN();
2883         *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
2884             nthreads * sizeof(kmp_thread_data_t));
2885         ANNOTATE_IGNORE_WRITES_END();
2886 #ifdef BUILD_TIED_TASK_STACK
2887         // GEH: Figure out if this is the right thing to do
2888         for (i = 0; i < nthreads; i++) {
2889           kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2890           __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
2891         }
2892 #endif // BUILD_TIED_TASK_STACK
2893       }
2894       task_team->tt.tt_max_threads = nthreads;
2895     } else {
      // If the array has (more than) enough elements, go ahead and use it
2897       KMP_DEBUG_ASSERT(*threads_data_p != NULL);
2898     }
2899 
2900     // initialize threads_data pointers back to thread_info structures
2901     for (i = 0; i < nthreads; i++) {
2902       kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
2903       thread_data->td.td_thr = team->t.t_threads[i];
2904 
2905       if (thread_data->td.td_deque_last_stolen >= nthreads) {
        // The last stolen field survives across teams and barriers, and the
        // number of threads may have changed.  It's possible (likely?) that a
        // new parallel region will exhibit the same behavior as the previous
        // one.
2909         thread_data->td.td_deque_last_stolen = -1;
2910       }
2911     }
2912 
2913     KMP_MB();
2914     TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
2915   }
2916 
2917   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
2918   return is_init_thread;
2919 }
2920 
2921 // __kmp_free_task_threads_data:
2922 // Deallocates a threads_data array for a task team, including any attached
2923 // tasking deques.  Only occurs at library shutdown.
2924 static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
2925   __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
2926   if (task_team->tt.tt_threads_data != NULL) {
2927     int i;
2928     for (i = 0; i < task_team->tt.tt_max_threads; i++) {
2929       __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
2930     }
2931     __kmp_free(task_team->tt.tt_threads_data);
2932     task_team->tt.tt_threads_data = NULL;
2933   }
2934   __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
2935 }
2936 
2937 // __kmp_allocate_task_team:
2938 // Allocates a task team associated with a specific team, taking it from
2939 // the global task team free list if possible.  Also initializes data
2940 // structures.
2941 static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
2942                                                  kmp_team_t *team) {
2943   kmp_task_team_t *task_team = NULL;
2944   int nthreads;
2945 
2946   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
2947                 (thread ? __kmp_gtid_from_thread(thread) : -1), team));
2948 
2949   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
2950     // Take a task team from the task team pool
2951     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
2952     if (__kmp_free_task_teams != NULL) {
2953       task_team = __kmp_free_task_teams;
2954       TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
2955       task_team->tt.tt_next = NULL;
2956     }
2957     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
2958   }
2959 
2960   if (task_team == NULL) {
2961     KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
2962                   "task team for team %p\n",
2963                   __kmp_gtid_from_thread(thread), team));
    // Allocate a new task team if one is not available.  Cannot use
    // __kmp_thread_malloc() because the threads are no longer around when
    // __kmp_reap_task_teams() frees this memory.
2967     task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
2968     __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
2969     // AC: __kmp_allocate zeroes returned memory
2970     // task_team -> tt.tt_threads_data = NULL;
2971     // task_team -> tt.tt_max_threads = 0;
2972     // task_team -> tt.tt_next = NULL;
2973   }
2974 
2975   TCW_4(task_team->tt.tt_found_tasks, FALSE);
2976 #if OMP_45_ENABLED
2977   TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
2978 #endif
2979   task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
2980 
2981   TCW_4(task_team->tt.tt_unfinished_threads, nthreads);
2982   TCW_4(task_team->tt.tt_active, TRUE);
2983 
2984   KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
2985                 "unfinished_threads init'd to %d\n",
2986                 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
2987                 task_team->tt.tt_unfinished_threads));
2988   return task_team;
2989 }
2990 
2991 // __kmp_free_task_team:
2992 // Frees the task team associated with a specific thread, and adds it
2993 // to the global task team free list.
2994 void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
2995   KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
2996                 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
2997 
2998   // Put task team back on free list
2999   __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3000 
3001   KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
3002   task_team->tt.tt_next = __kmp_free_task_teams;
3003   TCW_PTR(__kmp_free_task_teams, task_team);
3004 
3005   __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3006 }
3007 
3008 // __kmp_reap_task_teams:
3009 // Free all the task teams on the task team free list.
3010 // Should only be done during library shutdown.
3011 // Cannot do anything that needs a thread structure or gtid since they are
3012 // already gone.
3013 void __kmp_reap_task_teams(void) {
3014   kmp_task_team_t *task_team;
3015 
3016   if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3017     // Free all task_teams on the free list
3018     __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
3019     while ((task_team = __kmp_free_task_teams) != NULL) {
3020       __kmp_free_task_teams = task_team->tt.tt_next;
3021       task_team->tt.tt_next = NULL;
3022 
3023       // Free threads_data if necessary
3024       if (task_team->tt.tt_threads_data != NULL) {
3025         __kmp_free_task_threads_data(task_team);
3026       }
3027       __kmp_free(task_team);
3028     }
3029     __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3030   }
3031 }
3032 
3033 // __kmp_wait_to_unref_task_teams:
3034 // Some threads could still be in the fork barrier release code, possibly
3035 // trying to steal tasks.  Wait for each thread to unreference its task team.
3036 void __kmp_wait_to_unref_task_teams(void) {
3037   kmp_info_t *thread;
3038   kmp_uint32 spins;
3039   int done;
3040 
3041   KMP_INIT_YIELD(spins);
3042 
3043   for (;;) {
3044     done = TRUE;
3045 
    // TODO: GEH - this may be wrong because some sync would be necessary
    // in case threads are added to the pool during the traversal. Need to
    // verify that lock for thread pool is held when calling this routine.
3049     for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
3050          thread = thread->th.th_next_pool) {
3051 #if KMP_OS_WINDOWS
3052       DWORD exit_val;
3053 #endif
3054       if (TCR_PTR(thread->th.th_task_team) == NULL) {
3055         KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
3056                       __kmp_gtid_from_thread(thread)));
3057         continue;
3058       }
3059 #if KMP_OS_WINDOWS
3060       // TODO: GEH - add this check for Linux* OS / OS X* as well?
3061       if (!__kmp_is_thread_alive(thread, &exit_val)) {
3062         thread->th.th_task_team = NULL;
3063         continue;
3064       }
3065 #endif
3066 
3067       done = FALSE; // Because th_task_team pointer is not NULL for this thread
3068 
3069       KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
3070                     "unreference task_team\n",
3071                     __kmp_gtid_from_thread(thread)));
3072 
3073       if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
3074         volatile void *sleep_loc;
3075         // If the thread is sleeping, awaken it.
3076         if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3077             NULL) {
3078           KA_TRACE(
3079               10,
3080               ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
3081                __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
3082           __kmp_null_resume_wrapper(__kmp_gtid_from_thread(thread), sleep_loc);
3083         }
3084       }
3085     }
3086     if (done) {
3087       break;
3088     }
3089 
3090     // If we are oversubscribed, or have waited a bit (and library mode is
3091     // throughput), yield. Pause is in the following code.
3092     KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
3093     KMP_YIELD_SPIN(spins); // Yields only if KMP_LIBRARY=throughput
3094   }
3095 }
3096 
3097 // __kmp_task_team_setup:  Create a task_team for the current team, but use
3098 // an already created, unused one if it already exists.
3099 void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
3100   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3101 
3102   // If this task_team hasn't been created yet, allocate it. It will be used in
3103   // the region after the next.
3104   // If it exists, it is the current task team and shouldn't be touched yet as
3105   // it may still be in use.
3106   if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
3107       (always || team->t.t_nproc > 1)) {
3108     team->t.t_task_team[this_thr->th.th_task_state] =
3109         __kmp_allocate_task_team(this_thr, team);
3110     KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created new task_team %p "
3111                   "for team %d at parity=%d\n",
3112                   __kmp_gtid_from_thread(this_thr),
3113                   team->t.t_task_team[this_thr->th.th_task_state],
3114                   ((team != NULL) ? team->t.t_id : -1),
3115                   this_thr->th.th_task_state));
3116   }
3117 
3118   // After threads exit the release, they will call sync, and then point to this
3119   // other task_team; make sure it is allocated and properly initialized. As
3120   // threads spin in the barrier release phase, they will continue to use the
3121   // previous task_team struct(above), until they receive the signal to stop
3122   // checking for tasks (they can't safely reference the kmp_team_t struct,
3123   // which could be reallocated by the master thread). No task teams are formed
3124   // for serialized teams.
3125   if (team->t.t_nproc > 1) {
3126     int other_team = 1 - this_thr->th.th_task_state;
3127     if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
3128       team->t.t_task_team[other_team] =
3129           __kmp_allocate_task_team(this_thr, team);
3130       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d created second new "
3131                     "task_team %p for team %d at parity=%d\n",
3132                     __kmp_gtid_from_thread(this_thr),
3133                     team->t.t_task_team[other_team],
3134                     ((team != NULL) ? team->t.t_id : -1), other_team));
3135     } else { // Leave the old task team struct in place for the upcoming region;
3136       // adjust as needed
3137       kmp_task_team_t *task_team = team->t.t_task_team[other_team];
3138       if (!task_team->tt.tt_active ||
3139           team->t.t_nproc != task_team->tt.tt_nproc) {
3140         TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
3141         TCW_4(task_team->tt.tt_found_tasks, FALSE);
3142 #if OMP_45_ENABLED
3143         TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3144 #endif
3145         TCW_4(task_team->tt.tt_unfinished_threads, team->t.t_nproc);
3146         TCW_4(task_team->tt.tt_active, TRUE);
3147       }
3148       // if team size has changed, the first thread to enable tasking will
3149       // realloc threads_data if necessary
3150       KA_TRACE(20, ("__kmp_task_team_setup: Master T#%d reset next task_team "
3151                     "%p for team %d at parity=%d\n",
3152                     __kmp_gtid_from_thread(this_thr),
3153                     team->t.t_task_team[other_team],
3154                     ((team != NULL) ? team->t.t_id : -1), other_team));
3155     }
3156   }
3157 }
3158 
3159 // __kmp_task_team_sync: Propagation of task team data from team to threads
3160 // which happens just after the release phase of a team barrier.  This may be
3161 // called by any thread, but only for teams with # threads > 1.
3162 void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
3163   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3164 
3165   // Toggle the th_task_state field, to switch which task_team this thread
3166   // refers to
3167   this_thr->th.th_task_state = 1 - this_thr->th.th_task_state;
3168   // It is now safe to propagate the task team pointer from the team struct to
3169   // the current thread.
3170   TCW_PTR(this_thr->th.th_task_team,
3171           team->t.t_task_team[this_thr->th.th_task_state]);
3172   KA_TRACE(20,
3173            ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
3174             "%p from Team #%d (parity=%d)\n",
3175             __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
3176             ((team != NULL) ? team->t.t_id : -1), this_thr->th.th_task_state));
3177 }
3178 
3179 // __kmp_task_team_wait: Master thread waits for outstanding tasks after the
3180 // barrier gather phase. Only called by master thread if #threads in team > 1 or
3181 // if proxy tasks were created.
3182 //
3183 // wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
3184 // by passing in 0 optionally as the last argument. When wait is zero, master
3185 // thread does not wait for unfinished_threads to reach 0.
3186 void __kmp_task_team_wait(
3187     kmp_info_t *this_thr,
3188     kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
3189   kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
3190 
3191   KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3192   KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
3193 
3194   if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
3195     if (wait) {
3196       KA_TRACE(20, ("__kmp_task_team_wait: Master T#%d waiting for all tasks "
3197                     "(for unfinished_threads to reach 0) on task_team = %p\n",
3198                     __kmp_gtid_from_thread(this_thr), task_team));
3199       // Worker threads may have dropped through to release phase, but could
3200       // still be executing tasks. Wait here for tasks to complete. To avoid
3201       // memory contention, only master thread checks termination condition.
3202       kmp_flag_32 flag(
3203           RCAST(volatile kmp_uint32 *, &task_team->tt.tt_unfinished_threads),
3204           0U);
3205       flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
3206     }
3207     // Deactivate the old task team, so that the worker threads will stop
3208     // referencing it while spinning.
3209     KA_TRACE(
3210         20,
3211         ("__kmp_task_team_wait: Master T#%d deactivating task_team %p: "
3212          "setting active to false, setting local and team's pointer to NULL\n",
3213          __kmp_gtid_from_thread(this_thr), task_team));
3214 #if OMP_45_ENABLED
3215     KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
3216                      task_team->tt.tt_found_proxy_tasks == TRUE);
3217     TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3218 #else
3219     KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1);
3220 #endif
3221     KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
3222     TCW_SYNC_4(task_team->tt.tt_active, FALSE);
3223     KMP_MB();
3224 
3225     TCW_PTR(this_thr->th.th_task_team, NULL);
3226   }
3227 }
3228 
// __kmp_tasking_barrier:
// This routine may only be called when __kmp_tasking_mode ==
// tskm_extra_barrier. Internal function to execute all tasks prior to a
// regular barrier or a join barrier. It is a full barrier itself, which
// unfortunately turns regular barriers into double barriers and join barriers
// into 1 1/2 barriers.
3234 void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
3235   volatile kmp_uint32 *spin = RCAST(
3236       volatile kmp_uint32 *,
3237       &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
3238   int flag = FALSE;
3239   KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
3240 
3241 #if USE_ITT_BUILD
3242   KMP_FSYNC_SPIN_INIT(spin, (kmp_uint32 *)NULL);
3243 #endif /* USE_ITT_BUILD */
3244   kmp_flag_32 spin_flag(spin, 0U);
3245   while (!spin_flag.execute_tasks(thread, gtid, TRUE,
3246                                   &flag USE_ITT_BUILD_ARG(NULL), 0)) {
3247 #if USE_ITT_BUILD
3248     // TODO: What about itt_sync_obj??
3249     KMP_FSYNC_SPIN_PREPARE(CCAST(kmp_uint32 *, spin));
3250 #endif /* USE_ITT_BUILD */
3251 
3252     if (TCR_4(__kmp_global.g.g_done)) {
3253       if (__kmp_global.g.g_abort)
3254         __kmp_abort_thread();
3255       break;
3256     }
3257     KMP_YIELD(TRUE); // GH: We always yield here
3258   }
3259 #if USE_ITT_BUILD
3260   KMP_FSYNC_SPIN_ACQUIRED(CCAST(kmp_uint32 *, spin));
3261 #endif /* USE_ITT_BUILD */
3262 }
3263 
3264 #if OMP_45_ENABLED
3265 
3266 // __kmp_give_task puts a task into a given thread queue if:
3267 //  - the queue for that thread was created
3268 //  - there's space in that queue
3269 // Because of this, __kmp_push_task needs to check if there's space after
3270 // getting the lock
3271 static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
3272                             kmp_int32 pass) {
3273   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3274   kmp_task_team_t *task_team = taskdata->td_task_team;
3275 
3276   KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
3277                 taskdata, tid));
3278 
  // If task_team is NULL, something went really bad...
3280   KMP_DEBUG_ASSERT(task_team != NULL);
3281 
3282   bool result = false;
3283   kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
3284 
3285   if (thread_data->td.td_deque == NULL) {
3286     // There's no queue in this thread, go find another one
3287     // We're guaranteed that at least one thread has a queue
3288     KA_TRACE(30,
3289              ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
3290               tid, taskdata));
3291     return result;
3292   }
3293 
3294   if (TCR_4(thread_data->td.td_deque_ntasks) >=
3295       TASK_DEQUE_SIZE(thread_data->td)) {
3296     KA_TRACE(
3297         30,
3298         ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
3299          taskdata, tid));
3300 
    // If this deque has already grown past the pass ratio, give another
    // thread a chance before growing it further
3303     if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3304       return result;
3305 
3306     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3307     __kmp_realloc_task_deque(thread, thread_data);
3308 
3309   } else {
3310 
3311     __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3312 
3313     if (TCR_4(thread_data->td.td_deque_ntasks) >=
3314         TASK_DEQUE_SIZE(thread_data->td)) {
3315       KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
3316                     "thread %d.\n",
3317                     taskdata, tid));
3318 
      // If this deque has already grown past the pass ratio, give another
      // thread a chance before growing it further
3321       if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
3322         goto release_and_exit;
3323 
3324       __kmp_realloc_task_deque(thread, thread_data);
3325     }
3326   }
3327 
3328   // lock is held here, and there is space in the deque
3329 
3330   thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
3331   // Wrap index.
3332   thread_data->td.td_deque_tail =
3333       (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
3334   TCW_4(thread_data->td.td_deque_ntasks,
3335         TCR_4(thread_data->td.td_deque_ntasks) + 1);
3336 
3337   result = true;
3338   KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
3339                 taskdata, tid));
3340 
3341 release_and_exit:
3342   __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3343 
3344   return result;
3345 }
3346 
/* The finish of the proxy tasks is divided in two pieces:
    - the top half is the one that can be done from a thread outside the team
    - the bottom half must be run from a thread within the team

   In order to run the bottom half the task gets queued back into one of the
   threads of the team. Once the td_incomplete_child_tasks counter of the
   parent is decremented the threads can leave the barriers. So, the bottom
   half needs to be queued before the counter is decremented. The top half is
   therefore divided in two parts:
    - things that can be run before queuing the bottom half
    - things that must be run after queuing the bottom half

   This creates a second race as the bottom half can free the task before the
   second top half is executed. To avoid this we use the
   td_incomplete_child_tasks counter of the proxy task to synchronize the top
   and bottom halves. */
3363 static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3364   KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
3365   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3366   KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
3367   KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
3368 
3369   taskdata->td_flags.complete = 1; // mark the task as completed
3370 
3371   if (taskdata->td_taskgroup)
3372     KMP_TEST_THEN_DEC32(&taskdata->td_taskgroup->count);
3373 
  // Create an imaginary child for this task so the bottom half cannot
  // release the task before we have completed the second top half
3376   TCI_4(taskdata->td_incomplete_child_tasks);
3377 }
3378 
3379 static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
3380   kmp_int32 children = 0;
3381 
3382   // Predecrement simulated by "- 1" calculation
3383   children =
3384       KMP_TEST_THEN_DEC32(&taskdata->td_parent->td_incomplete_child_tasks) - 1;
3385   KMP_DEBUG_ASSERT(children >= 0);
3386 
  // Remove the imaginary child
3388   TCD_4(taskdata->td_incomplete_child_tasks);
3389 }
3390 
3391 static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
3392   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3393   kmp_info_t *thread = __kmp_threads[gtid];
3394 
3395   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3396   KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
3397                    1); // top half must run before bottom half
3398 
3399   // We need to wait to make sure the top half is finished
3400   // Spinning here should be ok as this should happen quickly
3401   while (TCR_4(taskdata->td_incomplete_child_tasks) > 0)
3402     ;
3403 
3404   __kmp_release_deps(gtid, taskdata);
3405   __kmp_free_task_and_ancestors(gtid, taskdata, thread);
3406 }
3407 
3408 /*!
3409 @ingroup TASKING
3410 @param gtid Global Thread ID of encountering thread
@param ptask Task whose execution is completed
3412 
Execute the completion of a proxy task from a thread that is part of the
team. Runs both top halves and the bottom half directly.
3415 */
3416 void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
3417   KMP_DEBUG_ASSERT(ptask != NULL);
3418   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3419   KA_TRACE(
3420       10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
3421            gtid, taskdata));
3422 
3423   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3424 
3425   __kmp_first_top_half_finish_proxy(taskdata);
3426   __kmp_second_top_half_finish_proxy(taskdata);
3427   __kmp_bottom_half_finish_proxy(gtid, ptask);
3428 
3429   KA_TRACE(10,
3430            ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
3431             gtid, taskdata));
3432 }
3433 
3434 /*!
3435 @ingroup TASKING
@param ptask Task whose execution is completed
3437 
Execute the completion of a proxy task from a thread that may not belong to
the team.
3440 */
3441 void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
3442   KMP_DEBUG_ASSERT(ptask != NULL);
3443   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
3444 
3445   KA_TRACE(
3446       10,
3447       ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
3448        taskdata));
3449 
3450   KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
3451 
3452   __kmp_first_top_half_finish_proxy(taskdata);
3453 
3454   // Enqueue task to complete bottom half completion from a thread within the
3455   // corresponding team
3456   kmp_team_t *team = taskdata->td_team;
3457   kmp_int32 nthreads = team->t.t_nproc;
3458   kmp_info_t *thread;
3459 
  // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
  // but we cannot use __kmp_get_random here: the calling thread need not be
  // an OpenMP-managed thread
3462   kmp_int32 start_k = 0;
3463   kmp_int32 pass = 1;
  kmp_int32 k = start_k;
  kmp_int32 tid = k;

  do {
    // For now we're just linearly trying to find a thread
    tid = k; // remember which thread we picked; pass the matching tid below
    thread = team->t.t_threads[tid];
    k = (k + 1) % nthreads;

    // we did a full pass through all the threads
    if (k == start_k)
      pass = pass << 1;

  } while (!__kmp_give_task(thread, tid, ptask, pass));
3476 
3477   __kmp_second_top_half_finish_proxy(taskdata);
3478 
3479   KA_TRACE(
3480       10,
3481       ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
3482        taskdata));
3483 }
3484 
3485 // __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
3486 // for taskloop
3487 //
3488 // thread:   allocating thread
3489 // task_src: pointer to source task to be duplicated
3490 // returns:  a pointer to the allocated kmp_task_t structure (task).
3491 kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src) {
3492   kmp_task_t *task;
3493   kmp_taskdata_t *taskdata;
3494   kmp_taskdata_t *taskdata_src;
3495   kmp_taskdata_t *parent_task = thread->th.th_current_task;
3496   size_t shareds_offset;
3497   size_t task_size;
3498 
3499   KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
3500                 task_src));
3501   taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
  KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
                   TASK_FULL); // it should not be a proxy task
3504   KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
3505   task_size = taskdata_src->td_size_alloc;
3506 
3507   // Allocate a kmp_taskdata_t block and a kmp_task_t block.
3508   KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
3509                 task_size));
3510 #if USE_FAST_MEMORY
3511   taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
3512 #else
3513   taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
3514 #endif /* USE_FAST_MEMORY */
3515   KMP_MEMCPY(taskdata, taskdata_src, task_size);
3516 
3517   task = KMP_TASKDATA_TO_TASK(taskdata);
3518 
3519   // Initialize new task (only specific fields not affected by memcpy)
3520   taskdata->td_task_id = KMP_GEN_TASK_ID();
  if (task->shareds != NULL) { // need to set up shareds pointer
3522     shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
3523     task->shareds = &((char *)taskdata)[shareds_offset];
3524     KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
3525                      0);
3526   }
3527   taskdata->td_alloc_thread = thread;
3528   taskdata->td_parent = parent_task;
3529   taskdata->td_taskgroup =
3530       parent_task
3531           ->td_taskgroup; // task inherits the taskgroup from the parent task
3532 
3533   // Only need to keep track of child task counts if team parallel and tasking
3534   // not serialized
3535   if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
3536     KMP_TEST_THEN_INC32(&parent_task->td_incomplete_child_tasks);
3537     if (parent_task->td_taskgroup)
3538       KMP_TEST_THEN_INC32(&parent_task->td_taskgroup->count);
    // Only need to keep track of allocated child tasks for explicit tasks
    // since implicit ones are not deallocated
3541     if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
3542       KMP_TEST_THEN_INC32(&taskdata->td_parent->td_allocated_child_tasks);
3543   }
3544 
3545   KA_TRACE(20,
3546            ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
3547             thread, taskdata, taskdata->td_parent));
3548 #if OMPT_SUPPORT
3549   if (UNLIKELY(ompt_enabled.enabled))
3550     __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
3551 #endif
3552   return task;
3553 }
3554 
// Routine optionally generated by the compiler for setting the lastprivate flag
// and calling needed constructors for private/firstprivate objects
// (used to form taskloop tasks from the pattern task)
// Parameters: dest task, src task, lastprivate flag.
3559 typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
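
/* Illustrative sketch (hypothetical; real routines are emitted by the
 * compiler and depend on its private-data layout): a task_dup routine for a
 * taskloop body with one firstprivate object might look roughly like
 *
 *   void example_task_dup(kmp_task_t *dst, kmp_task_t *src,
 *                         kmp_int32 lastpriv) {
 *     // copy-construct the firstprivate object at its offset within dst;
 *     // private_addr() stands in for a compiler-computed offset
 *     new (private_addr(dst)) payload_t(*(payload_t *)private_addr(src));
 *     if (lastpriv)
 *       mark_lastprivate(dst); // record that dst runs the last iteration
 *   }
 *
 * where private_addr(), payload_t, and mark_lastprivate() are placeholders
 * for compiler-generated details.
 */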
3560 
3561 // __kmp_taskloop_linear: Start tasks of the taskloop linearly
3562 //
3563 // loc       Source location information
3564 // gtid      Global thread ID
3565 // task      Pattern task, exposes the loop iteration range
3566 // lb        Pointer to loop lower bound in task structure
3567 // ub        Pointer to loop upper bound in task structure
3568 // st        Loop stride
3569 // ub_glob   Global upper bound (used for lastprivate check)
3570 // num_tasks Number of tasks to execute
3571 // grainsize Number of loop iterations per task
3572 // extras    Number of chunks with grainsize+1 iterations
3573 // tc        Iterations count
3574 // task_dup  Tasks duplication routine
3575 void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
3576                            kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
3577                            kmp_uint64 ub_glob, kmp_uint64 num_tasks,
3578                            kmp_uint64 grainsize, kmp_uint64 extras,
3579                            kmp_uint64 tc, void *task_dup) {
3580   KMP_COUNT_BLOCK(OMP_TASKLOOP);
3581   KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
3582   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3583   kmp_uint64 lower = *lb; // compiler provides global bounds here
3584   kmp_uint64 upper = *ub;
3585   kmp_uint64 i;
3586   kmp_info_t *thread = __kmp_threads[gtid];
3587   kmp_taskdata_t *current_task = thread->th.th_current_task;
3588   kmp_task_t *next_task;
3589   kmp_int32 lastpriv = 0;
3590   size_t lower_offset =
3591       (char *)lb - (char *)task; // remember offset of lb in the task structure
3592   size_t upper_offset =
3593       (char *)ub - (char *)task; // remember offset of ub in the task structure
3594 
3595   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
3596   KMP_DEBUG_ASSERT(num_tasks > extras);
3597   KMP_DEBUG_ASSERT(num_tasks > 0);
3598   KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
3599                 "extras %lld, i=%lld,%lld(%d)%lld, dup %p\n",
3600                 gtid, num_tasks, grainsize, extras, lower, upper, ub_glob, st,
3601                 task_dup));
3602 
  // Launch num_tasks tasks, assigning grainsize iterations to each task
3604   for (i = 0; i < num_tasks; ++i) {
3605     kmp_uint64 chunk_minus_1;
3606     if (extras == 0) {
3607       chunk_minus_1 = grainsize - 1;
3608     } else {
3609       chunk_minus_1 = grainsize;
      --extras; // the first 'extras' tasks get a bigger chunk (grainsize+1)
3611     }
3612     upper = lower + st * chunk_minus_1;
3613     if (i == num_tasks - 1) {
3614       // schedule the last task, set lastprivate flag if needed
3615       if (st == 1) { // most common case
3616         KMP_DEBUG_ASSERT(upper == *ub);
3617         if (upper == ub_glob)
3618           lastpriv = 1;
3619       } else if (st > 0) { // positive loop stride
3620         KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
3621         if ((kmp_uint64)st > ub_glob - upper)
3622           lastpriv = 1;
3623       } else { // negative loop stride
3624         KMP_DEBUG_ASSERT(upper + st < *ub);
3625         if (upper - ub_glob < (kmp_uint64)(-st))
3626           lastpriv = 1;
3627       }
3628     }
3629     next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
3630     // adjust task-specific bounds
3631     *(kmp_uint64 *)((char *)next_task + lower_offset) = lower;
3632     *(kmp_uint64 *)((char *)next_task + upper_offset) = upper;
    if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates, etc.
3634       ptask_dup(next_task, task, lastpriv);
3635     KA_TRACE(40, ("__kmp_taskloop_linear: T#%d; task %p: lower %lld, "
3636                   "upper %lld (offsets %p %p)\n",
3637                   gtid, next_task, lower, upper, lower_offset, upper_offset));
3638     __kmp_omp_task(gtid, next_task, true); // schedule new task
3639     lower = upper + st; // adjust lower bound for the next iteration
3640   }
  // Free the pattern task without executing it; just do the internal
  // bookkeeping for task start and finish.
  __kmp_task_start(gtid, task, current_task);
  __kmp_task_finish(gtid, task, current_task);
3645 }
3646 
3647 // Structure to keep taskloop parameters for auxiliary task
3648 // kept in the shareds of the task structure.
3649 typedef struct __taskloop_params {
3650   kmp_task_t *task;
3651   kmp_uint64 *lb;
3652   kmp_uint64 *ub;
3653   void *task_dup;
3654   kmp_int64 st;
3655   kmp_uint64 ub_glob;
3656   kmp_uint64 num_tasks;
3657   kmp_uint64 grainsize;
3658   kmp_uint64 extras;
3659   kmp_uint64 tc;
3660   kmp_uint64 num_t_min;
3661 } __taskloop_params_t;
3662 
3663 void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
3664                           kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
3665                           kmp_uint64, kmp_uint64, kmp_uint64, kmp_uint64,
3666                           void *);
3667 
// Execute part of the taskloop submitted as a task.
3669 int __kmp_taskloop_task(int gtid, void *ptask) {
3670   __taskloop_params_t *p =
3671       (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
3672   kmp_task_t *task = p->task;
3673   kmp_uint64 *lb = p->lb;
3674   kmp_uint64 *ub = p->ub;
3675   void *task_dup = p->task_dup;
3676   //  p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3677   kmp_int64 st = p->st;
3678   kmp_uint64 ub_glob = p->ub_glob;
3679   kmp_uint64 num_tasks = p->num_tasks;
3680   kmp_uint64 grainsize = p->grainsize;
3681   kmp_uint64 extras = p->extras;
3682   kmp_uint64 tc = p->tc;
3683   kmp_uint64 num_t_min = p->num_t_min;
3684 #if KMP_DEBUG
3685   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3686   KMP_DEBUG_ASSERT(task != NULL);
3687   KA_TRACE(20, ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
3688                 " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
3689                 gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
3690                 task_dup));
3691 #endif
3692   KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
3693   if (num_tasks > num_t_min)
3694     __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3695                          grainsize, extras, tc, num_t_min, task_dup);
3696   else
3697     __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
3698                           grainsize, extras, tc, task_dup);
3699 
3700   KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
3701   return 0;
3702 }
3703 
// Schedule part of the taskloop as a task,
// execute the rest of the taskloop.
3706 //
3707 // loc       Source location information
3708 // gtid      Global thread ID
3709 // task      Pattern task, exposes the loop iteration range
3710 // lb        Pointer to loop lower bound in task structure
3711 // ub        Pointer to loop upper bound in task structure
3712 // st        Loop stride
3713 // ub_glob   Global upper bound (used for lastprivate check)
3714 // num_tasks Number of tasks to execute
3715 // grainsize Number of loop iterations per task
3716 // extras    Number of chunks with grainsize+1 iterations
3717 // tc        Iterations count
// num_t_min Threshold to launch tasks recursively
3719 // task_dup  Tasks duplication routine
3720 void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
3721                           kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
3722                           kmp_uint64 ub_glob, kmp_uint64 num_tasks,
3723                           kmp_uint64 grainsize, kmp_uint64 extras,
3724                           kmp_uint64 tc, kmp_uint64 num_t_min, void *task_dup) {
3725 #if KMP_DEBUG
3726   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3727   KMP_DEBUG_ASSERT(task != NULL);
3728   KMP_DEBUG_ASSERT(num_tasks > num_t_min);
3729   KA_TRACE(20, ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
3730                 " %lld, extras %lld, i=%lld,%lld(%d), dup %p\n",
3731                 gtid, taskdata, num_tasks, grainsize, extras, *lb, *ub, st,
3732                 task_dup));
3733 #endif
3734   p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
3735   kmp_uint64 lower = *lb;
3736   kmp_uint64 upper = *ub;
3737   kmp_info_t *thread = __kmp_threads[gtid];
3738   //  kmp_taskdata_t *current_task = thread->th.th_current_task;
3739   kmp_task_t *next_task;
3740   kmp_int32 lastpriv = 0;
3741   size_t lower_offset =
3742       (char *)lb - (char *)task; // remember offset of lb in the task structure
3743   size_t upper_offset =
3744       (char *)ub - (char *)task; // remember offset of ub in the task structure
3745 
3746   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
3747   KMP_DEBUG_ASSERT(num_tasks > extras);
3748   KMP_DEBUG_ASSERT(num_tasks > 0);
3749 
  // split the loop into two halves
3751   kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
3752   kmp_uint64 gr_size0 = grainsize;
3753   kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
3754   kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
3755   if (n_tsk0 <= extras) {
3756     gr_size0++; // integrate extras into grainsize
3757     ext0 = 0; // no extra iters in 1st half
3758     ext1 = extras - n_tsk0; // remaining extras
3759     tc0 = gr_size0 * n_tsk0;
3760     tc1 = tc - tc0;
3761   } else { // n_tsk0 > extras
3762     ext1 = 0; // no extra iters in 2nd half
3763     ext0 = extras;
3764     tc1 = grainsize * n_tsk1;
3765     tc0 = tc - tc1;
3766   }
3767   ub0 = lower + st * (tc0 - 1);
3768   lb1 = ub0 + st;
3769 
3770   // create pattern task for 2nd half of the loop
3771   next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
3772   // adjust lower bound (upper bound is not changed) for the 2nd half
3773   *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
  if (ptask_dup != NULL) // construct firstprivates, etc.
3775     ptask_dup(next_task, task, 0);
3776   *ub = ub0; // adjust upper bound for the 1st half
3777 
3778   // create auxiliary task for 2nd half of the loop
3779   kmp_task_t *new_task =
3780       __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
3781                             sizeof(__taskloop_params_t), &__kmp_taskloop_task);
3782   __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
3783   p->task = next_task;
3784   p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
3785   p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
3786   p->task_dup = task_dup;
3787   p->st = st;
3788   p->ub_glob = ub_glob;
3789   p->num_tasks = n_tsk1;
3790   p->grainsize = grainsize;
3791   p->extras = ext1;
3792   p->tc = tc1;
3793   p->num_t_min = num_t_min;
3794   __kmp_omp_task(gtid, new_task, true); // schedule new task
3795 
3796   // execute the 1st half of current subrange
3797   if (n_tsk0 > num_t_min)
3798     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
3799                          ext0, tc0, num_t_min, task_dup);
3800   else
3801     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
3802                           gr_size0, ext0, tc0, task_dup);
3803 
3804   KA_TRACE(40, ("__kmpc_taskloop_recur(exit): T#%d\n", gtid));
3805 }
3806 
3807 /*!
3808 @ingroup TASKING
3809 @param loc       Source location information
3810 @param gtid      Global thread ID
3811 @param task      Task structure
3812 @param if_val    Value of the if clause
3813 @param lb        Pointer to loop lower bound in task structure
3814 @param ub        Pointer to loop upper bound in task structure
3815 @param st        Loop stride
3816 @param nogroup   Flag, 1 if nogroup clause specified, 0 otherwise
3817 @param sched     Schedule specified 0/1/2 for none/grainsize/num_tasks
3818 @param grainsize Schedule value if specified
3819 @param task_dup  Tasks duplication routine
3820 
3821 Execute the taskloop construct.
3822 */
3823 void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
3824                      kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
3825                      int sched, kmp_uint64 grainsize, void *task_dup) {
3826   kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
3827   KMP_DEBUG_ASSERT(task != NULL);
3828 
3829   KA_TRACE(20, ("__kmpc_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
3830                 "grain %llu(%d), dup %p\n",
3831                 gtid, taskdata, *lb, *ub, st, grainsize, sched, task_dup));
3832 
3833 #if OMPT_SUPPORT && OMPT_OPTIONAL
3834   ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
3835   ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
3836   if (ompt_enabled.ompt_callback_work) {
3837     ompt_callbacks.ompt_callback(ompt_callback_work)(
3838         ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
3839         &(task_info->task_data), 0, OMPT_GET_RETURN_ADDRESS(0));
3840   }
3841 #endif
3842 
3843   if (nogroup == 0) {
3844 #if OMPT_SUPPORT && OMPT_OPTIONAL
3845     OMPT_STORE_RETURN_ADDRESS(gtid);
3846 #endif
3847     __kmpc_taskgroup(loc, gtid);
3848   }
3849 
3850   // =========================================================================
3851   // calculate loop parameters
3852   kmp_uint64 tc;
3853   kmp_uint64 lower = *lb; // compiler provides global bounds here
3854   kmp_uint64 upper = *ub;
3855   kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
3856   kmp_uint64 num_tasks = 0, extras = 0;
3857   kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
3858   kmp_info_t *thread = __kmp_threads[gtid];
3859   kmp_taskdata_t *current_task = thread->th.th_current_task;
3860 
3861   // compute trip count
3862   if (st == 1) { // most common case
3863     tc = upper - lower + 1;
3864   } else if (st < 0) {
3865     tc = (lower - upper) / (-st) + 1;
3866   } else { // st > 0
3867     tc = (upper - lower) / st + 1;
3868   }
3869   if (tc == 0) {
3870     KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d zero-trip loop\n", gtid));
3871     // free the pattern task and exit
3872     __kmp_task_start(gtid, task, current_task);
3873     // do not execute anything for zero-trip loop
3874     __kmp_task_finish(gtid, task, current_task);
3875     return;
3876   }
3877   if (num_tasks_min == 0)
    // TODO: can we choose a better default heuristic?
3879     num_tasks_min =
3880         KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
3881 
3882   // compute num_tasks/grainsize based on the input provided
3883   switch (sched) {
3884   case 0: // no schedule clause specified, we can choose the default
3885     // let's try to schedule (team_size*10) tasks
    grainsize = thread->th.th_team_nproc * 10;
    // intentional fall-through: the value computed above is treated as
    // num_tasks by the case 2 code below
3887   case 2: // num_tasks provided
3888     if (grainsize > tc) {
3889       num_tasks = tc; // too big num_tasks requested, adjust values
3890       grainsize = 1;
3891       extras = 0;
3892     } else {
3893       num_tasks = grainsize;
3894       grainsize = tc / num_tasks;
3895       extras = tc % num_tasks;
3896     }
3897     break;
3898   case 1: // grainsize provided
3899     if (grainsize > tc) {
3900       num_tasks = 1; // too big grainsize requested, adjust values
3901       grainsize = tc;
3902       extras = 0;
3903     } else {
3904       num_tasks = tc / grainsize;
3905       // adjust grainsize for balanced distribution of iterations
3906       grainsize = tc / num_tasks;
3907       extras = tc % num_tasks;
3908     }
3909     break;
3910   default:
3911     KMP_ASSERT2(0, "unknown scheduling of taskloop");
3912   }
3913   KMP_DEBUG_ASSERT(tc == num_tasks * grainsize + extras);
3914   KMP_DEBUG_ASSERT(num_tasks > extras);
3915   KMP_DEBUG_ASSERT(num_tasks > 0);
3916   // =========================================================================
3917 
  // check the if-clause value first
3919   if (if_val == 0) { // if(0) specified, mark task as serial
3920     taskdata->td_flags.task_serial = 1;
3921     taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
3922 #if OMPT_SUPPORT && OMPT_OPTIONAL
3923     OMPT_STORE_RETURN_ADDRESS(gtid);
3924 #endif
3925     // always start serial tasks linearly
3926     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
3927                           grainsize, extras, tc, task_dup);
3928   } else if (num_tasks > num_tasks_min) {
3929     KA_TRACE(20, ("__kmpc_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
3930                   "(%lld), grain %llu, extras %llu\n",
3931                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
3932 #if OMPT_SUPPORT && OMPT_OPTIONAL
3933     OMPT_STORE_RETURN_ADDRESS(gtid);
3934 #endif
3935     __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
3936                          grainsize, extras, tc, num_tasks_min, task_dup);
3937   } else {
3938     KA_TRACE(20, ("__kmpc_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
3939                   "(%lld), grain %llu, extras %llu\n",
3940                   gtid, tc, num_tasks, num_tasks_min, grainsize, extras));
3941 #if OMPT_SUPPORT && OMPT_OPTIONAL
3942     OMPT_STORE_RETURN_ADDRESS(gtid);
3943 #endif
3944     __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
3945                           grainsize, extras, tc, task_dup);
3946   }
3947 
3948   if (nogroup == 0) {
3949 #if OMPT_SUPPORT && OMPT_OPTIONAL
3950     OMPT_STORE_RETURN_ADDRESS(gtid);
3951 #endif
3952     __kmpc_end_taskgroup(loc, gtid);
3953   }
3954 #if OMPT_SUPPORT && OMPT_OPTIONAL
3955   if (ompt_enabled.ompt_callback_work) {
3956     ompt_callbacks.ompt_callback(ompt_callback_work)(
3957         ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
3958         &(task_info->task_data), 0, OMPT_GET_RETURN_ADDRESS(0));
3959   }
3960 #endif
3961   KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
3962 }
3963 
3964 #endif
3965