1 /*
2  * kmp_runtime.cpp -- KPTS runtime support library
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_atomic.h"
16 #include "kmp_environment.h"
17 #include "kmp_error.h"
18 #include "kmp_i18n.h"
19 #include "kmp_io.h"
20 #include "kmp_itt.h"
21 #include "kmp_settings.h"
22 #include "kmp_stats.h"
23 #include "kmp_str.h"
24 #include "kmp_wait_release.h"
25 #include "kmp_wrapper_getpid.h"
26 #include "kmp_dispatch.h"
27 #if KMP_USE_HIER_SCHED
28 #include "kmp_dispatch_hier.h"
29 #endif
30 
31 #if OMPT_SUPPORT
32 #include "ompt-specific.h"
33 #endif
34 
35 /* these are temporary issues to be dealt with */
36 #define KMP_USE_PRCTL 0
37 
38 #if KMP_OS_WINDOWS
39 #include <process.h>
40 #endif
41 
42 #include "tsan_annotations.h"
43 
44 #if defined(KMP_GOMP_COMPAT)
45 char const __kmp_version_alt_comp[] =
46     KMP_VERSION_PREFIX "alternative compiler support: yes";
47 #endif /* defined(KMP_GOMP_COMPAT) */
48 
49 char const __kmp_version_omp_api[] = KMP_VERSION_PREFIX "API version: "
50 #if OMP_50_ENABLED
51                                                         "5.0 (201611)";
52 #elif OMP_45_ENABLED
53                                                         "4.5 (201511)";
54 #elif OMP_40_ENABLED
55                                                         "4.0 (201307)";
56 #else
57                                                         "3.1 (201107)";
58 #endif
59 
60 #ifdef KMP_DEBUG
61 char const __kmp_version_lock[] =
62     KMP_VERSION_PREFIX "lock type: run time selectable";
63 #endif /* KMP_DEBUG */
64 
65 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
66 
67 /* ------------------------------------------------------------------------ */
68 
69 #if KMP_USE_MONITOR
70 kmp_info_t __kmp_monitor;
71 #endif
72 
73 /* Forward declarations */
74 
75 void __kmp_cleanup(void);
76 
77 static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *, int tid,
78                                   int gtid);
79 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
80                                   kmp_internal_control_t *new_icvs,
81                                   ident_t *loc);
82 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
83 static void __kmp_partition_places(kmp_team_t *team,
84                                    int update_master_only = 0);
85 #endif
86 static void __kmp_do_serial_initialize(void);
87 void __kmp_fork_barrier(int gtid, int tid);
88 void __kmp_join_barrier(int gtid);
89 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
90                           kmp_internal_control_t *new_icvs, ident_t *loc);
91 
92 #ifdef USE_LOAD_BALANCE
93 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc);
94 #endif
95 
96 static int __kmp_expand_threads(int nNeed);
97 #if KMP_OS_WINDOWS
98 static int __kmp_unregister_root_other_thread(int gtid);
99 #endif
100 static void __kmp_unregister_library(void); // called by __kmp_internal_end()
101 static void __kmp_reap_thread(kmp_info_t *thread, int is_root);
102 kmp_info_t *__kmp_thread_pool_insert_pt = NULL;
103 
104 /* Calculate the identifier of the current thread */
105 /* fast (and somewhat portable) way to get unique identifier of executing
106    thread. Returns KMP_GTID_DNE if we haven't been assigned a gtid. */
107 int __kmp_get_global_thread_id() {
108   int i;
109   kmp_info_t **other_threads;
110   size_t stack_data;
111   char *stack_addr;
112   size_t stack_size;
113   char *stack_base;
114 
115   KA_TRACE(
116       1000,
117       ("*** __kmp_get_global_thread_id: entering, nproc=%d  all_nproc=%d\n",
118        __kmp_nth, __kmp_all_nth));
119 
120   /* JPH - to handle the case where __kmpc_end(0) is called immediately prior to
121      a parallel region, made it return KMP_GTID_DNE to force serial_initialize
122      by caller. Had to handle KMP_GTID_DNE at all call-sites, or else guarantee
123      __kmp_init_gtid for this to work. */
124 
125   if (!TCR_4(__kmp_init_gtid))
126     return KMP_GTID_DNE;
127 
128 #ifdef KMP_TDATA_GTID
129   if (TCR_4(__kmp_gtid_mode) >= 3) {
130     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using TDATA\n"));
131     return __kmp_gtid;
132   }
133 #endif
134   if (TCR_4(__kmp_gtid_mode) >= 2) {
135     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using keyed TLS\n"));
136     return __kmp_gtid_get_specific();
137   }
138   KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using internal alg.\n"));
139 
140   stack_addr = (char *)&stack_data;
141   other_threads = __kmp_threads;
142 
143   /* ATT: The code below is a source of potential bugs due to unsynchronized
144      access to __kmp_threads array. For example:
145      1. Current thread loads other_threads[i] to thr and checks it, it is
146         non-NULL.
147      2. Current thread is suspended by OS.
148      3. Another thread unregisters and finishes (debug versions of free()
149         may fill memory with something like 0xEF).
150      4. Current thread is resumed.
151      5. Current thread reads junk from *thr.
152      TODO: Fix it.  --ln  */
153 
154   for (i = 0; i < __kmp_threads_capacity; i++) {
155 
156     kmp_info_t *thr = (kmp_info_t *)TCR_SYNC_PTR(other_threads[i]);
157     if (!thr)
158       continue;
159 
160     stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
161     stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);
162 
163     /* stack grows down -- search through all of the active threads */
164 
165     if (stack_addr <= stack_base) {
166       size_t stack_diff = stack_base - stack_addr;
167 
168       if (stack_diff <= stack_size) {
169         /* The only way we can be closer than the allocated */
170         /* stack size is if we are running on this thread. */
171         KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == i);
172         return i;
173       }
174     }
175   }
176 
177   /* get specific to try and determine our gtid */
178   KA_TRACE(1000,
179            ("*** __kmp_get_global_thread_id: internal alg. failed to find "
180             "thread, using TLS\n"));
181   i = __kmp_gtid_get_specific();
182 
183   /*fprintf( stderr, "=== %d\n", i );  */ /* GROO */
184 
185   /* if we havn't been assigned a gtid, then return code */
186   if (i < 0)
187     return i;
188 
189   /* dynamically updated stack window for uber threads to avoid get_specific
190      call */
191   if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) {
192     KMP_FATAL(StackOverflow, i);
193   }
194 
195   stack_base = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
196   if (stack_addr > stack_base) {
197     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
198     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
199             other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr -
200                 stack_base);
201   } else {
202     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
203             stack_base - stack_addr);
204   }
205 
206   /* Reprint stack bounds for ubermaster since they have been refined */
207   if (__kmp_storage_map) {
208     char *stack_end = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
209     char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
210     __kmp_print_storage_map_gtid(i, stack_beg, stack_end,
211                                  other_threads[i]->th.th_info.ds.ds_stacksize,
212                                  "th_%d stack (refinement)", i);
213   }
214   return i;
215 }
216 
217 int __kmp_get_global_thread_id_reg() {
218   int gtid;
219 
220   if (!__kmp_init_serial) {
221     gtid = KMP_GTID_DNE;
222   } else
223 #ifdef KMP_TDATA_GTID
224       if (TCR_4(__kmp_gtid_mode) >= 3) {
225     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using TDATA\n"));
226     gtid = __kmp_gtid;
227   } else
228 #endif
229       if (TCR_4(__kmp_gtid_mode) >= 2) {
230     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using keyed TLS\n"));
231     gtid = __kmp_gtid_get_specific();
232   } else {
233     KA_TRACE(1000,
234              ("*** __kmp_get_global_thread_id_reg: using internal alg.\n"));
235     gtid = __kmp_get_global_thread_id();
236   }
237 
238   /* we must be a new uber master sibling thread */
239   if (gtid == KMP_GTID_DNE) {
240     KA_TRACE(10,
241              ("__kmp_get_global_thread_id_reg: Encountered new root thread. "
242               "Registering a new gtid.\n"));
243     __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
244     if (!__kmp_init_serial) {
245       __kmp_do_serial_initialize();
246       gtid = __kmp_gtid_get_specific();
247     } else {
248       gtid = __kmp_register_root(FALSE);
249     }
250     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
251     /*__kmp_printf( "+++ %d\n", gtid ); */ /* GROO */
252   }
253 
254   KMP_DEBUG_ASSERT(gtid >= 0);
255 
256   return gtid;
257 }
258 
259 /* caller must hold forkjoin_lock */
260 void __kmp_check_stack_overlap(kmp_info_t *th) {
261   int f;
262   char *stack_beg = NULL;
263   char *stack_end = NULL;
264   int gtid;
265 
266   KA_TRACE(10, ("__kmp_check_stack_overlap: called\n"));
267   if (__kmp_storage_map) {
268     stack_end = (char *)th->th.th_info.ds.ds_stackbase;
269     stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
270 
271     gtid = __kmp_gtid_from_thread(th);
272 
273     if (gtid == KMP_GTID_MONITOR) {
274       __kmp_print_storage_map_gtid(
275           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
276           "th_%s stack (%s)", "mon",
277           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
278     } else {
279       __kmp_print_storage_map_gtid(
280           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
281           "th_%d stack (%s)", gtid,
282           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
283     }
284   }
285 
286   /* No point in checking ubermaster threads since they use refinement and
287    * cannot overlap */
288   gtid = __kmp_gtid_from_thread(th);
289   if (__kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) {
290     KA_TRACE(10,
291              ("__kmp_check_stack_overlap: performing extensive checking\n"));
292     if (stack_beg == NULL) {
293       stack_end = (char *)th->th.th_info.ds.ds_stackbase;
294       stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
295     }
296 
297     for (f = 0; f < __kmp_threads_capacity; f++) {
298       kmp_info_t *f_th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[f]);
299 
300       if (f_th && f_th != th) {
301         char *other_stack_end =
302             (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase);
303         char *other_stack_beg =
304             other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize);
305         if ((stack_beg > other_stack_beg && stack_beg < other_stack_end) ||
306             (stack_end > other_stack_beg && stack_end < other_stack_end)) {
307 
308           /* Print the other stack values before the abort */
309           if (__kmp_storage_map)
310             __kmp_print_storage_map_gtid(
311                 -1, other_stack_beg, other_stack_end,
312                 (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize),
313                 "th_%d stack (overlapped)", __kmp_gtid_from_thread(f_th));
314 
315           __kmp_fatal(KMP_MSG(StackOverlap), KMP_HNT(ChangeStackLimit),
316                       __kmp_msg_null);
317         }
318       }
319     }
320   }
321   KA_TRACE(10, ("__kmp_check_stack_overlap: returning\n"));
322 }
323 
324 /* ------------------------------------------------------------------------ */
325 
326 void __kmp_infinite_loop(void) {
327   static int done = FALSE;
328 
329   while (!done) {
330     KMP_YIELD(TRUE);
331   }
332 }
333 
334 #define MAX_MESSAGE 512
335 
336 void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size,
337                                   char const *format, ...) {
338   char buffer[MAX_MESSAGE];
339   va_list ap;
340 
341   va_start(ap, format);
342   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP storage map: %p %p%8lu %s\n", p1,
343                p2, (unsigned long)size, format);
344   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
345   __kmp_vprintf(kmp_err, buffer, ap);
346 #if KMP_PRINT_DATA_PLACEMENT
347   int node;
348   if (gtid >= 0) {
349     if (p1 <= p2 && (char *)p2 - (char *)p1 == size) {
350       if (__kmp_storage_map_verbose) {
351         node = __kmp_get_host_node(p1);
352         if (node < 0) /* doesn't work, so don't try this next time */
353           __kmp_storage_map_verbose = FALSE;
354         else {
355           char *last;
356           int lastNode;
357           int localProc = __kmp_get_cpu_from_gtid(gtid);
358 
359           const int page_size = KMP_GET_PAGE_SIZE();
360 
361           p1 = (void *)((size_t)p1 & ~((size_t)page_size - 1));
362           p2 = (void *)(((size_t)p2 - 1) & ~((size_t)page_size - 1));
363           if (localProc >= 0)
364             __kmp_printf_no_lock("  GTID %d localNode %d\n", gtid,
365                                  localProc >> 1);
366           else
367             __kmp_printf_no_lock("  GTID %d\n", gtid);
368 #if KMP_USE_PRCTL
369           /* The more elaborate format is disabled for now because of the prctl
370            * hanging bug. */
371           do {
372             last = p1;
373             lastNode = node;
374             /* This loop collates adjacent pages with the same host node. */
375             do {
376               (char *)p1 += page_size;
377             } while (p1 <= p2 && (node = __kmp_get_host_node(p1)) == lastNode);
378             __kmp_printf_no_lock("    %p-%p memNode %d\n", last, (char *)p1 - 1,
379                                  lastNode);
380           } while (p1 <= p2);
381 #else
382           __kmp_printf_no_lock("    %p-%p memNode %d\n", p1,
383                                (char *)p1 + (page_size - 1),
384                                __kmp_get_host_node(p1));
385           if (p1 < p2) {
386             __kmp_printf_no_lock("    %p-%p memNode %d\n", p2,
387                                  (char *)p2 + (page_size - 1),
388                                  __kmp_get_host_node(p2));
389           }
390 #endif
391         }
392       }
393     } else
394       __kmp_printf_no_lock("  %s\n", KMP_I18N_STR(StorageMapWarning));
395   }
396 #endif /* KMP_PRINT_DATA_PLACEMENT */
397   __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
398 }
399 
400 void __kmp_warn(char const *format, ...) {
401   char buffer[MAX_MESSAGE];
402   va_list ap;
403 
404   if (__kmp_generate_warnings == kmp_warnings_off) {
405     return;
406   }
407 
408   va_start(ap, format);
409 
410   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP warning: %s\n", format);
411   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
412   __kmp_vprintf(kmp_err, buffer, ap);
413   __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
414 
415   va_end(ap);
416 }
417 
418 void __kmp_abort_process() {
419   // Later threads may stall here, but that's ok because abort() will kill them.
420   __kmp_acquire_bootstrap_lock(&__kmp_exit_lock);
421 
422   if (__kmp_debug_buf) {
423     __kmp_dump_debug_buffer();
424   }
425 
426   if (KMP_OS_WINDOWS) {
427     // Let other threads know of abnormal termination and prevent deadlock
428     // if abort happened during library initialization or shutdown
429     __kmp_global.g.g_abort = SIGABRT;
430 
431     /* On Windows* OS by default abort() causes pop-up error box, which stalls
432        nightly testing. Unfortunately, we cannot reliably suppress pop-up error
433        boxes. _set_abort_behavior() works well, but this function is not
434        available in VS7 (this is not problem for DLL, but it is a problem for
435        static OpenMP RTL). SetErrorMode (and so, timelimit utility) does not
436        help, at least in some versions of MS C RTL.
437 
438        It seems following sequence is the only way to simulate abort() and
439        avoid pop-up error box. */
440     raise(SIGABRT);
441     _exit(3); // Just in case, if signal ignored, exit anyway.
442   } else {
443     abort();
444   }
445 
446   __kmp_infinite_loop();
447   __kmp_release_bootstrap_lock(&__kmp_exit_lock);
448 
449 } // __kmp_abort_process
450 
451 void __kmp_abort_thread(void) {
452   // TODO: Eliminate g_abort global variable and this function.
453   // In case of abort just call abort(), it will kill all the threads.
454   __kmp_infinite_loop();
455 } // __kmp_abort_thread
456 
457 /* Print out the storage map for the major kmp_info_t thread data structures
458    that are allocated together. */
459 
460 static void __kmp_print_thread_storage_map(kmp_info_t *thr, int gtid) {
461   __kmp_print_storage_map_gtid(gtid, thr, thr + 1, sizeof(kmp_info_t), "th_%d",
462                                gtid);
463 
464   __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team,
465                                sizeof(kmp_desc_t), "th_%d.th_info", gtid);
466 
467   __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head,
468                                sizeof(kmp_local_t), "th_%d.th_local", gtid);
469 
470   __kmp_print_storage_map_gtid(
471       gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier],
472       sizeof(kmp_balign_t) * bs_last_barrier, "th_%d.th_bar", gtid);
473 
474   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier],
475                                &thr->th.th_bar[bs_plain_barrier + 1],
476                                sizeof(kmp_balign_t), "th_%d.th_bar[plain]",
477                                gtid);
478 
479   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier],
480                                &thr->th.th_bar[bs_forkjoin_barrier + 1],
481                                sizeof(kmp_balign_t), "th_%d.th_bar[forkjoin]",
482                                gtid);
483 
484 #if KMP_FAST_REDUCTION_BARRIER
485   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier],
486                                &thr->th.th_bar[bs_reduction_barrier + 1],
487                                sizeof(kmp_balign_t), "th_%d.th_bar[reduction]",
488                                gtid);
489 #endif // KMP_FAST_REDUCTION_BARRIER
490 }
491 
492 /* Print out the storage map for the major kmp_team_t team data structures
493    that are allocated together. */
494 
495 static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
496                                          int team_id, int num_thr) {
497   int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
498   __kmp_print_storage_map_gtid(-1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
499                                header, team_id);
500 
501   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
502                                &team->t.t_bar[bs_last_barrier],
503                                sizeof(kmp_balign_team_t) * bs_last_barrier,
504                                "%s_%d.t_bar", header, team_id);
505 
506   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
507                                &team->t.t_bar[bs_plain_barrier + 1],
508                                sizeof(kmp_balign_team_t), "%s_%d.t_bar[plain]",
509                                header, team_id);
510 
511   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
512                                &team->t.t_bar[bs_forkjoin_barrier + 1],
513                                sizeof(kmp_balign_team_t),
514                                "%s_%d.t_bar[forkjoin]", header, team_id);
515 
516 #if KMP_FAST_REDUCTION_BARRIER
517   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
518                                &team->t.t_bar[bs_reduction_barrier + 1],
519                                sizeof(kmp_balign_team_t),
520                                "%s_%d.t_bar[reduction]", header, team_id);
521 #endif // KMP_FAST_REDUCTION_BARRIER
522 
523   __kmp_print_storage_map_gtid(
524       -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
525       sizeof(kmp_disp_t) * num_thr, "%s_%d.t_dispatch", header, team_id);
526 
527   __kmp_print_storage_map_gtid(
528       -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
529       sizeof(kmp_info_t *) * num_thr, "%s_%d.t_threads", header, team_id);
530 
531   __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
532                                &team->t.t_disp_buffer[num_disp_buff],
533                                sizeof(dispatch_shared_info_t) * num_disp_buff,
534                                "%s_%d.t_disp_buffer", header, team_id);
535 }
536 
537 static void __kmp_init_allocator() {
538 #if OMP_50_ENABLED
539   __kmp_init_memkind();
540 #endif
541 }
542 static void __kmp_fini_allocator() {
543 #if OMP_50_ENABLED
544   __kmp_fini_memkind();
545 #endif
546 }
547 
548 /* ------------------------------------------------------------------------ */
549 
550 #if KMP_DYNAMIC_LIB
551 #if KMP_OS_WINDOWS
552 
553 static void __kmp_reset_lock(kmp_bootstrap_lock_t *lck) {
554   // TODO: Change to __kmp_break_bootstrap_lock().
555   __kmp_init_bootstrap_lock(lck); // make the lock released
556 }
557 
558 static void __kmp_reset_locks_on_process_detach(int gtid_req) {
559   int i;
560   int thread_count;
561 
562   // PROCESS_DETACH is expected to be called by a thread that executes
563   // ProcessExit() or FreeLibrary(). OS terminates other threads (except the one
564   // calling ProcessExit or FreeLibrary). So, it might be safe to access the
565   // __kmp_threads[] without taking the forkjoin_lock. However, in fact, some
566   // threads can be still alive here, although being about to be terminated. The
567   // threads in the array with ds_thread==0 are most suspicious. Actually, it
568   // can be not safe to access the __kmp_threads[].
569 
570   // TODO: does it make sense to check __kmp_roots[] ?
571 
572   // Let's check that there are no other alive threads registered with the OMP
573   // lib.
574   while (1) {
575     thread_count = 0;
576     for (i = 0; i < __kmp_threads_capacity; ++i) {
577       if (!__kmp_threads)
578         continue;
579       kmp_info_t *th = __kmp_threads[i];
580       if (th == NULL)
581         continue;
582       int gtid = th->th.th_info.ds.ds_gtid;
583       if (gtid == gtid_req)
584         continue;
585       if (gtid < 0)
586         continue;
587       DWORD exit_val;
588       int alive = __kmp_is_thread_alive(th, &exit_val);
589       if (alive) {
590         ++thread_count;
591       }
592     }
593     if (thread_count == 0)
594       break; // success
595   }
596 
597   // Assume that I'm alone. Now it might be safe to check and reset locks.
598   // __kmp_forkjoin_lock and __kmp_stdio_lock are expected to be reset.
599   __kmp_reset_lock(&__kmp_forkjoin_lock);
600 #ifdef KMP_DEBUG
601   __kmp_reset_lock(&__kmp_stdio_lock);
602 #endif // KMP_DEBUG
603 }
604 
605 BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved) {
606   //__kmp_acquire_bootstrap_lock( &__kmp_initz_lock );
607 
608   switch (fdwReason) {
609 
610   case DLL_PROCESS_ATTACH:
611     KA_TRACE(10, ("DllMain: PROCESS_ATTACH\n"));
612 
613     return TRUE;
614 
615   case DLL_PROCESS_DETACH:
616     KA_TRACE(10, ("DllMain: PROCESS_DETACH T#%d\n", __kmp_gtid_get_specific()));
617 
618     if (lpReserved != NULL) {
619       // lpReserved is used for telling the difference:
620       //   lpReserved == NULL when FreeLibrary() was called,
621       //   lpReserved != NULL when the process terminates.
622       // When FreeLibrary() is called, worker threads remain alive. So they will
623       // release the forkjoin lock by themselves. When the process terminates,
624       // worker threads disappear triggering the problem of unreleased forkjoin
625       // lock as described below.
626 
627       // A worker thread can take the forkjoin lock. The problem comes up if
628       // that worker thread becomes dead before it releases the forkjoin lock.
629       // The forkjoin lock remains taken, while the thread executing
630       // DllMain()->PROCESS_DETACH->__kmp_internal_end_library() below will try
631       // to take the forkjoin lock and will always fail, so that the application
632       // will never finish [normally]. This scenario is possible if
633       // __kmpc_end() has not been executed. It looks like it's not a corner
634       // case, but common cases:
635       // - the main function was compiled by an alternative compiler;
636       // - the main function was compiled by icl but without /Qopenmp
637       //   (application with plugins);
638       // - application terminates by calling C exit(), Fortran CALL EXIT() or
639       //   Fortran STOP.
640       // - alive foreign thread prevented __kmpc_end from doing cleanup.
641       //
642       // This is a hack to work around the problem.
643       // TODO: !!! figure out something better.
644       __kmp_reset_locks_on_process_detach(__kmp_gtid_get_specific());
645     }
646 
647     __kmp_internal_end_library(__kmp_gtid_get_specific());
648 
649     return TRUE;
650 
651   case DLL_THREAD_ATTACH:
652     KA_TRACE(10, ("DllMain: THREAD_ATTACH\n"));
653 
654     /* if we want to register new siblings all the time here call
655      * __kmp_get_gtid(); */
656     return TRUE;
657 
658   case DLL_THREAD_DETACH:
659     KA_TRACE(10, ("DllMain: THREAD_DETACH T#%d\n", __kmp_gtid_get_specific()));
660 
661     __kmp_internal_end_thread(__kmp_gtid_get_specific());
662     return TRUE;
663   }
664 
665   return TRUE;
666 }
667 
668 #endif /* KMP_OS_WINDOWS */
669 #endif /* KMP_DYNAMIC_LIB */
670 
671 /* __kmp_parallel_deo -- Wait until it's our turn. */
672 void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
673   int gtid = *gtid_ref;
674 #ifdef BUILD_PARALLEL_ORDERED
675   kmp_team_t *team = __kmp_team_from_gtid(gtid);
676 #endif /* BUILD_PARALLEL_ORDERED */
677 
678   if (__kmp_env_consistency_check) {
679     if (__kmp_threads[gtid]->th.th_root->r.r_active)
680 #if KMP_USE_DYNAMIC_LOCK
681       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL, 0);
682 #else
683       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL);
684 #endif
685   }
686 #ifdef BUILD_PARALLEL_ORDERED
687   if (!team->t.t_serialized) {
688     KMP_MB();
689     KMP_WAIT(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid), KMP_EQ,
690              NULL);
691     KMP_MB();
692   }
693 #endif /* BUILD_PARALLEL_ORDERED */
694 }
695 
696 /* __kmp_parallel_dxo -- Signal the next task. */
697 void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
698   int gtid = *gtid_ref;
699 #ifdef BUILD_PARALLEL_ORDERED
700   int tid = __kmp_tid_from_gtid(gtid);
701   kmp_team_t *team = __kmp_team_from_gtid(gtid);
702 #endif /* BUILD_PARALLEL_ORDERED */
703 
704   if (__kmp_env_consistency_check) {
705     if (__kmp_threads[gtid]->th.th_root->r.r_active)
706       __kmp_pop_sync(gtid, ct_ordered_in_parallel, loc_ref);
707   }
708 #ifdef BUILD_PARALLEL_ORDERED
709   if (!team->t.t_serialized) {
710     KMP_MB(); /* Flush all pending memory write invalidates.  */
711 
712     /* use the tid of the next thread in this team */
713     /* TODO replace with general release procedure */
714     team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
715 
716     KMP_MB(); /* Flush all pending memory write invalidates.  */
717   }
718 #endif /* BUILD_PARALLEL_ORDERED */
719 }
720 
721 /* ------------------------------------------------------------------------ */
722 /* The BARRIER for a SINGLE process section is always explicit   */
723 
724 int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) {
725   int status;
726   kmp_info_t *th;
727   kmp_team_t *team;
728 
729   if (!TCR_4(__kmp_init_parallel))
730     __kmp_parallel_initialize();
731 
732 #if OMP_50_ENABLED
733   __kmp_resume_if_soft_paused();
734 #endif
735 
736   th = __kmp_threads[gtid];
737   team = th->th.th_team;
738   status = 0;
739 
740   th->th.th_ident = id_ref;
741 
742   if (team->t.t_serialized) {
743     status = 1;
744   } else {
745     kmp_int32 old_this = th->th.th_local.this_construct;
746 
747     ++th->th.th_local.this_construct;
748     /* try to set team count to thread count--success means thread got the
749        single block */
750     /* TODO: Should this be acquire or release? */
751     if (team->t.t_construct == old_this) {
752       status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
753                                               th->th.th_local.this_construct);
754     }
755 #if USE_ITT_BUILD
756     if (__itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
757         KMP_MASTER_GTID(gtid) &&
758 #if OMP_40_ENABLED
759         th->th.th_teams_microtask == NULL &&
760 #endif
761         team->t.t_active_level ==
762             1) { // Only report metadata by master of active team at level 1
763       __kmp_itt_metadata_single(id_ref);
764     }
765 #endif /* USE_ITT_BUILD */
766   }
767 
768   if (__kmp_env_consistency_check) {
769     if (status && push_ws) {
770       __kmp_push_workshare(gtid, ct_psingle, id_ref);
771     } else {
772       __kmp_check_workshare(gtid, ct_psingle, id_ref);
773     }
774   }
775 #if USE_ITT_BUILD
776   if (status) {
777     __kmp_itt_single_start(gtid);
778   }
779 #endif /* USE_ITT_BUILD */
780   return status;
781 }
782 
783 void __kmp_exit_single(int gtid) {
784 #if USE_ITT_BUILD
785   __kmp_itt_single_end(gtid);
786 #endif /* USE_ITT_BUILD */
787   if (__kmp_env_consistency_check)
788     __kmp_pop_workshare(gtid, ct_psingle, NULL);
789 }
790 
791 /* determine if we can go parallel or must use a serialized parallel region and
792  * how many threads we can use
793  * set_nproc is the number of threads requested for the team
794  * returns 0 if we should serialize or only use one thread,
795  * otherwise the number of threads to use
796  * The forkjoin lock is held by the caller. */
797 static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
798                                  int master_tid, int set_nthreads
799 #if OMP_40_ENABLED
800                                  ,
801                                  int enter_teams
802 #endif /* OMP_40_ENABLED */
803                                  ) {
804   int capacity;
805   int new_nthreads;
806   KMP_DEBUG_ASSERT(__kmp_init_serial);
807   KMP_DEBUG_ASSERT(root && parent_team);
808   kmp_info_t *this_thr = parent_team->t.t_threads[master_tid];
809 
810   // If dyn-var is set, dynamically adjust the number of desired threads,
811   // according to the method specified by dynamic_mode.
812   new_nthreads = set_nthreads;
813   if (!get__dynamic_2(parent_team, master_tid)) {
814     ;
815   }
816 #ifdef USE_LOAD_BALANCE
817   else if (__kmp_global.g.g_dynamic_mode == dynamic_load_balance) {
818     new_nthreads = __kmp_load_balance_nproc(root, set_nthreads);
819     if (new_nthreads == 1) {
820       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
821                     "reservation to 1 thread\n",
822                     master_tid));
823       return 1;
824     }
825     if (new_nthreads < set_nthreads) {
826       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
827                     "reservation to %d threads\n",
828                     master_tid, new_nthreads));
829     }
830   }
831 #endif /* USE_LOAD_BALANCE */
832   else if (__kmp_global.g.g_dynamic_mode == dynamic_thread_limit) {
833     new_nthreads = __kmp_avail_proc - __kmp_nth +
834                    (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
835     if (new_nthreads <= 1) {
836       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
837                     "reservation to 1 thread\n",
838                     master_tid));
839       return 1;
840     }
841     if (new_nthreads < set_nthreads) {
842       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
843                     "reservation to %d threads\n",
844                     master_tid, new_nthreads));
845     } else {
846       new_nthreads = set_nthreads;
847     }
848   } else if (__kmp_global.g.g_dynamic_mode == dynamic_random) {
849     if (set_nthreads > 2) {
850       new_nthreads = __kmp_get_random(parent_team->t.t_threads[master_tid]);
851       new_nthreads = (new_nthreads % set_nthreads) + 1;
852       if (new_nthreads == 1) {
853         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
854                       "reservation to 1 thread\n",
855                       master_tid));
856         return 1;
857       }
858       if (new_nthreads < set_nthreads) {
859         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
860                       "reservation to %d threads\n",
861                       master_tid, new_nthreads));
862       }
863     }
864   } else {
865     KMP_ASSERT(0);
866   }
867 
868   // Respect KMP_ALL_THREADS/KMP_DEVICE_THREAD_LIMIT.
869   if (__kmp_nth + new_nthreads -
870           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
871       __kmp_max_nth) {
872     int tl_nthreads = __kmp_max_nth - __kmp_nth +
873                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
874     if (tl_nthreads <= 0) {
875       tl_nthreads = 1;
876     }
877 
878     // If dyn-var is false, emit a 1-time warning.
879     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
880       __kmp_reserve_warn = 1;
881       __kmp_msg(kmp_ms_warning,
882                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
883                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
884     }
885     if (tl_nthreads == 1) {
886       KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT "
887                     "reduced reservation to 1 thread\n",
888                     master_tid));
889       return 1;
890     }
891     KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT reduced "
892                   "reservation to %d threads\n",
893                   master_tid, tl_nthreads));
894     new_nthreads = tl_nthreads;
895   }
896 
897   // Respect OMP_THREAD_LIMIT
898   int cg_nthreads = this_thr->th.th_cg_roots->cg_nthreads;
899   int max_cg_threads = this_thr->th.th_cg_roots->cg_thread_limit;
900   if (cg_nthreads + new_nthreads -
901           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
902       max_cg_threads) {
903     int tl_nthreads = max_cg_threads - cg_nthreads +
904                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
905     if (tl_nthreads <= 0) {
906       tl_nthreads = 1;
907     }
908 
909     // If dyn-var is false, emit a 1-time warning.
910     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
911       __kmp_reserve_warn = 1;
912       __kmp_msg(kmp_ms_warning,
913                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
914                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
915     }
916     if (tl_nthreads == 1) {
917       KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT "
918                     "reduced reservation to 1 thread\n",
919                     master_tid));
920       return 1;
921     }
922     KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT reduced "
923                   "reservation to %d threads\n",
924                   master_tid, tl_nthreads));
925     new_nthreads = tl_nthreads;
926   }
927 
928   // Check if the threads array is large enough, or needs expanding.
929   // See comment in __kmp_register_root() about the adjustment if
930   // __kmp_threads[0] == NULL.
931   capacity = __kmp_threads_capacity;
932   if (TCR_PTR(__kmp_threads[0]) == NULL) {
933     --capacity;
934   }
935   if (__kmp_nth + new_nthreads -
936           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
937       capacity) {
938     // Expand the threads array.
939     int slotsRequired = __kmp_nth + new_nthreads -
940                         (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
941                         capacity;
942     int slotsAdded = __kmp_expand_threads(slotsRequired);
943     if (slotsAdded < slotsRequired) {
944       // The threads array was not expanded enough.
945       new_nthreads -= (slotsRequired - slotsAdded);
946       KMP_ASSERT(new_nthreads >= 1);
947 
948       // If dyn-var is false, emit a 1-time warning.
949       if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
950         __kmp_reserve_warn = 1;
951         if (__kmp_tp_cached) {
952           __kmp_msg(kmp_ms_warning,
953                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
954                     KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
955                     KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
956         } else {
957           __kmp_msg(kmp_ms_warning,
958                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
959                     KMP_HNT(SystemLimitOnThreads), __kmp_msg_null);
960         }
961       }
962     }
963   }
964 
965 #ifdef KMP_DEBUG
966   if (new_nthreads == 1) {
967     KC_TRACE(10,
968              ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
969               "dead roots and rechecking; requested %d threads\n",
970               __kmp_get_gtid(), set_nthreads));
971   } else {
972     KC_TRACE(10, ("__kmp_reserve_threads: T#%d allocating %d threads; requested"
973                   " %d threads\n",
974                   __kmp_get_gtid(), new_nthreads, set_nthreads));
975   }
976 #endif // KMP_DEBUG
977   return new_nthreads;
978 }
979 
980 /* Allocate threads from the thread pool and assign them to the new team. We are
981    assured that there are enough threads available, because we checked on that
982    earlier within critical section forkjoin */
983 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
984                                     kmp_info_t *master_th, int master_gtid) {
985   int i;
986   int use_hot_team;
987 
988   KA_TRACE(10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
989   KMP_DEBUG_ASSERT(master_gtid == __kmp_get_gtid());
990   KMP_MB();
991 
992   /* first, let's setup the master thread */
993   master_th->th.th_info.ds.ds_tid = 0;
994   master_th->th.th_team = team;
995   master_th->th.th_team_nproc = team->t.t_nproc;
996   master_th->th.th_team_master = master_th;
997   master_th->th.th_team_serialized = FALSE;
998   master_th->th.th_dispatch = &team->t.t_dispatch[0];
999 
1000 /* make sure we are not the optimized hot team */
1001 #if KMP_NESTED_HOT_TEAMS
1002   use_hot_team = 0;
1003   kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
1004   if (hot_teams) { // hot teams array is not allocated if
1005     // KMP_HOT_TEAMS_MAX_LEVEL=0
1006     int level = team->t.t_active_level - 1; // index in array of hot teams
1007     if (master_th->th.th_teams_microtask) { // are we inside the teams?
1008       if (master_th->th.th_teams_size.nteams > 1) {
1009         ++level; // level was not increased in teams construct for
1010         // team_of_masters
1011       }
1012       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
1013           master_th->th.th_teams_level == team->t.t_level) {
1014         ++level; // level was not increased in teams construct for
1015         // team_of_workers before the parallel
1016       } // team->t.t_level will be increased inside parallel
1017     }
1018     if (level < __kmp_hot_teams_max_level) {
1019       if (hot_teams[level].hot_team) {
1020         // hot team has already been allocated for given level
1021         KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
1022         use_hot_team = 1; // the team is ready to use
1023       } else {
1024         use_hot_team = 0; // AC: threads are not allocated yet
1025         hot_teams[level].hot_team = team; // remember new hot team
1026         hot_teams[level].hot_team_nth = team->t.t_nproc;
1027       }
1028     } else {
1029       use_hot_team = 0;
1030     }
1031   }
1032 #else
1033   use_hot_team = team == root->r.r_hot_team;
1034 #endif
1035   if (!use_hot_team) {
1036 
1037     /* install the master thread */
1038     team->t.t_threads[0] = master_th;
1039     __kmp_initialize_info(master_th, team, 0, master_gtid);
1040 
1041     /* now, install the worker threads */
1042     for (i = 1; i < team->t.t_nproc; i++) {
1043 
1044       /* fork or reallocate a new thread and install it in team */
1045       kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1046       team->t.t_threads[i] = thr;
1047       KMP_DEBUG_ASSERT(thr);
1048       KMP_DEBUG_ASSERT(thr->th.th_team == team);
1049       /* align team and thread arrived states */
1050       KA_TRACE(20, ("__kmp_fork_team_threads: T#%d(%d:%d) init arrived "
1051                     "T#%d(%d:%d) join =%llu, plain=%llu\n",
1052                     __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
1053                     __kmp_gtid_from_tid(i, team), team->t.t_id, i,
1054                     team->t.t_bar[bs_forkjoin_barrier].b_arrived,
1055                     team->t.t_bar[bs_plain_barrier].b_arrived));
1056 #if OMP_40_ENABLED
1057       thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
1058       thr->th.th_teams_level = master_th->th.th_teams_level;
1059       thr->th.th_teams_size = master_th->th.th_teams_size;
1060 #endif
1061       { // Initialize threads' barrier data.
1062         int b;
1063         kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
1064         for (b = 0; b < bs_last_barrier; ++b) {
1065           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
1066           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
1067 #if USE_DEBUGGER
1068           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
1069 #endif
1070         }
1071       }
1072     }
1073 
1074 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
1075     __kmp_partition_places(team);
1076 #endif
1077   }
1078 
1079 #if OMP_50_ENABLED
1080   if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
1081     for (i = 0; i < team->t.t_nproc; i++) {
1082       kmp_info_t *thr = team->t.t_threads[i];
1083       if (thr->th.th_prev_num_threads != team->t.t_nproc ||
1084           thr->th.th_prev_level != team->t.t_level) {
1085         team->t.t_display_affinity = 1;
1086         break;
1087       }
1088     }
1089   }
1090 #endif
1091 
1092   KMP_MB();
1093 }
1094 
1095 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1096 // Propagate any changes to the floating point control registers out to the team
1097 // We try to avoid unnecessary writes to the relevant cache line in the team
1098 // structure, so we don't make changes unless they are needed.
1099 inline static void propagateFPControl(kmp_team_t *team) {
1100   if (__kmp_inherit_fp_control) {
1101     kmp_int16 x87_fpu_control_word;
1102     kmp_uint32 mxcsr;
1103 
1104     // Get master values of FPU control flags (both X87 and vector)
1105     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1106     __kmp_store_mxcsr(&mxcsr);
1107     mxcsr &= KMP_X86_MXCSR_MASK;
1108 
1109     // There is no point looking at t_fp_control_saved here.
1110     // If it is TRUE, we still have to update the values if they are different
1111     // from those we now have. If it is FALSE we didn't save anything yet, but
1112     // our objective is the same. We have to ensure that the values in the team
1113     // are the same as those we have.
1114     // So, this code achieves what we need whether or not t_fp_control_saved is
1115     // true. By checking whether the value needs updating we avoid unnecessary
1116     // writes that would put the cache-line into a written state, causing all
1117     // threads in the team to have to read it again.
1118     KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
1119     KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
1120     // Although we don't use this value, other code in the runtime wants to know
1121     // whether it should restore them. So we must ensure it is correct.
1122     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
1123   } else {
1124     // Similarly here. Don't write to this cache-line in the team structure
1125     // unless we have to.
1126     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
1127   }
1128 }
1129 
1130 // Do the opposite, setting the hardware registers to the updated values from
1131 // the team.
1132 inline static void updateHWFPControl(kmp_team_t *team) {
1133   if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
1134     // Only reset the fp control regs if they have been changed in the team.
1135     // the parallel region that we are exiting.
1136     kmp_int16 x87_fpu_control_word;
1137     kmp_uint32 mxcsr;
1138     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1139     __kmp_store_mxcsr(&mxcsr);
1140     mxcsr &= KMP_X86_MXCSR_MASK;
1141 
1142     if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
1143       __kmp_clear_x87_fpu_status_word();
1144       __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
1145     }
1146 
1147     if (team->t.t_mxcsr != mxcsr) {
1148       __kmp_load_mxcsr(&team->t.t_mxcsr);
1149     }
1150   }
1151 }
1152 #else
1153 #define propagateFPControl(x) ((void)0)
1154 #define updateHWFPControl(x) ((void)0)
1155 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1156 
1157 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
1158                                      int realloc); // forward declaration
1159 
1160 /* Run a parallel region that has been serialized, so runs only in a team of the
1161    single master thread. */
1162 void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
1163   kmp_info_t *this_thr;
1164   kmp_team_t *serial_team;
1165 
1166   KC_TRACE(10, ("__kmpc_serialized_parallel: called by T#%d\n", global_tid));
1167 
1168   /* Skip all this code for autopar serialized loops since it results in
1169      unacceptable overhead */
1170   if (loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR))
1171     return;
1172 
1173   if (!TCR_4(__kmp_init_parallel))
1174     __kmp_parallel_initialize();
1175 
1176 #if OMP_50_ENABLED
1177   __kmp_resume_if_soft_paused();
1178 #endif
1179 
1180   this_thr = __kmp_threads[global_tid];
1181   serial_team = this_thr->th.th_serial_team;
1182 
1183   /* utilize the serialized team held by this thread */
1184   KMP_DEBUG_ASSERT(serial_team);
1185   KMP_MB();
1186 
1187   if (__kmp_tasking_mode != tskm_immediate_exec) {
1188     KMP_DEBUG_ASSERT(
1189         this_thr->th.th_task_team ==
1190         this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]);
1191     KMP_DEBUG_ASSERT(serial_team->t.t_task_team[this_thr->th.th_task_state] ==
1192                      NULL);
1193     KA_TRACE(20, ("__kmpc_serialized_parallel: T#%d pushing task_team %p / "
1194                   "team %p, new task_team = NULL\n",
1195                   global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
1196     this_thr->th.th_task_team = NULL;
1197   }
1198 
1199 #if OMP_40_ENABLED
1200   kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
1201   if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1202     proc_bind = proc_bind_false;
1203   } else if (proc_bind == proc_bind_default) {
1204     // No proc_bind clause was specified, so use the current value
1205     // of proc-bind-var for this parallel region.
1206     proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
1207   }
1208   // Reset for next parallel region
1209   this_thr->th.th_set_proc_bind = proc_bind_default;
1210 #endif /* OMP_40_ENABLED */
1211 
1212 #if OMPT_SUPPORT
1213   ompt_data_t ompt_parallel_data = ompt_data_none;
1214   ompt_data_t *implicit_task_data;
1215   void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
1216   if (ompt_enabled.enabled &&
1217       this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
1218 
1219     ompt_task_info_t *parent_task_info;
1220     parent_task_info = OMPT_CUR_TASK_INFO(this_thr);
1221 
1222     parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1223     if (ompt_enabled.ompt_callback_parallel_begin) {
1224       int team_size = 1;
1225 
1226       ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1227           &(parent_task_info->task_data), &(parent_task_info->frame),
1228           &ompt_parallel_data, team_size, ompt_parallel_invoker_program,
1229           codeptr);
1230     }
1231   }
1232 #endif // OMPT_SUPPORT
1233 
1234   if (this_thr->th.th_team != serial_team) {
1235     // Nested level will be an index in the nested nthreads array
1236     int level = this_thr->th.th_team->t.t_level;
1237 
1238     if (serial_team->t.t_serialized) {
1239       /* this serial team was already used
1240          TODO increase performance by making this locks more specific */
1241       kmp_team_t *new_team;
1242 
1243       __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1244 
1245       new_team = __kmp_allocate_team(this_thr->th.th_root, 1, 1,
1246 #if OMPT_SUPPORT
1247                                      ompt_parallel_data,
1248 #endif
1249 #if OMP_40_ENABLED
1250                                      proc_bind,
1251 #endif
1252                                      &this_thr->th.th_current_task->td_icvs,
1253                                      0 USE_NESTED_HOT_ARG(NULL));
1254       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1255       KMP_ASSERT(new_team);
1256 
1257       /* setup new serialized team and install it */
1258       new_team->t.t_threads[0] = this_thr;
1259       new_team->t.t_parent = this_thr->th.th_team;
1260       serial_team = new_team;
1261       this_thr->th.th_serial_team = serial_team;
1262 
1263       KF_TRACE(
1264           10,
1265           ("__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
1266            global_tid, serial_team));
1267 
1268       /* TODO the above breaks the requirement that if we run out of resources,
1269          then we can still guarantee that serialized teams are ok, since we may
1270          need to allocate a new one */
1271     } else {
1272       KF_TRACE(
1273           10,
1274           ("__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
1275            global_tid, serial_team));
1276     }
1277 
1278     /* we have to initialize this serial team */
1279     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1280     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1281     KMP_DEBUG_ASSERT(this_thr->th.th_team != serial_team);
1282     serial_team->t.t_ident = loc;
1283     serial_team->t.t_serialized = 1;
1284     serial_team->t.t_nproc = 1;
1285     serial_team->t.t_parent = this_thr->th.th_team;
1286     serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched;
1287     this_thr->th.th_team = serial_team;
1288     serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
1289 
1290     KF_TRACE(10, ("__kmpc_serialized_parallel: T#d curtask=%p\n", global_tid,
1291                   this_thr->th.th_current_task));
1292     KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1);
1293     this_thr->th.th_current_task->td_flags.executing = 0;
1294 
1295     __kmp_push_current_task_to_thread(this_thr, serial_team, 0);
1296 
1297     /* TODO: GEH: do ICVs work for nested serialized teams? Don't we need an
1298        implicit task for each serialized task represented by
1299        team->t.t_serialized? */
1300     copy_icvs(&this_thr->th.th_current_task->td_icvs,
1301               &this_thr->th.th_current_task->td_parent->td_icvs);
1302 
1303     // Thread value exists in the nested nthreads array for the next nested
1304     // level
1305     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1306       this_thr->th.th_current_task->td_icvs.nproc =
1307           __kmp_nested_nth.nth[level + 1];
1308     }
1309 
1310 #if OMP_40_ENABLED
1311     if (__kmp_nested_proc_bind.used &&
1312         (level + 1 < __kmp_nested_proc_bind.used)) {
1313       this_thr->th.th_current_task->td_icvs.proc_bind =
1314           __kmp_nested_proc_bind.bind_types[level + 1];
1315     }
1316 #endif /* OMP_40_ENABLED */
1317 
1318 #if USE_DEBUGGER
1319     serial_team->t.t_pkfn = (microtask_t)(~0); // For the debugger.
1320 #endif
1321     this_thr->th.th_info.ds.ds_tid = 0;
1322 
1323     /* set thread cache values */
1324     this_thr->th.th_team_nproc = 1;
1325     this_thr->th.th_team_master = this_thr;
1326     this_thr->th.th_team_serialized = 1;
1327 
1328     serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
1329     serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
1330 #if OMP_50_ENABLED
1331     serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save
1332 #endif
1333 
1334     propagateFPControl(serial_team);
1335 
1336     /* check if we need to allocate dispatch buffers stack */
1337     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1338     if (!serial_team->t.t_dispatch->th_disp_buffer) {
1339       serial_team->t.t_dispatch->th_disp_buffer =
1340           (dispatch_private_info_t *)__kmp_allocate(
1341               sizeof(dispatch_private_info_t));
1342     }
1343     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1344 
1345     KMP_MB();
1346 
1347   } else {
1348     /* this serialized team is already being used,
1349      * that's fine, just add another nested level */
1350     KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team);
1351     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1352     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1353     ++serial_team->t.t_serialized;
1354     this_thr->th.th_team_serialized = serial_team->t.t_serialized;
1355 
1356     // Nested level will be an index in the nested nthreads array
1357     int level = this_thr->th.th_team->t.t_level;
1358     // Thread value exists in the nested nthreads array for the next nested
1359     // level
1360     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1361       this_thr->th.th_current_task->td_icvs.nproc =
1362           __kmp_nested_nth.nth[level + 1];
1363     }
1364     serial_team->t.t_level++;
1365     KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d increasing nesting level "
1366                   "of serial team %p to %d\n",
1367                   global_tid, serial_team, serial_team->t.t_level));
1368 
1369     /* allocate/push dispatch buffers stack */
1370     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1371     {
1372       dispatch_private_info_t *disp_buffer =
1373           (dispatch_private_info_t *)__kmp_allocate(
1374               sizeof(dispatch_private_info_t));
1375       disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer;
1376       serial_team->t.t_dispatch->th_disp_buffer = disp_buffer;
1377     }
1378     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1379 
1380     KMP_MB();
1381   }
1382 #if OMP_40_ENABLED
1383   KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);
1384 #endif
1385 
1386 #if OMP_50_ENABLED
1387   // Perform the display affinity functionality for
1388   // serialized parallel regions
1389   if (__kmp_display_affinity) {
1390     if (this_thr->th.th_prev_level != serial_team->t.t_level ||
1391         this_thr->th.th_prev_num_threads != 1) {
1392       // NULL means use the affinity-format-var ICV
1393       __kmp_aux_display_affinity(global_tid, NULL);
1394       this_thr->th.th_prev_level = serial_team->t.t_level;
1395       this_thr->th.th_prev_num_threads = 1;
1396     }
1397   }
1398 #endif
1399 
1400   if (__kmp_env_consistency_check)
1401     __kmp_push_parallel(global_tid, NULL);
1402 #if OMPT_SUPPORT
1403   serial_team->t.ompt_team_info.master_return_address = codeptr;
1404   if (ompt_enabled.enabled &&
1405       this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
1406     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1407 
1408     ompt_lw_taskteam_t lw_taskteam;
1409     __ompt_lw_taskteam_init(&lw_taskteam, this_thr, global_tid,
1410                             &ompt_parallel_data, codeptr);
1411 
1412     __ompt_lw_taskteam_link(&lw_taskteam, this_thr, 1);
1413     // don't use lw_taskteam after linking. content was swaped
1414 
1415     /* OMPT implicit task begin */
1416     implicit_task_data = OMPT_CUR_TASK_DATA(this_thr);
1417     if (ompt_enabled.ompt_callback_implicit_task) {
1418       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1419           ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),
1420           OMPT_CUR_TASK_DATA(this_thr), 1, __kmp_tid_from_gtid(global_tid), ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1421       OMPT_CUR_TASK_INFO(this_thr)
1422           ->thread_num = __kmp_tid_from_gtid(global_tid);
1423     }
1424 
1425     /* OMPT state */
1426     this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
1427     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1428   }
1429 #endif
1430 }
1431 
1432 /* most of the work for a fork */
1433 /* return true if we really went parallel, false if serialized */
1434 int __kmp_fork_call(ident_t *loc, int gtid,
1435                     enum fork_context_e call_context, // Intel, GNU, ...
1436                     kmp_int32 argc, microtask_t microtask, launch_t invoker,
1437 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1438 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1439                     va_list *ap
1440 #else
1441                     va_list ap
1442 #endif
1443                     ) {
1444   void **argv;
1445   int i;
1446   int master_tid;
1447   int master_this_cons;
1448   kmp_team_t *team;
1449   kmp_team_t *parent_team;
1450   kmp_info_t *master_th;
1451   kmp_root_t *root;
1452   int nthreads;
1453   int master_active;
1454   int master_set_numthreads;
1455   int level;
1456 #if OMP_40_ENABLED
1457   int active_level;
1458   int teams_level;
1459 #endif
1460 #if KMP_NESTED_HOT_TEAMS
1461   kmp_hot_team_ptr_t **p_hot_teams;
1462 #endif
1463   { // KMP_TIME_BLOCK
1464     KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call);
1465     KMP_COUNT_VALUE(OMP_PARALLEL_args, argc);
1466 
1467     KA_TRACE(20, ("__kmp_fork_call: enter T#%d\n", gtid));
1468     if (__kmp_stkpadding > 0 && __kmp_root[gtid] != NULL) {
1469       /* Some systems prefer the stack for the root thread(s) to start with */
1470       /* some gap from the parent stack to prevent false sharing. */
1471       void *dummy = KMP_ALLOCA(__kmp_stkpadding);
1472       /* These 2 lines below are so this does not get optimized out */
1473       if (__kmp_stkpadding > KMP_MAX_STKPADDING)
1474         __kmp_stkpadding += (short)((kmp_int64)dummy);
1475     }
1476 
1477     /* initialize if needed */
1478     KMP_DEBUG_ASSERT(
1479         __kmp_init_serial); // AC: potentially unsafe, not in sync with shutdown
1480     if (!TCR_4(__kmp_init_parallel))
1481       __kmp_parallel_initialize();
1482 
1483 #if OMP_50_ENABLED
1484     __kmp_resume_if_soft_paused();
1485 #endif
1486 
1487     /* setup current data */
1488     master_th = __kmp_threads[gtid]; // AC: potentially unsafe, not in sync with
1489     // shutdown
1490     parent_team = master_th->th.th_team;
1491     master_tid = master_th->th.th_info.ds.ds_tid;
1492     master_this_cons = master_th->th.th_local.this_construct;
1493     root = master_th->th.th_root;
1494     master_active = root->r.r_active;
1495     master_set_numthreads = master_th->th.th_set_nproc;
1496 
1497 #if OMPT_SUPPORT
1498     ompt_data_t ompt_parallel_data = ompt_data_none;
1499     ompt_data_t *parent_task_data;
1500     ompt_frame_t *ompt_frame;
1501     ompt_data_t *implicit_task_data;
1502     void *return_address = NULL;
1503 
1504     if (ompt_enabled.enabled) {
1505       __ompt_get_task_info_internal(0, NULL, &parent_task_data, &ompt_frame,
1506                                     NULL, NULL);
1507       return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
1508     }
1509 #endif
1510 
1511     // Nested level will be an index in the nested nthreads array
1512     level = parent_team->t.t_level;
1513     // used to launch non-serial teams even if nested is not allowed
1514     active_level = parent_team->t.t_active_level;
1515 #if OMP_40_ENABLED
1516     // needed to check nesting inside the teams
1517     teams_level = master_th->th.th_teams_level;
1518 #endif
1519 #if KMP_NESTED_HOT_TEAMS
1520     p_hot_teams = &master_th->th.th_hot_teams;
1521     if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
1522       *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
1523           sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level);
1524       (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
1525       // it is either actual or not needed (when active_level > 0)
1526       (*p_hot_teams)[0].hot_team_nth = 1;
1527     }
1528 #endif
1529 
1530 #if OMPT_SUPPORT
1531     if (ompt_enabled.enabled) {
1532       if (ompt_enabled.ompt_callback_parallel_begin) {
1533         int team_size = master_set_numthreads
1534                             ? master_set_numthreads
1535                             : get__nproc_2(parent_team, master_tid);
1536         ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1537             parent_task_data, ompt_frame, &ompt_parallel_data, team_size,
1538             OMPT_INVOKER(call_context), return_address);
1539       }
1540       master_th->th.ompt_thread_info.state = ompt_state_overhead;
1541     }
1542 #endif
1543 
1544     master_th->th.th_ident = loc;
1545 
1546 #if OMP_40_ENABLED
1547     if (master_th->th.th_teams_microtask && ap &&
1548         microtask != (microtask_t)__kmp_teams_master && level == teams_level) {
1549       // AC: This is start of parallel that is nested inside teams construct.
1550       // The team is actual (hot), all workers are ready at the fork barrier.
1551       // No lock needed to initialize the team a bit, then free workers.
1552       parent_team->t.t_ident = loc;
1553       __kmp_alloc_argv_entries(argc, parent_team, TRUE);
1554       parent_team->t.t_argc = argc;
1555       argv = (void **)parent_team->t.t_argv;
1556       for (i = argc - 1; i >= 0; --i)
1557 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1558 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1559         *argv++ = va_arg(*ap, void *);
1560 #else
1561         *argv++ = va_arg(ap, void *);
1562 #endif
1563       // Increment our nested depth levels, but not increase the serialization
1564       if (parent_team == master_th->th.th_serial_team) {
1565         // AC: we are in serialized parallel
1566         __kmpc_serialized_parallel(loc, gtid);
1567         KMP_DEBUG_ASSERT(parent_team->t.t_serialized > 1);
1568         // AC: need this in order enquiry functions work
1569         // correctly, will restore at join time
1570         parent_team->t.t_serialized--;
1571 #if OMPT_SUPPORT
1572         void *dummy;
1573         void **exit_runtime_p;
1574 
1575         ompt_lw_taskteam_t lw_taskteam;
1576 
1577         if (ompt_enabled.enabled) {
1578           __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1579                                   &ompt_parallel_data, return_address);
1580           exit_runtime_p = &(lw_taskteam.ompt_task_info.frame.exit_frame.ptr);
1581 
1582           __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1583           // don't use lw_taskteam after linking. content was swaped
1584 
1585           /* OMPT implicit task begin */
1586           implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1587           if (ompt_enabled.ompt_callback_implicit_task) {
1588             ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1589                 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1590                 implicit_task_data, 1, __kmp_tid_from_gtid(gtid), ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1591             OMPT_CUR_TASK_INFO(master_th)
1592                 ->thread_num = __kmp_tid_from_gtid(gtid);
1593           }
1594 
1595           /* OMPT state */
1596           master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1597         } else {
1598           exit_runtime_p = &dummy;
1599         }
1600 #endif
1601 
1602         {
1603           KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1604           KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1605           __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv
1606 #if OMPT_SUPPORT
1607                                  ,
1608                                  exit_runtime_p
1609 #endif
1610                                  );
1611         }
1612 
1613 #if OMPT_SUPPORT
1614         *exit_runtime_p = NULL;
1615         if (ompt_enabled.enabled) {
1616           OMPT_CUR_TASK_INFO(master_th)->frame.exit_frame = ompt_data_none;
1617           if (ompt_enabled.ompt_callback_implicit_task) {
1618             ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1619                 ompt_scope_end, NULL, implicit_task_data, 1,
1620                 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1621           }
1622           __ompt_lw_taskteam_unlink(master_th);
1623 
1624           if (ompt_enabled.ompt_callback_parallel_end) {
1625             ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1626                 OMPT_CUR_TEAM_DATA(master_th), OMPT_CUR_TASK_DATA(master_th),
1627                 OMPT_INVOKER(call_context), return_address);
1628           }
1629           master_th->th.ompt_thread_info.state = ompt_state_overhead;
1630         }
1631 #endif
1632         return TRUE;
1633       }
1634 
1635       parent_team->t.t_pkfn = microtask;
1636       parent_team->t.t_invoke = invoker;
1637       KMP_ATOMIC_INC(&root->r.r_in_parallel);
1638       parent_team->t.t_active_level++;
1639       parent_team->t.t_level++;
1640 #if OMP_50_ENABLED
1641       parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save
1642 #endif
1643 
1644       /* Change number of threads in the team if requested */
1645       if (master_set_numthreads) { // The parallel has num_threads clause
1646         if (master_set_numthreads < master_th->th.th_teams_size.nth) {
1647           // AC: only can reduce number of threads dynamically, can't increase
1648           kmp_info_t **other_threads = parent_team->t.t_threads;
1649           parent_team->t.t_nproc = master_set_numthreads;
1650           for (i = 0; i < master_set_numthreads; ++i) {
1651             other_threads[i]->th.th_team_nproc = master_set_numthreads;
1652           }
1653           // Keep extra threads hot in the team for possible next parallels
1654         }
1655         master_th->th.th_set_nproc = 0;
1656       }
1657 
1658 #if USE_DEBUGGER
1659       if (__kmp_debugging) { // Let debugger override number of threads.
1660         int nth = __kmp_omp_num_threads(loc);
1661         if (nth > 0) { // 0 means debugger doesn't want to change num threads
1662           master_set_numthreads = nth;
1663         }
1664       }
1665 #endif
1666 
1667       KF_TRACE(10, ("__kmp_fork_call: before internal fork: root=%p, team=%p, "
1668                     "master_th=%p, gtid=%d\n",
1669                     root, parent_team, master_th, gtid));
1670       __kmp_internal_fork(loc, gtid, parent_team);
1671       KF_TRACE(10, ("__kmp_fork_call: after internal fork: root=%p, team=%p, "
1672                     "master_th=%p, gtid=%d\n",
1673                     root, parent_team, master_th, gtid));
1674 
1675       /* Invoke microtask for MASTER thread */
1676       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
1677                     parent_team->t.t_id, parent_team->t.t_pkfn));
1678 
1679       if (!parent_team->t.t_invoke(gtid)) {
1680         KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
1681       }
1682       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
1683                     parent_team->t.t_id, parent_team->t.t_pkfn));
1684       KMP_MB(); /* Flush all pending memory write invalidates.  */
1685 
1686       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
1687 
1688       return TRUE;
1689     } // Parallel closely nested in teams construct
1690 #endif /* OMP_40_ENABLED */
1691 
1692 #if KMP_DEBUG
1693     if (__kmp_tasking_mode != tskm_immediate_exec) {
1694       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
1695                        parent_team->t.t_task_team[master_th->th.th_task_state]);
1696     }
1697 #endif
1698 
1699     if (parent_team->t.t_active_level >=
1700         master_th->th.th_current_task->td_icvs.max_active_levels) {
1701       nthreads = 1;
1702     } else {
1703 #if OMP_40_ENABLED
1704       int enter_teams = ((ap == NULL && active_level == 0) ||
1705                          (ap && teams_level > 0 && teams_level == level));
1706 #endif
1707       nthreads =
1708           master_set_numthreads
1709               ? master_set_numthreads
1710               : get__nproc_2(
1711                     parent_team,
1712                     master_tid); // TODO: get nproc directly from current task
1713 
1714       // Check if we need to take forkjoin lock? (no need for serialized
1715       // parallel out of teams construct). This code moved here from
1716       // __kmp_reserve_threads() to speedup nested serialized parallels.
1717       if (nthreads > 1) {
1718         if ((get__max_active_levels(master_th) == 1 && (root->r.r_in_parallel
1719 #if OMP_40_ENABLED
1720                                                         && !enter_teams
1721 #endif /* OMP_40_ENABLED */
1722                                                         )) ||
1723             (__kmp_library == library_serial)) {
1724           KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
1725                         " threads\n",
1726                         gtid, nthreads));
1727           nthreads = 1;
1728         }
1729       }
1730       if (nthreads > 1) {
1731         /* determine how many new threads we can use */
1732         __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1733         nthreads = __kmp_reserve_threads(
1734             root, parent_team, master_tid, nthreads
1735 #if OMP_40_ENABLED
1736             /* AC: If we execute teams from parallel region (on host), then
1737                teams should be created but each can only have 1 thread if
1738                nesting is disabled. If teams called from serial region, then
1739                teams and their threads should be created regardless of the
1740                nesting setting. */
1741             ,
1742             enter_teams
1743 #endif /* OMP_40_ENABLED */
1744             );
1745         if (nthreads == 1) {
1746           // Free lock for single thread execution here; for multi-thread
1747           // execution it will be freed later after team of threads created
1748           // and initialized
1749           __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1750         }
1751       }
1752     }
1753     KMP_DEBUG_ASSERT(nthreads > 0);
1754 
1755     // If we temporarily changed the set number of threads then restore it now
1756     master_th->th.th_set_nproc = 0;
1757 
1758     /* create a serialized parallel region? */
1759     if (nthreads == 1) {
1760 /* josh todo: hypothetical question: what do we do for OS X*? */
1761 #if KMP_OS_LINUX &&                                                            \
1762     (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
1763       void *args[argc];
1764 #else
1765       void **args = (void **)KMP_ALLOCA(argc * sizeof(void *));
1766 #endif /* KMP_OS_LINUX && ( KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || \
1767           KMP_ARCH_AARCH64) */
1768 
1769       KA_TRACE(20,
1770                ("__kmp_fork_call: T#%d serializing parallel region\n", gtid));
1771 
1772       __kmpc_serialized_parallel(loc, gtid);
1773 
1774       if (call_context == fork_context_intel) {
1775         /* TODO this sucks, use the compiler itself to pass args! :) */
1776         master_th->th.th_serial_team->t.t_ident = loc;
1777 #if OMP_40_ENABLED
1778         if (!ap) {
1779           // revert change made in __kmpc_serialized_parallel()
1780           master_th->th.th_serial_team->t.t_level--;
1781 // Get args from parent team for teams construct
1782 
1783 #if OMPT_SUPPORT
1784           void *dummy;
1785           void **exit_runtime_p;
1786           ompt_task_info_t *task_info;
1787 
1788           ompt_lw_taskteam_t lw_taskteam;
1789 
1790           if (ompt_enabled.enabled) {
1791             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1792                                     &ompt_parallel_data, return_address);
1793 
1794             __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1795             // don't use lw_taskteam after linking. content was swaped
1796 
1797             task_info = OMPT_CUR_TASK_INFO(master_th);
1798             exit_runtime_p = &(task_info->frame.exit_frame.ptr);
1799             if (ompt_enabled.ompt_callback_implicit_task) {
1800               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1801                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1802                   &(task_info->task_data), 1, __kmp_tid_from_gtid(gtid), ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1803               OMPT_CUR_TASK_INFO(master_th)
1804                   ->thread_num = __kmp_tid_from_gtid(gtid);
1805             }
1806 
1807             /* OMPT state */
1808             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1809           } else {
1810             exit_runtime_p = &dummy;
1811           }
1812 #endif
1813 
1814           {
1815             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1816             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1817             __kmp_invoke_microtask(microtask, gtid, 0, argc,
1818                                    parent_team->t.t_argv
1819 #if OMPT_SUPPORT
1820                                    ,
1821                                    exit_runtime_p
1822 #endif
1823                                    );
1824           }
1825 
1826 #if OMPT_SUPPORT
1827           if (ompt_enabled.enabled) {
1828             exit_runtime_p = NULL;
1829             if (ompt_enabled.ompt_callback_implicit_task) {
1830               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1831                   ompt_scope_end, NULL, &(task_info->task_data), 1,
1832                   OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1833             }
1834 
1835             __ompt_lw_taskteam_unlink(master_th);
1836             if (ompt_enabled.ompt_callback_parallel_end) {
1837               ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1838                   OMPT_CUR_TEAM_DATA(master_th), parent_task_data,
1839                   OMPT_INVOKER(call_context), return_address);
1840             }
1841             master_th->th.ompt_thread_info.state = ompt_state_overhead;
1842           }
1843 #endif
1844         } else if (microtask == (microtask_t)__kmp_teams_master) {
1845           KMP_DEBUG_ASSERT(master_th->th.th_team ==
1846                            master_th->th.th_serial_team);
1847           team = master_th->th.th_team;
1848           // team->t.t_pkfn = microtask;
1849           team->t.t_invoke = invoker;
1850           __kmp_alloc_argv_entries(argc, team, TRUE);
1851           team->t.t_argc = argc;
1852           argv = (void **)team->t.t_argv;
1853           if (ap) {
1854             for (i = argc - 1; i >= 0; --i)
1855 // TODO: revert workaround for Intel(R) 64 tracker #96
1856 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1857               *argv++ = va_arg(*ap, void *);
1858 #else
1859               *argv++ = va_arg(ap, void *);
1860 #endif
1861           } else {
1862             for (i = 0; i < argc; ++i)
1863               // Get args from parent team for teams construct
1864               argv[i] = parent_team->t.t_argv[i];
1865           }
1866           // AC: revert change made in __kmpc_serialized_parallel()
1867           //     because initial code in teams should have level=0
1868           team->t.t_level--;
1869           // AC: call special invoker for outer "parallel" of teams construct
1870           invoker(gtid);
1871         } else {
1872 #endif /* OMP_40_ENABLED */
1873           argv = args;
1874           for (i = argc - 1; i >= 0; --i)
1875 // TODO: revert workaround for Intel(R) 64 tracker #96
1876 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1877             *argv++ = va_arg(*ap, void *);
1878 #else
1879           *argv++ = va_arg(ap, void *);
1880 #endif
1881           KMP_MB();
1882 
1883 #if OMPT_SUPPORT
1884           void *dummy;
1885           void **exit_runtime_p;
1886           ompt_task_info_t *task_info;
1887 
1888           ompt_lw_taskteam_t lw_taskteam;
1889 
1890           if (ompt_enabled.enabled) {
1891             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1892                                     &ompt_parallel_data, return_address);
1893             __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1894             // don't use lw_taskteam after linking. content was swaped
1895             task_info = OMPT_CUR_TASK_INFO(master_th);
1896             exit_runtime_p = &(task_info->frame.exit_frame.ptr);
1897 
1898             /* OMPT implicit task begin */
1899             implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1900             if (ompt_enabled.ompt_callback_implicit_task) {
1901               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1902                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1903                   implicit_task_data, 1, __kmp_tid_from_gtid(gtid), ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1904               OMPT_CUR_TASK_INFO(master_th)
1905                   ->thread_num = __kmp_tid_from_gtid(gtid);
1906             }
1907 
1908             /* OMPT state */
1909             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1910           } else {
1911             exit_runtime_p = &dummy;
1912           }
1913 #endif
1914 
1915           {
1916             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1917             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1918             __kmp_invoke_microtask(microtask, gtid, 0, argc, args
1919 #if OMPT_SUPPORT
1920                                    ,
1921                                    exit_runtime_p
1922 #endif
1923                                    );
1924           }
1925 
1926 #if OMPT_SUPPORT
1927           if (ompt_enabled.enabled) {
1928             *exit_runtime_p = NULL;
1929             if (ompt_enabled.ompt_callback_implicit_task) {
1930               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1931                   ompt_scope_end, NULL, &(task_info->task_data), 1,
1932                   OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1933             }
1934 
1935             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1936             __ompt_lw_taskteam_unlink(master_th);
1937             if (ompt_enabled.ompt_callback_parallel_end) {
1938               ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1939                   &ompt_parallel_data, parent_task_data,
1940                   OMPT_INVOKER(call_context), return_address);
1941             }
1942             master_th->th.ompt_thread_info.state = ompt_state_overhead;
1943           }
1944 #endif
1945 #if OMP_40_ENABLED
1946         }
1947 #endif /* OMP_40_ENABLED */
1948       } else if (call_context == fork_context_gnu) {
1949 #if OMPT_SUPPORT
1950         ompt_lw_taskteam_t lwt;
1951         __ompt_lw_taskteam_init(&lwt, master_th, gtid, &ompt_parallel_data,
1952                                 return_address);
1953 
1954         lwt.ompt_task_info.frame.exit_frame = ompt_data_none;
1955         __ompt_lw_taskteam_link(&lwt, master_th, 1);
1956 // don't use lw_taskteam after linking. content was swaped
1957 #endif
1958 
1959         // we were called from GNU native code
1960         KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1961         return FALSE;
1962       } else {
1963         KMP_ASSERT2(call_context < fork_context_last,
1964                     "__kmp_fork_call: unknown fork_context parameter");
1965       }
1966 
1967       KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1968       KMP_MB();
1969       return FALSE;
1970     } // if (nthreads == 1)
1971 
1972     // GEH: only modify the executing flag in the case when not serialized
1973     //      serialized case is handled in kmpc_serialized_parallel
1974     KF_TRACE(10, ("__kmp_fork_call: parent_team_aclevel=%d, master_th=%p, "
1975                   "curtask=%p, curtask_max_aclevel=%d\n",
1976                   parent_team->t.t_active_level, master_th,
1977                   master_th->th.th_current_task,
1978                   master_th->th.th_current_task->td_icvs.max_active_levels));
1979     // TODO: GEH - cannot do this assertion because root thread not set up as
1980     // executing
1981     // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 1 );
1982     master_th->th.th_current_task->td_flags.executing = 0;
1983 
1984 #if OMP_40_ENABLED
1985     if (!master_th->th.th_teams_microtask || level > teams_level)
1986 #endif /* OMP_40_ENABLED */
1987     {
1988       /* Increment our nested depth level */
1989       KMP_ATOMIC_INC(&root->r.r_in_parallel);
1990     }
1991 
1992     // See if we need to make a copy of the ICVs.
1993     int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc;
1994     if ((level + 1 < __kmp_nested_nth.used) &&
1995         (__kmp_nested_nth.nth[level + 1] != nthreads_icv)) {
1996       nthreads_icv = __kmp_nested_nth.nth[level + 1];
1997     } else {
1998       nthreads_icv = 0; // don't update
1999     }
2000 
2001 #if OMP_40_ENABLED
2002     // Figure out the proc_bind_policy for the new team.
2003     kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
2004     kmp_proc_bind_t proc_bind_icv =
2005         proc_bind_default; // proc_bind_default means don't update
2006     if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
2007       proc_bind = proc_bind_false;
2008     } else {
2009       if (proc_bind == proc_bind_default) {
2010         // No proc_bind clause specified; use current proc-bind-var for this
2011         // parallel region
2012         proc_bind = master_th->th.th_current_task->td_icvs.proc_bind;
2013       }
2014       /* else: The proc_bind policy was specified explicitly on parallel clause.
2015          This overrides proc-bind-var for this parallel region, but does not
2016          change proc-bind-var. */
2017       // Figure the value of proc-bind-var for the child threads.
2018       if ((level + 1 < __kmp_nested_proc_bind.used) &&
2019           (__kmp_nested_proc_bind.bind_types[level + 1] !=
2020            master_th->th.th_current_task->td_icvs.proc_bind)) {
2021         proc_bind_icv = __kmp_nested_proc_bind.bind_types[level + 1];
2022       }
2023     }
2024 
2025     // Reset for next parallel region
2026     master_th->th.th_set_proc_bind = proc_bind_default;
2027 #endif /* OMP_40_ENABLED */
2028 
2029     if ((nthreads_icv > 0)
2030 #if OMP_40_ENABLED
2031         || (proc_bind_icv != proc_bind_default)
2032 #endif /* OMP_40_ENABLED */
2033             ) {
2034       kmp_internal_control_t new_icvs;
2035       copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
2036       new_icvs.next = NULL;
2037       if (nthreads_icv > 0) {
2038         new_icvs.nproc = nthreads_icv;
2039       }
2040 
2041 #if OMP_40_ENABLED
2042       if (proc_bind_icv != proc_bind_default) {
2043         new_icvs.proc_bind = proc_bind_icv;
2044       }
2045 #endif /* OMP_40_ENABLED */
2046 
2047       /* allocate a new parallel team */
2048       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2049       team = __kmp_allocate_team(root, nthreads, nthreads,
2050 #if OMPT_SUPPORT
2051                                  ompt_parallel_data,
2052 #endif
2053 #if OMP_40_ENABLED
2054                                  proc_bind,
2055 #endif
2056                                  &new_icvs, argc USE_NESTED_HOT_ARG(master_th));
2057     } else {
2058       /* allocate a new parallel team */
2059       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2060       team = __kmp_allocate_team(root, nthreads, nthreads,
2061 #if OMPT_SUPPORT
2062                                  ompt_parallel_data,
2063 #endif
2064 #if OMP_40_ENABLED
2065                                  proc_bind,
2066 #endif
2067                                  &master_th->th.th_current_task->td_icvs,
2068                                  argc USE_NESTED_HOT_ARG(master_th));
2069     }
2070     KF_TRACE(
2071         10, ("__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));
2072 
2073     /* setup the new team */
2074     KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
2075     KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
2076     KMP_CHECK_UPDATE(team->t.t_ident, loc);
2077     KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
2078     KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
2079 #if OMPT_SUPPORT
2080     KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
2081                           return_address);
2082 #endif
2083     KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
2084 // TODO: parent_team->t.t_level == INT_MAX ???
2085 #if OMP_40_ENABLED
2086     if (!master_th->th.th_teams_microtask || level > teams_level) {
2087 #endif /* OMP_40_ENABLED */
2088       int new_level = parent_team->t.t_level + 1;
2089       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2090       new_level = parent_team->t.t_active_level + 1;
2091       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2092 #if OMP_40_ENABLED
2093     } else {
2094       // AC: Do not increase parallel level at start of the teams construct
2095       int new_level = parent_team->t.t_level;
2096       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2097       new_level = parent_team->t.t_active_level;
2098       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2099     }
2100 #endif /* OMP_40_ENABLED */
2101     kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
2102     // set master's schedule as new run-time schedule
2103     KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
2104 
2105 #if OMP_40_ENABLED
2106     KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
2107 #endif
2108 #if OMP_50_ENABLED
2109     KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
2110 #endif
2111 
2112     // Update the floating point rounding in the team if required.
2113     propagateFPControl(team);
2114 
2115     if (__kmp_tasking_mode != tskm_immediate_exec) {
2116       // Set master's task team to team's task team. Unless this is hot team, it
2117       // should be NULL.
2118       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2119                        parent_team->t.t_task_team[master_th->th.th_task_state]);
2120       KA_TRACE(20, ("__kmp_fork_call: Master T#%d pushing task_team %p / team "
2121                     "%p, new task_team %p / team %p\n",
2122                     __kmp_gtid_from_thread(master_th),
2123                     master_th->th.th_task_team, parent_team,
2124                     team->t.t_task_team[master_th->th.th_task_state], team));
2125 
2126       if (active_level || master_th->th.th_task_team) {
2127         // Take a memo of master's task_state
2128         KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2129         if (master_th->th.th_task_state_top >=
2130             master_th->th.th_task_state_stack_sz) { // increase size
2131           kmp_uint32 new_size = 2 * master_th->th.th_task_state_stack_sz;
2132           kmp_uint8 *old_stack, *new_stack;
2133           kmp_uint32 i;
2134           new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
2135           for (i = 0; i < master_th->th.th_task_state_stack_sz; ++i) {
2136             new_stack[i] = master_th->th.th_task_state_memo_stack[i];
2137           }
2138           for (i = master_th->th.th_task_state_stack_sz; i < new_size;
2139                ++i) { // zero-init rest of stack
2140             new_stack[i] = 0;
2141           }
2142           old_stack = master_th->th.th_task_state_memo_stack;
2143           master_th->th.th_task_state_memo_stack = new_stack;
2144           master_th->th.th_task_state_stack_sz = new_size;
2145           __kmp_free(old_stack);
2146         }
2147         // Store master's task_state on stack
2148         master_th->th
2149             .th_task_state_memo_stack[master_th->th.th_task_state_top] =
2150             master_th->th.th_task_state;
2151         master_th->th.th_task_state_top++;
2152 #if KMP_NESTED_HOT_TEAMS
2153         if (master_th->th.th_hot_teams &&
2154             active_level < __kmp_hot_teams_max_level &&
2155             team == master_th->th.th_hot_teams[active_level].hot_team) {
2156           // Restore master's nested state if nested hot team
2157           master_th->th.th_task_state =
2158               master_th->th
2159                   .th_task_state_memo_stack[master_th->th.th_task_state_top];
2160         } else {
2161 #endif
2162           master_th->th.th_task_state = 0;
2163 #if KMP_NESTED_HOT_TEAMS
2164         }
2165 #endif
2166       }
2167 #if !KMP_NESTED_HOT_TEAMS
2168       KMP_DEBUG_ASSERT((master_th->th.th_task_team == NULL) ||
2169                        (team == root->r.r_hot_team));
2170 #endif
2171     }
2172 
2173     KA_TRACE(
2174         20,
2175         ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
2176          gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
2177          team->t.t_nproc));
2178     KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2179                      (team->t.t_master_tid == 0 &&
2180                       (team->t.t_parent == root->r.r_root_team ||
2181                        team->t.t_parent->t.t_serialized)));
2182     KMP_MB();
2183 
2184     /* now, setup the arguments */
2185     argv = (void **)team->t.t_argv;
2186 #if OMP_40_ENABLED
2187     if (ap) {
2188 #endif /* OMP_40_ENABLED */
2189       for (i = argc - 1; i >= 0; --i) {
2190 // TODO: revert workaround for Intel(R) 64 tracker #96
2191 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
2192         void *new_argv = va_arg(*ap, void *);
2193 #else
2194       void *new_argv = va_arg(ap, void *);
2195 #endif
2196         KMP_CHECK_UPDATE(*argv, new_argv);
2197         argv++;
2198       }
2199 #if OMP_40_ENABLED
2200     } else {
2201       for (i = 0; i < argc; ++i) {
2202         // Get args from parent team for teams construct
2203         KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
2204       }
2205     }
2206 #endif /* OMP_40_ENABLED */
2207 
2208     /* now actually fork the threads */
2209     KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
2210     if (!root->r.r_active) // Only do assignment if it prevents cache ping-pong
2211       root->r.r_active = TRUE;
2212 
2213     __kmp_fork_team_threads(root, team, master_th, gtid);
2214     __kmp_setup_icv_copy(team, nthreads,
2215                          &master_th->th.th_current_task->td_icvs, loc);
2216 
2217 #if OMPT_SUPPORT
2218     master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
2219 #endif
2220 
2221     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2222 
2223 #if USE_ITT_BUILD
2224     if (team->t.t_active_level == 1 // only report frames at level 1
2225 #if OMP_40_ENABLED
2226         && !master_th->th.th_teams_microtask // not in teams construct
2227 #endif /* OMP_40_ENABLED */
2228         ) {
2229 #if USE_ITT_NOTIFY
2230       if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2231           (__kmp_forkjoin_frames_mode == 3 ||
2232            __kmp_forkjoin_frames_mode == 1)) {
2233         kmp_uint64 tmp_time = 0;
2234         if (__itt_get_timestamp_ptr)
2235           tmp_time = __itt_get_timestamp();
2236         // Internal fork - report frame begin
2237         master_th->th.th_frame_time = tmp_time;
2238         if (__kmp_forkjoin_frames_mode == 3)
2239           team->t.t_region_time = tmp_time;
2240       } else
2241 // only one notification scheme (either "submit" or "forking/joined", not both)
2242 #endif /* USE_ITT_NOTIFY */
2243           if ((__itt_frame_begin_v3_ptr || KMP_ITT_DEBUG) &&
2244               __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode) {
2245         // Mark start of "parallel" region for Intel(R) VTune(TM) analyzer.
2246         __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
2247       }
2248     }
2249 #endif /* USE_ITT_BUILD */
2250 
2251     /* now go on and do the work */
2252     KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
2253     KMP_MB();
2254     KF_TRACE(10,
2255              ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2256               root, team, master_th, gtid));
2257 
2258 #if USE_ITT_BUILD
2259     if (__itt_stack_caller_create_ptr) {
2260       team->t.t_stack_id =
2261           __kmp_itt_stack_caller_create(); // create new stack stitching id
2262       // before entering fork barrier
2263     }
2264 #endif /* USE_ITT_BUILD */
2265 
2266 #if OMP_40_ENABLED
2267     // AC: skip __kmp_internal_fork at teams construct, let only master
2268     // threads execute
2269     if (ap)
2270 #endif /* OMP_40_ENABLED */
2271     {
2272       __kmp_internal_fork(loc, gtid, team);
2273       KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
2274                     "master_th=%p, gtid=%d\n",
2275                     root, team, master_th, gtid));
2276     }
2277 
2278     if (call_context == fork_context_gnu) {
2279       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2280       return TRUE;
2281     }
2282 
2283     /* Invoke microtask for MASTER thread */
2284     KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
2285                   team->t.t_id, team->t.t_pkfn));
2286   } // END of timer KMP_fork_call block
2287 
2288 #if KMP_STATS_ENABLED && OMP_40_ENABLED
2289   // If beginning a teams construct, then change thread state
2290   stats_state_e previous_state = KMP_GET_THREAD_STATE();
2291   if (!ap) {
2292     KMP_SET_THREAD_STATE(stats_state_e::TEAMS_REGION);
2293   }
2294 #endif
2295 
2296   if (!team->t.t_invoke(gtid)) {
2297     KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
2298   }
2299 
2300 #if KMP_STATS_ENABLED && OMP_40_ENABLED
2301   // If was beginning of a teams construct, then reset thread state
2302   if (!ap) {
2303     KMP_SET_THREAD_STATE(previous_state);
2304   }
2305 #endif
2306 
2307   KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
2308                 team->t.t_id, team->t.t_pkfn));
2309   KMP_MB(); /* Flush all pending memory write invalidates.  */
2310 
2311   KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2312 
2313 #if OMPT_SUPPORT
2314   if (ompt_enabled.enabled) {
2315     master_th->th.ompt_thread_info.state = ompt_state_overhead;
2316   }
2317 #endif
2318 
2319   return TRUE;
2320 }
2321 
2322 #if OMPT_SUPPORT
2323 static inline void __kmp_join_restore_state(kmp_info_t *thread,
2324                                             kmp_team_t *team) {
2325   // restore state outside the region
2326   thread->th.ompt_thread_info.state =
2327       ((team->t.t_serialized) ? ompt_state_work_serial
2328                               : ompt_state_work_parallel);
2329 }
2330 
2331 static inline void __kmp_join_ompt(int gtid, kmp_info_t *thread,
2332                                    kmp_team_t *team, ompt_data_t *parallel_data,
2333                                    fork_context_e fork_context, void *codeptr) {
2334   ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2335   if (ompt_enabled.ompt_callback_parallel_end) {
2336     ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
2337         parallel_data, &(task_info->task_data), OMPT_INVOKER(fork_context),
2338         codeptr);
2339   }
2340 
2341   task_info->frame.enter_frame = ompt_data_none;
2342   __kmp_join_restore_state(thread, team);
2343 }
2344 #endif
2345 
2346 void __kmp_join_call(ident_t *loc, int gtid
2347 #if OMPT_SUPPORT
2348                      ,
2349                      enum fork_context_e fork_context
2350 #endif
2351 #if OMP_40_ENABLED
2352                      ,
2353                      int exit_teams
2354 #endif /* OMP_40_ENABLED */
2355                      ) {
2356   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call);
2357   kmp_team_t *team;
2358   kmp_team_t *parent_team;
2359   kmp_info_t *master_th;
2360   kmp_root_t *root;
2361   int master_active;
2362 
2363   KA_TRACE(20, ("__kmp_join_call: enter T#%d\n", gtid));
2364 
2365   /* setup current data */
2366   master_th = __kmp_threads[gtid];
2367   root = master_th->th.th_root;
2368   team = master_th->th.th_team;
2369   parent_team = team->t.t_parent;
2370 
2371   master_th->th.th_ident = loc;
2372 
2373 #if OMPT_SUPPORT
2374   if (ompt_enabled.enabled) {
2375     master_th->th.ompt_thread_info.state = ompt_state_overhead;
2376   }
2377 #endif
2378 
2379 #if KMP_DEBUG
2380   if (__kmp_tasking_mode != tskm_immediate_exec && !exit_teams) {
2381     KA_TRACE(20, ("__kmp_join_call: T#%d, old team = %p old task_team = %p, "
2382                   "th_task_team = %p\n",
2383                   __kmp_gtid_from_thread(master_th), team,
2384                   team->t.t_task_team[master_th->th.th_task_state],
2385                   master_th->th.th_task_team));
2386     KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2387                      team->t.t_task_team[master_th->th.th_task_state]);
2388   }
2389 #endif
2390 
2391   if (team->t.t_serialized) {
2392 #if OMP_40_ENABLED
2393     if (master_th->th.th_teams_microtask) {
2394       // We are in teams construct
2395       int level = team->t.t_level;
2396       int tlevel = master_th->th.th_teams_level;
2397       if (level == tlevel) {
2398         // AC: we haven't incremented it earlier at start of teams construct,
2399         //     so do it here - at the end of teams construct
2400         team->t.t_level++;
2401       } else if (level == tlevel + 1) {
2402         // AC: we are exiting parallel inside teams, need to increment
2403         // serialization in order to restore it in the next call to
2404         // __kmpc_end_serialized_parallel
2405         team->t.t_serialized++;
2406       }
2407     }
2408 #endif /* OMP_40_ENABLED */
2409     __kmpc_end_serialized_parallel(loc, gtid);
2410 
2411 #if OMPT_SUPPORT
2412     if (ompt_enabled.enabled) {
2413       __kmp_join_restore_state(master_th, parent_team);
2414     }
2415 #endif
2416 
2417     return;
2418   }
2419 
2420   master_active = team->t.t_master_active;
2421 
2422 #if OMP_40_ENABLED
2423   if (!exit_teams)
2424 #endif /* OMP_40_ENABLED */
2425   {
2426     // AC: No barrier for internal teams at exit from teams construct.
2427     //     But there is barrier for external team (league).
2428     __kmp_internal_join(loc, gtid, team);
2429   }
2430 #if OMP_40_ENABLED
2431   else {
2432     master_th->th.th_task_state =
2433         0; // AC: no tasking in teams (out of any parallel)
2434   }
2435 #endif /* OMP_40_ENABLED */
2436 
2437   KMP_MB();
2438 
2439 #if OMPT_SUPPORT
2440   ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
2441   void *codeptr = team->t.ompt_team_info.master_return_address;
2442 #endif
2443 
2444 #if USE_ITT_BUILD
2445   if (__itt_stack_caller_create_ptr) {
2446     __kmp_itt_stack_caller_destroy(
2447         (__itt_caller)team->t
2448             .t_stack_id); // destroy the stack stitching id after join barrier
2449   }
2450 
2451   // Mark end of "parallel" region for Intel(R) VTune(TM) analyzer.
2452   if (team->t.t_active_level == 1
2453 #if OMP_40_ENABLED
2454       && !master_th->th.th_teams_microtask /* not in teams construct */
2455 #endif /* OMP_40_ENABLED */
2456       ) {
2457     master_th->th.th_ident = loc;
2458     // only one notification scheme (either "submit" or "forking/joined", not
2459     // both)
2460     if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2461         __kmp_forkjoin_frames_mode == 3)
2462       __kmp_itt_frame_submit(gtid, team->t.t_region_time,
2463                              master_th->th.th_frame_time, 0, loc,
2464                              master_th->th.th_team_nproc, 1);
2465     else if ((__itt_frame_end_v3_ptr || KMP_ITT_DEBUG) &&
2466              !__kmp_forkjoin_frames_mode && __kmp_forkjoin_frames)
2467       __kmp_itt_region_joined(gtid);
2468   } // active_level == 1
2469 #endif /* USE_ITT_BUILD */
2470 
2471 #if OMP_40_ENABLED
2472   if (master_th->th.th_teams_microtask && !exit_teams &&
2473       team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
2474       team->t.t_level == master_th->th.th_teams_level + 1) {
2475     // AC: We need to leave the team structure intact at the end of parallel
2476     // inside the teams construct, so that at the next parallel same (hot) team
2477     // works, only adjust nesting levels
2478 
2479     /* Decrement our nested depth level */
2480     team->t.t_level--;
2481     team->t.t_active_level--;
2482     KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2483 
2484     // Restore number of threads in the team if needed. This code relies on
2485     // the proper adjustment of th_teams_size.nth after the fork in
2486     // __kmp_teams_master on each teams master in the case that
2487     // __kmp_reserve_threads reduced it.
2488     if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) {
2489       int old_num = master_th->th.th_team_nproc;
2490       int new_num = master_th->th.th_teams_size.nth;
2491       kmp_info_t **other_threads = team->t.t_threads;
2492       team->t.t_nproc = new_num;
2493       for (int i = 0; i < old_num; ++i) {
2494         other_threads[i]->th.th_team_nproc = new_num;
2495       }
2496       // Adjust states of non-used threads of the team
2497       for (int i = old_num; i < new_num; ++i) {
2498         // Re-initialize thread's barrier data.
2499         KMP_DEBUG_ASSERT(other_threads[i]);
2500         kmp_balign_t *balign = other_threads[i]->th.th_bar;
2501         for (int b = 0; b < bs_last_barrier; ++b) {
2502           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
2503           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
2504 #if USE_DEBUGGER
2505           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
2506 #endif
2507         }
2508         if (__kmp_tasking_mode != tskm_immediate_exec) {
2509           // Synchronize thread's task state
2510           other_threads[i]->th.th_task_state = master_th->th.th_task_state;
2511         }
2512       }
2513     }
2514 
2515 #if OMPT_SUPPORT
2516     if (ompt_enabled.enabled) {
2517       __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, fork_context,
2518                       codeptr);
2519     }
2520 #endif
2521 
2522     return;
2523   }
2524 #endif /* OMP_40_ENABLED */
2525 
2526   /* do cleanup and restore the parent team */
2527   master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
2528   master_th->th.th_local.this_construct = team->t.t_master_this_cons;
2529 
2530   master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2531 
2532   /* jc: The following lock has instructions with REL and ACQ semantics,
2533      separating the parallel user code called in this parallel region
2534      from the serial user code called after this function returns. */
2535   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2536 
2537 #if OMP_40_ENABLED
2538   if (!master_th->th.th_teams_microtask ||
2539       team->t.t_level > master_th->th.th_teams_level)
2540 #endif /* OMP_40_ENABLED */
2541   {
2542     /* Decrement our nested depth level */
2543     KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2544   }
2545   KMP_DEBUG_ASSERT(root->r.r_in_parallel >= 0);
2546 
2547 #if OMPT_SUPPORT
2548   if (ompt_enabled.enabled) {
2549     ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2550     if (ompt_enabled.ompt_callback_implicit_task) {
2551       int ompt_team_size = team->t.t_nproc;
2552       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
2553           ompt_scope_end, NULL, &(task_info->task_data), ompt_team_size,
2554           OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit); // TODO: Can this be ompt_task_initial?
2555     }
2556 
2557     task_info->frame.exit_frame = ompt_data_none;
2558     task_info->task_data = ompt_data_none;
2559   }
2560 #endif
2561 
2562   KF_TRACE(10, ("__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2563                 master_th, team));
2564   __kmp_pop_current_task_from_thread(master_th);
2565 
2566 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
2567   // Restore master thread's partition.
2568   master_th->th.th_first_place = team->t.t_first_place;
2569   master_th->th.th_last_place = team->t.t_last_place;
2570 #endif /* OMP_40_ENABLED */
2571 #if OMP_50_ENABLED
2572   master_th->th.th_def_allocator = team->t.t_def_allocator;
2573 #endif
2574 
2575   updateHWFPControl(team);
2576 
2577   if (root->r.r_active != master_active)
2578     root->r.r_active = master_active;
2579 
2580   __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2581                             master_th)); // this will free worker threads
2582 
2583   /* this race was fun to find. make sure the following is in the critical
2584      region otherwise assertions may fail occasionally since the old team may be
2585      reallocated and the hierarchy appears inconsistent. it is actually safe to
2586      run and won't cause any bugs, but will cause those assertion failures. it's
2587      only one deref&assign so might as well put this in the critical region */
2588   master_th->th.th_team = parent_team;
2589   master_th->th.th_team_nproc = parent_team->t.t_nproc;
2590   master_th->th.th_team_master = parent_team->t.t_threads[0];
2591   master_th->th.th_team_serialized = parent_team->t.t_serialized;
2592 
2593   /* restore serialized team, if need be */
2594   if (parent_team->t.t_serialized &&
2595       parent_team != master_th->th.th_serial_team &&
2596       parent_team != root->r.r_root_team) {
2597     __kmp_free_team(root,
2598                     master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
2599     master_th->th.th_serial_team = parent_team;
2600   }
2601 
2602   if (__kmp_tasking_mode != tskm_immediate_exec) {
2603     if (master_th->th.th_task_state_top >
2604         0) { // Restore task state from memo stack
2605       KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2606       // Remember master's state if we re-use this nested hot team
2607       master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] =
2608           master_th->th.th_task_state;
2609       --master_th->th.th_task_state_top; // pop
2610       // Now restore state at this level
2611       master_th->th.th_task_state =
2612           master_th->th
2613               .th_task_state_memo_stack[master_th->th.th_task_state_top];
2614     }
2615     // Copy the task team from the parent team to the master thread
2616     master_th->th.th_task_team =
2617         parent_team->t.t_task_team[master_th->th.th_task_state];
2618     KA_TRACE(20,
2619              ("__kmp_join_call: Master T#%d restoring task_team %p / team %p\n",
2620               __kmp_gtid_from_thread(master_th), master_th->th.th_task_team,
2621               parent_team));
2622   }
2623 
2624   // TODO: GEH - cannot do this assertion because root thread not set up as
2625   // executing
2626   // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 0 );
2627   master_th->th.th_current_task->td_flags.executing = 1;
2628 
2629   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2630 
2631 #if OMPT_SUPPORT
2632   if (ompt_enabled.enabled) {
2633     __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, fork_context,
2634                     codeptr);
2635   }
2636 #endif
2637 
2638   KMP_MB();
2639   KA_TRACE(20, ("__kmp_join_call: exit T#%d\n", gtid));
2640 }
2641 
2642 /* Check whether we should push an internal control record onto the
2643    serial team stack.  If so, do it.  */
2644 void __kmp_save_internal_controls(kmp_info_t *thread) {
2645 
2646   if (thread->th.th_team != thread->th.th_serial_team) {
2647     return;
2648   }
2649   if (thread->th.th_team->t.t_serialized > 1) {
2650     int push = 0;
2651 
2652     if (thread->th.th_team->t.t_control_stack_top == NULL) {
2653       push = 1;
2654     } else {
2655       if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
2656           thread->th.th_team->t.t_serialized) {
2657         push = 1;
2658       }
2659     }
2660     if (push) { /* push a record on the serial team's stack */
2661       kmp_internal_control_t *control =
2662           (kmp_internal_control_t *)__kmp_allocate(
2663               sizeof(kmp_internal_control_t));
2664 
2665       copy_icvs(control, &thread->th.th_current_task->td_icvs);
2666 
2667       control->serial_nesting_level = thread->th.th_team->t.t_serialized;
2668 
2669       control->next = thread->th.th_team->t.t_control_stack_top;
2670       thread->th.th_team->t.t_control_stack_top = control;
2671     }
2672   }
2673 }
2674 
2675 /* Changes set_nproc */
2676 void __kmp_set_num_threads(int new_nth, int gtid) {
2677   kmp_info_t *thread;
2678   kmp_root_t *root;
2679 
2680   KF_TRACE(10, ("__kmp_set_num_threads: new __kmp_nth = %d\n", new_nth));
2681   KMP_DEBUG_ASSERT(__kmp_init_serial);
2682 
2683   if (new_nth < 1)
2684     new_nth = 1;
2685   else if (new_nth > __kmp_max_nth)
2686     new_nth = __kmp_max_nth;
2687 
2688   KMP_COUNT_VALUE(OMP_set_numthreads, new_nth);
2689   thread = __kmp_threads[gtid];
2690   if (thread->th.th_current_task->td_icvs.nproc == new_nth)
2691     return; // nothing to do
2692 
2693   __kmp_save_internal_controls(thread);
2694 
2695   set__nproc(thread, new_nth);
2696 
2697   // If this omp_set_num_threads() call will cause the hot team size to be
2698   // reduced (in the absence of a num_threads clause), then reduce it now,
2699   // rather than waiting for the next parallel region.
2700   root = thread->th.th_root;
2701   if (__kmp_init_parallel && (!root->r.r_active) &&
2702       (root->r.r_hot_team->t.t_nproc > new_nth)
2703 #if KMP_NESTED_HOT_TEAMS
2704       && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode
2705 #endif
2706       ) {
2707     kmp_team_t *hot_team = root->r.r_hot_team;
2708     int f;
2709 
2710     __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2711 
2712     // Release the extra threads we don't need any more.
2713     for (f = new_nth; f < hot_team->t.t_nproc; f++) {
2714       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2715       if (__kmp_tasking_mode != tskm_immediate_exec) {
2716         // When decreasing team size, threads no longer in the team should unref
2717         // task team.
2718         hot_team->t.t_threads[f]->th.th_task_team = NULL;
2719       }
2720       __kmp_free_thread(hot_team->t.t_threads[f]);
2721       hot_team->t.t_threads[f] = NULL;
2722     }
2723     hot_team->t.t_nproc = new_nth;
2724 #if KMP_NESTED_HOT_TEAMS
2725     if (thread->th.th_hot_teams) {
2726       KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team);
2727       thread->th.th_hot_teams[0].hot_team_nth = new_nth;
2728     }
2729 #endif
2730 
2731     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2732 
2733     // Update the t_nproc field in the threads that are still active.
2734     for (f = 0; f < new_nth; f++) {
2735       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2736       hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
2737     }
2738     // Special flag in case omp_set_num_threads() call
2739     hot_team->t.t_size_changed = -1;
2740   }
2741 }
2742 
2743 /* Changes max_active_levels */
2744 void __kmp_set_max_active_levels(int gtid, int max_active_levels) {
2745   kmp_info_t *thread;
2746 
2747   KF_TRACE(10, ("__kmp_set_max_active_levels: new max_active_levels for thread "
2748                 "%d = (%d)\n",
2749                 gtid, max_active_levels));
2750   KMP_DEBUG_ASSERT(__kmp_init_serial);
2751 
2752   // validate max_active_levels
2753   if (max_active_levels < 0) {
2754     KMP_WARNING(ActiveLevelsNegative, max_active_levels);
2755     // We ignore this call if the user has specified a negative value.
2756     // The current setting won't be changed. The last valid setting will be
2757     // used. A warning will be issued (if warnings are allowed as controlled by
2758     // the KMP_WARNINGS env var).
2759     KF_TRACE(10, ("__kmp_set_max_active_levels: the call is ignored: new "
2760                   "max_active_levels for thread %d = (%d)\n",
2761                   gtid, max_active_levels));
2762     return;
2763   }
2764   if (max_active_levels <= KMP_MAX_ACTIVE_LEVELS_LIMIT) {
2765     // it's OK, the max_active_levels is within the valid range: [ 0;
2766     // KMP_MAX_ACTIVE_LEVELS_LIMIT ]
2767     // We allow a zero value. (implementation defined behavior)
2768   } else {
2769     KMP_WARNING(ActiveLevelsExceedLimit, max_active_levels,
2770                 KMP_MAX_ACTIVE_LEVELS_LIMIT);
2771     max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT;
2772     // Current upper limit is MAX_INT. (implementation defined behavior)
2773     // If the input exceeds the upper limit, we correct the input to be the
2774     // upper limit. (implementation defined behavior)
2775     // Actually, the flow should never get here until we use MAX_INT limit.
2776   }
2777   KF_TRACE(10, ("__kmp_set_max_active_levels: after validation: new "
2778                 "max_active_levels for thread %d = (%d)\n",
2779                 gtid, max_active_levels));
2780 
2781   thread = __kmp_threads[gtid];
2782 
2783   __kmp_save_internal_controls(thread);
2784 
2785   set__max_active_levels(thread, max_active_levels);
2786 }
2787 
2788 /* Gets max_active_levels */
2789 int __kmp_get_max_active_levels(int gtid) {
2790   kmp_info_t *thread;
2791 
2792   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d\n", gtid));
2793   KMP_DEBUG_ASSERT(__kmp_init_serial);
2794 
2795   thread = __kmp_threads[gtid];
2796   KMP_DEBUG_ASSERT(thread->th.th_current_task);
2797   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d, curtask=%p, "
2798                 "curtask_maxaclevel=%d\n",
2799                 gtid, thread->th.th_current_task,
2800                 thread->th.th_current_task->td_icvs.max_active_levels));
2801   return thread->th.th_current_task->td_icvs.max_active_levels;
2802 }
2803 
2804 KMP_BUILD_ASSERT(sizeof(kmp_sched_t) == sizeof(int));
2805 KMP_BUILD_ASSERT(sizeof(enum sched_type) == sizeof(int));
2806 
2807 /* Changes def_sched_var ICV values (run-time schedule kind and chunk) */
2808 void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) {
2809   kmp_info_t *thread;
2810   kmp_sched_t orig_kind;
2811   //    kmp_team_t *team;
2812 
2813   KF_TRACE(10, ("__kmp_set_schedule: new schedule for thread %d = (%d, %d)\n",
2814                 gtid, (int)kind, chunk));
2815   KMP_DEBUG_ASSERT(__kmp_init_serial);
2816 
2817   // Check if the kind parameter is valid, correct if needed.
2818   // Valid parameters should fit in one of two intervals - standard or extended:
2819   //       <lower>, <valid>, <upper_std>, <lower_ext>, <valid>, <upper>
2820   // 2008-01-25: 0,  1 - 4,       5,         100,     101 - 102, 103
2821   orig_kind = kind;
2822   kind = __kmp_sched_without_mods(kind);
2823 
2824   if (kind <= kmp_sched_lower || kind >= kmp_sched_upper ||
2825       (kind <= kmp_sched_lower_ext && kind >= kmp_sched_upper_std)) {
2826     // TODO: Hint needs attention in case we change the default schedule.
2827     __kmp_msg(kmp_ms_warning, KMP_MSG(ScheduleKindOutOfRange, kind),
2828               KMP_HNT(DefaultScheduleKindUsed, "static, no chunk"),
2829               __kmp_msg_null);
2830     kind = kmp_sched_default;
2831     chunk = 0; // ignore chunk value in case of bad kind
2832   }
2833 
2834   thread = __kmp_threads[gtid];
2835 
2836   __kmp_save_internal_controls(thread);
2837 
2838   if (kind < kmp_sched_upper_std) {
2839     if (kind == kmp_sched_static && chunk < KMP_DEFAULT_CHUNK) {
2840       // differ static chunked vs. unchunked:  chunk should be invalid to
2841       // indicate unchunked schedule (which is the default)
2842       thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static;
2843     } else {
2844       thread->th.th_current_task->td_icvs.sched.r_sched_type =
2845           __kmp_sch_map[kind - kmp_sched_lower - 1];
2846     }
2847   } else {
2848     //    __kmp_sch_map[ kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2849     //    kmp_sched_lower - 2 ];
2850     thread->th.th_current_task->td_icvs.sched.r_sched_type =
2851         __kmp_sch_map[kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2852                       kmp_sched_lower - 2];
2853   }
2854   __kmp_sched_apply_mods_intkind(
2855       orig_kind, &(thread->th.th_current_task->td_icvs.sched.r_sched_type));
2856   if (kind == kmp_sched_auto || chunk < 1) {
2857     // ignore parameter chunk for schedule auto
2858     thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK;
2859   } else {
2860     thread->th.th_current_task->td_icvs.sched.chunk = chunk;
2861   }
2862 }
2863 
2864 /* Gets def_sched_var ICV values */
2865 void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) {
2866   kmp_info_t *thread;
2867   enum sched_type th_type;
2868 
2869   KF_TRACE(10, ("__kmp_get_schedule: thread %d\n", gtid));
2870   KMP_DEBUG_ASSERT(__kmp_init_serial);
2871 
2872   thread = __kmp_threads[gtid];
2873 
2874   th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type;
2875   switch (SCHEDULE_WITHOUT_MODIFIERS(th_type)) {
2876   case kmp_sch_static:
2877   case kmp_sch_static_greedy:
2878   case kmp_sch_static_balanced:
2879     *kind = kmp_sched_static;
2880     __kmp_sched_apply_mods_stdkind(kind, th_type);
2881     *chunk = 0; // chunk was not set, try to show this fact via zero value
2882     return;
2883   case kmp_sch_static_chunked:
2884     *kind = kmp_sched_static;
2885     break;
2886   case kmp_sch_dynamic_chunked:
2887     *kind = kmp_sched_dynamic;
2888     break;
2889   case kmp_sch_guided_chunked:
2890   case kmp_sch_guided_iterative_chunked:
2891   case kmp_sch_guided_analytical_chunked:
2892     *kind = kmp_sched_guided;
2893     break;
2894   case kmp_sch_auto:
2895     *kind = kmp_sched_auto;
2896     break;
2897   case kmp_sch_trapezoidal:
2898     *kind = kmp_sched_trapezoidal;
2899     break;
2900 #if KMP_STATIC_STEAL_ENABLED
2901   case kmp_sch_static_steal:
2902     *kind = kmp_sched_static_steal;
2903     break;
2904 #endif
2905   default:
2906     KMP_FATAL(UnknownSchedulingType, th_type);
2907   }
2908 
2909   __kmp_sched_apply_mods_stdkind(kind, th_type);
2910   *chunk = thread->th.th_current_task->td_icvs.sched.chunk;
2911 }
2912 
2913 int __kmp_get_ancestor_thread_num(int gtid, int level) {
2914 
2915   int ii, dd;
2916   kmp_team_t *team;
2917   kmp_info_t *thr;
2918 
2919   KF_TRACE(10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level));
2920   KMP_DEBUG_ASSERT(__kmp_init_serial);
2921 
2922   // validate level
2923   if (level == 0)
2924     return 0;
2925   if (level < 0)
2926     return -1;
2927   thr = __kmp_threads[gtid];
2928   team = thr->th.th_team;
2929   ii = team->t.t_level;
2930   if (level > ii)
2931     return -1;
2932 
2933 #if OMP_40_ENABLED
2934   if (thr->th.th_teams_microtask) {
2935     // AC: we are in teams region where multiple nested teams have same level
2936     int tlevel = thr->th.th_teams_level; // the level of the teams construct
2937     if (level <=
2938         tlevel) { // otherwise usual algorithm works (will not touch the teams)
2939       KMP_DEBUG_ASSERT(ii >= tlevel);
2940       // AC: As we need to pass by the teams league, we need to artificially
2941       // increase ii
2942       if (ii == tlevel) {
2943         ii += 2; // three teams have same level
2944       } else {
2945         ii++; // two teams have same level
2946       }
2947     }
2948   }
2949 #endif
2950 
2951   if (ii == level)
2952     return __kmp_tid_from_gtid(gtid);
2953 
2954   dd = team->t.t_serialized;
2955   level++;
2956   while (ii > level) {
2957     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2958     }
2959     if ((team->t.t_serialized) && (!dd)) {
2960       team = team->t.t_parent;
2961       continue;
2962     }
2963     if (ii > level) {
2964       team = team->t.t_parent;
2965       dd = team->t.t_serialized;
2966       ii--;
2967     }
2968   }
2969 
2970   return (dd > 1) ? (0) : (team->t.t_master_tid);
2971 }
2972 
2973 int __kmp_get_team_size(int gtid, int level) {
2974 
2975   int ii, dd;
2976   kmp_team_t *team;
2977   kmp_info_t *thr;
2978 
2979   KF_TRACE(10, ("__kmp_get_team_size: thread %d %d\n", gtid, level));
2980   KMP_DEBUG_ASSERT(__kmp_init_serial);
2981 
2982   // validate level
2983   if (level == 0)
2984     return 1;
2985   if (level < 0)
2986     return -1;
2987   thr = __kmp_threads[gtid];
2988   team = thr->th.th_team;
2989   ii = team->t.t_level;
2990   if (level > ii)
2991     return -1;
2992 
2993 #if OMP_40_ENABLED
2994   if (thr->th.th_teams_microtask) {
2995     // AC: we are in teams region where multiple nested teams have same level
2996     int tlevel = thr->th.th_teams_level; // the level of the teams construct
2997     if (level <=
2998         tlevel) { // otherwise usual algorithm works (will not touch the teams)
2999       KMP_DEBUG_ASSERT(ii >= tlevel);
3000       // AC: As we need to pass by the teams league, we need to artificially
3001       // increase ii
3002       if (ii == tlevel) {
3003         ii += 2; // three teams have same level
3004       } else {
3005         ii++; // two teams have same level
3006       }
3007     }
3008   }
3009 #endif
3010 
3011   while (ii > level) {
3012     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
3013     }
3014     if (team->t.t_serialized && (!dd)) {
3015       team = team->t.t_parent;
3016       continue;
3017     }
3018     if (ii > level) {
3019       team = team->t.t_parent;
3020       ii--;
3021     }
3022   }
3023 
3024   return team->t.t_nproc;
3025 }
3026 
3027 kmp_r_sched_t __kmp_get_schedule_global() {
3028   // This routine created because pairs (__kmp_sched, __kmp_chunk) and
3029   // (__kmp_static, __kmp_guided) may be changed by kmp_set_defaults
3030   // independently. So one can get the updated schedule here.
3031 
3032   kmp_r_sched_t r_sched;
3033 
3034   // create schedule from 4 globals: __kmp_sched, __kmp_chunk, __kmp_static,
3035   // __kmp_guided. __kmp_sched should keep original value, so that user can set
3036   // KMP_SCHEDULE multiple times, and thus have different run-time schedules in
3037   // different roots (even in OMP 2.5)
3038   enum sched_type s = SCHEDULE_WITHOUT_MODIFIERS(__kmp_sched);
3039 #if OMP_45_ENABLED
3040   enum sched_type sched_modifiers = SCHEDULE_GET_MODIFIERS(__kmp_sched);
3041 #endif
3042   if (s == kmp_sch_static) {
3043     // replace STATIC with more detailed schedule (balanced or greedy)
3044     r_sched.r_sched_type = __kmp_static;
3045   } else if (s == kmp_sch_guided_chunked) {
3046     // replace GUIDED with more detailed schedule (iterative or analytical)
3047     r_sched.r_sched_type = __kmp_guided;
3048   } else { // (STATIC_CHUNKED), or (DYNAMIC_CHUNKED), or other
3049     r_sched.r_sched_type = __kmp_sched;
3050   }
3051 #if OMP_45_ENABLED
3052   SCHEDULE_SET_MODIFIERS(r_sched.r_sched_type, sched_modifiers);
3053 #endif
3054 
3055   if (__kmp_chunk < KMP_DEFAULT_CHUNK) {
3056     // __kmp_chunk may be wrong here (if it was not ever set)
3057     r_sched.chunk = KMP_DEFAULT_CHUNK;
3058   } else {
3059     r_sched.chunk = __kmp_chunk;
3060   }
3061 
3062   return r_sched;
3063 }
3064 
3065 /* Allocate (realloc == FALSE) * or reallocate (realloc == TRUE)
3066    at least argc number of *t_argv entries for the requested team. */
3067 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {
3068 
3069   KMP_DEBUG_ASSERT(team);
3070   if (!realloc || argc > team->t.t_max_argc) {
3071 
3072     KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
3073                    "current entries=%d\n",
3074                    team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
3075     /* if previously allocated heap space for args, free them */
3076     if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
3077       __kmp_free((void *)team->t.t_argv);
3078 
3079     if (argc <= KMP_INLINE_ARGV_ENTRIES) {
3080       /* use unused space in the cache line for arguments */
3081       team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
3082       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
3083                      "argv entries\n",
3084                      team->t.t_id, team->t.t_max_argc));
3085       team->t.t_argv = &team->t.t_inline_argv[0];
3086       if (__kmp_storage_map) {
3087         __kmp_print_storage_map_gtid(
3088             -1, &team->t.t_inline_argv[0],
3089             &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
3090             (sizeof(void *) * KMP_INLINE_ARGV_ENTRIES), "team_%d.t_inline_argv",
3091             team->t.t_id);
3092       }
3093     } else {
3094       /* allocate space for arguments in the heap */
3095       team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
3096                                ? KMP_MIN_MALLOC_ARGV_ENTRIES
3097                                : 2 * argc;
3098       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
3099                      "argv entries\n",
3100                      team->t.t_id, team->t.t_max_argc));
3101       team->t.t_argv =
3102           (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
3103       if (__kmp_storage_map) {
3104         __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
3105                                      &team->t.t_argv[team->t.t_max_argc],
3106                                      sizeof(void *) * team->t.t_max_argc,
3107                                      "team_%d.t_argv", team->t.t_id);
3108       }
3109     }
3110   }
3111 }
3112 
3113 static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
3114   int i;
3115   int num_disp_buff = max_nth > 1 ? __kmp_dispatch_num_buffers : 2;
3116   team->t.t_threads =
3117       (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth);
3118   team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
3119       sizeof(dispatch_shared_info_t) * num_disp_buff);
3120   team->t.t_dispatch =
3121       (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth);
3122   team->t.t_implicit_task_taskdata =
3123       (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth);
3124   team->t.t_max_nproc = max_nth;
3125 
3126   /* setup dispatch buffers */
3127   for (i = 0; i < num_disp_buff; ++i) {
3128     team->t.t_disp_buffer[i].buffer_index = i;
3129 #if OMP_45_ENABLED
3130     team->t.t_disp_buffer[i].doacross_buf_idx = i;
3131 #endif
3132   }
3133 }
3134 
3135 static void __kmp_free_team_arrays(kmp_team_t *team) {
3136   /* Note: this does not free the threads in t_threads (__kmp_free_threads) */
3137   int i;
3138   for (i = 0; i < team->t.t_max_nproc; ++i) {
3139     if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
3140       __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
3141       team->t.t_dispatch[i].th_disp_buffer = NULL;
3142     }
3143   }
3144 #if KMP_USE_HIER_SCHED
3145   __kmp_dispatch_free_hierarchies(team);
3146 #endif
3147   __kmp_free(team->t.t_threads);
3148   __kmp_free(team->t.t_disp_buffer);
3149   __kmp_free(team->t.t_dispatch);
3150   __kmp_free(team->t.t_implicit_task_taskdata);
3151   team->t.t_threads = NULL;
3152   team->t.t_disp_buffer = NULL;
3153   team->t.t_dispatch = NULL;
3154   team->t.t_implicit_task_taskdata = 0;
3155 }
3156 
3157 static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
3158   kmp_info_t **oldThreads = team->t.t_threads;
3159 
3160   __kmp_free(team->t.t_disp_buffer);
3161   __kmp_free(team->t.t_dispatch);
3162   __kmp_free(team->t.t_implicit_task_taskdata);
3163   __kmp_allocate_team_arrays(team, max_nth);
3164 
3165   KMP_MEMCPY(team->t.t_threads, oldThreads,
3166              team->t.t_nproc * sizeof(kmp_info_t *));
3167 
3168   __kmp_free(oldThreads);
3169 }
3170 
3171 static kmp_internal_control_t __kmp_get_global_icvs(void) {
3172 
3173   kmp_r_sched_t r_sched =
3174       __kmp_get_schedule_global(); // get current state of scheduling globals
3175 
3176 #if OMP_40_ENABLED
3177   KMP_DEBUG_ASSERT(__kmp_nested_proc_bind.used > 0);
3178 #endif /* OMP_40_ENABLED */
3179 
3180   kmp_internal_control_t g_icvs = {
3181     0, // int serial_nesting_level; //corresponds to value of th_team_serialized
3182     (kmp_int8)__kmp_global.g.g_dynamic, // internal control for dynamic
3183     // adjustment of threads (per thread)
3184     (kmp_int8)__kmp_env_blocktime, // int bt_set; //internal control for
3185     // whether blocktime is explicitly set
3186     __kmp_dflt_blocktime, // int blocktime; //internal control for blocktime
3187 #if KMP_USE_MONITOR
3188     __kmp_bt_intervals, // int bt_intervals; //internal control for blocktime
3189 // intervals
3190 #endif
3191     __kmp_dflt_team_nth, // int nproc; //internal control for # of threads for
3192     // next parallel region (per thread)
3193     // (use a max ub on value if __kmp_parallel_initialize not called yet)
3194     __kmp_cg_max_nth, // int thread_limit;
3195     __kmp_dflt_max_active_levels, // int max_active_levels; //internal control
3196     // for max_active_levels
3197     r_sched, // kmp_r_sched_t sched; //internal control for runtime schedule
3198 // {sched,chunk} pair
3199 #if OMP_40_ENABLED
3200     __kmp_nested_proc_bind.bind_types[0],
3201     __kmp_default_device,
3202 #endif /* OMP_40_ENABLED */
3203     NULL // struct kmp_internal_control *next;
3204   };
3205 
3206   return g_icvs;
3207 }
3208 
3209 static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {
3210 
3211   kmp_internal_control_t gx_icvs;
3212   gx_icvs.serial_nesting_level =
3213       0; // probably =team->t.t_serial like in save_inter_controls
3214   copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
3215   gx_icvs.next = NULL;
3216 
3217   return gx_icvs;
3218 }
3219 
3220 static void __kmp_initialize_root(kmp_root_t *root) {
3221   int f;
3222   kmp_team_t *root_team;
3223   kmp_team_t *hot_team;
3224   int hot_team_max_nth;
3225   kmp_r_sched_t r_sched =
3226       __kmp_get_schedule_global(); // get current state of scheduling globals
3227   kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3228   KMP_DEBUG_ASSERT(root);
3229   KMP_ASSERT(!root->r.r_begin);
3230 
3231   /* setup the root state structure */
3232   __kmp_init_lock(&root->r.r_begin_lock);
3233   root->r.r_begin = FALSE;
3234   root->r.r_active = FALSE;
3235   root->r.r_in_parallel = 0;
3236   root->r.r_blocktime = __kmp_dflt_blocktime;
3237 
3238   /* setup the root team for this task */
3239   /* allocate the root team structure */
3240   KF_TRACE(10, ("__kmp_initialize_root: before root_team\n"));
3241 
3242   root_team =
3243       __kmp_allocate_team(root,
3244                           1, // new_nproc
3245                           1, // max_nproc
3246 #if OMPT_SUPPORT
3247                           ompt_data_none, // root parallel id
3248 #endif
3249 #if OMP_40_ENABLED
3250                           __kmp_nested_proc_bind.bind_types[0],
3251 #endif
3252                           &r_icvs,
3253                           0 // argc
3254                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3255                           );
3256 #if USE_DEBUGGER
3257   // Non-NULL value should be assigned to make the debugger display the root
3258   // team.
3259   TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)(~0));
3260 #endif
3261 
3262   KF_TRACE(10, ("__kmp_initialize_root: after root_team = %p\n", root_team));
3263 
3264   root->r.r_root_team = root_team;
3265   root_team->t.t_control_stack_top = NULL;
3266 
3267   /* initialize root team */
3268   root_team->t.t_threads[0] = NULL;
3269   root_team->t.t_nproc = 1;
3270   root_team->t.t_serialized = 1;
3271   // TODO???: root_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3272   root_team->t.t_sched.sched = r_sched.sched;
3273   KA_TRACE(
3274       20,
3275       ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3276        root_team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
3277 
3278   /* setup the  hot team for this task */
3279   /* allocate the hot team structure */
3280   KF_TRACE(10, ("__kmp_initialize_root: before hot_team\n"));
3281 
3282   hot_team =
3283       __kmp_allocate_team(root,
3284                           1, // new_nproc
3285                           __kmp_dflt_team_nth_ub * 2, // max_nproc
3286 #if OMPT_SUPPORT
3287                           ompt_data_none, // root parallel id
3288 #endif
3289 #if OMP_40_ENABLED
3290                           __kmp_nested_proc_bind.bind_types[0],
3291 #endif
3292                           &r_icvs,
3293                           0 // argc
3294                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3295                           );
3296   KF_TRACE(10, ("__kmp_initialize_root: after hot_team = %p\n", hot_team));
3297 
3298   root->r.r_hot_team = hot_team;
3299   root_team->t.t_control_stack_top = NULL;
3300 
3301   /* first-time initialization */
3302   hot_team->t.t_parent = root_team;
3303 
3304   /* initialize hot team */
3305   hot_team_max_nth = hot_team->t.t_max_nproc;
3306   for (f = 0; f < hot_team_max_nth; ++f) {
3307     hot_team->t.t_threads[f] = NULL;
3308   }
3309   hot_team->t.t_nproc = 1;
3310   // TODO???: hot_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3311   hot_team->t.t_sched.sched = r_sched.sched;
3312   hot_team->t.t_size_changed = 0;
3313 }
3314 
3315 #ifdef KMP_DEBUG
3316 
3317 typedef struct kmp_team_list_item {
3318   kmp_team_p const *entry;
3319   struct kmp_team_list_item *next;
3320 } kmp_team_list_item_t;
3321 typedef kmp_team_list_item_t *kmp_team_list_t;
3322 
3323 static void __kmp_print_structure_team_accum( // Add team to list of teams.
3324     kmp_team_list_t list, // List of teams.
3325     kmp_team_p const *team // Team to add.
3326     ) {
3327 
3328   // List must terminate with item where both entry and next are NULL.
3329   // Team is added to the list only once.
3330   // List is sorted in ascending order by team id.
3331   // Team id is *not* a key.
3332 
3333   kmp_team_list_t l;
3334 
3335   KMP_DEBUG_ASSERT(list != NULL);
3336   if (team == NULL) {
3337     return;
3338   }
3339 
3340   __kmp_print_structure_team_accum(list, team->t.t_parent);
3341   __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3342 
3343   // Search list for the team.
3344   l = list;
3345   while (l->next != NULL && l->entry != team) {
3346     l = l->next;
3347   }
3348   if (l->next != NULL) {
3349     return; // Team has been added before, exit.
3350   }
3351 
3352   // Team is not found. Search list again for insertion point.
3353   l = list;
3354   while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3355     l = l->next;
3356   }
3357 
3358   // Insert team.
3359   {
3360     kmp_team_list_item_t *item = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
3361         sizeof(kmp_team_list_item_t));
3362     *item = *l;
3363     l->entry = team;
3364     l->next = item;
3365   }
3366 }
3367 
3368 static void __kmp_print_structure_team(char const *title, kmp_team_p const *team
3369 
3370                                        ) {
3371   __kmp_printf("%s", title);
3372   if (team != NULL) {
3373     __kmp_printf("%2x %p\n", team->t.t_id, team);
3374   } else {
3375     __kmp_printf(" - (nil)\n");
3376   }
3377 }
3378 
3379 static void __kmp_print_structure_thread(char const *title,
3380                                          kmp_info_p const *thread) {
3381   __kmp_printf("%s", title);
3382   if (thread != NULL) {
3383     __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread);
3384   } else {
3385     __kmp_printf(" - (nil)\n");
3386   }
3387 }
3388 
3389 void __kmp_print_structure(void) {
3390 
3391   kmp_team_list_t list;
3392 
3393   // Initialize list of teams.
3394   list =
3395       (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(sizeof(kmp_team_list_item_t));
3396   list->entry = NULL;
3397   list->next = NULL;
3398 
3399   __kmp_printf("\n------------------------------\nGlobal Thread "
3400                "Table\n------------------------------\n");
3401   {
3402     int gtid;
3403     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3404       __kmp_printf("%2d", gtid);
3405       if (__kmp_threads != NULL) {
3406         __kmp_printf(" %p", __kmp_threads[gtid]);
3407       }
3408       if (__kmp_root != NULL) {
3409         __kmp_printf(" %p", __kmp_root[gtid]);
3410       }
3411       __kmp_printf("\n");
3412     }
3413   }
3414 
3415   // Print out __kmp_threads array.
3416   __kmp_printf("\n------------------------------\nThreads\n--------------------"
3417                "----------\n");
3418   if (__kmp_threads != NULL) {
3419     int gtid;
3420     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3421       kmp_info_t const *thread = __kmp_threads[gtid];
3422       if (thread != NULL) {
3423         __kmp_printf("GTID %2d %p:\n", gtid, thread);
3424         __kmp_printf("    Our Root:        %p\n", thread->th.th_root);
3425         __kmp_print_structure_team("    Our Team:     ", thread->th.th_team);
3426         __kmp_print_structure_team("    Serial Team:  ",
3427                                    thread->th.th_serial_team);
3428         __kmp_printf("    Threads:      %2d\n", thread->th.th_team_nproc);
3429         __kmp_print_structure_thread("    Master:       ",
3430                                      thread->th.th_team_master);
3431         __kmp_printf("    Serialized?:  %2d\n", thread->th.th_team_serialized);
3432         __kmp_printf("    Set NProc:    %2d\n", thread->th.th_set_nproc);
3433 #if OMP_40_ENABLED
3434         __kmp_printf("    Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
3435 #endif
3436         __kmp_print_structure_thread("    Next in pool: ",
3437                                      thread->th.th_next_pool);
3438         __kmp_printf("\n");
3439         __kmp_print_structure_team_accum(list, thread->th.th_team);
3440         __kmp_print_structure_team_accum(list, thread->th.th_serial_team);
3441       }
3442     }
3443   } else {
3444     __kmp_printf("Threads array is not allocated.\n");
3445   }
3446 
3447   // Print out __kmp_root array.
3448   __kmp_printf("\n------------------------------\nUbers\n----------------------"
3449                "--------\n");
3450   if (__kmp_root != NULL) {
3451     int gtid;
3452     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3453       kmp_root_t const *root = __kmp_root[gtid];
3454       if (root != NULL) {
3455         __kmp_printf("GTID %2d %p:\n", gtid, root);
3456         __kmp_print_structure_team("    Root Team:    ", root->r.r_root_team);
3457         __kmp_print_structure_team("    Hot Team:     ", root->r.r_hot_team);
3458         __kmp_print_structure_thread("    Uber Thread:  ",
3459                                      root->r.r_uber_thread);
3460         __kmp_printf("    Active?:      %2d\n", root->r.r_active);
3461         __kmp_printf("    In Parallel:  %2d\n",
3462                      KMP_ATOMIC_LD_RLX(&root->r.r_in_parallel));
3463         __kmp_printf("\n");
3464         __kmp_print_structure_team_accum(list, root->r.r_root_team);
3465         __kmp_print_structure_team_accum(list, root->r.r_hot_team);
3466       }
3467     }
3468   } else {
3469     __kmp_printf("Ubers array is not allocated.\n");
3470   }
3471 
3472   __kmp_printf("\n------------------------------\nTeams\n----------------------"
3473                "--------\n");
3474   while (list->next != NULL) {
3475     kmp_team_p const *team = list->entry;
3476     int i;
3477     __kmp_printf("Team %2x %p:\n", team->t.t_id, team);
3478     __kmp_print_structure_team("    Parent Team:      ", team->t.t_parent);
3479     __kmp_printf("    Master TID:       %2d\n", team->t.t_master_tid);
3480     __kmp_printf("    Max threads:      %2d\n", team->t.t_max_nproc);
3481     __kmp_printf("    Levels of serial: %2d\n", team->t.t_serialized);
3482     __kmp_printf("    Number threads:   %2d\n", team->t.t_nproc);
3483     for (i = 0; i < team->t.t_nproc; ++i) {
3484       __kmp_printf("    Thread %2d:      ", i);
3485       __kmp_print_structure_thread("", team->t.t_threads[i]);
3486     }
3487     __kmp_print_structure_team("    Next in pool:     ", team->t.t_next_pool);
3488     __kmp_printf("\n");
3489     list = list->next;
3490   }
3491 
3492   // Print out __kmp_thread_pool and __kmp_team_pool.
3493   __kmp_printf("\n------------------------------\nPools\n----------------------"
3494                "--------\n");
3495   __kmp_print_structure_thread("Thread pool:          ",
3496                                CCAST(kmp_info_t *, __kmp_thread_pool));
3497   __kmp_print_structure_team("Team pool:            ",
3498                              CCAST(kmp_team_t *, __kmp_team_pool));
3499   __kmp_printf("\n");
3500 
3501   // Free team list.
3502   while (list != NULL) {
3503     kmp_team_list_item_t *item = list;
3504     list = list->next;
3505     KMP_INTERNAL_FREE(item);
3506   }
3507 }
3508 
3509 #endif
3510 
3511 //---------------------------------------------------------------------------
3512 //  Stuff for per-thread fast random number generator
3513 //  Table of primes
3514 static const unsigned __kmp_primes[] = {
3515     0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, 0xba5703f5, 0xb495a877,
3516     0xe1626741, 0x79695e6b, 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
3517     0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, 0xbe4d6fe9, 0x5f15e201,
3518     0x99afc3fd, 0xf3f16801, 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
3519     0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, 0x085a3d61, 0x46eb5ea7,
3520     0x3d9910ed, 0x2e687b5b, 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
3521     0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, 0x54581edb, 0xf2480f45,
3522     0x0bb9288f, 0xef1affc7, 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
3523     0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, 0xfc411073, 0xc3749363,
3524     0xb892d829, 0x3549366b, 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
3525     0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f};
3526 
3527 //---------------------------------------------------------------------------
3528 //  __kmp_get_random: Get a random number using a linear congruential method.
3529 unsigned short __kmp_get_random(kmp_info_t *thread) {
3530   unsigned x = thread->th.th_x;
3531   unsigned short r = x >> 16;
3532 
3533   thread->th.th_x = x * thread->th.th_a + 1;
3534 
3535   KA_TRACE(30, ("__kmp_get_random: THREAD: %d, RETURN: %u\n",
3536                 thread->th.th_info.ds.ds_tid, r));
3537 
3538   return r;
3539 }
3540 //--------------------------------------------------------
3541 // __kmp_init_random: Initialize a random number generator
3542 void __kmp_init_random(kmp_info_t *thread) {
3543   unsigned seed = thread->th.th_info.ds.ds_tid;
3544 
3545   thread->th.th_a =
3546       __kmp_primes[seed % (sizeof(__kmp_primes) / sizeof(__kmp_primes[0]))];
3547   thread->th.th_x = (seed + 1) * thread->th.th_a + 1;
3548   KA_TRACE(30,
3549            ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a));
3550 }
3551 
3552 #if KMP_OS_WINDOWS
3553 /* reclaim array entries for root threads that are already dead, returns number
3554  * reclaimed */
3555 static int __kmp_reclaim_dead_roots(void) {
3556   int i, r = 0;
3557 
3558   for (i = 0; i < __kmp_threads_capacity; ++i) {
3559     if (KMP_UBER_GTID(i) &&
3560         !__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])) &&
3561         !__kmp_root[i]
3562              ->r.r_active) { // AC: reclaim only roots died in non-active state
3563       r += __kmp_unregister_root_other_thread(i);
3564     }
3565   }
3566   return r;
3567 }
3568 #endif
3569 
3570 /* This function attempts to create free entries in __kmp_threads and
3571    __kmp_root, and returns the number of free entries generated.
3572 
3573    For Windows* OS static library, the first mechanism used is to reclaim array
3574    entries for root threads that are already dead.
3575 
3576    On all platforms, expansion is attempted on the arrays __kmp_threads_ and
3577    __kmp_root, with appropriate update to __kmp_threads_capacity. Array
3578    capacity is increased by doubling with clipping to __kmp_tp_capacity, if
3579    threadprivate cache array has been created. Synchronization with
3580    __kmpc_threadprivate_cached is done using __kmp_tp_cached_lock.
3581 
3582    After any dead root reclamation, if the clipping value allows array expansion
3583    to result in the generation of a total of nNeed free slots, the function does
3584    that expansion. If not, nothing is done beyond the possible initial root
3585    thread reclamation.
3586 
3587    If any argument is negative, the behavior is undefined. */
3588 static int __kmp_expand_threads(int nNeed) {
3589   int added = 0;
3590   int minimumRequiredCapacity;
3591   int newCapacity;
3592   kmp_info_t **newThreads;
3593   kmp_root_t **newRoot;
3594 
3595 // All calls to __kmp_expand_threads should be under __kmp_forkjoin_lock, so
3596 // resizing __kmp_threads does not need additional protection if foreign
3597 // threads are present
3598 
3599 #if KMP_OS_WINDOWS && !KMP_DYNAMIC_LIB
3600   /* only for Windows static library */
3601   /* reclaim array entries for root threads that are already dead */
3602   added = __kmp_reclaim_dead_roots();
3603 
3604   if (nNeed) {
3605     nNeed -= added;
3606     if (nNeed < 0)
3607       nNeed = 0;
3608   }
3609 #endif
3610   if (nNeed <= 0)
3611     return added;
3612 
3613   // Note that __kmp_threads_capacity is not bounded by __kmp_max_nth. If
3614   // __kmp_max_nth is set to some value less than __kmp_sys_max_nth by the
3615   // user via KMP_DEVICE_THREAD_LIMIT, then __kmp_threads_capacity may become
3616   // > __kmp_max_nth in one of two ways:
3617   //
3618   // 1) The initialization thread (gtid = 0) exits.  __kmp_threads[0]
3619   //    may not be resused by another thread, so we may need to increase
3620   //    __kmp_threads_capacity to __kmp_max_nth + 1.
3621   //
3622   // 2) New foreign root(s) are encountered.  We always register new foreign
3623   //    roots. This may cause a smaller # of threads to be allocated at
3624   //    subsequent parallel regions, but the worker threads hang around (and
3625   //    eventually go to sleep) and need slots in the __kmp_threads[] array.
3626   //
3627   // Anyway, that is the reason for moving the check to see if
3628   // __kmp_max_nth was exceeded into __kmp_reserve_threads()
3629   // instead of having it performed here. -BB
3630 
3631   KMP_DEBUG_ASSERT(__kmp_sys_max_nth >= __kmp_threads_capacity);
3632 
3633   /* compute expansion headroom to check if we can expand */
3634   if (__kmp_sys_max_nth - __kmp_threads_capacity < nNeed) {
3635     /* possible expansion too small -- give up */
3636     return added;
3637   }
3638   minimumRequiredCapacity = __kmp_threads_capacity + nNeed;
3639 
3640   newCapacity = __kmp_threads_capacity;
3641   do {
3642     newCapacity = newCapacity <= (__kmp_sys_max_nth >> 1) ? (newCapacity << 1)
3643                                                           : __kmp_sys_max_nth;
3644   } while (newCapacity < minimumRequiredCapacity);
3645   newThreads = (kmp_info_t **)__kmp_allocate(
3646       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * newCapacity + CACHE_LINE);
3647   newRoot =
3648       (kmp_root_t **)((char *)newThreads + sizeof(kmp_info_t *) * newCapacity);
3649   KMP_MEMCPY(newThreads, __kmp_threads,
3650              __kmp_threads_capacity * sizeof(kmp_info_t *));
3651   KMP_MEMCPY(newRoot, __kmp_root,
3652              __kmp_threads_capacity * sizeof(kmp_root_t *));
3653 
3654   kmp_info_t **temp_threads = __kmp_threads;
3655   *(kmp_info_t * *volatile *)&__kmp_threads = newThreads;
3656   *(kmp_root_t * *volatile *)&__kmp_root = newRoot;
3657   __kmp_free(temp_threads);
3658   added += newCapacity - __kmp_threads_capacity;
3659   *(volatile int *)&__kmp_threads_capacity = newCapacity;
3660 
3661   if (newCapacity > __kmp_tp_capacity) {
3662     __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
3663     if (__kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
3664       __kmp_threadprivate_resize_cache(newCapacity);
3665     } else { // increase __kmp_tp_capacity to correspond with kmp_threads size
3666       *(volatile int *)&__kmp_tp_capacity = newCapacity;
3667     }
3668     __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
3669   }
3670 
3671   return added;
3672 }
3673 
3674 /* Register the current thread as a root thread and obtain our gtid. We must
3675    have the __kmp_initz_lock held at this point. Argument TRUE only if are the
3676    thread that calls from __kmp_do_serial_initialize() */
3677 int __kmp_register_root(int initial_thread) {
3678   kmp_info_t *root_thread;
3679   kmp_root_t *root;
3680   int gtid;
3681   int capacity;
3682   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3683   KA_TRACE(20, ("__kmp_register_root: entered\n"));
3684   KMP_MB();
3685 
3686   /* 2007-03-02:
3687      If initial thread did not invoke OpenMP RTL yet, and this thread is not an
3688      initial one, "__kmp_all_nth >= __kmp_threads_capacity" condition does not
3689      work as expected -- it may return false (that means there is at least one
3690      empty slot in __kmp_threads array), but it is possible the only free slot
3691      is #0, which is reserved for initial thread and so cannot be used for this
3692      one. Following code workarounds this bug.
3693 
3694      However, right solution seems to be not reserving slot #0 for initial
3695      thread because:
3696      (1) there is no magic in slot #0,
3697      (2) we cannot detect initial thread reliably (the first thread which does
3698         serial initialization may be not a real initial thread).
3699   */
3700   capacity = __kmp_threads_capacity;
3701   if (!initial_thread && TCR_PTR(__kmp_threads[0]) == NULL) {
3702     --capacity;
3703   }
3704 
3705   /* see if there are too many threads */
3706   if (__kmp_all_nth >= capacity && !__kmp_expand_threads(1)) {
3707     if (__kmp_tp_cached) {
3708       __kmp_fatal(KMP_MSG(CantRegisterNewThread),
3709                   KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
3710                   KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
3711     } else {
3712       __kmp_fatal(KMP_MSG(CantRegisterNewThread), KMP_HNT(SystemLimitOnThreads),
3713                   __kmp_msg_null);
3714     }
3715   }
3716 
3717   /* find an available thread slot */
3718   /* Don't reassign the zero slot since we need that to only be used by initial
3719      thread */
3720   for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
3721        gtid++)
3722     ;
3723   KA_TRACE(1,
3724            ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
3725   KMP_ASSERT(gtid < __kmp_threads_capacity);
3726 
3727   /* update global accounting */
3728   __kmp_all_nth++;
3729   TCW_4(__kmp_nth, __kmp_nth + 1);
3730 
3731   // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for low
3732   // numbers of procs, and method #2 (keyed API call) for higher numbers.
3733   if (__kmp_adjust_gtid_mode) {
3734     if (__kmp_all_nth >= __kmp_tls_gtid_min) {
3735       if (TCR_4(__kmp_gtid_mode) != 2) {
3736         TCW_4(__kmp_gtid_mode, 2);
3737       }
3738     } else {
3739       if (TCR_4(__kmp_gtid_mode) != 1) {
3740         TCW_4(__kmp_gtid_mode, 1);
3741       }
3742     }
3743   }
3744 
3745 #ifdef KMP_ADJUST_BLOCKTIME
3746   /* Adjust blocktime to zero if necessary            */
3747   /* Middle initialization might not have occurred yet */
3748   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
3749     if (__kmp_nth > __kmp_avail_proc) {
3750       __kmp_zero_bt = TRUE;
3751     }
3752   }
3753 #endif /* KMP_ADJUST_BLOCKTIME */
3754 
3755   /* setup this new hierarchy */
3756   if (!(root = __kmp_root[gtid])) {
3757     root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(sizeof(kmp_root_t));
3758     KMP_DEBUG_ASSERT(!root->r.r_root_team);
3759   }
3760 
3761 #if KMP_STATS_ENABLED
3762   // Initialize stats as soon as possible (right after gtid assignment).
3763   __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid);
3764   __kmp_stats_thread_ptr->startLife();
3765   KMP_SET_THREAD_STATE(SERIAL_REGION);
3766   KMP_INIT_PARTITIONED_TIMERS(OMP_serial);
3767 #endif
3768   __kmp_initialize_root(root);
3769 
3770   /* setup new root thread structure */
3771   if (root->r.r_uber_thread) {
3772     root_thread = root->r.r_uber_thread;
3773   } else {
3774     root_thread = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
3775     if (__kmp_storage_map) {
3776       __kmp_print_thread_storage_map(root_thread, gtid);
3777     }
3778     root_thread->th.th_info.ds.ds_gtid = gtid;
3779 #if OMPT_SUPPORT
3780     root_thread->th.ompt_thread_info.thread_data = ompt_data_none;
3781 #endif
3782     root_thread->th.th_root = root;
3783     if (__kmp_env_consistency_check) {
3784       root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid);
3785     }
3786 #if USE_FAST_MEMORY
3787     __kmp_initialize_fast_memory(root_thread);
3788 #endif /* USE_FAST_MEMORY */
3789 
3790 #if KMP_USE_BGET
3791     KMP_DEBUG_ASSERT(root_thread->th.th_local.bget_data == NULL);
3792     __kmp_initialize_bget(root_thread);
3793 #endif
3794     __kmp_init_random(root_thread); // Initialize random number generator
3795   }
3796 
3797   /* setup the serial team held in reserve by the root thread */
3798   if (!root_thread->th.th_serial_team) {
3799     kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3800     KF_TRACE(10, ("__kmp_register_root: before serial_team\n"));
3801     root_thread->th.th_serial_team =
3802         __kmp_allocate_team(root, 1, 1,
3803 #if OMPT_SUPPORT
3804                             ompt_data_none, // root parallel id
3805 #endif
3806 #if OMP_40_ENABLED
3807                             proc_bind_default,
3808 #endif
3809                             &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
3810   }
3811   KMP_ASSERT(root_thread->th.th_serial_team);
3812   KF_TRACE(10, ("__kmp_register_root: after serial_team = %p\n",
3813                 root_thread->th.th_serial_team));
3814 
3815   /* drop root_thread into place */
3816   TCW_SYNC_PTR(__kmp_threads[gtid], root_thread);
3817 
3818   root->r.r_root_team->t.t_threads[0] = root_thread;
3819   root->r.r_hot_team->t.t_threads[0] = root_thread;
3820   root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
3821   // AC: the team created in reserve, not for execution (it is unused for now).
3822   root_thread->th.th_serial_team->t.t_serialized = 0;
3823   root->r.r_uber_thread = root_thread;
3824 
3825   /* initialize the thread, get it ready to go */
3826   __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid);
3827   TCW_4(__kmp_init_gtid, TRUE);
3828 
3829   /* prepare the master thread for get_gtid() */
3830   __kmp_gtid_set_specific(gtid);
3831 
3832 #if USE_ITT_BUILD
3833   __kmp_itt_thread_name(gtid);
3834 #endif /* USE_ITT_BUILD */
3835 
3836 #ifdef KMP_TDATA_GTID
3837   __kmp_gtid = gtid;
3838 #endif
3839   __kmp_create_worker(gtid, root_thread, __kmp_stksize);
3840   KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == gtid);
3841 
3842   KA_TRACE(20, ("__kmp_register_root: T#%d init T#%d(%d:%d) arrived: join=%u, "
3843                 "plain=%u\n",
3844                 gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team),
3845                 root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE,
3846                 KMP_INIT_BARRIER_STATE));
3847   { // Initialize barrier data.
3848     int b;
3849     for (b = 0; b < bs_last_barrier; ++b) {
3850       root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE;
3851 #if USE_DEBUGGER
3852       root_thread->th.th_bar[b].bb.b_worker_arrived = 0;
3853 #endif
3854     }
3855   }
3856   KMP_DEBUG_ASSERT(root->r.r_hot_team->t.t_bar[bs_forkjoin_barrier].b_arrived ==
3857                    KMP_INIT_BARRIER_STATE);
3858 
3859 #if KMP_AFFINITY_SUPPORTED
3860 #if OMP_40_ENABLED
3861   root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
3862   root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
3863   root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
3864   root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
3865 #endif
3866   if (TCR_4(__kmp_init_middle)) {
3867     __kmp_affinity_set_init_mask(gtid, TRUE);
3868   }
3869 #endif /* KMP_AFFINITY_SUPPORTED */
3870 #if OMP_50_ENABLED
3871   root_thread->th.th_def_allocator = __kmp_def_allocator;
3872   root_thread->th.th_prev_level = 0;
3873   root_thread->th.th_prev_num_threads = 1;
3874 #endif
3875 
3876   kmp_cg_root_t *tmp = (kmp_cg_root_t *)__kmp_allocate(sizeof(kmp_cg_root_t));
3877   tmp->cg_root = root_thread;
3878   tmp->cg_thread_limit = __kmp_cg_max_nth;
3879   tmp->cg_nthreads = 1;
3880   KA_TRACE(100, ("__kmp_register_root: Thread %p created node %p with"
3881                  " cg_nthreads init to 1\n",
3882                  root_thread, tmp));
3883   tmp->up = NULL;
3884   root_thread->th.th_cg_roots = tmp;
3885 
3886   __kmp_root_counter++;
3887 
3888 #if OMPT_SUPPORT
3889   if (!initial_thread && ompt_enabled.enabled) {
3890 
3891     kmp_info_t *root_thread = ompt_get_thread();
3892 
3893     ompt_set_thread_state(root_thread, ompt_state_overhead);
3894 
3895     if (ompt_enabled.ompt_callback_thread_begin) {
3896       ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
3897           ompt_thread_initial, __ompt_get_thread_data_internal());
3898     }
3899     ompt_data_t *task_data;
3900     ompt_data_t *parallel_data;
3901     __ompt_get_task_info_internal(0, NULL, &task_data, NULL, &parallel_data, NULL);
3902     if (ompt_enabled.ompt_callback_implicit_task) {
3903       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
3904           ompt_scope_begin, parallel_data, task_data, 1, 1, ompt_task_initial);
3905     }
3906 
3907     ompt_set_thread_state(root_thread, ompt_state_work_serial);
3908   }
3909 #endif
3910 
3911   KMP_MB();
3912   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3913 
3914   return gtid;
3915 }
3916 
3917 #if KMP_NESTED_HOT_TEAMS
3918 static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
3919                                 const int max_level) {
3920   int i, n, nth;
3921   kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
3922   if (!hot_teams || !hot_teams[level].hot_team) {
3923     return 0;
3924   }
3925   KMP_DEBUG_ASSERT(level < max_level);
3926   kmp_team_t *team = hot_teams[level].hot_team;
3927   nth = hot_teams[level].hot_team_nth;
3928   n = nth - 1; // master is not freed
3929   if (level < max_level - 1) {
3930     for (i = 0; i < nth; ++i) {
3931       kmp_info_t *th = team->t.t_threads[i];
3932       n += __kmp_free_hot_teams(root, th, level + 1, max_level);
3933       if (i > 0 && th->th.th_hot_teams) {
3934         __kmp_free(th->th.th_hot_teams);
3935         th->th.th_hot_teams = NULL;
3936       }
3937     }
3938   }
3939   __kmp_free_team(root, team, NULL);
3940   return n;
3941 }
3942 #endif
3943 
3944 // Resets a root thread and clear its root and hot teams.
3945 // Returns the number of __kmp_threads entries directly and indirectly freed.
3946 static int __kmp_reset_root(int gtid, kmp_root_t *root) {
3947   kmp_team_t *root_team = root->r.r_root_team;
3948   kmp_team_t *hot_team = root->r.r_hot_team;
3949   int n = hot_team->t.t_nproc;
3950   int i;
3951 
3952   KMP_DEBUG_ASSERT(!root->r.r_active);
3953 
3954   root->r.r_root_team = NULL;
3955   root->r.r_hot_team = NULL;
3956   // __kmp_free_team() does not free hot teams, so we have to clear r_hot_team
3957   // before call to __kmp_free_team().
3958   __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
3959 #if KMP_NESTED_HOT_TEAMS
3960   if (__kmp_hot_teams_max_level >
3961       0) { // need to free nested hot teams and their threads if any
3962     for (i = 0; i < hot_team->t.t_nproc; ++i) {
3963       kmp_info_t *th = hot_team->t.t_threads[i];
3964       if (__kmp_hot_teams_max_level > 1) {
3965         n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level);
3966       }
3967       if (th->th.th_hot_teams) {
3968         __kmp_free(th->th.th_hot_teams);
3969         th->th.th_hot_teams = NULL;
3970       }
3971     }
3972   }
3973 #endif
3974   __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
3975 
3976   // Before we can reap the thread, we need to make certain that all other
3977   // threads in the teams that had this root as ancestor have stopped trying to
3978   // steal tasks.
3979   if (__kmp_tasking_mode != tskm_immediate_exec) {
3980     __kmp_wait_to_unref_task_teams();
3981   }
3982 
3983 #if KMP_OS_WINDOWS
3984   /* Close Handle of root duplicated in __kmp_create_worker (tr #62919) */
3985   KA_TRACE(
3986       10, ("__kmp_reset_root: free handle, th = %p, handle = %" KMP_UINTPTR_SPEC
3987            "\n",
3988            (LPVOID) & (root->r.r_uber_thread->th),
3989            root->r.r_uber_thread->th.th_info.ds.ds_thread));
3990   __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread);
3991 #endif /* KMP_OS_WINDOWS */
3992 
3993 #if OMPT_SUPPORT
3994   ompt_data_t *task_data;
3995   ompt_data_t *parallel_data;
3996   __ompt_get_task_info_internal(0, NULL, &task_data, NULL, &parallel_data, NULL);
3997   if (ompt_enabled.ompt_callback_implicit_task) {
3998     ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
3999         ompt_scope_end, parallel_data, task_data, 0, 1, ompt_task_initial);
4000   }
4001   if (ompt_enabled.ompt_callback_thread_end) {
4002     ompt_callbacks.ompt_callback(ompt_callback_thread_end)(
4003         &(root->r.r_uber_thread->th.ompt_thread_info.thread_data));
4004   }
4005 #endif
4006 
4007   TCW_4(__kmp_nth,
4008         __kmp_nth - 1); // __kmp_reap_thread will decrement __kmp_all_nth.
4009   i = root->r.r_uber_thread->th.th_cg_roots->cg_nthreads--;
4010   KA_TRACE(100, ("__kmp_reset_root: Thread %p decrement cg_nthreads on node %p"
4011                  " to %d\n",
4012                  root->r.r_uber_thread, root->r.r_uber_thread->th.th_cg_roots,
4013                  root->r.r_uber_thread->th.th_cg_roots->cg_nthreads));
4014   if (i == 1) {
4015     // need to free contention group structure
4016     KMP_DEBUG_ASSERT(root->r.r_uber_thread ==
4017                      root->r.r_uber_thread->th.th_cg_roots->cg_root);
4018     KMP_DEBUG_ASSERT(root->r.r_uber_thread->th.th_cg_roots->up == NULL);
4019     __kmp_free(root->r.r_uber_thread->th.th_cg_roots);
4020     root->r.r_uber_thread->th.th_cg_roots = NULL;
4021   }
4022   __kmp_reap_thread(root->r.r_uber_thread, 1);
4023 
4024   // We canot put root thread to __kmp_thread_pool, so we have to reap it istead
4025   // of freeing.
4026   root->r.r_uber_thread = NULL;
4027   /* mark root as no longer in use */
4028   root->r.r_begin = FALSE;
4029 
4030   return n;
4031 }
4032 
4033 void __kmp_unregister_root_current_thread(int gtid) {
4034   KA_TRACE(1, ("__kmp_unregister_root_current_thread: enter T#%d\n", gtid));
4035   /* this lock should be ok, since unregister_root_current_thread is never
4036      called during an abort, only during a normal close. furthermore, if you
4037      have the forkjoin lock, you should never try to get the initz lock */
4038   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
4039   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
4040     KC_TRACE(10, ("__kmp_unregister_root_current_thread: already finished, "
4041                   "exiting T#%d\n",
4042                   gtid));
4043     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
4044     return;
4045   }
4046   kmp_root_t *root = __kmp_root[gtid];
4047 
4048   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
4049   KMP_ASSERT(KMP_UBER_GTID(gtid));
4050   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
4051   KMP_ASSERT(root->r.r_active == FALSE);
4052 
4053   KMP_MB();
4054 
4055 #if OMP_45_ENABLED
4056   kmp_info_t *thread = __kmp_threads[gtid];
4057   kmp_team_t *team = thread->th.th_team;
4058   kmp_task_team_t *task_team = thread->th.th_task_team;
4059 
4060   // we need to wait for the proxy tasks before finishing the thread
4061   if (task_team != NULL && task_team->tt.tt_found_proxy_tasks) {
4062 #if OMPT_SUPPORT
4063     // the runtime is shutting down so we won't report any events
4064     thread->th.ompt_thread_info.state = ompt_state_undefined;
4065 #endif
4066     __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
4067   }
4068 #endif
4069 
4070   __kmp_reset_root(gtid, root);
4071 
4072   /* free up this thread slot */
4073   __kmp_gtid_set_specific(KMP_GTID_DNE);
4074 #ifdef KMP_TDATA_GTID
4075   __kmp_gtid = KMP_GTID_DNE;
4076 #endif
4077 
4078   KMP_MB();
4079   KC_TRACE(10,
4080            ("__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid));
4081 
4082   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
4083 }
4084 
4085 #if KMP_OS_WINDOWS
4086 /* __kmp_forkjoin_lock must be already held
4087    Unregisters a root thread that is not the current thread.  Returns the number
4088    of __kmp_threads entries freed as a result. */
4089 static int __kmp_unregister_root_other_thread(int gtid) {
4090   kmp_root_t *root = __kmp_root[gtid];
4091   int r;
4092 
4093   KA_TRACE(1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid));
4094   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
4095   KMP_ASSERT(KMP_UBER_GTID(gtid));
4096   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
4097   KMP_ASSERT(root->r.r_active == FALSE);
4098 
4099   r = __kmp_reset_root(gtid, root);
4100   KC_TRACE(10,
4101            ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid));
4102   return r;
4103 }
4104 #endif
4105 
4106 #if KMP_DEBUG
4107 void __kmp_task_info() {
4108 
4109   kmp_int32 gtid = __kmp_entry_gtid();
4110   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
4111   kmp_info_t *this_thr = __kmp_threads[gtid];
4112   kmp_team_t *steam = this_thr->th.th_serial_team;
4113   kmp_team_t *team = this_thr->th.th_team;
4114 
4115   __kmp_printf(
4116       "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p "
4117       "ptask=%p\n",
4118       gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
4119       team->t.t_implicit_task_taskdata[tid].td_parent);
4120 }
4121 #endif // KMP_DEBUG
4122 
4123 /* TODO optimize with one big memclr, take out what isn't needed, split
4124    responsibility to workers as much as possible, and delay initialization of
4125    features as much as possible  */
4126 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
4127                                   int tid, int gtid) {
4128   /* this_thr->th.th_info.ds.ds_gtid is setup in
4129      kmp_allocate_thread/create_worker.
4130      this_thr->th.th_serial_team is setup in __kmp_allocate_thread */
4131   kmp_info_t *master = team->t.t_threads[0];
4132   KMP_DEBUG_ASSERT(this_thr != NULL);
4133   KMP_DEBUG_ASSERT(this_thr->th.th_serial_team);
4134   KMP_DEBUG_ASSERT(team);
4135   KMP_DEBUG_ASSERT(team->t.t_threads);
4136   KMP_DEBUG_ASSERT(team->t.t_dispatch);
4137   KMP_DEBUG_ASSERT(master);
4138   KMP_DEBUG_ASSERT(master->th.th_root);
4139 
4140   KMP_MB();
4141 
4142   TCW_SYNC_PTR(this_thr->th.th_team, team);
4143 
4144   this_thr->th.th_info.ds.ds_tid = tid;
4145   this_thr->th.th_set_nproc = 0;
4146   if (__kmp_tasking_mode != tskm_immediate_exec)
4147     // When tasking is possible, threads are not safe to reap until they are
4148     // done tasking; this will be set when tasking code is exited in wait
4149     this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
4150   else // no tasking --> always safe to reap
4151     this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
4152 #if OMP_40_ENABLED
4153   this_thr->th.th_set_proc_bind = proc_bind_default;
4154 #if KMP_AFFINITY_SUPPORTED
4155   this_thr->th.th_new_place = this_thr->th.th_current_place;
4156 #endif
4157 #endif
4158   this_thr->th.th_root = master->th.th_root;
4159 
4160   /* setup the thread's cache of the team structure */
4161   this_thr->th.th_team_nproc = team->t.t_nproc;
4162   this_thr->th.th_team_master = master;
4163   this_thr->th.th_team_serialized = team->t.t_serialized;
4164   TCW_PTR(this_thr->th.th_sleep_loc, NULL);
4165 
4166   KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);
4167 
4168   KF_TRACE(10, ("__kmp_initialize_info1: T#%d:%d this_thread=%p curtask=%p\n",
4169                 tid, gtid, this_thr, this_thr->th.th_current_task));
4170 
4171   __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr,
4172                            team, tid, TRUE);
4173 
4174   KF_TRACE(10, ("__kmp_initialize_info2: T#%d:%d this_thread=%p curtask=%p\n",
4175                 tid, gtid, this_thr, this_thr->th.th_current_task));
4176   // TODO: Initialize ICVs from parent; GEH - isn't that already done in
4177   // __kmp_initialize_team()?
4178 
4179   /* TODO no worksharing in speculative threads */
4180   this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
4181 
4182   this_thr->th.th_local.this_construct = 0;
4183 
4184   if (!this_thr->th.th_pri_common) {
4185     this_thr->th.th_pri_common =
4186         (struct common_table *)__kmp_allocate(sizeof(struct common_table));
4187     if (__kmp_storage_map) {
4188       __kmp_print_storage_map_gtid(
4189           gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
4190           sizeof(struct common_table), "th_%d.th_pri_common\n", gtid);
4191     }
4192     this_thr->th.th_pri_head = NULL;
4193   }
4194 
4195   if (this_thr != master && // Master's CG root is initialized elsewhere
4196       this_thr->th.th_cg_roots != master->th.th_cg_roots) { // CG root not set
4197     // Make new thread's CG root same as master's
4198     KMP_DEBUG_ASSERT(master->th.th_cg_roots);
4199     kmp_cg_root_t *tmp = this_thr->th.th_cg_roots;
4200     if (tmp) {
4201       // worker changes CG, need to check if old CG should be freed
4202       int i = tmp->cg_nthreads--;
4203       KA_TRACE(100, ("__kmp_initialize_info: Thread %p decrement cg_nthreads"
4204                      " on node %p of thread %p to %d\n",
4205                      this_thr, tmp, tmp->cg_root, tmp->cg_nthreads));
4206       if (i == 1) {
4207         __kmp_free(tmp); // last thread left CG --> free it
4208       }
4209     }
4210     this_thr->th.th_cg_roots = master->th.th_cg_roots;
4211     // Increment new thread's CG root's counter to add the new thread
4212     this_thr->th.th_cg_roots->cg_nthreads++;
4213     KA_TRACE(100, ("__kmp_initialize_info: Thread %p increment cg_nthreads on"
4214                    " node %p of thread %p to %d\n",
4215                    this_thr, this_thr->th.th_cg_roots,
4216                    this_thr->th.th_cg_roots->cg_root,
4217                    this_thr->th.th_cg_roots->cg_nthreads));
4218     this_thr->th.th_current_task->td_icvs.thread_limit =
4219         this_thr->th.th_cg_roots->cg_thread_limit;
4220   }
4221 
4222   /* Initialize dynamic dispatch */
4223   {
4224     volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
4225     // Use team max_nproc since this will never change for the team.
4226     size_t disp_size =
4227         sizeof(dispatch_private_info_t) *
4228         (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
4229     KD_TRACE(10, ("__kmp_initialize_info: T#%d max_nproc: %d\n", gtid,
4230                   team->t.t_max_nproc));
4231     KMP_ASSERT(dispatch);
4232     KMP_DEBUG_ASSERT(team->t.t_dispatch);
4233     KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
4234 
4235     dispatch->th_disp_index = 0;
4236 #if OMP_45_ENABLED
4237     dispatch->th_doacross_buf_idx = 0;
4238 #endif
4239     if (!dispatch->th_disp_buffer) {
4240       dispatch->th_disp_buffer =
4241           (dispatch_private_info_t *)__kmp_allocate(disp_size);
4242 
4243       if (__kmp_storage_map) {
4244         __kmp_print_storage_map_gtid(
4245             gtid, &dispatch->th_disp_buffer[0],
4246             &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
4247                                           ? 1
4248                                           : __kmp_dispatch_num_buffers],
4249             disp_size, "th_%d.th_dispatch.th_disp_buffer "
4250                        "(team_%d.t_dispatch[%d].th_disp_buffer)",
4251             gtid, team->t.t_id, gtid);
4252       }
4253     } else {
4254       memset(&dispatch->th_disp_buffer[0], '\0', disp_size);
4255     }
4256 
4257     dispatch->th_dispatch_pr_current = 0;
4258     dispatch->th_dispatch_sh_current = 0;
4259 
4260     dispatch->th_deo_fcn = 0; /* ORDERED     */
4261     dispatch->th_dxo_fcn = 0; /* END ORDERED */
4262   }
4263 
4264   this_thr->th.th_next_pool = NULL;
4265 
4266   if (!this_thr->th.th_task_state_memo_stack) {
4267     size_t i;
4268     this_thr->th.th_task_state_memo_stack =
4269         (kmp_uint8 *)__kmp_allocate(4 * sizeof(kmp_uint8));
4270     this_thr->th.th_task_state_top = 0;
4271     this_thr->th.th_task_state_stack_sz = 4;
4272     for (i = 0; i < this_thr->th.th_task_state_stack_sz;
4273          ++i) // zero init the stack
4274       this_thr->th.th_task_state_memo_stack[i] = 0;
4275   }
4276 
4277   KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
4278   KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
4279 
4280   KMP_MB();
4281 }
4282 
4283 /* allocate a new thread for the requesting team. this is only called from
4284    within a forkjoin critical section. we will first try to get an available
4285    thread from the thread pool. if none is available, we will fork a new one
4286    assuming we are able to create a new one. this should be assured, as the
4287    caller should check on this first. */
4288 kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
4289                                   int new_tid) {
4290   kmp_team_t *serial_team;
4291   kmp_info_t *new_thr;
4292   int new_gtid;
4293 
4294   KA_TRACE(20, ("__kmp_allocate_thread: T#%d\n", __kmp_get_gtid()));
4295   KMP_DEBUG_ASSERT(root && team);
4296 #if !KMP_NESTED_HOT_TEAMS
4297   KMP_DEBUG_ASSERT(KMP_MASTER_GTID(__kmp_get_gtid()));
4298 #endif
4299   KMP_MB();
4300 
4301   /* first, try to get one from the thread pool */
4302   if (__kmp_thread_pool) {
4303     new_thr = CCAST(kmp_info_t *, __kmp_thread_pool);
4304     __kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool;
4305     if (new_thr == __kmp_thread_pool_insert_pt) {
4306       __kmp_thread_pool_insert_pt = NULL;
4307     }
4308     TCW_4(new_thr->th.th_in_pool, FALSE);
4309     __kmp_suspend_initialize_thread(new_thr);
4310     __kmp_lock_suspend_mx(new_thr);
4311     if (new_thr->th.th_active_in_pool == TRUE) {
4312       KMP_DEBUG_ASSERT(new_thr->th.th_active == TRUE);
4313       KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
4314       new_thr->th.th_active_in_pool = FALSE;
4315     }
4316     __kmp_unlock_suspend_mx(new_thr);
4317 
4318     KA_TRACE(20, ("__kmp_allocate_thread: T#%d using thread T#%d\n",
4319                   __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid));
4320     KMP_ASSERT(!new_thr->th.th_team);
4321     KMP_DEBUG_ASSERT(__kmp_nth < __kmp_threads_capacity);
4322 
4323     /* setup the thread structure */
4324     __kmp_initialize_info(new_thr, team, new_tid,
4325                           new_thr->th.th_info.ds.ds_gtid);
4326     KMP_DEBUG_ASSERT(new_thr->th.th_serial_team);
4327 
4328     TCW_4(__kmp_nth, __kmp_nth + 1);
4329 
4330     new_thr->th.th_task_state = 0;
4331     new_thr->th.th_task_state_top = 0;
4332     new_thr->th.th_task_state_stack_sz = 4;
4333 
4334 #ifdef KMP_ADJUST_BLOCKTIME
4335     /* Adjust blocktime back to zero if necessary */
4336     /* Middle initialization might not have occurred yet */
4337     if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4338       if (__kmp_nth > __kmp_avail_proc) {
4339         __kmp_zero_bt = TRUE;
4340       }
4341     }
4342 #endif /* KMP_ADJUST_BLOCKTIME */
4343 
4344 #if KMP_DEBUG
4345     // If thread entered pool via __kmp_free_thread, wait_flag should !=
4346     // KMP_BARRIER_PARENT_FLAG.
4347     int b;
4348     kmp_balign_t *balign = new_thr->th.th_bar;
4349     for (b = 0; b < bs_last_barrier; ++b)
4350       KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
4351 #endif
4352 
4353     KF_TRACE(10, ("__kmp_allocate_thread: T#%d using thread %p T#%d\n",
4354                   __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid));
4355 
4356     KMP_MB();
4357     return new_thr;
4358   }
4359 
4360   /* no, well fork a new one */
4361   KMP_ASSERT(__kmp_nth == __kmp_all_nth);
4362   KMP_ASSERT(__kmp_all_nth < __kmp_threads_capacity);
4363 
4364 #if KMP_USE_MONITOR
4365   // If this is the first worker thread the RTL is creating, then also
4366   // launch the monitor thread.  We try to do this as early as possible.
4367   if (!TCR_4(__kmp_init_monitor)) {
4368     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
4369     if (!TCR_4(__kmp_init_monitor)) {
4370       KF_TRACE(10, ("before __kmp_create_monitor\n"));
4371       TCW_4(__kmp_init_monitor, 1);
4372       __kmp_create_monitor(&__kmp_monitor);
4373       KF_TRACE(10, ("after __kmp_create_monitor\n"));
4374 #if KMP_OS_WINDOWS
4375       // AC: wait until monitor has started. This is a fix for CQ232808.
4376       // The reason is that if the library is loaded/unloaded in a loop with
4377       // small (parallel) work in between, then there is high probability that
4378       // monitor thread started after the library shutdown. At shutdown it is
4379       // too late to cope with the problem, because when the master is in
4380       // DllMain (process detach) the monitor has no chances to start (it is
4381       // blocked), and master has no means to inform the monitor that the
4382       // library has gone, because all the memory which the monitor can access
4383       // is going to be released/reset.
4384       while (TCR_4(__kmp_init_monitor) < 2) {
4385         KMP_YIELD(TRUE);
4386       }
4387       KF_TRACE(10, ("after monitor thread has started\n"));
4388 #endif
4389     }
4390     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
4391   }
4392 #endif
4393 
4394   KMP_MB();
4395   for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
4396     KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
4397   }
4398 
4399   /* allocate space for it. */
4400   new_thr = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
4401 
4402   TCW_SYNC_PTR(__kmp_threads[new_gtid], new_thr);
4403 
4404   if (__kmp_storage_map) {
4405     __kmp_print_thread_storage_map(new_thr, new_gtid);
4406   }
4407 
4408   // add the reserve serialized team, initialized from the team's master thread
4409   {
4410     kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
4411     KF_TRACE(10, ("__kmp_allocate_thread: before th_serial/serial_team\n"));
4412     new_thr->th.th_serial_team = serial_team =
4413         (kmp_team_t *)__kmp_allocate_team(root, 1, 1,
4414 #if OMPT_SUPPORT
4415                                           ompt_data_none, // root parallel id
4416 #endif
4417 #if OMP_40_ENABLED
4418                                           proc_bind_default,
4419 #endif
4420                                           &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
4421   }
4422   KMP_ASSERT(serial_team);
4423   serial_team->t.t_serialized = 0; // AC: the team created in reserve, not for
4424   // execution (it is unused for now).
4425   serial_team->t.t_threads[0] = new_thr;
4426   KF_TRACE(10,
4427            ("__kmp_allocate_thread: after th_serial/serial_team : new_thr=%p\n",
4428             new_thr));
4429 
4430   /* setup the thread structures */
4431   __kmp_initialize_info(new_thr, team, new_tid, new_gtid);
4432 
4433 #if USE_FAST_MEMORY
4434   __kmp_initialize_fast_memory(new_thr);
4435 #endif /* USE_FAST_MEMORY */
4436 
4437 #if KMP_USE_BGET
4438   KMP_DEBUG_ASSERT(new_thr->th.th_local.bget_data == NULL);
4439   __kmp_initialize_bget(new_thr);
4440 #endif
4441 
4442   __kmp_init_random(new_thr); // Initialize random number generator
4443 
4444   /* Initialize these only once when thread is grabbed for a team allocation */
4445   KA_TRACE(20,
4446            ("__kmp_allocate_thread: T#%d init go fork=%u, plain=%u\n",
4447             __kmp_get_gtid(), KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
4448 
4449   int b;
4450   kmp_balign_t *balign = new_thr->th.th_bar;
4451   for (b = 0; b < bs_last_barrier; ++b) {
4452     balign[b].bb.b_go = KMP_INIT_BARRIER_STATE;
4453     balign[b].bb.team = NULL;
4454     balign[b].bb.wait_flag = KMP_BARRIER_NOT_WAITING;
4455     balign[b].bb.use_oncore_barrier = 0;
4456   }
4457 
4458   new_thr->th.th_spin_here = FALSE;
4459   new_thr->th.th_next_waiting = 0;
4460 #if KMP_OS_UNIX
4461   new_thr->th.th_blocking = false;
4462 #endif
4463 
4464 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
4465   new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
4466   new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
4467   new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
4468   new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
4469 #endif
4470 #if OMP_50_ENABLED
4471   new_thr->th.th_def_allocator = __kmp_def_allocator;
4472   new_thr->th.th_prev_level = 0;
4473   new_thr->th.th_prev_num_threads = 1;
4474 #endif
4475 
4476   TCW_4(new_thr->th.th_in_pool, FALSE);
4477   new_thr->th.th_active_in_pool = FALSE;
4478   TCW_4(new_thr->th.th_active, TRUE);
4479 
4480   /* adjust the global counters */
4481   __kmp_all_nth++;
4482   __kmp_nth++;
4483 
4484   // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for low
4485   // numbers of procs, and method #2 (keyed API call) for higher numbers.
4486   if (__kmp_adjust_gtid_mode) {
4487     if (__kmp_all_nth >= __kmp_tls_gtid_min) {
4488       if (TCR_4(__kmp_gtid_mode) != 2) {
4489         TCW_4(__kmp_gtid_mode, 2);
4490       }
4491     } else {
4492       if (TCR_4(__kmp_gtid_mode) != 1) {
4493         TCW_4(__kmp_gtid_mode, 1);
4494       }
4495     }
4496   }
4497 
4498 #ifdef KMP_ADJUST_BLOCKTIME
4499   /* Adjust blocktime back to zero if necessary       */
4500   /* Middle initialization might not have occurred yet */
4501   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4502     if (__kmp_nth > __kmp_avail_proc) {
4503       __kmp_zero_bt = TRUE;
4504     }
4505   }
4506 #endif /* KMP_ADJUST_BLOCKTIME */
4507 
4508   /* actually fork it and create the new worker thread */
4509   KF_TRACE(
4510       10, ("__kmp_allocate_thread: before __kmp_create_worker: %p\n", new_thr));
4511   __kmp_create_worker(new_gtid, new_thr, __kmp_stksize);
4512   KF_TRACE(10,
4513            ("__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr));
4514 
4515   KA_TRACE(20, ("__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(),
4516                 new_gtid));
4517   KMP_MB();
4518   return new_thr;
4519 }
4520 
4521 /* Reinitialize team for reuse.
4522    The hot team code calls this case at every fork barrier, so EPCC barrier
4523    test are extremely sensitive to changes in it, esp. writes to the team
4524    struct, which cause a cache invalidation in all threads.
4525    IF YOU TOUCH THIS ROUTINE, RUN EPCC C SYNCBENCH ON A BIG-IRON MACHINE!!! */
4526 static void __kmp_reinitialize_team(kmp_team_t *team,
4527                                     kmp_internal_control_t *new_icvs,
4528                                     ident_t *loc) {
4529   KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
4530                 team->t.t_threads[0], team));
4531   KMP_DEBUG_ASSERT(team && new_icvs);
4532   KMP_DEBUG_ASSERT((!TCR_4(__kmp_init_parallel)) || new_icvs->nproc);
4533   KMP_CHECK_UPDATE(team->t.t_ident, loc);
4534 
4535   KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
4536   // Copy ICVs to the master thread's implicit taskdata
4537   __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
4538   copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);
4539 
4540   KF_TRACE(10, ("__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
4541                 team->t.t_threads[0], team));
4542 }
4543 
4544 /* Initialize the team data structure.
4545    This assumes the t_threads and t_max_nproc are already set.
4546    Also, we don't touch the arguments */
4547 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
4548                                   kmp_internal_control_t *new_icvs,
4549                                   ident_t *loc) {
4550   KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));
4551 
4552   /* verify */
4553   KMP_DEBUG_ASSERT(team);
4554   KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
4555   KMP_DEBUG_ASSERT(team->t.t_threads);
4556   KMP_MB();
4557 
4558   team->t.t_master_tid = 0; /* not needed */
4559   /* team->t.t_master_bar;        not needed */
4560   team->t.t_serialized = new_nproc > 1 ? 0 : 1;
4561   team->t.t_nproc = new_nproc;
4562 
4563   /* team->t.t_parent     = NULL; TODO not needed & would mess up hot team */
4564   team->t.t_next_pool = NULL;
4565   /* memset( team->t.t_threads, 0, sizeof(kmp_info_t*)*new_nproc ); would mess
4566    * up hot team */
4567 
4568   TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
4569   team->t.t_invoke = NULL; /* not needed */
4570 
4571   // TODO???: team->t.t_max_active_levels       = new_max_active_levels;
4572   team->t.t_sched.sched = new_icvs->sched.sched;
4573 
4574 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4575   team->t.t_fp_control_saved = FALSE; /* not needed */
4576   team->t.t_x87_fpu_control_word = 0; /* not needed */
4577   team->t.t_mxcsr = 0; /* not needed */
4578 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4579 
4580   team->t.t_construct = 0;
4581 
4582   team->t.t_ordered.dt.t_value = 0;
4583   team->t.t_master_active = FALSE;
4584 
4585 #ifdef KMP_DEBUG
4586   team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */
4587 #endif
4588 #if KMP_OS_WINDOWS
4589   team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */
4590 #endif
4591 
4592   team->t.t_control_stack_top = NULL;
4593 
4594   __kmp_reinitialize_team(team, new_icvs, loc);
4595 
4596   KMP_MB();
4597   KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
4598 }
4599 
4600 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
4601 /* Sets full mask for thread and returns old mask, no changes to structures. */
4602 static void
4603 __kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
4604   if (KMP_AFFINITY_CAPABLE()) {
4605     int status;
4606     if (old_mask != NULL) {
4607       status = __kmp_get_system_affinity(old_mask, TRUE);
4608       int error = errno;
4609       if (status != 0) {
4610         __kmp_fatal(KMP_MSG(ChangeThreadAffMaskError), KMP_ERR(error),
4611                     __kmp_msg_null);
4612       }
4613     }
4614     __kmp_set_system_affinity(__kmp_affin_fullMask, TRUE);
4615   }
4616 }
4617 #endif
4618 
4619 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
4620 
4621 // __kmp_partition_places() is the heart of the OpenMP 4.0 affinity mechanism.
4622 // It calculats the worker + master thread's partition based upon the parent
4623 // thread's partition, and binds each worker to a thread in their partition.
4624 // The master thread's partition should already include its current binding.
4625 static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
4626   // Copy the master thread's place partion to the team struct
4627   kmp_info_t *master_th = team->t.t_threads[0];
4628   KMP_DEBUG_ASSERT(master_th != NULL);
4629   kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
4630   int first_place = master_th->th.th_first_place;
4631   int last_place = master_th->th.th_last_place;
4632   int masters_place = master_th->th.th_current_place;
4633   team->t.t_first_place = first_place;
4634   team->t.t_last_place = last_place;
4635 
4636   KA_TRACE(20, ("__kmp_partition_places: enter: proc_bind = %d T#%d(%d:0) "
4637                 "bound to place %d partition = [%d,%d]\n",
4638                 proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
4639                 team->t.t_id, masters_place, first_place, last_place));
4640 
4641   switch (proc_bind) {
4642 
4643   case proc_bind_default:
4644     // serial teams might have the proc_bind policy set to proc_bind_default. It
4645     // doesn't matter, as we don't rebind master thread for any proc_bind policy
4646     KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
4647     break;
4648 
4649   case proc_bind_master: {
4650     int f;
4651     int n_th = team->t.t_nproc;
4652     for (f = 1; f < n_th; f++) {
4653       kmp_info_t *th = team->t.t_threads[f];
4654       KMP_DEBUG_ASSERT(th != NULL);
4655       th->th.th_first_place = first_place;
4656       th->th.th_last_place = last_place;
4657       th->th.th_new_place = masters_place;
4658 #if OMP_50_ENABLED
4659       if (__kmp_display_affinity && masters_place != th->th.th_current_place &&
4660           team->t.t_display_affinity != 1) {
4661         team->t.t_display_affinity = 1;
4662       }
4663 #endif
4664 
4665       KA_TRACE(100, ("__kmp_partition_places: master: T#%d(%d:%d) place %d "
4666                      "partition = [%d,%d]\n",
4667                      __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4668                      f, masters_place, first_place, last_place));
4669     }
4670   } break;
4671 
4672   case proc_bind_close: {
4673     int f;
4674     int n_th = team->t.t_nproc;
4675     int n_places;
4676     if (first_place <= last_place) {
4677       n_places = last_place - first_place + 1;
4678     } else {
4679       n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4680     }
4681     if (n_th <= n_places) {
4682       int place = masters_place;
4683       for (f = 1; f < n_th; f++) {
4684         kmp_info_t *th = team->t.t_threads[f];
4685         KMP_DEBUG_ASSERT(th != NULL);
4686 
4687         if (place == last_place) {
4688           place = first_place;
4689         } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4690           place = 0;
4691         } else {
4692           place++;
4693         }
4694         th->th.th_first_place = first_place;
4695         th->th.th_last_place = last_place;
4696         th->th.th_new_place = place;
4697 #if OMP_50_ENABLED
4698         if (__kmp_display_affinity && place != th->th.th_current_place &&
4699             team->t.t_display_affinity != 1) {
4700           team->t.t_display_affinity = 1;
4701         }
4702 #endif
4703 
4704         KA_TRACE(100, ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
4705                        "partition = [%d,%d]\n",
4706                        __kmp_gtid_from_thread(team->t.t_threads[f]),
4707                        team->t.t_id, f, place, first_place, last_place));
4708       }
4709     } else {
4710       int S, rem, gap, s_count;
4711       S = n_th / n_places;
4712       s_count = 0;
4713       rem = n_th - (S * n_places);
4714       gap = rem > 0 ? n_places / rem : n_places;
4715       int place = masters_place;
4716       int gap_ct = gap;
4717       for (f = 0; f < n_th; f++) {
4718         kmp_info_t *th = team->t.t_threads[f];
4719         KMP_DEBUG_ASSERT(th != NULL);
4720 
4721         th->th.th_first_place = first_place;
4722         th->th.th_last_place = last_place;
4723         th->th.th_new_place = place;
4724 #if OMP_50_ENABLED
4725         if (__kmp_display_affinity && place != th->th.th_current_place &&
4726             team->t.t_display_affinity != 1) {
4727           team->t.t_display_affinity = 1;
4728         }
4729 #endif
4730         s_count++;
4731 
4732         if ((s_count == S) && rem && (gap_ct == gap)) {
4733           // do nothing, add an extra thread to place on next iteration
4734         } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4735           // we added an extra thread to this place; move to next place
4736           if (place == last_place) {
4737             place = first_place;
4738           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4739             place = 0;
4740           } else {
4741             place++;
4742           }
4743           s_count = 0;
4744           gap_ct = 1;
4745           rem--;
4746         } else if (s_count == S) { // place full; don't add extra
4747           if (place == last_place) {
4748             place = first_place;
4749           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4750             place = 0;
4751           } else {
4752             place++;
4753           }
4754           gap_ct++;
4755           s_count = 0;
4756         }
4757 
4758         KA_TRACE(100,
4759                  ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
4760                   "partition = [%d,%d]\n",
4761                   __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
4762                   th->th.th_new_place, first_place, last_place));
4763       }
4764       KMP_DEBUG_ASSERT(place == masters_place);
4765     }
4766   } break;
4767 
4768   case proc_bind_spread: {
4769     int f;
4770     int n_th = team->t.t_nproc;
4771     int n_places;
4772     int thidx;
4773     if (first_place <= last_place) {
4774       n_places = last_place - first_place + 1;
4775     } else {
4776       n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4777     }
4778     if (n_th <= n_places) {
4779       int place = -1;
4780 
4781       if (n_places != static_cast<int>(__kmp_affinity_num_masks)) {
4782         int S = n_places / n_th;
4783         int s_count, rem, gap, gap_ct;
4784 
4785         place = masters_place;
4786         rem = n_places - n_th * S;
4787         gap = rem ? n_th / rem : 1;
4788         gap_ct = gap;
4789         thidx = n_th;
4790         if (update_master_only == 1)
4791           thidx = 1;
4792         for (f = 0; f < thidx; f++) {
4793           kmp_info_t *th = team->t.t_threads[f];
4794           KMP_DEBUG_ASSERT(th != NULL);
4795 
4796           th->th.th_first_place = place;
4797           th->th.th_new_place = place;
4798 #if OMP_50_ENABLED
4799           if (__kmp_display_affinity && place != th->th.th_current_place &&
4800               team->t.t_display_affinity != 1) {
4801             team->t.t_display_affinity = 1;
4802           }
4803 #endif
4804           s_count = 1;
4805           while (s_count < S) {
4806             if (place == last_place) {
4807               place = first_place;
4808             } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4809               place = 0;
4810             } else {
4811               place++;
4812             }
4813             s_count++;
4814           }
4815           if (rem && (gap_ct == gap)) {
4816             if (place == last_place) {
4817               place = first_place;
4818             } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4819               place = 0;
4820             } else {
4821               place++;
4822             }
4823             rem--;
4824             gap_ct = 0;
4825           }
4826           th->th.th_last_place = place;
4827           gap_ct++;
4828 
4829           if (place == last_place) {
4830             place = first_place;
4831           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4832             place = 0;
4833           } else {
4834             place++;
4835           }
4836 
4837           KA_TRACE(100,
4838                    ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4839                     "partition = [%d,%d], __kmp_affinity_num_masks: %u\n",
4840                     __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4841                     f, th->th.th_new_place, th->th.th_first_place,
4842                     th->th.th_last_place, __kmp_affinity_num_masks));
4843         }
4844       } else {
4845         /* Having uniform space of available computation places I can create
4846            T partitions of round(P/T) size and put threads into the first
4847            place of each partition. */
4848         double current = static_cast<double>(masters_place);
4849         double spacing =
4850             (static_cast<double>(n_places + 1) / static_cast<double>(n_th));
4851         int first, last;
4852         kmp_info_t *th;
4853 
4854         thidx = n_th + 1;
4855         if (update_master_only == 1)
4856           thidx = 1;
4857         for (f = 0; f < thidx; f++) {
4858           first = static_cast<int>(current);
4859           last = static_cast<int>(current + spacing) - 1;
4860           KMP_DEBUG_ASSERT(last >= first);
4861           if (first >= n_places) {
4862             if (masters_place) {
4863               first -= n_places;
4864               last -= n_places;
4865               if (first == (masters_place + 1)) {
4866                 KMP_DEBUG_ASSERT(f == n_th);
4867                 first--;
4868               }
4869               if (last == masters_place) {
4870                 KMP_DEBUG_ASSERT(f == (n_th - 1));
4871                 last--;
4872               }
4873             } else {
4874               KMP_DEBUG_ASSERT(f == n_th);
4875               first = 0;
4876               last = 0;
4877             }
4878           }
4879           if (last >= n_places) {
4880             last = (n_places - 1);
4881           }
4882           place = first;
4883           current += spacing;
4884           if (f < n_th) {
4885             KMP_DEBUG_ASSERT(0 <= first);
4886             KMP_DEBUG_ASSERT(n_places > first);
4887             KMP_DEBUG_ASSERT(0 <= last);
4888             KMP_DEBUG_ASSERT(n_places > last);
4889             KMP_DEBUG_ASSERT(last_place >= first_place);
4890             th = team->t.t_threads[f];
4891             KMP_DEBUG_ASSERT(th);
4892             th->th.th_first_place = first;
4893             th->th.th_new_place = place;
4894             th->th.th_last_place = last;
4895 #if OMP_50_ENABLED
4896             if (__kmp_display_affinity && place != th->th.th_current_place &&
4897                 team->t.t_display_affinity != 1) {
4898               team->t.t_display_affinity = 1;
4899             }
4900 #endif
4901             KA_TRACE(100,
4902                      ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4903                       "partition = [%d,%d], spacing = %.4f\n",
4904                       __kmp_gtid_from_thread(team->t.t_threads[f]),
4905                       team->t.t_id, f, th->th.th_new_place,
4906                       th->th.th_first_place, th->th.th_last_place, spacing));
4907           }
4908         }
4909       }
4910       KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4911     } else {
4912       int S, rem, gap, s_count;
4913       S = n_th / n_places;
4914       s_count = 0;
4915       rem = n_th - (S * n_places);
4916       gap = rem > 0 ? n_places / rem : n_places;
4917       int place = masters_place;
4918       int gap_ct = gap;
4919       thidx = n_th;
4920       if (update_master_only == 1)
4921         thidx = 1;
4922       for (f = 0; f < thidx; f++) {
4923         kmp_info_t *th = team->t.t_threads[f];
4924         KMP_DEBUG_ASSERT(th != NULL);
4925 
4926         th->th.th_first_place = place;
4927         th->th.th_last_place = place;
4928         th->th.th_new_place = place;
4929 #if OMP_50_ENABLED
4930         if (__kmp_display_affinity && place != th->th.th_current_place &&
4931             team->t.t_display_affinity != 1) {
4932           team->t.t_display_affinity = 1;
4933         }
4934 #endif
4935         s_count++;
4936 
4937         if ((s_count == S) && rem && (gap_ct == gap)) {
4938           // do nothing, add an extra thread to place on next iteration
4939         } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4940           // we added an extra thread to this place; move on to next place
4941           if (place == last_place) {
4942             place = first_place;
4943           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4944             place = 0;
4945           } else {
4946             place++;
4947           }
4948           s_count = 0;
4949           gap_ct = 1;
4950           rem--;
4951         } else if (s_count == S) { // place is full; don't add extra thread
4952           if (place == last_place) {
4953             place = first_place;
4954           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4955             place = 0;
4956           } else {
4957             place++;
4958           }
4959           gap_ct++;
4960           s_count = 0;
4961         }
4962 
4963         KA_TRACE(100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4964                        "partition = [%d,%d]\n",
4965                        __kmp_gtid_from_thread(team->t.t_threads[f]),
4966                        team->t.t_id, f, th->th.th_new_place,
4967                        th->th.th_first_place, th->th.th_last_place));
4968       }
4969       KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4970     }
4971   } break;
4972 
4973   default:
4974     break;
4975   }
4976 
4977   KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
4978 }
4979 
4980 #endif /* OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED */
4981 
4982 /* allocate a new team data structure to use.  take one off of the free pool if
4983    available */
4984 kmp_team_t *
4985 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
4986 #if OMPT_SUPPORT
4987                     ompt_data_t ompt_parallel_data,
4988 #endif
4989 #if OMP_40_ENABLED
4990                     kmp_proc_bind_t new_proc_bind,
4991 #endif
4992                     kmp_internal_control_t *new_icvs,
4993                     int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
4994   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
4995   int f;
4996   kmp_team_t *team;
4997   int use_hot_team = !root->r.r_active;
4998   int level = 0;
4999 
5000   KA_TRACE(20, ("__kmp_allocate_team: called\n"));
5001   KMP_DEBUG_ASSERT(new_nproc >= 1 && argc >= 0);
5002   KMP_DEBUG_ASSERT(max_nproc >= new_nproc);
5003   KMP_MB();
5004 
5005 #if KMP_NESTED_HOT_TEAMS
5006   kmp_hot_team_ptr_t *hot_teams;
5007   if (master) {
5008     team = master->th.th_team;
5009     level = team->t.t_active_level;
5010     if (master->th.th_teams_microtask) { // in teams construct?
5011       if (master->th.th_teams_size.nteams > 1 &&
5012           ( // #teams > 1
5013               team->t.t_pkfn ==
5014                   (microtask_t)__kmp_teams_master || // inner fork of the teams
5015               master->th.th_teams_level <
5016                   team->t.t_level)) { // or nested parallel inside the teams
5017         ++level; // not increment if #teams==1, or for outer fork of the teams;
5018         // increment otherwise
5019       }
5020     }
5021     hot_teams = master->th.th_hot_teams;
5022     if (level < __kmp_hot_teams_max_level && hot_teams &&
5023         hot_teams[level]
5024             .hot_team) { // hot team has already been allocated for given level
5025       use_hot_team = 1;
5026     } else {
5027       use_hot_team = 0;
5028     }
5029   }
5030 #endif
5031   // Optimization to use a "hot" team
5032   if (use_hot_team && new_nproc > 1) {
5033     KMP_DEBUG_ASSERT(new_nproc <= max_nproc);
5034 #if KMP_NESTED_HOT_TEAMS
5035     team = hot_teams[level].hot_team;
5036 #else
5037     team = root->r.r_hot_team;
5038 #endif
5039 #if KMP_DEBUG
5040     if (__kmp_tasking_mode != tskm_immediate_exec) {
5041       KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
5042                     "task_team[1] = %p before reinit\n",
5043                     team->t.t_task_team[0], team->t.t_task_team[1]));
5044     }
5045 #endif
5046 
5047     // Has the number of threads changed?
5048     /* Let's assume the most common case is that the number of threads is
5049        unchanged, and put that case first. */
5050     if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
5051       KA_TRACE(20, ("__kmp_allocate_team: reusing hot team\n"));
5052       // This case can mean that omp_set_num_threads() was called and the hot
5053       // team size was already reduced, so we check the special flag
5054       if (team->t.t_size_changed == -1) {
5055         team->t.t_size_changed = 1;
5056       } else {
5057         KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
5058       }
5059 
5060       // TODO???: team->t.t_max_active_levels = new_max_active_levels;
5061       kmp_r_sched_t new_sched = new_icvs->sched;
5062       // set master's schedule as new run-time schedule
5063       KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
5064 
5065       __kmp_reinitialize_team(team, new_icvs,
5066                               root->r.r_uber_thread->th.th_ident);
5067 
5068       KF_TRACE(10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
5069                     team->t.t_threads[0], team));
5070       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5071 
5072 #if OMP_40_ENABLED
5073 #if KMP_AFFINITY_SUPPORTED
5074       if ((team->t.t_size_changed == 0) &&
5075           (team->t.t_proc_bind == new_proc_bind)) {
5076         if (new_proc_bind == proc_bind_spread) {
5077           __kmp_partition_places(
5078               team, 1); // add flag to update only master for spread
5079         }
5080         KA_TRACE(200, ("__kmp_allocate_team: reusing hot team #%d bindings: "
5081                        "proc_bind = %d, partition = [%d,%d]\n",
5082                        team->t.t_id, new_proc_bind, team->t.t_first_place,
5083                        team->t.t_last_place));
5084       } else {
5085         KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5086         __kmp_partition_places(team);
5087       }
5088 #else
5089       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5090 #endif /* KMP_AFFINITY_SUPPORTED */
5091 #endif /* OMP_40_ENABLED */
5092     } else if (team->t.t_nproc > new_nproc) {
5093       KA_TRACE(20,
5094                ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
5095                 new_nproc));
5096 
5097       team->t.t_size_changed = 1;
5098 #if KMP_NESTED_HOT_TEAMS
5099       if (__kmp_hot_teams_mode == 0) {
5100         // AC: saved number of threads should correspond to team's value in this
5101         // mode, can be bigger in mode 1, when hot team has threads in reserve
5102         KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
5103         hot_teams[level].hot_team_nth = new_nproc;
5104 #endif // KMP_NESTED_HOT_TEAMS
5105         /* release the extra threads we don't need any more */
5106         for (f = new_nproc; f < team->t.t_nproc; f++) {
5107           KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5108           if (__kmp_tasking_mode != tskm_immediate_exec) {
5109             // When decreasing team size, threads no longer in the team should
5110             // unref task team.
5111             team->t.t_threads[f]->th.th_task_team = NULL;
5112           }
5113           __kmp_free_thread(team->t.t_threads[f]);
5114           team->t.t_threads[f] = NULL;
5115         }
5116 #if KMP_NESTED_HOT_TEAMS
5117       } // (__kmp_hot_teams_mode == 0)
5118       else {
5119         // When keeping extra threads in team, switch threads to wait on own
5120         // b_go flag
5121         for (f = new_nproc; f < team->t.t_nproc; ++f) {
5122           KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5123           kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
5124           for (int b = 0; b < bs_last_barrier; ++b) {
5125             if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG) {
5126               balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5127             }
5128             KMP_CHECK_UPDATE(balign[b].bb.leaf_kids, 0);
5129           }
5130         }
5131       }
5132 #endif // KMP_NESTED_HOT_TEAMS
5133       team->t.t_nproc = new_nproc;
5134       // TODO???: team->t.t_max_active_levels = new_max_active_levels;
5135       KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
5136       __kmp_reinitialize_team(team, new_icvs,
5137                               root->r.r_uber_thread->th.th_ident);
5138 
5139       // Update remaining threads
5140       for (f = 0; f < new_nproc; ++f) {
5141         team->t.t_threads[f]->th.th_team_nproc = new_nproc;
5142       }
5143 
5144       // restore the current task state of the master thread: should be the
5145       // implicit task
5146       KF_TRACE(10, ("__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
5147                     team->t.t_threads[0], team));
5148 
5149       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5150 
5151 #ifdef KMP_DEBUG
5152       for (f = 0; f < team->t.t_nproc; f++) {
5153         KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5154                          team->t.t_threads[f]->th.th_team_nproc ==
5155                              team->t.t_nproc);
5156       }
5157 #endif
5158 
5159 #if OMP_40_ENABLED
5160       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5161 #if KMP_AFFINITY_SUPPORTED
5162       __kmp_partition_places(team);
5163 #endif
5164 #endif
5165     } else { // team->t.t_nproc < new_nproc
5166 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5167       kmp_affin_mask_t *old_mask;
5168       if (KMP_AFFINITY_CAPABLE()) {
5169         KMP_CPU_ALLOC(old_mask);
5170       }
5171 #endif
5172 
5173       KA_TRACE(20,
5174                ("__kmp_allocate_team: increasing hot team thread count to %d\n",
5175                 new_nproc));
5176 
5177       team->t.t_size_changed = 1;
5178 
5179 #if KMP_NESTED_HOT_TEAMS
5180       int avail_threads = hot_teams[level].hot_team_nth;
5181       if (new_nproc < avail_threads)
5182         avail_threads = new_nproc;
5183       kmp_info_t **other_threads = team->t.t_threads;
5184       for (f = team->t.t_nproc; f < avail_threads; ++f) {
5185         // Adjust barrier data of reserved threads (if any) of the team
5186         // Other data will be set in __kmp_initialize_info() below.
5187         int b;
5188         kmp_balign_t *balign = other_threads[f]->th.th_bar;
5189         for (b = 0; b < bs_last_barrier; ++b) {
5190           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5191           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5192 #if USE_DEBUGGER
5193           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5194 #endif
5195         }
5196       }
5197       if (hot_teams[level].hot_team_nth >= new_nproc) {
5198         // we have all needed threads in reserve, no need to allocate any
5199         // this only possible in mode 1, cannot have reserved threads in mode 0
5200         KMP_DEBUG_ASSERT(__kmp_hot_teams_mode == 1);
5201         team->t.t_nproc = new_nproc; // just get reserved threads involved
5202       } else {
5203         // we may have some threads in reserve, but not enough
5204         team->t.t_nproc =
5205             hot_teams[level]
5206                 .hot_team_nth; // get reserved threads involved if any
5207         hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
5208 #endif // KMP_NESTED_HOT_TEAMS
5209         if (team->t.t_max_nproc < new_nproc) {
5210           /* reallocate larger arrays */
5211           __kmp_reallocate_team_arrays(team, new_nproc);
5212           __kmp_reinitialize_team(team, new_icvs, NULL);
5213         }
5214 
5215 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5216         /* Temporarily set full mask for master thread before creation of
5217            workers. The reason is that workers inherit the affinity from master,
5218            so if a lot of workers are created on the single core quickly, they
5219            don't get a chance to set their own affinity for a long time. */
5220         __kmp_set_thread_affinity_mask_full_tmp(old_mask);
5221 #endif
5222 
5223         /* allocate new threads for the hot team */
5224         for (f = team->t.t_nproc; f < new_nproc; f++) {
5225           kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5226           KMP_DEBUG_ASSERT(new_worker);
5227           team->t.t_threads[f] = new_worker;
5228 
5229           KA_TRACE(20,
5230                    ("__kmp_allocate_team: team %d init T#%d arrived: "
5231                     "join=%llu, plain=%llu\n",
5232                     team->t.t_id, __kmp_gtid_from_tid(f, team), team->t.t_id, f,
5233                     team->t.t_bar[bs_forkjoin_barrier].b_arrived,
5234                     team->t.t_bar[bs_plain_barrier].b_arrived));
5235 
5236           { // Initialize barrier data for new threads.
5237             int b;
5238             kmp_balign_t *balign = new_worker->th.th_bar;
5239             for (b = 0; b < bs_last_barrier; ++b) {
5240               balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5241               KMP_DEBUG_ASSERT(balign[b].bb.wait_flag !=
5242                                KMP_BARRIER_PARENT_FLAG);
5243 #if USE_DEBUGGER
5244               balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5245 #endif
5246             }
5247           }
5248         }
5249 
5250 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5251         if (KMP_AFFINITY_CAPABLE()) {
5252           /* Restore initial master thread's affinity mask */
5253           __kmp_set_system_affinity(old_mask, TRUE);
5254           KMP_CPU_FREE(old_mask);
5255         }
5256 #endif
5257 #if KMP_NESTED_HOT_TEAMS
5258       } // end of check of t_nproc vs. new_nproc vs. hot_team_nth
5259 #endif // KMP_NESTED_HOT_TEAMS
5260       /* make sure everyone is syncronized */
5261       int old_nproc = team->t.t_nproc; // save old value and use to update only
5262       // new threads below
5263       __kmp_initialize_team(team, new_nproc, new_icvs,
5264                             root->r.r_uber_thread->th.th_ident);
5265 
5266       /* reinitialize the threads */
5267       KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
5268       for (f = 0; f < team->t.t_nproc; ++f)
5269         __kmp_initialize_info(team->t.t_threads[f], team, f,
5270                               __kmp_gtid_from_tid(f, team));
5271 
5272       if (level) { // set th_task_state for new threads in nested hot team
5273         // __kmp_initialize_info() no longer zeroes th_task_state, so we should
5274         // only need to set the th_task_state for the new threads. th_task_state
5275         // for master thread will not be accurate until after this in
5276         // __kmp_fork_call(), so we look to the master's memo_stack to get the
5277         // correct value.
5278         for (f = old_nproc; f < team->t.t_nproc; ++f)
5279           team->t.t_threads[f]->th.th_task_state =
5280               team->t.t_threads[0]->th.th_task_state_memo_stack[level];
5281       } else { // set th_task_state for new threads in non-nested hot team
5282         int old_state =
5283             team->t.t_threads[0]->th.th_task_state; // copy master's state
5284         for (f = old_nproc; f < team->t.t_nproc; ++f)
5285           team->t.t_threads[f]->th.th_task_state = old_state;
5286       }
5287 
5288 #ifdef KMP_DEBUG
5289       for (f = 0; f < team->t.t_nproc; ++f) {
5290         KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5291                          team->t.t_threads[f]->th.th_team_nproc ==
5292                              team->t.t_nproc);
5293       }
5294 #endif
5295 
5296 #if OMP_40_ENABLED
5297       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5298 #if KMP_AFFINITY_SUPPORTED
5299       __kmp_partition_places(team);
5300 #endif
5301 #endif
5302     } // Check changes in number of threads
5303 
5304 #if OMP_40_ENABLED
5305     kmp_info_t *master = team->t.t_threads[0];
5306     if (master->th.th_teams_microtask) {
5307       for (f = 1; f < new_nproc; ++f) {
5308         // propagate teams construct specific info to workers
5309         kmp_info_t *thr = team->t.t_threads[f];
5310         thr->th.th_teams_microtask = master->th.th_teams_microtask;
5311         thr->th.th_teams_level = master->th.th_teams_level;
5312         thr->th.th_teams_size = master->th.th_teams_size;
5313       }
5314     }
5315 #endif /* OMP_40_ENABLED */
5316 #if KMP_NESTED_HOT_TEAMS
5317     if (level) {
5318       // Sync barrier state for nested hot teams, not needed for outermost hot
5319       // team.
5320       for (f = 1; f < new_nproc; ++f) {
5321         kmp_info_t *thr = team->t.t_threads[f];
5322         int b;
5323         kmp_balign_t *balign = thr->th.th_bar;
5324         for (b = 0; b < bs_last_barrier; ++b) {
5325           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5326           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5327 #if USE_DEBUGGER
5328           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5329 #endif
5330         }
5331       }
5332     }
5333 #endif // KMP_NESTED_HOT_TEAMS
5334 
5335     /* reallocate space for arguments if necessary */
5336     __kmp_alloc_argv_entries(argc, team, TRUE);
5337     KMP_CHECK_UPDATE(team->t.t_argc, argc);
5338     // The hot team re-uses the previous task team,
5339     // if untouched during the previous release->gather phase.
5340 
5341     KF_TRACE(10, (" hot_team = %p\n", team));
5342 
5343 #if KMP_DEBUG
5344     if (__kmp_tasking_mode != tskm_immediate_exec) {
5345       KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
5346                     "task_team[1] = %p after reinit\n",
5347                     team->t.t_task_team[0], team->t.t_task_team[1]));
5348     }
5349 #endif
5350 
5351 #if OMPT_SUPPORT
5352     __ompt_team_assign_id(team, ompt_parallel_data);
5353 #endif
5354 
5355     KMP_MB();
5356 
5357     return team;
5358   }
5359 
5360   /* next, let's try to take one from the team pool */
5361   KMP_MB();
5362   for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
5363     /* TODO: consider resizing undersized teams instead of reaping them, now
5364        that we have a resizing mechanism */
5365     if (team->t.t_max_nproc >= max_nproc) {
5366       /* take this team from the team pool */
5367       __kmp_team_pool = team->t.t_next_pool;
5368 
5369       /* setup the team for fresh use */
5370       __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5371 
5372       KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and "
5373                     "task_team[1] %p to NULL\n",
5374                     &team->t.t_task_team[0], &team->t.t_task_team[1]));
5375       team->t.t_task_team[0] = NULL;
5376       team->t.t_task_team[1] = NULL;
5377 
5378       /* reallocate space for arguments if necessary */
5379       __kmp_alloc_argv_entries(argc, team, TRUE);
5380       KMP_CHECK_UPDATE(team->t.t_argc, argc);
5381 
5382       KA_TRACE(
5383           20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5384                team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5385       { // Initialize barrier data.
5386         int b;
5387         for (b = 0; b < bs_last_barrier; ++b) {
5388           team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5389 #if USE_DEBUGGER
5390           team->t.t_bar[b].b_master_arrived = 0;
5391           team->t.t_bar[b].b_team_arrived = 0;
5392 #endif
5393         }
5394       }
5395 
5396 #if OMP_40_ENABLED
5397       team->t.t_proc_bind = new_proc_bind;
5398 #endif
5399 
5400       KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
5401                     team->t.t_id));
5402 
5403 #if OMPT_SUPPORT
5404       __ompt_team_assign_id(team, ompt_parallel_data);
5405 #endif
5406 
5407       KMP_MB();
5408 
5409       return team;
5410     }
5411 
5412     /* reap team if it is too small, then loop back and check the next one */
5413     // not sure if this is wise, but, will be redone during the hot-teams
5414     // rewrite.
5415     /* TODO: Use technique to find the right size hot-team, don't reap them */
5416     team = __kmp_reap_team(team);
5417     __kmp_team_pool = team;
5418   }
5419 
5420   /* nothing available in the pool, no matter, make a new team! */
5421   KMP_MB();
5422   team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));
5423 
5424   /* and set it up */
5425   team->t.t_max_nproc = max_nproc;
5426   /* NOTE well, for some reason allocating one big buffer and dividing it up
5427      seems to really hurt performance a lot on the P4, so, let's not use this */
5428   __kmp_allocate_team_arrays(team, max_nproc);
5429 
5430   KA_TRACE(20, ("__kmp_allocate_team: making a new team\n"));
5431   __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5432 
5433   KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and task_team[1] "
5434                 "%p to NULL\n",
5435                 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5436   team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes
5437   // memory, no need to duplicate
5438   team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes
5439   // memory, no need to duplicate
5440 
5441   if (__kmp_storage_map) {
5442     __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
5443   }
5444 
5445   /* allocate space for arguments */
5446   __kmp_alloc_argv_entries(argc, team, FALSE);
5447   team->t.t_argc = argc;
5448 
5449   KA_TRACE(20,
5450            ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5451             team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5452   { // Initialize barrier data.
5453     int b;
5454     for (b = 0; b < bs_last_barrier; ++b) {
5455       team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5456 #if USE_DEBUGGER
5457       team->t.t_bar[b].b_master_arrived = 0;
5458       team->t.t_bar[b].b_team_arrived = 0;
5459 #endif
5460     }
5461   }
5462 
5463 #if OMP_40_ENABLED
5464   team->t.t_proc_bind = new_proc_bind;
5465 #endif
5466 
5467 #if OMPT_SUPPORT
5468   __ompt_team_assign_id(team, ompt_parallel_data);
5469   team->t.ompt_serialized_team_info = NULL;
5470 #endif
5471 
5472   KMP_MB();
5473 
5474   KA_TRACE(20, ("__kmp_allocate_team: done creating a new team %d.\n",
5475                 team->t.t_id));
5476 
5477   return team;
5478 }
5479 
5480 /* TODO implement hot-teams at all levels */
5481 /* TODO implement lazy thread release on demand (disband request) */
5482 
5483 /* free the team.  return it to the team pool.  release all the threads
5484  * associated with it */
5485 void __kmp_free_team(kmp_root_t *root,
5486                      kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
5487   int f;
5488   KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
5489                 team->t.t_id));
5490 
5491   /* verify state */
5492   KMP_DEBUG_ASSERT(root);
5493   KMP_DEBUG_ASSERT(team);
5494   KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
5495   KMP_DEBUG_ASSERT(team->t.t_threads);
5496 
5497   int use_hot_team = team == root->r.r_hot_team;
5498 #if KMP_NESTED_HOT_TEAMS
5499   int level;
5500   kmp_hot_team_ptr_t *hot_teams;
5501   if (master) {
5502     level = team->t.t_active_level - 1;
5503     if (master->th.th_teams_microtask) { // in teams construct?
5504       if (master->th.th_teams_size.nteams > 1) {
5505         ++level; // level was not increased in teams construct for
5506         // team_of_masters
5507       }
5508       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
5509           master->th.th_teams_level == team->t.t_level) {
5510         ++level; // level was not increased in teams construct for
5511         // team_of_workers before the parallel
5512       } // team->t.t_level will be increased inside parallel
5513     }
5514     hot_teams = master->th.th_hot_teams;
5515     if (level < __kmp_hot_teams_max_level) {
5516       KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
5517       use_hot_team = 1;
5518     }
5519   }
5520 #endif // KMP_NESTED_HOT_TEAMS
5521 
5522   /* team is done working */
5523   TCW_SYNC_PTR(team->t.t_pkfn,
5524                NULL); // Important for Debugging Support Library.
5525 #if KMP_OS_WINDOWS
5526   team->t.t_copyin_counter = 0; // init counter for possible reuse
5527 #endif
5528   // Do not reset pointer to parent team to NULL for hot teams.
5529 
5530   /* if we are non-hot team, release our threads */
5531   if (!use_hot_team) {
5532     if (__kmp_tasking_mode != tskm_immediate_exec) {
5533       // Wait for threads to reach reapable state
5534       for (f = 1; f < team->t.t_nproc; ++f) {
5535         KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5536         kmp_info_t *th = team->t.t_threads[f];
5537         volatile kmp_uint32 *state = &th->th.th_reap_state;
5538         while (*state != KMP_SAFE_TO_REAP) {
5539 #if KMP_OS_WINDOWS
5540           // On Windows a thread can be killed at any time, check this
5541           DWORD ecode;
5542           if (!__kmp_is_thread_alive(th, &ecode)) {
5543             *state = KMP_SAFE_TO_REAP; // reset the flag for dead thread
5544             break;
5545           }
5546 #endif
5547           // first check if thread is sleeping
5548           kmp_flag_64 fl(&th->th.th_bar[bs_forkjoin_barrier].bb.b_go, th);
5549           if (fl.is_sleeping())
5550             fl.resume(__kmp_gtid_from_thread(th));
5551           KMP_CPU_PAUSE();
5552         }
5553       }
5554 
5555       // Delete task teams
5556       int tt_idx;
5557       for (tt_idx = 0; tt_idx < 2; ++tt_idx) {
5558         kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
5559         if (task_team != NULL) {
5560           for (f = 0; f < team->t.t_nproc; ++f) { // threads unref task teams
5561             KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5562             team->t.t_threads[f]->th.th_task_team = NULL;
5563           }
5564           KA_TRACE(
5565               20,
5566               ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
5567                __kmp_get_gtid(), task_team, team->t.t_id));
5568 #if KMP_NESTED_HOT_TEAMS
5569           __kmp_free_task_team(master, task_team);
5570 #endif
5571           team->t.t_task_team[tt_idx] = NULL;
5572         }
5573       }
5574     }
5575 
5576     // Reset pointer to parent team only for non-hot teams.
5577     team->t.t_parent = NULL;
5578     team->t.t_level = 0;
5579     team->t.t_active_level = 0;
5580 
5581     /* free the worker threads */
5582     for (f = 1; f < team->t.t_nproc; ++f) {
5583       KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5584       __kmp_free_thread(team->t.t_threads[f]);
5585       team->t.t_threads[f] = NULL;
5586     }
5587 
5588     /* put the team back in the team pool */
5589     /* TODO limit size of team pool, call reap_team if pool too large */
5590     team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
5591     __kmp_team_pool = (volatile kmp_team_t *)team;
5592   } else { // Check if team was created for the masters in a teams construct
5593     // See if first worker is a CG root
5594     KMP_DEBUG_ASSERT(team->t.t_threads[1] &&
5595                      team->t.t_threads[1]->th.th_cg_roots);
5596     if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) {
5597       // Clean up the CG root nodes on workers so that this team can be re-used
5598       for (f = 1; f < team->t.t_nproc; ++f) {
5599         kmp_info_t *thr = team->t.t_threads[f];
5600         KMP_DEBUG_ASSERT(thr && thr->th.th_cg_roots &&
5601                          thr->th.th_cg_roots->cg_root == thr);
5602         // Pop current CG root off list
5603         kmp_cg_root_t *tmp = thr->th.th_cg_roots;
5604         thr->th.th_cg_roots = tmp->up;
5605         KA_TRACE(100, ("__kmp_free_team: Thread %p popping node %p and moving"
5606                        " up to node %p. cg_nthreads was %d\n",
5607                        thr, tmp, thr->th.th_cg_roots, tmp->cg_nthreads));
5608         int i = tmp->cg_nthreads--;
5609         if (i == 1) {
5610           __kmp_free(tmp); // free CG if we are the last thread in it
5611         }
5612         // Restore current task's thread_limit from CG root
5613         if (thr->th.th_cg_roots)
5614           thr->th.th_current_task->td_icvs.thread_limit =
5615               thr->th.th_cg_roots->cg_thread_limit;
5616       }
5617     }
5618   }
5619 
5620   KMP_MB();
5621 }
5622 
5623 /* reap the team.  destroy it, reclaim all its resources and free its memory */
5624 kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
5625   kmp_team_t *next_pool = team->t.t_next_pool;
5626 
5627   KMP_DEBUG_ASSERT(team);
5628   KMP_DEBUG_ASSERT(team->t.t_dispatch);
5629   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
5630   KMP_DEBUG_ASSERT(team->t.t_threads);
5631   KMP_DEBUG_ASSERT(team->t.t_argv);
5632 
5633   /* TODO clean the threads that are a part of this? */
5634 
5635   /* free stuff */
5636   __kmp_free_team_arrays(team);
5637   if (team->t.t_argv != &team->t.t_inline_argv[0])
5638     __kmp_free((void *)team->t.t_argv);
5639   __kmp_free(team);
5640 
5641   KMP_MB();
5642   return next_pool;
5643 }
5644 
5645 // Free the thread.  Don't reap it, just place it on the pool of available
5646 // threads.
5647 //
5648 // Changes for Quad issue 527845: We need a predictable OMP tid <-> gtid
5649 // binding for the affinity mechanism to be useful.
5650 //
5651 // Now, we always keep the free list (__kmp_thread_pool) sorted by gtid.
5652 // However, we want to avoid a potential performance problem by always
5653 // scanning through the list to find the correct point at which to insert
5654 // the thread (potential N**2 behavior).  To do this we keep track of the
5655 // last place a thread struct was inserted (__kmp_thread_pool_insert_pt).
5656 // With single-level parallelism, threads will always be added to the tail
5657 // of the list, kept track of by __kmp_thread_pool_insert_pt.  With nested
5658 // parallelism, all bets are off and we may need to scan through the entire
5659 // free list.
5660 //
5661 // This change also has a potentially large performance benefit, for some
5662 // applications.  Previously, as threads were freed from the hot team, they
5663 // would be placed back on the free list in inverse order.  If the hot team
5664 // grew back to it's original size, then the freed thread would be placed
5665 // back on the hot team in reverse order.  This could cause bad cache
5666 // locality problems on programs where the size of the hot team regularly
5667 // grew and shrunk.
5668 //
5669 // Now, for single-level parallelism, the OMP tid is alway == gtid.
5670 void __kmp_free_thread(kmp_info_t *this_th) {
5671   int gtid;
5672   kmp_info_t **scan;
5673 
5674   KA_TRACE(20, ("__kmp_free_thread: T#%d putting T#%d back on free pool.\n",
5675                 __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid));
5676 
5677   KMP_DEBUG_ASSERT(this_th);
5678 
5679   // When moving thread to pool, switch thread to wait on own b_go flag, and
5680   // uninitialized (NULL team).
5681   int b;
5682   kmp_balign_t *balign = this_th->th.th_bar;
5683   for (b = 0; b < bs_last_barrier; ++b) {
5684     if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG)
5685       balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5686     balign[b].bb.team = NULL;
5687     balign[b].bb.leaf_kids = 0;
5688   }
5689   this_th->th.th_task_state = 0;
5690   this_th->th.th_reap_state = KMP_SAFE_TO_REAP;
5691 
5692   /* put thread back on the free pool */
5693   TCW_PTR(this_th->th.th_team, NULL);
5694   TCW_PTR(this_th->th.th_root, NULL);
5695   TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */
5696 
5697   while (this_th->th.th_cg_roots) {
5698     this_th->th.th_cg_roots->cg_nthreads--;
5699     KA_TRACE(100, ("__kmp_free_thread: Thread %p decrement cg_nthreads on node"
5700                    " %p of thread  %p to %d\n",
5701                    this_th, this_th->th.th_cg_roots,
5702                    this_th->th.th_cg_roots->cg_root,
5703                    this_th->th.th_cg_roots->cg_nthreads));
5704     kmp_cg_root_t *tmp = this_th->th.th_cg_roots;
5705     if (tmp->cg_root == this_th) { // Thread is a cg_root
5706       KMP_DEBUG_ASSERT(tmp->cg_nthreads == 0);
5707       KA_TRACE(
5708           5, ("__kmp_free_thread: Thread %p freeing node %p\n", this_th, tmp));
5709       this_th->th.th_cg_roots = tmp->up;
5710       __kmp_free(tmp);
5711     } else { // Worker thread
5712       if (tmp->cg_nthreads == 0) { // last thread leaves contention group
5713         __kmp_free(tmp);
5714       }
5715       this_th->th.th_cg_roots = NULL;
5716       break;
5717     }
5718   }
5719 
5720   /* If the implicit task assigned to this thread can be used by other threads
5721    * -> multiple threads can share the data and try to free the task at
5722    * __kmp_reap_thread at exit. This duplicate use of the task data can happen
5723    * with higher probability when hot team is disabled but can occurs even when
5724    * the hot team is enabled */
5725   __kmp_free_implicit_task(this_th);
5726   this_th->th.th_current_task = NULL;
5727 
5728   // If the __kmp_thread_pool_insert_pt is already past the new insert
5729   // point, then we need to re-scan the entire list.
5730   gtid = this_th->th.th_info.ds.ds_gtid;
5731   if (__kmp_thread_pool_insert_pt != NULL) {
5732     KMP_DEBUG_ASSERT(__kmp_thread_pool != NULL);
5733     if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) {
5734       __kmp_thread_pool_insert_pt = NULL;
5735     }
5736   }
5737 
5738   // Scan down the list to find the place to insert the thread.
5739   // scan is the address of a link in the list, possibly the address of
5740   // __kmp_thread_pool itself.
5741   //
5742   // In the absence of nested parallism, the for loop will have 0 iterations.
5743   if (__kmp_thread_pool_insert_pt != NULL) {
5744     scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
5745   } else {
5746     scan = CCAST(kmp_info_t **, &__kmp_thread_pool);
5747   }
5748   for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid);
5749        scan = &((*scan)->th.th_next_pool))
5750     ;
5751 
5752   // Insert the new element on the list, and set __kmp_thread_pool_insert_pt
5753   // to its address.
5754   TCW_PTR(this_th->th.th_next_pool, *scan);
5755   __kmp_thread_pool_insert_pt = *scan = this_th;
5756   KMP_DEBUG_ASSERT((this_th->th.th_next_pool == NULL) ||
5757                    (this_th->th.th_info.ds.ds_gtid <
5758                     this_th->th.th_next_pool->th.th_info.ds.ds_gtid));
5759   TCW_4(this_th->th.th_in_pool, TRUE);
5760   __kmp_suspend_initialize_thread(this_th);
5761   __kmp_lock_suspend_mx(this_th);
5762   if (this_th->th.th_active == TRUE) {
5763     KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
5764     this_th->th.th_active_in_pool = TRUE;
5765   }
5766 #if KMP_DEBUG
5767   else {
5768     KMP_DEBUG_ASSERT(this_th->th.th_active_in_pool == FALSE);
5769   }
5770 #endif
5771   __kmp_unlock_suspend_mx(this_th);
5772 
5773   TCW_4(__kmp_nth, __kmp_nth - 1);
5774 
5775 #ifdef KMP_ADJUST_BLOCKTIME
5776   /* Adjust blocktime back to user setting or default if necessary */
5777   /* Middle initialization might never have occurred                */
5778   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5779     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5780     if (__kmp_nth <= __kmp_avail_proc) {
5781       __kmp_zero_bt = FALSE;
5782     }
5783   }
5784 #endif /* KMP_ADJUST_BLOCKTIME */
5785 
5786   KMP_MB();
5787 }
5788 
5789 /* ------------------------------------------------------------------------ */
5790 
5791 void *__kmp_launch_thread(kmp_info_t *this_thr) {
5792   int gtid = this_thr->th.th_info.ds.ds_gtid;
5793   /*    void                 *stack_data;*/
5794   kmp_team_t *(*volatile pteam);
5795 
5796   KMP_MB();
5797   KA_TRACE(10, ("__kmp_launch_thread: T#%d start\n", gtid));
5798 
5799   if (__kmp_env_consistency_check) {
5800     this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak?
5801   }
5802 
5803 #if OMPT_SUPPORT
5804   ompt_data_t *thread_data;
5805   if (ompt_enabled.enabled) {
5806     thread_data = &(this_thr->th.ompt_thread_info.thread_data);
5807     *thread_data = ompt_data_none;
5808 
5809     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5810     this_thr->th.ompt_thread_info.wait_id = 0;
5811     this_thr->th.ompt_thread_info.idle_frame = OMPT_GET_FRAME_ADDRESS(0);
5812     if (ompt_enabled.ompt_callback_thread_begin) {
5813       ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
5814           ompt_thread_worker, thread_data);
5815     }
5816   }
5817 #endif
5818 
5819 #if OMPT_SUPPORT
5820   if (ompt_enabled.enabled) {
5821     this_thr->th.ompt_thread_info.state = ompt_state_idle;
5822   }
5823 #endif
5824   /* This is the place where threads wait for work */
5825   while (!TCR_4(__kmp_global.g.g_done)) {
5826     KMP_DEBUG_ASSERT(this_thr == __kmp_threads[gtid]);
5827     KMP_MB();
5828 
5829     /* wait for work to do */
5830     KA_TRACE(20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid));
5831 
5832     /* No tid yet since not part of a team */
5833     __kmp_fork_barrier(gtid, KMP_GTID_DNE);
5834 
5835 #if OMPT_SUPPORT
5836     if (ompt_enabled.enabled) {
5837       this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5838     }
5839 #endif
5840 
5841     pteam = (kmp_team_t * (*))(&this_thr->th.th_team);
5842 
5843     /* have we been allocated? */
5844     if (TCR_SYNC_PTR(*pteam) && !TCR_4(__kmp_global.g.g_done)) {
5845       /* we were just woken up, so run our new task */
5846       if (TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL) {
5847         int rc;
5848         KA_TRACE(20,
5849                  ("__kmp_launch_thread: T#%d(%d:%d) invoke microtask = %p\n",
5850                   gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5851                   (*pteam)->t.t_pkfn));
5852 
5853         updateHWFPControl(*pteam);
5854 
5855 #if OMPT_SUPPORT
5856         if (ompt_enabled.enabled) {
5857           this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
5858         }
5859 #endif
5860 
5861         rc = (*pteam)->t.t_invoke(gtid);
5862         KMP_ASSERT(rc);
5863 
5864         KMP_MB();
5865         KA_TRACE(20, ("__kmp_launch_thread: T#%d(%d:%d) done microtask = %p\n",
5866                       gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5867                       (*pteam)->t.t_pkfn));
5868       }
5869 #if OMPT_SUPPORT
5870       if (ompt_enabled.enabled) {
5871         /* no frame set while outside task */
5872         __ompt_get_task_info_object(0)->frame.exit_frame = ompt_data_none;
5873 
5874         this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5875       }
5876 #endif
5877       /* join barrier after parallel region */
5878       __kmp_join_barrier(gtid);
5879     }
5880   }
5881   TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);
5882 
5883 #if OMPT_SUPPORT
5884   if (ompt_enabled.ompt_callback_thread_end) {
5885     ompt_callbacks.ompt_callback(ompt_callback_thread_end)(thread_data);
5886   }
5887 #endif
5888 
5889   this_thr->th.th_task_team = NULL;
5890   /* run the destructors for the threadprivate data for this thread */
5891   __kmp_common_destroy_gtid(gtid);
5892 
5893   KA_TRACE(10, ("__kmp_launch_thread: T#%d done\n", gtid));
5894   KMP_MB();
5895   return this_thr;
5896 }
5897 
5898 /* ------------------------------------------------------------------------ */
5899 
5900 void __kmp_internal_end_dest(void *specific_gtid) {
5901 #if KMP_COMPILER_ICC
5902 #pragma warning(push)
5903 #pragma warning(disable : 810) // conversion from "void *" to "int" may lose
5904 // significant bits
5905 #endif
5906   // Make sure no significant bits are lost
5907   int gtid = (kmp_intptr_t)specific_gtid - 1;
5908 #if KMP_COMPILER_ICC
5909 #pragma warning(pop)
5910 #endif
5911 
5912   KA_TRACE(30, ("__kmp_internal_end_dest: T#%d\n", gtid));
5913   /* NOTE: the gtid is stored as gitd+1 in the thread-local-storage
5914    * this is because 0 is reserved for the nothing-stored case */
5915 
5916   /* josh: One reason for setting the gtid specific data even when it is being
5917      destroyed by pthread is to allow gtid lookup through thread specific data
5918      (__kmp_gtid_get_specific).  Some of the code, especially stat code,
5919      that gets executed in the call to __kmp_internal_end_thread, actually
5920      gets the gtid through the thread specific data.  Setting it here seems
5921      rather inelegant and perhaps wrong, but allows __kmp_internal_end_thread
5922      to run smoothly.
5923      todo: get rid of this after we remove the dependence on
5924      __kmp_gtid_get_specific  */
5925   if (gtid >= 0 && KMP_UBER_GTID(gtid))
5926     __kmp_gtid_set_specific(gtid);
5927 #ifdef KMP_TDATA_GTID
5928   __kmp_gtid = gtid;
5929 #endif
5930   __kmp_internal_end_thread(gtid);
5931 }
5932 
5933 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
5934 
5935 // 2009-09-08 (lev): It looks the destructor does not work. In simple test cases
5936 // destructors work perfectly, but in real libomp.so I have no evidence it is
5937 // ever called. However, -fini linker option in makefile.mk works fine.
5938 
5939 __attribute__((destructor)) void __kmp_internal_end_dtor(void) {
5940   __kmp_internal_end_atexit();
5941 }
5942 
5943 void __kmp_internal_end_fini(void) { __kmp_internal_end_atexit(); }
5944 
5945 #endif
5946 
5947 /* [Windows] josh: when the atexit handler is called, there may still be more
5948    than one thread alive */
5949 void __kmp_internal_end_atexit(void) {
5950   KA_TRACE(30, ("__kmp_internal_end_atexit\n"));
5951   /* [Windows]
5952      josh: ideally, we want to completely shutdown the library in this atexit
5953      handler, but stat code that depends on thread specific data for gtid fails
5954      because that data becomes unavailable at some point during the shutdown, so
5955      we call __kmp_internal_end_thread instead. We should eventually remove the
5956      dependency on __kmp_get_specific_gtid in the stat code and use
5957      __kmp_internal_end_library to cleanly shutdown the library.
5958 
5959      // TODO: Can some of this comment about GVS be removed?
5960      I suspect that the offending stat code is executed when the calling thread
5961      tries to clean up a dead root thread's data structures, resulting in GVS
5962      code trying to close the GVS structures for that thread, but since the stat
5963      code uses __kmp_get_specific_gtid to get the gtid with the assumption that
5964      the calling thread is cleaning up itself instead of another thread, it get
5965      confused. This happens because allowing a thread to unregister and cleanup
5966      another thread is a recent modification for addressing an issue.
5967      Based on the current design (20050722), a thread may end up
5968      trying to unregister another thread only if thread death does not trigger
5969      the calling of __kmp_internal_end_thread.  For Linux* OS, there is the
5970      thread specific data destructor function to detect thread death. For
5971      Windows dynamic, there is DllMain(THREAD_DETACH). For Windows static, there
5972      is nothing.  Thus, the workaround is applicable only for Windows static
5973      stat library. */
5974   __kmp_internal_end_library(-1);
5975 #if KMP_OS_WINDOWS
5976   __kmp_close_console();
5977 #endif
5978 }
5979 
5980 static void __kmp_reap_thread(kmp_info_t *thread, int is_root) {
5981   // It is assumed __kmp_forkjoin_lock is acquired.
5982 
5983   int gtid;
5984 
5985   KMP_DEBUG_ASSERT(thread != NULL);
5986 
5987   gtid = thread->th.th_info.ds.ds_gtid;
5988 
5989   if (!is_root) {
5990     if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
5991       /* Assume the threads are at the fork barrier here */
5992       KA_TRACE(
5993           20, ("__kmp_reap_thread: releasing T#%d from fork barrier for reap\n",
5994                gtid));
5995       /* Need release fence here to prevent seg faults for tree forkjoin barrier
5996        * (GEH) */
5997       ANNOTATE_HAPPENS_BEFORE(thread);
5998       kmp_flag_64 flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
5999       __kmp_release_64(&flag);
6000     }
6001 
6002     // Terminate OS thread.
6003     __kmp_reap_worker(thread);
6004 
6005     // The thread was killed asynchronously.  If it was actively
6006     // spinning in the thread pool, decrement the global count.
6007     //
6008     // There is a small timing hole here - if the worker thread was just waking
6009     // up after sleeping in the pool, had reset it's th_active_in_pool flag but
6010     // not decremented the global counter __kmp_thread_pool_active_nth yet, then
6011     // the global counter might not get updated.
6012     //
6013     // Currently, this can only happen as the library is unloaded,
6014     // so there are no harmful side effects.
6015     if (thread->th.th_active_in_pool) {
6016       thread->th.th_active_in_pool = FALSE;
6017       KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
6018       KMP_DEBUG_ASSERT(__kmp_thread_pool_active_nth >= 0);
6019     }
6020   }
6021 
6022   __kmp_free_implicit_task(thread);
6023 
6024 // Free the fast memory for tasking
6025 #if USE_FAST_MEMORY
6026   __kmp_free_fast_memory(thread);
6027 #endif /* USE_FAST_MEMORY */
6028 
6029   __kmp_suspend_uninitialize_thread(thread);
6030 
6031   KMP_DEBUG_ASSERT(__kmp_threads[gtid] == thread);
6032   TCW_SYNC_PTR(__kmp_threads[gtid], NULL);
6033 
6034   --__kmp_all_nth;
6035 // __kmp_nth was decremented when thread is added to the pool.
6036 
6037 #ifdef KMP_ADJUST_BLOCKTIME
6038   /* Adjust blocktime back to user setting or default if necessary */
6039   /* Middle initialization might never have occurred                */
6040   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
6041     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
6042     if (__kmp_nth <= __kmp_avail_proc) {
6043       __kmp_zero_bt = FALSE;
6044     }
6045   }
6046 #endif /* KMP_ADJUST_BLOCKTIME */
6047 
6048   /* free the memory being used */
6049   if (__kmp_env_consistency_check) {
6050     if (thread->th.th_cons) {
6051       __kmp_free_cons_stack(thread->th.th_cons);
6052       thread->th.th_cons = NULL;
6053     }
6054   }
6055 
6056   if (thread->th.th_pri_common != NULL) {
6057     __kmp_free(thread->th.th_pri_common);
6058     thread->th.th_pri_common = NULL;
6059   }
6060 
6061   if (thread->th.th_task_state_memo_stack != NULL) {
6062     __kmp_free(thread->th.th_task_state_memo_stack);
6063     thread->th.th_task_state_memo_stack = NULL;
6064   }
6065 
6066 #if KMP_USE_BGET
6067   if (thread->th.th_local.bget_data != NULL) {
6068     __kmp_finalize_bget(thread);
6069   }
6070 #endif
6071 
6072 #if KMP_AFFINITY_SUPPORTED
6073   if (thread->th.th_affin_mask != NULL) {
6074     KMP_CPU_FREE(thread->th.th_affin_mask);
6075     thread->th.th_affin_mask = NULL;
6076   }
6077 #endif /* KMP_AFFINITY_SUPPORTED */
6078 
6079 #if KMP_USE_HIER_SCHED
6080   if (thread->th.th_hier_bar_data != NULL) {
6081     __kmp_free(thread->th.th_hier_bar_data);
6082     thread->th.th_hier_bar_data = NULL;
6083   }
6084 #endif
6085 
6086   __kmp_reap_team(thread->th.th_serial_team);
6087   thread->th.th_serial_team = NULL;
6088   __kmp_free(thread);
6089 
6090   KMP_MB();
6091 
6092 } // __kmp_reap_thread
6093 
6094 static void __kmp_internal_end(void) {
6095   int i;
6096 
6097   /* First, unregister the library */
6098   __kmp_unregister_library();
6099 
6100 #if KMP_OS_WINDOWS
6101   /* In Win static library, we can't tell when a root actually dies, so we
6102      reclaim the data structures for any root threads that have died but not
6103      unregistered themselves, in order to shut down cleanly.
6104      In Win dynamic library we also can't tell when a thread dies.  */
6105   __kmp_reclaim_dead_roots(); // AC: moved here to always clean resources of
6106 // dead roots
6107 #endif
6108 
6109   for (i = 0; i < __kmp_threads_capacity; i++)
6110     if (__kmp_root[i])
6111       if (__kmp_root[i]->r.r_active)
6112         break;
6113   KMP_MB(); /* Flush all pending memory write invalidates.  */
6114   TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6115 
6116   if (i < __kmp_threads_capacity) {
6117 #if KMP_USE_MONITOR
6118     // 2009-09-08 (lev): Other alive roots found. Why do we kill the monitor??
6119     KMP_MB(); /* Flush all pending memory write invalidates.  */
6120 
6121     // Need to check that monitor was initialized before reaping it. If we are
6122     // called form __kmp_atfork_child (which sets __kmp_init_parallel = 0), then
6123     // __kmp_monitor will appear to contain valid data, but it is only valid in
6124     // the parent process, not the child.
6125     // New behavior (201008): instead of keying off of the flag
6126     // __kmp_init_parallel, the monitor thread creation is keyed off
6127     // of the new flag __kmp_init_monitor.
6128     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
6129     if (TCR_4(__kmp_init_monitor)) {
6130       __kmp_reap_monitor(&__kmp_monitor);
6131       TCW_4(__kmp_init_monitor, 0);
6132     }
6133     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
6134     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
6135 #endif // KMP_USE_MONITOR
6136   } else {
6137 /* TODO move this to cleanup code */
6138 #ifdef KMP_DEBUG
6139     /* make sure that everything has properly ended */
6140     for (i = 0; i < __kmp_threads_capacity; i++) {
6141       if (__kmp_root[i]) {
6142         //                    KMP_ASSERT( ! KMP_UBER_GTID( i ) );         // AC:
6143         //                    there can be uber threads alive here
6144         KMP_ASSERT(!__kmp_root[i]->r.r_active); // TODO: can they be active?
6145       }
6146     }
6147 #endif
6148 
6149     KMP_MB();
6150 
6151     // Reap the worker threads.
6152     // This is valid for now, but be careful if threads are reaped sooner.
6153     while (__kmp_thread_pool != NULL) { // Loop thru all the thread in the pool.
6154       // Get the next thread from the pool.
6155       kmp_info_t *thread = CCAST(kmp_info_t *, __kmp_thread_pool);
6156       __kmp_thread_pool = thread->th.th_next_pool;
6157       // Reap it.
6158       KMP_DEBUG_ASSERT(thread->th.th_reap_state == KMP_SAFE_TO_REAP);
6159       thread->th.th_next_pool = NULL;
6160       thread->th.th_in_pool = FALSE;
6161       __kmp_reap_thread(thread, 0);
6162     }
6163     __kmp_thread_pool_insert_pt = NULL;
6164 
6165     // Reap teams.
6166     while (__kmp_team_pool != NULL) { // Loop thru all the teams in the pool.
6167       // Get the next team from the pool.
6168       kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
6169       __kmp_team_pool = team->t.t_next_pool;
6170       // Reap it.
6171       team->t.t_next_pool = NULL;
6172       __kmp_reap_team(team);
6173     }
6174 
6175     __kmp_reap_task_teams();
6176 
6177 #if KMP_OS_UNIX
6178     // Threads that are not reaped should not access any resources since they
6179     // are going to be deallocated soon, so the shutdown sequence should wait
6180     // until all threads either exit the final spin-waiting loop or begin
6181     // sleeping after the given blocktime.
6182     for (i = 0; i < __kmp_threads_capacity; i++) {
6183       kmp_info_t *thr = __kmp_threads[i];
6184       while (thr && KMP_ATOMIC_LD_ACQ(&thr->th.th_blocking))
6185         KMP_CPU_PAUSE();
6186     }
6187 #endif
6188 
6189     for (i = 0; i < __kmp_threads_capacity; ++i) {
6190       // TBD: Add some checking...
6191       // Something like KMP_DEBUG_ASSERT( __kmp_thread[ i ] == NULL );
6192     }
6193 
6194     /* Make sure all threadprivate destructors get run by joining with all
6195        worker threads before resetting this flag */
6196     TCW_SYNC_4(__kmp_init_common, FALSE);
6197 
6198     KA_TRACE(10, ("__kmp_internal_end: all workers reaped\n"));
6199     KMP_MB();
6200 
6201 #if KMP_USE_MONITOR
6202     // See note above: One of the possible fixes for CQ138434 / CQ140126
6203     //
6204     // FIXME: push both code fragments down and CSE them?
6205     // push them into __kmp_cleanup() ?
6206     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
6207     if (TCR_4(__kmp_init_monitor)) {
6208       __kmp_reap_monitor(&__kmp_monitor);
6209       TCW_4(__kmp_init_monitor, 0);
6210     }
6211     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
6212     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
6213 #endif
6214   } /* else !__kmp_global.t_active */
6215   TCW_4(__kmp_init_gtid, FALSE);
6216   KMP_MB(); /* Flush all pending memory write invalidates.  */
6217 
6218   __kmp_cleanup();
6219 #if OMPT_SUPPORT
6220   ompt_fini();
6221 #endif
6222 }
6223 
6224 void __kmp_internal_end_library(int gtid_req) {
6225   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
6226   /* this shouldn't be a race condition because __kmp_internal_end() is the
6227      only place to clear __kmp_serial_init */
6228   /* we'll check this later too, after we get the lock */
6229   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
6230   // redundaant, because the next check will work in any case.
6231   if (__kmp_global.g.g_abort) {
6232     KA_TRACE(11, ("__kmp_internal_end_library: abort, exiting\n"));
6233     /* TODO abort? */
6234     return;
6235   }
6236   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6237     KA_TRACE(10, ("__kmp_internal_end_library: already finished\n"));
6238     return;
6239   }
6240 
6241   KMP_MB(); /* Flush all pending memory write invalidates.  */
6242 
6243   /* find out who we are and what we should do */
6244   {
6245     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6246     KA_TRACE(
6247         10, ("__kmp_internal_end_library: enter T#%d  (%d)\n", gtid, gtid_req));
6248     if (gtid == KMP_GTID_SHUTDOWN) {
6249       KA_TRACE(10, ("__kmp_internal_end_library: !__kmp_init_runtime, system "
6250                     "already shutdown\n"));
6251       return;
6252     } else if (gtid == KMP_GTID_MONITOR) {
6253       KA_TRACE(10, ("__kmp_internal_end_library: monitor thread, gtid not "
6254                     "registered, or system shutdown\n"));
6255       return;
6256     } else if (gtid == KMP_GTID_DNE) {
6257       KA_TRACE(10, ("__kmp_internal_end_library: gtid not registered or system "
6258                     "shutdown\n"));
6259       /* we don't know who we are, but we may still shutdown the library */
6260     } else if (KMP_UBER_GTID(gtid)) {
6261       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6262       if (__kmp_root[gtid]->r.r_active) {
6263         __kmp_global.g.g_abort = -1;
6264         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6265         KA_TRACE(10,
6266                  ("__kmp_internal_end_library: root still active, abort T#%d\n",
6267                   gtid));
6268         return;
6269       } else {
6270         KA_TRACE(
6271             10,
6272             ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid));
6273         __kmp_unregister_root_current_thread(gtid);
6274       }
6275     } else {
6276 /* worker threads may call this function through the atexit handler, if they
6277  * call exit() */
6278 /* For now, skip the usual subsequent processing and just dump the debug buffer.
6279    TODO: do a thorough shutdown instead */
6280 #ifdef DUMP_DEBUG_ON_EXIT
6281       if (__kmp_debug_buf)
6282         __kmp_dump_debug_buffer();
6283 #endif
6284       return;
6285     }
6286   }
6287   /* synchronize the termination process */
6288   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6289 
6290   /* have we already finished */
6291   if (__kmp_global.g.g_abort) {
6292     KA_TRACE(10, ("__kmp_internal_end_library: abort, exiting\n"));
6293     /* TODO abort? */
6294     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6295     return;
6296   }
6297   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6298     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6299     return;
6300   }
6301 
6302   /* We need this lock to enforce mutex between this reading of
6303      __kmp_threads_capacity and the writing by __kmp_register_root.
6304      Alternatively, we can use a counter of roots that is atomically updated by
6305      __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
6306      __kmp_internal_end_*.  */
6307   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6308 
6309   /* now we can safely conduct the actual termination */
6310   __kmp_internal_end();
6311 
6312   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6313   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6314 
6315   KA_TRACE(10, ("__kmp_internal_end_library: exit\n"));
6316 
6317 #ifdef DUMP_DEBUG_ON_EXIT
6318   if (__kmp_debug_buf)
6319     __kmp_dump_debug_buffer();
6320 #endif
6321 
6322 #if KMP_OS_WINDOWS
6323   __kmp_close_console();
6324 #endif
6325 
6326   __kmp_fini_allocator();
6327 
6328 } // __kmp_internal_end_library
6329 
6330 void __kmp_internal_end_thread(int gtid_req) {
6331   int i;
6332 
6333   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
6334   /* this shouldn't be a race condition because __kmp_internal_end() is the
6335    * only place to clear __kmp_serial_init */
6336   /* we'll check this later too, after we get the lock */
6337   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
6338   // redundant, because the next check will work in any case.
6339   if (__kmp_global.g.g_abort) {
6340     KA_TRACE(11, ("__kmp_internal_end_thread: abort, exiting\n"));
6341     /* TODO abort? */
6342     return;
6343   }
6344   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6345     KA_TRACE(10, ("__kmp_internal_end_thread: already finished\n"));
6346     return;
6347   }
6348 
6349   KMP_MB(); /* Flush all pending memory write invalidates.  */
6350 
6351   /* find out who we are and what we should do */
6352   {
6353     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6354     KA_TRACE(10,
6355              ("__kmp_internal_end_thread: enter T#%d  (%d)\n", gtid, gtid_req));
6356     if (gtid == KMP_GTID_SHUTDOWN) {
6357       KA_TRACE(10, ("__kmp_internal_end_thread: !__kmp_init_runtime, system "
6358                     "already shutdown\n"));
6359       return;
6360     } else if (gtid == KMP_GTID_MONITOR) {
6361       KA_TRACE(10, ("__kmp_internal_end_thread: monitor thread, gtid not "
6362                     "registered, or system shutdown\n"));
6363       return;
6364     } else if (gtid == KMP_GTID_DNE) {
6365       KA_TRACE(10, ("__kmp_internal_end_thread: gtid not registered or system "
6366                     "shutdown\n"));
6367       return;
6368       /* we don't know who we are */
6369     } else if (KMP_UBER_GTID(gtid)) {
6370       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6371       if (__kmp_root[gtid]->r.r_active) {
6372         __kmp_global.g.g_abort = -1;
6373         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6374         KA_TRACE(10,
6375                  ("__kmp_internal_end_thread: root still active, abort T#%d\n",
6376                   gtid));
6377         return;
6378       } else {
6379         KA_TRACE(10, ("__kmp_internal_end_thread: unregistering sibling T#%d\n",
6380                       gtid));
6381         __kmp_unregister_root_current_thread(gtid);
6382       }
6383     } else {
6384       /* just a worker thread, let's leave */
6385       KA_TRACE(10, ("__kmp_internal_end_thread: worker thread T#%d\n", gtid));
6386 
6387       if (gtid >= 0) {
6388         __kmp_threads[gtid]->th.th_task_team = NULL;
6389       }
6390 
6391       KA_TRACE(10,
6392                ("__kmp_internal_end_thread: worker thread done, exiting T#%d\n",
6393                 gtid));
6394       return;
6395     }
6396   }
6397 #if KMP_DYNAMIC_LIB
6398 #if OMP_50_ENABLED
6399   if (__kmp_pause_status != kmp_hard_paused)
6400 #endif
6401   // AC: lets not shutdown the dynamic library at the exit of uber thread,
6402   // because we will better shutdown later in the library destructor.
6403   {
6404     KA_TRACE(10, ("__kmp_internal_end_thread: exiting T#%d\n", gtid_req));
6405     return;
6406   }
6407 #endif
6408   /* synchronize the termination process */
6409   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6410 
6411   /* have we already finished */
6412   if (__kmp_global.g.g_abort) {
6413     KA_TRACE(10, ("__kmp_internal_end_thread: abort, exiting\n"));
6414     /* TODO abort? */
6415     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6416     return;
6417   }
6418   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6419     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6420     return;
6421   }
6422 
6423   /* We need this lock to enforce mutex between this reading of
6424      __kmp_threads_capacity and the writing by __kmp_register_root.
6425      Alternatively, we can use a counter of roots that is atomically updated by
6426      __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
6427      __kmp_internal_end_*.  */
6428 
6429   /* should we finish the run-time?  are all siblings done? */
6430   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6431 
6432   for (i = 0; i < __kmp_threads_capacity; ++i) {
6433     if (KMP_UBER_GTID(i)) {
6434       KA_TRACE(
6435           10,
6436           ("__kmp_internal_end_thread: remaining sibling task: gtid==%d\n", i));
6437       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6438       __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6439       return;
6440     }
6441   }
6442 
6443   /* now we can safely conduct the actual termination */
6444 
6445   __kmp_internal_end();
6446 
6447   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6448   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6449 
6450   KA_TRACE(10, ("__kmp_internal_end_thread: exit T#%d\n", gtid_req));
6451 
6452 #ifdef DUMP_DEBUG_ON_EXIT
6453   if (__kmp_debug_buf)
6454     __kmp_dump_debug_buffer();
6455 #endif
6456 } // __kmp_internal_end_thread
6457 
6458 // -----------------------------------------------------------------------------
6459 // Library registration stuff.
6460 
6461 static long __kmp_registration_flag = 0;
6462 // Random value used to indicate library initialization.
6463 static char *__kmp_registration_str = NULL;
6464 // Value to be saved in env var __KMP_REGISTERED_LIB_<pid>.
6465 
6466 static inline char *__kmp_reg_status_name() {
6467   /* On RHEL 3u5 if linked statically, getpid() returns different values in
6468      each thread. If registration and unregistration go in different threads
6469      (omp_misc_other_root_exit.cpp test case), the name of registered_lib_env
6470      env var can not be found, because the name will contain different pid. */
6471   return __kmp_str_format("__KMP_REGISTERED_LIB_%d", (int)getpid());
6472 } // __kmp_reg_status_get
6473 
6474 void __kmp_register_library_startup(void) {
6475 
6476   char *name = __kmp_reg_status_name(); // Name of the environment variable.
6477   int done = 0;
6478   union {
6479     double dtime;
6480     long ltime;
6481   } time;
6482 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6483   __kmp_initialize_system_tick();
6484 #endif
6485   __kmp_read_system_time(&time.dtime);
6486   __kmp_registration_flag = 0xCAFE0000L | (time.ltime & 0x0000FFFFL);
6487   __kmp_registration_str =
6488       __kmp_str_format("%p-%lx-%s", &__kmp_registration_flag,
6489                        __kmp_registration_flag, KMP_LIBRARY_FILE);
6490 
6491   KA_TRACE(50, ("__kmp_register_library_startup: %s=\"%s\"\n", name,
6492                 __kmp_registration_str));
6493 
6494   while (!done) {
6495 
6496     char *value = NULL; // Actual value of the environment variable.
6497 
6498     // Set environment variable, but do not overwrite if it is exist.
6499     __kmp_env_set(name, __kmp_registration_str, 0);
6500     // Check the variable is written.
6501     value = __kmp_env_get(name);
6502     if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6503 
6504       done = 1; // Ok, environment variable set successfully, exit the loop.
6505 
6506     } else {
6507 
6508       // Oops. Write failed. Another copy of OpenMP RTL is in memory.
6509       // Check whether it alive or dead.
6510       int neighbor = 0; // 0 -- unknown status, 1 -- alive, 2 -- dead.
6511       char *tail = value;
6512       char *flag_addr_str = NULL;
6513       char *flag_val_str = NULL;
6514       char const *file_name = NULL;
6515       __kmp_str_split(tail, '-', &flag_addr_str, &tail);
6516       __kmp_str_split(tail, '-', &flag_val_str, &tail);
6517       file_name = tail;
6518       if (tail != NULL) {
6519         long *flag_addr = 0;
6520         long flag_val = 0;
6521         KMP_SSCANF(flag_addr_str, "%p", RCAST(void**, &flag_addr));
6522         KMP_SSCANF(flag_val_str, "%lx", &flag_val);
6523         if (flag_addr != 0 && flag_val != 0 && strcmp(file_name, "") != 0) {
6524           // First, check whether environment-encoded address is mapped into
6525           // addr space.
6526           // If so, dereference it to see if it still has the right value.
6527           if (__kmp_is_address_mapped(flag_addr) && *flag_addr == flag_val) {
6528             neighbor = 1;
6529           } else {
6530             // If not, then we know the other copy of the library is no longer
6531             // running.
6532             neighbor = 2;
6533           }
6534         }
6535       }
6536       switch (neighbor) {
6537       case 0: // Cannot parse environment variable -- neighbor status unknown.
6538         // Assume it is the incompatible format of future version of the
6539         // library. Assume the other library is alive.
6540         // WARN( ... ); // TODO: Issue a warning.
6541         file_name = "unknown library";
6542         KMP_FALLTHROUGH();
6543       // Attention! Falling to the next case. That's intentional.
6544       case 1: { // Neighbor is alive.
6545         // Check it is allowed.
6546         char *duplicate_ok = __kmp_env_get("KMP_DUPLICATE_LIB_OK");
6547         if (!__kmp_str_match_true(duplicate_ok)) {
6548           // That's not allowed. Issue fatal error.
6549           __kmp_fatal(KMP_MSG(DuplicateLibrary, KMP_LIBRARY_FILE, file_name),
6550                       KMP_HNT(DuplicateLibrary), __kmp_msg_null);
6551         }
6552         KMP_INTERNAL_FREE(duplicate_ok);
6553         __kmp_duplicate_library_ok = 1;
6554         done = 1; // Exit the loop.
6555       } break;
6556       case 2: { // Neighbor is dead.
6557         // Clear the variable and try to register library again.
6558         __kmp_env_unset(name);
6559       } break;
6560       default: { KMP_DEBUG_ASSERT(0); } break;
6561       }
6562     }
6563     KMP_INTERNAL_FREE((void *)value);
6564   }
6565   KMP_INTERNAL_FREE((void *)name);
6566 
6567 } // func __kmp_register_library_startup
6568 
6569 void __kmp_unregister_library(void) {
6570 
6571   char *name = __kmp_reg_status_name();
6572   char *value = __kmp_env_get(name);
6573 
6574   KMP_DEBUG_ASSERT(__kmp_registration_flag != 0);
6575   KMP_DEBUG_ASSERT(__kmp_registration_str != NULL);
6576   if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6577     // Ok, this is our variable. Delete it.
6578     __kmp_env_unset(name);
6579   }
6580 
6581   KMP_INTERNAL_FREE(__kmp_registration_str);
6582   KMP_INTERNAL_FREE(value);
6583   KMP_INTERNAL_FREE(name);
6584 
6585   __kmp_registration_flag = 0;
6586   __kmp_registration_str = NULL;
6587 
6588 } // __kmp_unregister_library
6589 
6590 // End of Library registration stuff.
6591 // -----------------------------------------------------------------------------
6592 
6593 #if KMP_MIC_SUPPORTED
6594 
6595 static void __kmp_check_mic_type() {
6596   kmp_cpuid_t cpuid_state = {0};
6597   kmp_cpuid_t *cs_p = &cpuid_state;
6598   __kmp_x86_cpuid(1, 0, cs_p);
6599   // We don't support mic1 at the moment
6600   if ((cs_p->eax & 0xff0) == 0xB10) {
6601     __kmp_mic_type = mic2;
6602   } else if ((cs_p->eax & 0xf0ff0) == 0x50670) {
6603     __kmp_mic_type = mic3;
6604   } else {
6605     __kmp_mic_type = non_mic;
6606   }
6607 }
6608 
6609 #endif /* KMP_MIC_SUPPORTED */
6610 
6611 static void __kmp_do_serial_initialize(void) {
6612   int i, gtid;
6613   int size;
6614 
6615   KA_TRACE(10, ("__kmp_do_serial_initialize: enter\n"));
6616 
6617   KMP_DEBUG_ASSERT(sizeof(kmp_int32) == 4);
6618   KMP_DEBUG_ASSERT(sizeof(kmp_uint32) == 4);
6619   KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);
6620   KMP_DEBUG_ASSERT(sizeof(kmp_uint64) == 8);
6621   KMP_DEBUG_ASSERT(sizeof(kmp_intptr_t) == sizeof(void *));
6622 
6623 #if OMPT_SUPPORT
6624   ompt_pre_init();
6625 #endif
6626 
6627   __kmp_validate_locks();
6628 
6629   /* Initialize internal memory allocator */
6630   __kmp_init_allocator();
6631 
6632   /* Register the library startup via an environment variable and check to see
6633      whether another copy of the library is already registered. */
6634 
6635   __kmp_register_library_startup();
6636 
6637   /* TODO reinitialization of library */
6638   if (TCR_4(__kmp_global.g.g_done)) {
6639     KA_TRACE(10, ("__kmp_do_serial_initialize: reinitialization of library\n"));
6640   }
6641 
6642   __kmp_global.g.g_abort = 0;
6643   TCW_SYNC_4(__kmp_global.g.g_done, FALSE);
6644 
6645 /* initialize the locks */
6646 #if KMP_USE_ADAPTIVE_LOCKS
6647 #if KMP_DEBUG_ADAPTIVE_LOCKS
6648   __kmp_init_speculative_stats();
6649 #endif
6650 #endif
6651 #if KMP_STATS_ENABLED
6652   __kmp_stats_init();
6653 #endif
6654   __kmp_init_lock(&__kmp_global_lock);
6655   __kmp_init_queuing_lock(&__kmp_dispatch_lock);
6656   __kmp_init_lock(&__kmp_debug_lock);
6657   __kmp_init_atomic_lock(&__kmp_atomic_lock);
6658   __kmp_init_atomic_lock(&__kmp_atomic_lock_1i);
6659   __kmp_init_atomic_lock(&__kmp_atomic_lock_2i);
6660   __kmp_init_atomic_lock(&__kmp_atomic_lock_4i);
6661   __kmp_init_atomic_lock(&__kmp_atomic_lock_4r);
6662   __kmp_init_atomic_lock(&__kmp_atomic_lock_8i);
6663   __kmp_init_atomic_lock(&__kmp_atomic_lock_8r);
6664   __kmp_init_atomic_lock(&__kmp_atomic_lock_8c);
6665   __kmp_init_atomic_lock(&__kmp_atomic_lock_10r);
6666   __kmp_init_atomic_lock(&__kmp_atomic_lock_16r);
6667   __kmp_init_atomic_lock(&__kmp_atomic_lock_16c);
6668   __kmp_init_atomic_lock(&__kmp_atomic_lock_20c);
6669   __kmp_init_atomic_lock(&__kmp_atomic_lock_32c);
6670   __kmp_init_bootstrap_lock(&__kmp_forkjoin_lock);
6671   __kmp_init_bootstrap_lock(&__kmp_exit_lock);
6672 #if KMP_USE_MONITOR
6673   __kmp_init_bootstrap_lock(&__kmp_monitor_lock);
6674 #endif
6675   __kmp_init_bootstrap_lock(&__kmp_tp_cached_lock);
6676 
6677   /* conduct initialization and initial setup of configuration */
6678 
6679   __kmp_runtime_initialize();
6680 
6681 #if KMP_MIC_SUPPORTED
6682   __kmp_check_mic_type();
6683 #endif
6684 
6685 // Some global variable initialization moved here from kmp_env_initialize()
6686 #ifdef KMP_DEBUG
6687   kmp_diag = 0;
6688 #endif
6689   __kmp_abort_delay = 0;
6690 
6691   // From __kmp_init_dflt_team_nth()
6692   /* assume the entire machine will be used */
6693   __kmp_dflt_team_nth_ub = __kmp_xproc;
6694   if (__kmp_dflt_team_nth_ub < KMP_MIN_NTH) {
6695     __kmp_dflt_team_nth_ub = KMP_MIN_NTH;
6696   }
6697   if (__kmp_dflt_team_nth_ub > __kmp_sys_max_nth) {
6698     __kmp_dflt_team_nth_ub = __kmp_sys_max_nth;
6699   }
6700   __kmp_max_nth = __kmp_sys_max_nth;
6701   __kmp_cg_max_nth = __kmp_sys_max_nth;
6702   __kmp_teams_max_nth = __kmp_xproc; // set a "reasonable" default
6703   if (__kmp_teams_max_nth > __kmp_sys_max_nth) {
6704     __kmp_teams_max_nth = __kmp_sys_max_nth;
6705   }
6706 
6707   // Three vars below moved here from __kmp_env_initialize() "KMP_BLOCKTIME"
6708   // part
6709   __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
6710 #if KMP_USE_MONITOR
6711   __kmp_monitor_wakeups =
6712       KMP_WAKEUPS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6713   __kmp_bt_intervals =
6714       KMP_INTERVALS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6715 #endif
6716   // From "KMP_LIBRARY" part of __kmp_env_initialize()
6717   __kmp_library = library_throughput;
6718   // From KMP_SCHEDULE initialization
6719   __kmp_static = kmp_sch_static_balanced;
6720 // AC: do not use analytical here, because it is non-monotonous
6721 //__kmp_guided = kmp_sch_guided_iterative_chunked;
6722 //__kmp_auto = kmp_sch_guided_analytical_chunked; // AC: it is the default, no
6723 // need to repeat assignment
6724 // Barrier initialization. Moved here from __kmp_env_initialize() Barrier branch
6725 // bit control and barrier method control parts
6726 #if KMP_FAST_REDUCTION_BARRIER
6727 #define kmp_reduction_barrier_gather_bb ((int)1)
6728 #define kmp_reduction_barrier_release_bb ((int)1)
6729 #define kmp_reduction_barrier_gather_pat bp_hyper_bar
6730 #define kmp_reduction_barrier_release_pat bp_hyper_bar
6731 #endif // KMP_FAST_REDUCTION_BARRIER
6732   for (i = bs_plain_barrier; i < bs_last_barrier; i++) {
6733     __kmp_barrier_gather_branch_bits[i] = __kmp_barrier_gather_bb_dflt;
6734     __kmp_barrier_release_branch_bits[i] = __kmp_barrier_release_bb_dflt;
6735     __kmp_barrier_gather_pattern[i] = __kmp_barrier_gather_pat_dflt;
6736     __kmp_barrier_release_pattern[i] = __kmp_barrier_release_pat_dflt;
6737 #if KMP_FAST_REDUCTION_BARRIER
6738     if (i == bs_reduction_barrier) { // tested and confirmed on ALTIX only (
6739       // lin_64 ): hyper,1
6740       __kmp_barrier_gather_branch_bits[i] = kmp_reduction_barrier_gather_bb;
6741       __kmp_barrier_release_branch_bits[i] = kmp_reduction_barrier_release_bb;
6742       __kmp_barrier_gather_pattern[i] = kmp_reduction_barrier_gather_pat;
6743       __kmp_barrier_release_pattern[i] = kmp_reduction_barrier_release_pat;
6744     }
6745 #endif // KMP_FAST_REDUCTION_BARRIER
6746   }
6747 #if KMP_FAST_REDUCTION_BARRIER
6748 #undef kmp_reduction_barrier_release_pat
6749 #undef kmp_reduction_barrier_gather_pat
6750 #undef kmp_reduction_barrier_release_bb
6751 #undef kmp_reduction_barrier_gather_bb
6752 #endif // KMP_FAST_REDUCTION_BARRIER
6753 #if KMP_MIC_SUPPORTED
6754   if (__kmp_mic_type == mic2) { // KNC
6755     // AC: plane=3,2, forkjoin=2,1 are optimal for 240 threads on KNC
6756     __kmp_barrier_gather_branch_bits[bs_plain_barrier] = 3; // plain gather
6757     __kmp_barrier_release_branch_bits[bs_forkjoin_barrier] =
6758         1; // forkjoin release
6759     __kmp_barrier_gather_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6760     __kmp_barrier_release_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6761   }
6762 #if KMP_FAST_REDUCTION_BARRIER
6763   if (__kmp_mic_type == mic2) { // KNC
6764     __kmp_barrier_gather_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6765     __kmp_barrier_release_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6766   }
6767 #endif // KMP_FAST_REDUCTION_BARRIER
6768 #endif // KMP_MIC_SUPPORTED
6769 
6770 // From KMP_CHECKS initialization
6771 #ifdef KMP_DEBUG
6772   __kmp_env_checks = TRUE; /* development versions have the extra checks */
6773 #else
6774   __kmp_env_checks = FALSE; /* port versions do not have the extra checks */
6775 #endif
6776 
6777   // From "KMP_FOREIGN_THREADS_THREADPRIVATE" initialization
6778   __kmp_foreign_tp = TRUE;
6779 
6780   __kmp_global.g.g_dynamic = FALSE;
6781   __kmp_global.g.g_dynamic_mode = dynamic_default;
6782 
6783   __kmp_env_initialize(NULL);
6784 
6785 // Print all messages in message catalog for testing purposes.
6786 #ifdef KMP_DEBUG
6787   char const *val = __kmp_env_get("KMP_DUMP_CATALOG");
6788   if (__kmp_str_match_true(val)) {
6789     kmp_str_buf_t buffer;
6790     __kmp_str_buf_init(&buffer);
6791     __kmp_i18n_dump_catalog(&buffer);
6792     __kmp_printf("%s", buffer.str);
6793     __kmp_str_buf_free(&buffer);
6794   }
6795   __kmp_env_free(&val);
6796 #endif
6797 
6798   __kmp_threads_capacity =
6799       __kmp_initial_threads_capacity(__kmp_dflt_team_nth_ub);
6800   // Moved here from __kmp_env_initialize() "KMP_ALL_THREADPRIVATE" part
6801   __kmp_tp_capacity = __kmp_default_tp_capacity(
6802       __kmp_dflt_team_nth_ub, __kmp_max_nth, __kmp_allThreadsSpecified);
6803 
6804   // If the library is shut down properly, both pools must be NULL. Just in
6805   // case, set them to NULL -- some memory may leak, but subsequent code will
6806   // work even if pools are not freed.
6807   KMP_DEBUG_ASSERT(__kmp_thread_pool == NULL);
6808   KMP_DEBUG_ASSERT(__kmp_thread_pool_insert_pt == NULL);
6809   KMP_DEBUG_ASSERT(__kmp_team_pool == NULL);
6810   __kmp_thread_pool = NULL;
6811   __kmp_thread_pool_insert_pt = NULL;
6812   __kmp_team_pool = NULL;
6813 
6814   /* Allocate all of the variable sized records */
6815   /* NOTE: __kmp_threads_capacity entries are allocated, but the arrays are
6816    * expandable */
6817   /* Since allocation is cache-aligned, just add extra padding at the end */
6818   size =
6819       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * __kmp_threads_capacity +
6820       CACHE_LINE;
6821   __kmp_threads = (kmp_info_t **)__kmp_allocate(size);
6822   __kmp_root = (kmp_root_t **)((char *)__kmp_threads +
6823                                sizeof(kmp_info_t *) * __kmp_threads_capacity);
6824 
6825   /* init thread counts */
6826   KMP_DEBUG_ASSERT(__kmp_all_nth ==
6827                    0); // Asserts fail if the library is reinitializing and
6828   KMP_DEBUG_ASSERT(__kmp_nth == 0); // something was wrong in termination.
6829   __kmp_all_nth = 0;
6830   __kmp_nth = 0;
6831 
6832   /* setup the uber master thread and hierarchy */
6833   gtid = __kmp_register_root(TRUE);
6834   KA_TRACE(10, ("__kmp_do_serial_initialize  T#%d\n", gtid));
6835   KMP_ASSERT(KMP_UBER_GTID(gtid));
6836   KMP_ASSERT(KMP_INITIAL_GTID(gtid));
6837 
6838   KMP_MB(); /* Flush all pending memory write invalidates.  */
6839 
6840   __kmp_common_initialize();
6841 
6842 #if KMP_OS_UNIX
6843   /* invoke the child fork handler */
6844   __kmp_register_atfork();
6845 #endif
6846 
6847 #if !KMP_DYNAMIC_LIB
6848   {
6849     /* Invoke the exit handler when the program finishes, only for static
6850        library. For dynamic library, we already have _fini and DllMain. */
6851     int rc = atexit(__kmp_internal_end_atexit);
6852     if (rc != 0) {
6853       __kmp_fatal(KMP_MSG(FunctionError, "atexit()"), KMP_ERR(rc),
6854                   __kmp_msg_null);
6855     }
6856   }
6857 #endif
6858 
6859 #if KMP_HANDLE_SIGNALS
6860 #if KMP_OS_UNIX
6861   /* NOTE: make sure that this is called before the user installs their own
6862      signal handlers so that the user handlers are called first. this way they
6863      can return false, not call our handler, avoid terminating the library, and
6864      continue execution where they left off. */
6865   __kmp_install_signals(FALSE);
6866 #endif /* KMP_OS_UNIX */
6867 #if KMP_OS_WINDOWS
6868   __kmp_install_signals(TRUE);
6869 #endif /* KMP_OS_WINDOWS */
6870 #endif
6871 
6872   /* we have finished the serial initialization */
6873   __kmp_init_counter++;
6874 
6875   __kmp_init_serial = TRUE;
6876 
6877   if (__kmp_settings) {
6878     __kmp_env_print();
6879   }
6880 
6881 #if OMP_40_ENABLED
6882   if (__kmp_display_env || __kmp_display_env_verbose) {
6883     __kmp_env_print_2();
6884   }
6885 #endif // OMP_40_ENABLED
6886 
6887 #if OMPT_SUPPORT
6888   ompt_post_init();
6889 #endif
6890 
6891   KMP_MB();
6892 
6893   KA_TRACE(10, ("__kmp_do_serial_initialize: exit\n"));
6894 }
6895 
6896 void __kmp_serial_initialize(void) {
6897   if (__kmp_init_serial) {
6898     return;
6899   }
6900   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6901   if (__kmp_init_serial) {
6902     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6903     return;
6904   }
6905   __kmp_do_serial_initialize();
6906   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6907 }
6908 
6909 static void __kmp_do_middle_initialize(void) {
6910   int i, j;
6911   int prev_dflt_team_nth;
6912 
6913   if (!__kmp_init_serial) {
6914     __kmp_do_serial_initialize();
6915   }
6916 
6917   KA_TRACE(10, ("__kmp_middle_initialize: enter\n"));
6918 
6919   // Save the previous value for the __kmp_dflt_team_nth so that
6920   // we can avoid some reinitialization if it hasn't changed.
6921   prev_dflt_team_nth = __kmp_dflt_team_nth;
6922 
6923 #if KMP_AFFINITY_SUPPORTED
6924   // __kmp_affinity_initialize() will try to set __kmp_ncores to the
6925   // number of cores on the machine.
6926   __kmp_affinity_initialize();
6927 
6928   // Run through the __kmp_threads array and set the affinity mask
6929   // for each root thread that is currently registered with the RTL.
6930   for (i = 0; i < __kmp_threads_capacity; i++) {
6931     if (TCR_PTR(__kmp_threads[i]) != NULL) {
6932       __kmp_affinity_set_init_mask(i, TRUE);
6933     }
6934   }
6935 #endif /* KMP_AFFINITY_SUPPORTED */
6936 
6937   KMP_ASSERT(__kmp_xproc > 0);
6938   if (__kmp_avail_proc == 0) {
6939     __kmp_avail_proc = __kmp_xproc;
6940   }
6941 
6942   // If there were empty places in num_threads list (OMP_NUM_THREADS=,,2,3),
6943   // correct them now
6944   j = 0;
6945   while ((j < __kmp_nested_nth.used) && !__kmp_nested_nth.nth[j]) {
6946     __kmp_nested_nth.nth[j] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub =
6947         __kmp_avail_proc;
6948     j++;
6949   }
6950 
6951   if (__kmp_dflt_team_nth == 0) {
6952 #ifdef KMP_DFLT_NTH_CORES
6953     // Default #threads = #cores
6954     __kmp_dflt_team_nth = __kmp_ncores;
6955     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6956                   "__kmp_ncores (%d)\n",
6957                   __kmp_dflt_team_nth));
6958 #else
6959     // Default #threads = #available OS procs
6960     __kmp_dflt_team_nth = __kmp_avail_proc;
6961     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6962                   "__kmp_avail_proc(%d)\n",
6963                   __kmp_dflt_team_nth));
6964 #endif /* KMP_DFLT_NTH_CORES */
6965   }
6966 
6967   if (__kmp_dflt_team_nth < KMP_MIN_NTH) {
6968     __kmp_dflt_team_nth = KMP_MIN_NTH;
6969   }
6970   if (__kmp_dflt_team_nth > __kmp_sys_max_nth) {
6971     __kmp_dflt_team_nth = __kmp_sys_max_nth;
6972   }
6973 
6974   // There's no harm in continuing if the following check fails,
6975   // but it indicates an error in the previous logic.
6976   KMP_DEBUG_ASSERT(__kmp_dflt_team_nth <= __kmp_dflt_team_nth_ub);
6977 
6978   if (__kmp_dflt_team_nth != prev_dflt_team_nth) {
6979     // Run through the __kmp_threads array and set the num threads icv for each
6980     // root thread that is currently registered with the RTL (which has not
6981     // already explicitly set its nthreads-var with a call to
6982     // omp_set_num_threads()).
6983     for (i = 0; i < __kmp_threads_capacity; i++) {
6984       kmp_info_t *thread = __kmp_threads[i];
6985       if (thread == NULL)
6986         continue;
6987       if (thread->th.th_current_task->td_icvs.nproc != 0)
6988         continue;
6989 
6990       set__nproc(__kmp_threads[i], __kmp_dflt_team_nth);
6991     }
6992   }
6993   KA_TRACE(
6994       20,
6995       ("__kmp_middle_initialize: final value for __kmp_dflt_team_nth = %d\n",
6996        __kmp_dflt_team_nth));
6997 
6998 #ifdef KMP_ADJUST_BLOCKTIME
6999   /* Adjust blocktime to zero if necessary  now that __kmp_avail_proc is set */
7000   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
7001     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
7002     if (__kmp_nth > __kmp_avail_proc) {
7003       __kmp_zero_bt = TRUE;
7004     }
7005   }
7006 #endif /* KMP_ADJUST_BLOCKTIME */
7007 
7008   /* we have finished middle initialization */
7009   TCW_SYNC_4(__kmp_init_middle, TRUE);
7010 
7011   KA_TRACE(10, ("__kmp_do_middle_initialize: exit\n"));
7012 }
7013 
7014 void __kmp_middle_initialize(void) {
7015   if (__kmp_init_middle) {
7016     return;
7017   }
7018   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
7019   if (__kmp_init_middle) {
7020     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7021     return;
7022   }
7023   __kmp_do_middle_initialize();
7024   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7025 }
7026 
7027 void __kmp_parallel_initialize(void) {
7028   int gtid = __kmp_entry_gtid(); // this might be a new root
7029 
7030   /* synchronize parallel initialization (for sibling) */
7031   if (TCR_4(__kmp_init_parallel))
7032     return;
7033   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
7034   if (TCR_4(__kmp_init_parallel)) {
7035     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7036     return;
7037   }
7038 
7039   /* TODO reinitialization after we have already shut down */
7040   if (TCR_4(__kmp_global.g.g_done)) {
7041     KA_TRACE(
7042         10,
7043         ("__kmp_parallel_initialize: attempt to init while shutting down\n"));
7044     __kmp_infinite_loop();
7045   }
7046 
7047   /* jc: The lock __kmp_initz_lock is already held, so calling
7048      __kmp_serial_initialize would cause a deadlock.  So we call
7049      __kmp_do_serial_initialize directly. */
7050   if (!__kmp_init_middle) {
7051     __kmp_do_middle_initialize();
7052   }
7053 
7054 #if OMP_50_ENABLED
7055   __kmp_resume_if_hard_paused();
7056 #endif
7057 
7058   /* begin initialization */
7059   KA_TRACE(10, ("__kmp_parallel_initialize: enter\n"));
7060   KMP_ASSERT(KMP_UBER_GTID(gtid));
7061 
7062 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
7063   // Save the FP control regs.
7064   // Worker threads will set theirs to these values at thread startup.
7065   __kmp_store_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
7066   __kmp_store_mxcsr(&__kmp_init_mxcsr);
7067   __kmp_init_mxcsr &= KMP_X86_MXCSR_MASK;
7068 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
7069 
7070 #if KMP_OS_UNIX
7071 #if KMP_HANDLE_SIGNALS
7072   /*  must be after __kmp_serial_initialize  */
7073   __kmp_install_signals(TRUE);
7074 #endif
7075 #endif
7076 
7077   __kmp_suspend_initialize();
7078 
7079 #if defined(USE_LOAD_BALANCE)
7080   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
7081     __kmp_global.g.g_dynamic_mode = dynamic_load_balance;
7082   }
7083 #else
7084   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
7085     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
7086   }
7087 #endif
7088 
7089   if (__kmp_version) {
7090     __kmp_print_version_2();
7091   }
7092 
7093   /* we have finished parallel initialization */
7094   TCW_SYNC_4(__kmp_init_parallel, TRUE);
7095 
7096   KMP_MB();
7097   KA_TRACE(10, ("__kmp_parallel_initialize: exit\n"));
7098 
7099   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7100 }
7101 
7102 /* ------------------------------------------------------------------------ */
7103 
7104 void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
7105                                    kmp_team_t *team) {
7106   kmp_disp_t *dispatch;
7107 
7108   KMP_MB();
7109 
7110   /* none of the threads have encountered any constructs, yet. */
7111   this_thr->th.th_local.this_construct = 0;
7112 #if KMP_CACHE_MANAGE
7113   KMP_CACHE_PREFETCH(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_arrived);
7114 #endif /* KMP_CACHE_MANAGE */
7115   dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch);
7116   KMP_DEBUG_ASSERT(dispatch);
7117   KMP_DEBUG_ASSERT(team->t.t_dispatch);
7118   // KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[
7119   // this_thr->th.th_info.ds.ds_tid ] );
7120 
7121   dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */
7122 #if OMP_45_ENABLED
7123   dispatch->th_doacross_buf_idx =
7124       0; /* reset the doacross dispatch buffer counter */
7125 #endif
7126   if (__kmp_env_consistency_check)
7127     __kmp_push_parallel(gtid, team->t.t_ident);
7128 
7129   KMP_MB(); /* Flush all pending memory write invalidates.  */
7130 }
7131 
7132 void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
7133                                   kmp_team_t *team) {
7134   if (__kmp_env_consistency_check)
7135     __kmp_pop_parallel(gtid, team->t.t_ident);
7136 
7137   __kmp_finish_implicit_task(this_thr);
7138 }
7139 
7140 int __kmp_invoke_task_func(int gtid) {
7141   int rc;
7142   int tid = __kmp_tid_from_gtid(gtid);
7143   kmp_info_t *this_thr = __kmp_threads[gtid];
7144   kmp_team_t *team = this_thr->th.th_team;
7145 
7146   __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
7147 #if USE_ITT_BUILD
7148   if (__itt_stack_caller_create_ptr) {
7149     __kmp_itt_stack_callee_enter(
7150         (__itt_caller)
7151             team->t.t_stack_id); // inform ittnotify about entering user's code
7152   }
7153 #endif /* USE_ITT_BUILD */
7154 #if INCLUDE_SSC_MARKS
7155   SSC_MARK_INVOKING();
7156 #endif
7157 
7158 #if OMPT_SUPPORT
7159   void *dummy;
7160   void **exit_runtime_p;
7161   ompt_data_t *my_task_data;
7162   ompt_data_t *my_parallel_data;
7163   int ompt_team_size;
7164 
7165   if (ompt_enabled.enabled) {
7166     exit_runtime_p = &(
7167         team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame.exit_frame.ptr);
7168   } else {
7169     exit_runtime_p = &dummy;
7170   }
7171 
7172   my_task_data =
7173       &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
7174   my_parallel_data = &(team->t.ompt_team_info.parallel_data);
7175   if (ompt_enabled.ompt_callback_implicit_task) {
7176     ompt_team_size = team->t.t_nproc;
7177     ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7178         ompt_scope_begin, my_parallel_data, my_task_data, ompt_team_size,
7179         __kmp_tid_from_gtid(gtid), ompt_task_implicit); // TODO: Can this be ompt_task_initial?
7180     OMPT_CUR_TASK_INFO(this_thr)->thread_num = __kmp_tid_from_gtid(gtid);
7181   }
7182 #endif
7183 
7184 #if KMP_STATS_ENABLED
7185   stats_state_e previous_state = KMP_GET_THREAD_STATE();
7186   if (previous_state == stats_state_e::TEAMS_REGION) {
7187     KMP_PUSH_PARTITIONED_TIMER(OMP_teams);
7188   } else {
7189     KMP_PUSH_PARTITIONED_TIMER(OMP_parallel);
7190   }
7191   KMP_SET_THREAD_STATE(IMPLICIT_TASK);
7192 #endif
7193 
7194   rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
7195                               tid, (int)team->t.t_argc, (void **)team->t.t_argv
7196 #if OMPT_SUPPORT
7197                               ,
7198                               exit_runtime_p
7199 #endif
7200                               );
7201 #if OMPT_SUPPORT
7202   *exit_runtime_p = NULL;
7203 #endif
7204 
7205 #if KMP_STATS_ENABLED
7206   if (previous_state == stats_state_e::TEAMS_REGION) {
7207     KMP_SET_THREAD_STATE(previous_state);
7208   }
7209   KMP_POP_PARTITIONED_TIMER();
7210 #endif
7211 
7212 #if USE_ITT_BUILD
7213   if (__itt_stack_caller_create_ptr) {
7214     __kmp_itt_stack_callee_leave(
7215         (__itt_caller)
7216             team->t.t_stack_id); // inform ittnotify about leaving user's code
7217   }
7218 #endif /* USE_ITT_BUILD */
7219   __kmp_run_after_invoked_task(gtid, tid, this_thr, team);
7220 
7221   return rc;
7222 }
7223 
7224 #if OMP_40_ENABLED
7225 void __kmp_teams_master(int gtid) {
7226   // This routine is called by all master threads in teams construct
7227   kmp_info_t *thr = __kmp_threads[gtid];
7228   kmp_team_t *team = thr->th.th_team;
7229   ident_t *loc = team->t.t_ident;
7230   thr->th.th_set_nproc = thr->th.th_teams_size.nth;
7231   KMP_DEBUG_ASSERT(thr->th.th_teams_microtask);
7232   KMP_DEBUG_ASSERT(thr->th.th_set_nproc);
7233   KA_TRACE(20, ("__kmp_teams_master: T#%d, Tid %d, microtask %p\n", gtid,
7234                 __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask));
7235 
7236   // This thread is a new CG root.  Set up the proper variables.
7237   kmp_cg_root_t *tmp = (kmp_cg_root_t *)__kmp_allocate(sizeof(kmp_cg_root_t));
7238   tmp->cg_root = thr; // Make thr the CG root
7239   // Init to thread limit that was stored when league masters were forked
7240   tmp->cg_thread_limit = thr->th.th_current_task->td_icvs.thread_limit;
7241   tmp->cg_nthreads = 1; // Init counter to one active thread, this one
7242   KA_TRACE(100, ("__kmp_teams_master: Thread %p created node %p and init"
7243                  " cg_nthreads to 1\n",
7244                  thr, tmp));
7245   tmp->up = thr->th.th_cg_roots;
7246   thr->th.th_cg_roots = tmp;
7247 
7248 // Launch league of teams now, but not let workers execute
7249 // (they hang on fork barrier until next parallel)
7250 #if INCLUDE_SSC_MARKS
7251   SSC_MARK_FORKING();
7252 #endif
7253   __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
7254                   (microtask_t)thr->th.th_teams_microtask, // "wrapped" task
7255                   VOLATILE_CAST(launch_t) __kmp_invoke_task_func, NULL);
7256 #if INCLUDE_SSC_MARKS
7257   SSC_MARK_JOINING();
7258 #endif
7259   // If the team size was reduced from the limit, set it to the new size
7260   if (thr->th.th_team_nproc < thr->th.th_teams_size.nth)
7261     thr->th.th_teams_size.nth = thr->th.th_team_nproc;
7262   // AC: last parameter "1" eliminates join barrier which won't work because
7263   // worker threads are in a fork barrier waiting for more parallel regions
7264   __kmp_join_call(loc, gtid
7265 #if OMPT_SUPPORT
7266                   ,
7267                   fork_context_intel
7268 #endif
7269                   ,
7270                   1);
7271 }
7272 
7273 int __kmp_invoke_teams_master(int gtid) {
7274   kmp_info_t *this_thr = __kmp_threads[gtid];
7275   kmp_team_t *team = this_thr->th.th_team;
7276 #if KMP_DEBUG
7277   if (!__kmp_threads[gtid]->th.th_team->t.t_serialized)
7278     KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn ==
7279                      (void *)__kmp_teams_master);
7280 #endif
7281   __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
7282   __kmp_teams_master(gtid);
7283   __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
7284   return 1;
7285 }
7286 #endif /* OMP_40_ENABLED */
7287 
7288 /* this sets the requested number of threads for the next parallel region
7289    encountered by this team. since this should be enclosed in the forkjoin
7290    critical section it should avoid race conditions with assymmetrical nested
7291    parallelism */
7292 
7293 void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) {
7294   kmp_info_t *thr = __kmp_threads[gtid];
7295 
7296   if (num_threads > 0)
7297     thr->th.th_set_nproc = num_threads;
7298 }
7299 
7300 #if OMP_40_ENABLED
7301 
7302 /* this sets the requested number of teams for the teams region and/or
7303    the number of threads for the next parallel region encountered  */
7304 void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
7305                           int num_threads) {
7306   kmp_info_t *thr = __kmp_threads[gtid];
7307   KMP_DEBUG_ASSERT(num_teams >= 0);
7308   KMP_DEBUG_ASSERT(num_threads >= 0);
7309 
7310   if (num_teams == 0)
7311     num_teams = 1; // default number of teams is 1.
7312   if (num_teams > __kmp_teams_max_nth) { // if too many teams requested?
7313     if (!__kmp_reserve_warn) {
7314       __kmp_reserve_warn = 1;
7315       __kmp_msg(kmp_ms_warning,
7316                 KMP_MSG(CantFormThrTeam, num_teams, __kmp_teams_max_nth),
7317                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7318     }
7319     num_teams = __kmp_teams_max_nth;
7320   }
7321   // Set number of teams (number of threads in the outer "parallel" of the
7322   // teams)
7323   thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;
7324 
7325   // Remember the number of threads for inner parallel regions
7326   if (num_threads == 0) {
7327     if (!TCR_4(__kmp_init_middle))
7328       __kmp_middle_initialize(); // get __kmp_avail_proc calculated
7329     num_threads = __kmp_avail_proc / num_teams;
7330     if (num_teams * num_threads > __kmp_teams_max_nth) {
7331       // adjust num_threads w/o warning as it is not user setting
7332       num_threads = __kmp_teams_max_nth / num_teams;
7333     }
7334   } else {
7335     // This thread will be the master of the league masters
7336     // Store new thread limit; old limit is saved in th_cg_roots list
7337     thr->th.th_current_task->td_icvs.thread_limit = num_threads;
7338 
7339     if (num_teams * num_threads > __kmp_teams_max_nth) {
7340       int new_threads = __kmp_teams_max_nth / num_teams;
7341       if (!__kmp_reserve_warn) { // user asked for too many threads
7342         __kmp_reserve_warn = 1; // conflicts with KMP_TEAMS_THREAD_LIMIT
7343         __kmp_msg(kmp_ms_warning,
7344                   KMP_MSG(CantFormThrTeam, num_threads, new_threads),
7345                   KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7346       }
7347       num_threads = new_threads;
7348     }
7349   }
7350   thr->th.th_teams_size.nth = num_threads;
7351 }
7352 
7353 // Set the proc_bind var to use in the following parallel region.
7354 void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) {
7355   kmp_info_t *thr = __kmp_threads[gtid];
7356   thr->th.th_set_proc_bind = proc_bind;
7357 }
7358 
7359 #endif /* OMP_40_ENABLED */
7360 
7361 /* Launch the worker threads into the microtask. */
7362 
7363 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
7364   kmp_info_t *this_thr = __kmp_threads[gtid];
7365 
7366 #ifdef KMP_DEBUG
7367   int f;
7368 #endif /* KMP_DEBUG */
7369 
7370   KMP_DEBUG_ASSERT(team);
7371   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7372   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7373   KMP_MB(); /* Flush all pending memory write invalidates.  */
7374 
7375   team->t.t_construct = 0; /* no single directives seen yet */
7376   team->t.t_ordered.dt.t_value =
7377       0; /* thread 0 enters the ordered section first */
7378 
7379   /* Reset the identifiers on the dispatch buffer */
7380   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
7381   if (team->t.t_max_nproc > 1) {
7382     int i;
7383     for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
7384       team->t.t_disp_buffer[i].buffer_index = i;
7385 #if OMP_45_ENABLED
7386       team->t.t_disp_buffer[i].doacross_buf_idx = i;
7387 #endif
7388     }
7389   } else {
7390     team->t.t_disp_buffer[0].buffer_index = 0;
7391 #if OMP_45_ENABLED
7392     team->t.t_disp_buffer[0].doacross_buf_idx = 0;
7393 #endif
7394   }
7395 
7396   KMP_MB(); /* Flush all pending memory write invalidates.  */
7397   KMP_ASSERT(this_thr->th.th_team == team);
7398 
7399 #ifdef KMP_DEBUG
7400   for (f = 0; f < team->t.t_nproc; f++) {
7401     KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
7402                      team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
7403   }
7404 #endif /* KMP_DEBUG */
7405 
7406   /* release the worker threads so they may begin working */
7407   __kmp_fork_barrier(gtid, 0);
7408 }
7409 
7410 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
7411   kmp_info_t *this_thr = __kmp_threads[gtid];
7412 
7413   KMP_DEBUG_ASSERT(team);
7414   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7415   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7416   KMP_MB(); /* Flush all pending memory write invalidates.  */
7417 
7418 /* Join barrier after fork */
7419 
7420 #ifdef KMP_DEBUG
7421   if (__kmp_threads[gtid] &&
7422       __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
7423     __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n", gtid, gtid,
7424                  __kmp_threads[gtid]);
7425     __kmp_printf("__kmp_threads[%d]->th.th_team_nproc=%d, TEAM: %p, "
7426                  "team->t.t_nproc=%d\n",
7427                  gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
7428                  team->t.t_nproc);
7429     __kmp_print_structure();
7430   }
7431   KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
7432                    __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
7433 #endif /* KMP_DEBUG */
7434 
7435   __kmp_join_barrier(gtid); /* wait for everyone */
7436 #if OMPT_SUPPORT
7437   if (ompt_enabled.enabled &&
7438       this_thr->th.ompt_thread_info.state == ompt_state_wait_barrier_implicit) {
7439     int ds_tid = this_thr->th.th_info.ds.ds_tid;
7440     ompt_data_t *task_data = OMPT_CUR_TASK_DATA(this_thr);
7441     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
7442 #if OMPT_OPTIONAL
7443     void *codeptr = NULL;
7444     if (KMP_MASTER_TID(ds_tid) &&
7445         (ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait) ||
7446          ompt_callbacks.ompt_callback(ompt_callback_sync_region)))
7447       codeptr = OMPT_CUR_TEAM_INFO(this_thr)->master_return_address;
7448 
7449     if (ompt_enabled.ompt_callback_sync_region_wait) {
7450       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
7451           ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
7452           codeptr);
7453     }
7454     if (ompt_enabled.ompt_callback_sync_region) {
7455       ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
7456           ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
7457           codeptr);
7458     }
7459 #endif
7460     if (!KMP_MASTER_TID(ds_tid) && ompt_enabled.ompt_callback_implicit_task) {
7461       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7462           ompt_scope_end, NULL, task_data, 0, ds_tid, ompt_task_implicit); // TODO: Can this be ompt_task_initial?
7463     }
7464   }
7465 #endif
7466 
7467   KMP_MB(); /* Flush all pending memory write invalidates.  */
7468   KMP_ASSERT(this_thr->th.th_team == team);
7469 }
7470 
7471 /* ------------------------------------------------------------------------ */
7472 
7473 #ifdef USE_LOAD_BALANCE
7474 
7475 // Return the worker threads actively spinning in the hot team, if we
7476 // are at the outermost level of parallelism.  Otherwise, return 0.
7477 static int __kmp_active_hot_team_nproc(kmp_root_t *root) {
7478   int i;
7479   int retval;
7480   kmp_team_t *hot_team;
7481 
7482   if (root->r.r_active) {
7483     return 0;
7484   }
7485   hot_team = root->r.r_hot_team;
7486   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
7487     return hot_team->t.t_nproc - 1; // Don't count master thread
7488   }
7489 
7490   // Skip the master thread - it is accounted for elsewhere.
7491   retval = 0;
7492   for (i = 1; i < hot_team->t.t_nproc; i++) {
7493     if (hot_team->t.t_threads[i]->th.th_active) {
7494       retval++;
7495     }
7496   }
7497   return retval;
7498 }
7499 
7500 // Perform an automatic adjustment to the number of
7501 // threads used by the next parallel region.
7502 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) {
7503   int retval;
7504   int pool_active;
7505   int hot_team_active;
7506   int team_curr_active;
7507   int system_active;
7508 
7509   KB_TRACE(20, ("__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", root,
7510                 set_nproc));
7511   KMP_DEBUG_ASSERT(root);
7512   KMP_DEBUG_ASSERT(root->r.r_root_team->t.t_threads[0]
7513                        ->th.th_current_task->td_icvs.dynamic == TRUE);
7514   KMP_DEBUG_ASSERT(set_nproc > 1);
7515 
7516   if (set_nproc == 1) {
7517     KB_TRACE(20, ("__kmp_load_balance_nproc: serial execution.\n"));
7518     return 1;
7519   }
7520 
7521   // Threads that are active in the thread pool, active in the hot team for this
7522   // particular root (if we are at the outer par level), and the currently
7523   // executing thread (to become the master) are available to add to the new
7524   // team, but are currently contributing to the system load, and must be
7525   // accounted for.
7526   pool_active = __kmp_thread_pool_active_nth;
7527   hot_team_active = __kmp_active_hot_team_nproc(root);
7528   team_curr_active = pool_active + hot_team_active + 1;
7529 
7530   // Check the system load.
7531   system_active = __kmp_get_load_balance(__kmp_avail_proc + team_curr_active);
7532   KB_TRACE(30, ("__kmp_load_balance_nproc: system active = %d pool active = %d "
7533                 "hot team active = %d\n",
7534                 system_active, pool_active, hot_team_active));
7535 
7536   if (system_active < 0) {
7537     // There was an error reading the necessary info from /proc, so use the
7538     // thread limit algorithm instead. Once we set __kmp_global.g.g_dynamic_mode
7539     // = dynamic_thread_limit, we shouldn't wind up getting back here.
7540     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
7541     KMP_WARNING(CantLoadBalUsing, "KMP_DYNAMIC_MODE=thread limit");
7542 
7543     // Make this call behave like the thread limit algorithm.
7544     retval = __kmp_avail_proc - __kmp_nth +
7545              (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
7546     if (retval > set_nproc) {
7547       retval = set_nproc;
7548     }
7549     if (retval < KMP_MIN_NTH) {
7550       retval = KMP_MIN_NTH;
7551     }
7552 
7553     KB_TRACE(20, ("__kmp_load_balance_nproc: thread limit exit. retval:%d\n",
7554                   retval));
7555     return retval;
7556   }
7557 
7558   // There is a slight delay in the load balance algorithm in detecting new
7559   // running procs. The real system load at this instant should be at least as
7560   // large as the #active omp thread that are available to add to the team.
7561   if (system_active < team_curr_active) {
7562     system_active = team_curr_active;
7563   }
7564   retval = __kmp_avail_proc - system_active + team_curr_active;
7565   if (retval > set_nproc) {
7566     retval = set_nproc;
7567   }
7568   if (retval < KMP_MIN_NTH) {
7569     retval = KMP_MIN_NTH;
7570   }
7571 
7572   KB_TRACE(20, ("__kmp_load_balance_nproc: exit. retval:%d\n", retval));
7573   return retval;
7574 } // __kmp_load_balance_nproc()
7575 
7576 #endif /* USE_LOAD_BALANCE */
7577 
7578 /* ------------------------------------------------------------------------ */
7579 
7580 /* NOTE: this is called with the __kmp_init_lock held */
7581 void __kmp_cleanup(void) {
7582   int f;
7583 
7584   KA_TRACE(10, ("__kmp_cleanup: enter\n"));
7585 
7586   if (TCR_4(__kmp_init_parallel)) {
7587 #if KMP_HANDLE_SIGNALS
7588     __kmp_remove_signals();
7589 #endif
7590     TCW_4(__kmp_init_parallel, FALSE);
7591   }
7592 
7593   if (TCR_4(__kmp_init_middle)) {
7594 #if KMP_AFFINITY_SUPPORTED
7595     __kmp_affinity_uninitialize();
7596 #endif /* KMP_AFFINITY_SUPPORTED */
7597     __kmp_cleanup_hierarchy();
7598     TCW_4(__kmp_init_middle, FALSE);
7599   }
7600 
7601   KA_TRACE(10, ("__kmp_cleanup: go serial cleanup\n"));
7602 
7603   if (__kmp_init_serial) {
7604     __kmp_runtime_destroy();
7605     __kmp_init_serial = FALSE;
7606   }
7607 
7608   __kmp_cleanup_threadprivate_caches();
7609 
7610   for (f = 0; f < __kmp_threads_capacity; f++) {
7611     if (__kmp_root[f] != NULL) {
7612       __kmp_free(__kmp_root[f]);
7613       __kmp_root[f] = NULL;
7614     }
7615   }
7616   __kmp_free(__kmp_threads);
7617   // __kmp_threads and __kmp_root were allocated at once, as single block, so
7618   // there is no need in freeing __kmp_root.
7619   __kmp_threads = NULL;
7620   __kmp_root = NULL;
7621   __kmp_threads_capacity = 0;
7622 
7623 #if KMP_USE_DYNAMIC_LOCK
7624   __kmp_cleanup_indirect_user_locks();
7625 #else
7626   __kmp_cleanup_user_locks();
7627 #endif
7628 
7629 #if KMP_AFFINITY_SUPPORTED
7630   KMP_INTERNAL_FREE(CCAST(char *, __kmp_cpuinfo_file));
7631   __kmp_cpuinfo_file = NULL;
7632 #endif /* KMP_AFFINITY_SUPPORTED */
7633 
7634 #if KMP_USE_ADAPTIVE_LOCKS
7635 #if KMP_DEBUG_ADAPTIVE_LOCKS
7636   __kmp_print_speculative_stats();
7637 #endif
7638 #endif
7639   KMP_INTERNAL_FREE(__kmp_nested_nth.nth);
7640   __kmp_nested_nth.nth = NULL;
7641   __kmp_nested_nth.size = 0;
7642   __kmp_nested_nth.used = 0;
7643   KMP_INTERNAL_FREE(__kmp_nested_proc_bind.bind_types);
7644   __kmp_nested_proc_bind.bind_types = NULL;
7645   __kmp_nested_proc_bind.size = 0;
7646   __kmp_nested_proc_bind.used = 0;
7647 #if OMP_50_ENABLED
7648   if (__kmp_affinity_format) {
7649     KMP_INTERNAL_FREE(__kmp_affinity_format);
7650     __kmp_affinity_format = NULL;
7651   }
7652 #endif
7653 
7654   __kmp_i18n_catclose();
7655 
7656 #if KMP_USE_HIER_SCHED
7657   __kmp_hier_scheds.deallocate();
7658 #endif
7659 
7660 #if KMP_STATS_ENABLED
7661   __kmp_stats_fini();
7662 #endif
7663 
7664   KA_TRACE(10, ("__kmp_cleanup: exit\n"));
7665 }
7666 
7667 /* ------------------------------------------------------------------------ */
7668 
7669 int __kmp_ignore_mppbeg(void) {
7670   char *env;
7671 
7672   if ((env = getenv("KMP_IGNORE_MPPBEG")) != NULL) {
7673     if (__kmp_str_match_false(env))
7674       return FALSE;
7675   }
7676   // By default __kmpc_begin() is no-op.
7677   return TRUE;
7678 }
7679 
7680 int __kmp_ignore_mppend(void) {
7681   char *env;
7682 
7683   if ((env = getenv("KMP_IGNORE_MPPEND")) != NULL) {
7684     if (__kmp_str_match_false(env))
7685       return FALSE;
7686   }
7687   // By default __kmpc_end() is no-op.
7688   return TRUE;
7689 }
7690 
7691 void __kmp_internal_begin(void) {
7692   int gtid;
7693   kmp_root_t *root;
7694 
7695   /* this is a very important step as it will register new sibling threads
7696      and assign these new uber threads a new gtid */
7697   gtid = __kmp_entry_gtid();
7698   root = __kmp_threads[gtid]->th.th_root;
7699   KMP_ASSERT(KMP_UBER_GTID(gtid));
7700 
7701   if (root->r.r_begin)
7702     return;
7703   __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
7704   if (root->r.r_begin) {
7705     __kmp_release_lock(&root->r.r_begin_lock, gtid);
7706     return;
7707   }
7708 
7709   root->r.r_begin = TRUE;
7710 
7711   __kmp_release_lock(&root->r.r_begin_lock, gtid);
7712 }
7713 
7714 /* ------------------------------------------------------------------------ */
7715 
7716 void __kmp_user_set_library(enum library_type arg) {
7717   int gtid;
7718   kmp_root_t *root;
7719   kmp_info_t *thread;
7720 
7721   /* first, make sure we are initialized so we can get our gtid */
7722 
7723   gtid = __kmp_entry_gtid();
7724   thread = __kmp_threads[gtid];
7725 
7726   root = thread->th.th_root;
7727 
7728   KA_TRACE(20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg,
7729                 library_serial));
7730   if (root->r.r_in_parallel) { /* Must be called in serial section of top-level
7731                                   thread */
7732     KMP_WARNING(SetLibraryIncorrectCall);
7733     return;
7734   }
7735 
7736   switch (arg) {
7737   case library_serial:
7738     thread->th.th_set_nproc = 0;
7739     set__nproc(thread, 1);
7740     break;
7741   case library_turnaround:
7742     thread->th.th_set_nproc = 0;
7743     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7744                                            : __kmp_dflt_team_nth_ub);
7745     break;
7746   case library_throughput:
7747     thread->th.th_set_nproc = 0;
7748     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7749                                            : __kmp_dflt_team_nth_ub);
7750     break;
7751   default:
7752     KMP_FATAL(UnknownLibraryType, arg);
7753   }
7754 
7755   __kmp_aux_set_library(arg);
7756 }
7757 
7758 void __kmp_aux_set_stacksize(size_t arg) {
7759   if (!__kmp_init_serial)
7760     __kmp_serial_initialize();
7761 
7762 #if KMP_OS_DARWIN
7763   if (arg & (0x1000 - 1)) {
7764     arg &= ~(0x1000 - 1);
7765     if (arg + 0x1000) /* check for overflow if we round up */
7766       arg += 0x1000;
7767   }
7768 #endif
7769   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
7770 
7771   /* only change the default stacksize before the first parallel region */
7772   if (!TCR_4(__kmp_init_parallel)) {
7773     size_t value = arg; /* argument is in bytes */
7774 
7775     if (value < __kmp_sys_min_stksize)
7776       value = __kmp_sys_min_stksize;
7777     else if (value > KMP_MAX_STKSIZE)
7778       value = KMP_MAX_STKSIZE;
7779 
7780     __kmp_stksize = value;
7781 
7782     __kmp_env_stksize = TRUE; /* was KMP_STACKSIZE specified? */
7783   }
7784 
7785   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7786 }
7787 
7788 /* set the behaviour of the runtime library */
7789 /* TODO this can cause some odd behaviour with sibling parallelism... */
7790 void __kmp_aux_set_library(enum library_type arg) {
7791   __kmp_library = arg;
7792 
7793   switch (__kmp_library) {
7794   case library_serial: {
7795     KMP_INFORM(LibraryIsSerial);
7796   } break;
7797   case library_turnaround:
7798     if (__kmp_use_yield == 1 && !__kmp_use_yield_exp_set)
7799       __kmp_use_yield = 2; // only yield when oversubscribed
7800     break;
7801   case library_throughput:
7802     if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME)
7803       __kmp_dflt_blocktime = 200;
7804     break;
7805   default:
7806     KMP_FATAL(UnknownLibraryType, arg);
7807   }
7808 }
7809 
7810 /* Getting team information common for all team API */
7811 // Returns NULL if not in teams construct
7812 static kmp_team_t *__kmp_aux_get_team_info(int &teams_serialized) {
7813   kmp_info_t *thr = __kmp_entry_thread();
7814   teams_serialized = 0;
7815   if (thr->th.th_teams_microtask) {
7816     kmp_team_t *team = thr->th.th_team;
7817     int tlevel = thr->th.th_teams_level; // the level of the teams construct
7818     int ii = team->t.t_level;
7819     teams_serialized = team->t.t_serialized;
7820     int level = tlevel + 1;
7821     KMP_DEBUG_ASSERT(ii >= tlevel);
7822     while (ii > level) {
7823       for (teams_serialized = team->t.t_serialized;
7824            (teams_serialized > 0) && (ii > level); teams_serialized--, ii--) {
7825       }
7826       if (team->t.t_serialized && (!teams_serialized)) {
7827         team = team->t.t_parent;
7828         continue;
7829       }
7830       if (ii > level) {
7831         team = team->t.t_parent;
7832         ii--;
7833       }
7834     }
7835     return team;
7836   }
7837   return NULL;
7838 }
7839 
7840 int __kmp_aux_get_team_num() {
7841   int serialized;
7842   kmp_team_t *team = __kmp_aux_get_team_info(serialized);
7843   if (team) {
7844     if (serialized > 1) {
7845       return 0; // teams region is serialized ( 1 team of 1 thread ).
7846     } else {
7847       return team->t.t_master_tid;
7848     }
7849   }
7850   return 0;
7851 }
7852 
7853 int __kmp_aux_get_num_teams() {
7854   int serialized;
7855   kmp_team_t *team = __kmp_aux_get_team_info(serialized);
7856   if (team) {
7857     if (serialized > 1) {
7858       return 1;
7859     } else {
7860       return team->t.t_parent->t.t_nproc;
7861     }
7862   }
7863   return 1;
7864 }
7865 
7866 /* ------------------------------------------------------------------------ */
7867 
7868 #if OMP_50_ENABLED
7869 /*
7870  * Affinity Format Parser
7871  *
7872  * Field is in form of: %[[[0].]size]type
7873  * % and type are required (%% means print a literal '%')
7874  * type is either single char or long name surrounded by {},
7875  * e.g., N or {num_threads}
7876  * 0 => leading zeros
7877  * . => right justified when size is specified
7878  * by default output is left justified
7879  * size is the *minimum* field length
7880  * All other characters are printed as is
7881  *
7882  * Available field types:
7883  * L {thread_level}      - omp_get_level()
7884  * n {thread_num}        - omp_get_thread_num()
7885  * h {host}              - name of host machine
7886  * P {process_id}        - process id (integer)
7887  * T {thread_identifier} - native thread identifier (integer)
7888  * N {num_threads}       - omp_get_num_threads()
7889  * A {ancestor_tnum}     - omp_get_ancestor_thread_num(omp_get_level()-1)
7890  * a {thread_affinity}   - comma separated list of integers or integer ranges
7891  *                         (values of affinity mask)
7892  *
7893  * Implementation-specific field types can be added
7894  * If a type is unknown, print "undefined"
7895 */
7896 
7897 // Structure holding the short name, long name, and corresponding data type
7898 // for snprintf.  A table of these will represent the entire valid keyword
7899 // field types.
7900 typedef struct kmp_affinity_format_field_t {
7901   char short_name; // from spec e.g., L -> thread level
7902   const char *long_name; // from spec thread_level -> thread level
7903   char field_format; // data type for snprintf (typically 'd' or 's'
7904   // for integer or string)
7905 } kmp_affinity_format_field_t;
7906 
7907 static const kmp_affinity_format_field_t __kmp_affinity_format_table[] = {
7908 #if KMP_AFFINITY_SUPPORTED
7909     {'A', "thread_affinity", 's'},
7910 #endif
7911     {'t', "team_num", 'd'},
7912     {'T', "num_teams", 'd'},
7913     {'L', "nesting_level", 'd'},
7914     {'n', "thread_num", 'd'},
7915     {'N', "num_threads", 'd'},
7916     {'a', "ancestor_tnum", 'd'},
7917     {'H', "host", 's'},
7918     {'P', "process_id", 'd'},
7919     {'i', "native_thread_id", 'd'}};
7920 
7921 // Return the number of characters it takes to hold field
7922 static int __kmp_aux_capture_affinity_field(int gtid, const kmp_info_t *th,
7923                                             const char **ptr,
7924                                             kmp_str_buf_t *field_buffer) {
7925   int rc, format_index, field_value;
7926   const char *width_left, *width_right;
7927   bool pad_zeros, right_justify, parse_long_name, found_valid_name;
7928   static const int FORMAT_SIZE = 20;
7929   char format[FORMAT_SIZE] = {0};
7930   char absolute_short_name = 0;
7931 
7932   KMP_DEBUG_ASSERT(gtid >= 0);
7933   KMP_DEBUG_ASSERT(th);
7934   KMP_DEBUG_ASSERT(**ptr == '%');
7935   KMP_DEBUG_ASSERT(field_buffer);
7936 
7937   __kmp_str_buf_clear(field_buffer);
7938 
7939   // Skip the initial %
7940   (*ptr)++;
7941 
7942   // Check for %% first
7943   if (**ptr == '%') {
7944     __kmp_str_buf_cat(field_buffer, "%", 1);
7945     (*ptr)++; // skip over the second %
7946     return 1;
7947   }
7948 
7949   // Parse field modifiers if they are present
7950   pad_zeros = false;
7951   if (**ptr == '0') {
7952     pad_zeros = true;
7953     (*ptr)++; // skip over 0
7954   }
7955   right_justify = false;
7956   if (**ptr == '.') {
7957     right_justify = true;
7958     (*ptr)++; // skip over .
7959   }
7960   // Parse width of field: [width_left, width_right)
7961   width_left = width_right = NULL;
7962   if (**ptr >= '0' && **ptr <= '9') {
7963     width_left = *ptr;
7964     SKIP_DIGITS(*ptr);
7965     width_right = *ptr;
7966   }
7967 
7968   // Create the format for KMP_SNPRINTF based on flags parsed above
7969   format_index = 0;
7970   format[format_index++] = '%';
7971   if (!right_justify)
7972     format[format_index++] = '-';
7973   if (pad_zeros)
7974     format[format_index++] = '0';
7975   if (width_left && width_right) {
7976     int i = 0;
7977     // Only allow 8 digit number widths.
7978     // This also prevents overflowing format variable
7979     while (i < 8 && width_left < width_right) {
7980       format[format_index++] = *width_left;
7981       width_left++;
7982       i++;
7983     }
7984   }
7985 
7986   // Parse a name (long or short)
7987   // Canonicalize the name into absolute_short_name
7988   found_valid_name = false;
7989   parse_long_name = (**ptr == '{');
7990   if (parse_long_name)
7991     (*ptr)++; // skip initial left brace
7992   for (size_t i = 0; i < sizeof(__kmp_affinity_format_table) /
7993                              sizeof(__kmp_affinity_format_table[0]);
7994        ++i) {
7995     char short_name = __kmp_affinity_format_table[i].short_name;
7996     const char *long_name = __kmp_affinity_format_table[i].long_name;
7997     char field_format = __kmp_affinity_format_table[i].field_format;
7998     if (parse_long_name) {
7999       int length = KMP_STRLEN(long_name);
8000       if (strncmp(*ptr, long_name, length) == 0) {
8001         found_valid_name = true;
8002         (*ptr) += length; // skip the long name
8003       }
8004     } else if (**ptr == short_name) {
8005       found_valid_name = true;
8006       (*ptr)++; // skip the short name
8007     }
8008     if (found_valid_name) {
8009       format[format_index++] = field_format;
8010       format[format_index++] = '\0';
8011       absolute_short_name = short_name;
8012       break;
8013     }
8014   }
8015   if (parse_long_name) {
8016     if (**ptr != '}') {
8017       absolute_short_name = 0;
8018     } else {
8019       (*ptr)++; // skip over the right brace
8020     }
8021   }
8022 
8023   // Attempt to fill the buffer with the requested
8024   // value using snprintf within __kmp_str_buf_print()
8025   switch (absolute_short_name) {
8026   case 't':
8027     rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_team_num());
8028     break;
8029   case 'T':
8030     rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_num_teams());
8031     break;
8032   case 'L':
8033     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_level);
8034     break;
8035   case 'n':
8036     rc = __kmp_str_buf_print(field_buffer, format, __kmp_tid_from_gtid(gtid));
8037     break;
8038   case 'H': {
8039     static const int BUFFER_SIZE = 256;
8040     char buf[BUFFER_SIZE];
8041     __kmp_expand_host_name(buf, BUFFER_SIZE);
8042     rc = __kmp_str_buf_print(field_buffer, format, buf);
8043   } break;
8044   case 'P':
8045     rc = __kmp_str_buf_print(field_buffer, format, getpid());
8046     break;
8047   case 'i':
8048     rc = __kmp_str_buf_print(field_buffer, format, __kmp_gettid());
8049     break;
8050   case 'N':
8051     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_nproc);
8052     break;
8053   case 'a':
8054     field_value =
8055         __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1);
8056     rc = __kmp_str_buf_print(field_buffer, format, field_value);
8057     break;
8058 #if KMP_AFFINITY_SUPPORTED
8059   case 'A': {
8060     kmp_str_buf_t buf;
8061     __kmp_str_buf_init(&buf);
8062     __kmp_affinity_str_buf_mask(&buf, th->th.th_affin_mask);
8063     rc = __kmp_str_buf_print(field_buffer, format, buf.str);
8064     __kmp_str_buf_free(&buf);
8065   } break;
8066 #endif
8067   default:
8068     // According to spec, If an implementation does not have info for field
8069     // type, then "undefined" is printed
8070     rc = __kmp_str_buf_print(field_buffer, "%s", "undefined");
8071     // Skip the field
8072     if (parse_long_name) {
8073       SKIP_TOKEN(*ptr);
8074       if (**ptr == '}')
8075         (*ptr)++;
8076     } else {
8077       (*ptr)++;
8078     }
8079   }
8080 
8081   KMP_ASSERT(format_index <= FORMAT_SIZE);
8082   return rc;
8083 }
8084 
8085 /*
8086  * Return number of characters needed to hold the affinity string
8087  * (not including null byte character)
8088  * The resultant string is printed to buffer, which the caller can then
8089  * handle afterwards
8090 */
8091 size_t __kmp_aux_capture_affinity(int gtid, const char *format,
8092                                   kmp_str_buf_t *buffer) {
8093   const char *parse_ptr;
8094   size_t retval;
8095   const kmp_info_t *th;
8096   kmp_str_buf_t field;
8097 
8098   KMP_DEBUG_ASSERT(buffer);
8099   KMP_DEBUG_ASSERT(gtid >= 0);
8100 
8101   __kmp_str_buf_init(&field);
8102   __kmp_str_buf_clear(buffer);
8103 
8104   th = __kmp_threads[gtid];
8105   retval = 0;
8106 
8107   // If format is NULL or zero-length string, then we use
8108   // affinity-format-var ICV
8109   parse_ptr = format;
8110   if (parse_ptr == NULL || *parse_ptr == '\0') {
8111     parse_ptr = __kmp_affinity_format;
8112   }
8113   KMP_DEBUG_ASSERT(parse_ptr);
8114 
8115   while (*parse_ptr != '\0') {
8116     // Parse a field
8117     if (*parse_ptr == '%') {
8118       // Put field in the buffer
8119       int rc = __kmp_aux_capture_affinity_field(gtid, th, &parse_ptr, &field);
8120       __kmp_str_buf_catbuf(buffer, &field);
8121       retval += rc;
8122     } else {
8123       // Put literal character in buffer
8124       __kmp_str_buf_cat(buffer, parse_ptr, 1);
8125       retval++;
8126       parse_ptr++;
8127     }
8128   }
8129   __kmp_str_buf_free(&field);
8130   return retval;
8131 }
8132 
8133 // Displays the affinity string to stdout
8134 void __kmp_aux_display_affinity(int gtid, const char *format) {
8135   kmp_str_buf_t buf;
8136   __kmp_str_buf_init(&buf);
8137   __kmp_aux_capture_affinity(gtid, format, &buf);
8138   __kmp_fprintf(kmp_out, "%s" KMP_END_OF_LINE, buf.str);
8139   __kmp_str_buf_free(&buf);
8140 }
8141 #endif // OMP_50_ENABLED
8142 
8143 /* ------------------------------------------------------------------------ */
8144 
8145 void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid) {
8146   int blocktime = arg; /* argument is in milliseconds */
8147 #if KMP_USE_MONITOR
8148   int bt_intervals;
8149 #endif
8150   int bt_set;
8151 
8152   __kmp_save_internal_controls(thread);
8153 
8154   /* Normalize and set blocktime for the teams */
8155   if (blocktime < KMP_MIN_BLOCKTIME)
8156     blocktime = KMP_MIN_BLOCKTIME;
8157   else if (blocktime > KMP_MAX_BLOCKTIME)
8158     blocktime = KMP_MAX_BLOCKTIME;
8159 
8160   set__blocktime_team(thread->th.th_team, tid, blocktime);
8161   set__blocktime_team(thread->th.th_serial_team, 0, blocktime);
8162 
8163 #if KMP_USE_MONITOR
8164   /* Calculate and set blocktime intervals for the teams */
8165   bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups);
8166 
8167   set__bt_intervals_team(thread->th.th_team, tid, bt_intervals);
8168   set__bt_intervals_team(thread->th.th_serial_team, 0, bt_intervals);
8169 #endif
8170 
8171   /* Set whether blocktime has been set to "TRUE" */
8172   bt_set = TRUE;
8173 
8174   set__bt_set_team(thread->th.th_team, tid, bt_set);
8175   set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
8176 #if KMP_USE_MONITOR
8177   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, "
8178                 "bt_intervals=%d, monitor_updates=%d\n",
8179                 __kmp_gtid_from_tid(tid, thread->th.th_team),
8180                 thread->th.th_team->t.t_id, tid, blocktime, bt_intervals,
8181                 __kmp_monitor_wakeups));
8182 #else
8183   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d\n",
8184                 __kmp_gtid_from_tid(tid, thread->th.th_team),
8185                 thread->th.th_team->t.t_id, tid, blocktime));
8186 #endif
8187 }
8188 
8189 void __kmp_aux_set_defaults(char const *str, int len) {
8190   if (!__kmp_init_serial) {
8191     __kmp_serial_initialize();
8192   }
8193   __kmp_env_initialize(str);
8194 
8195   if (__kmp_settings
8196 #if OMP_40_ENABLED
8197       || __kmp_display_env || __kmp_display_env_verbose
8198 #endif // OMP_40_ENABLED
8199       ) {
8200     __kmp_env_print();
8201   }
8202 } // __kmp_aux_set_defaults
8203 
8204 /* ------------------------------------------------------------------------ */
8205 /* internal fast reduction routines */
8206 
8207 PACKED_REDUCTION_METHOD_T
8208 __kmp_determine_reduction_method(
8209     ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
8210     void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
8211     kmp_critical_name *lck) {
8212 
8213   // Default reduction method: critical construct ( lck != NULL, like in current
8214   // PAROPT )
8215   // If ( reduce_data!=NULL && reduce_func!=NULL ): the tree-reduction method
8216   // can be selected by RTL
8217   // If loc->flags contains KMP_IDENT_ATOMIC_REDUCE, the atomic reduce method
8218   // can be selected by RTL
8219   // Finally, it's up to OpenMP RTL to make a decision on which method to select
8220   // among generated by PAROPT.
8221 
8222   PACKED_REDUCTION_METHOD_T retval;
8223 
8224   int team_size;
8225 
8226   KMP_DEBUG_ASSERT(loc); // it would be nice to test ( loc != 0 )
8227   KMP_DEBUG_ASSERT(lck); // it would be nice to test ( lck != 0 )
8228 
8229 #define FAST_REDUCTION_ATOMIC_METHOD_GENERATED                                 \
8230   ((loc->flags & (KMP_IDENT_ATOMIC_REDUCE)) == (KMP_IDENT_ATOMIC_REDUCE))
8231 #define FAST_REDUCTION_TREE_METHOD_GENERATED ((reduce_data) && (reduce_func))
8232 
8233   retval = critical_reduce_block;
8234 
8235   // another choice of getting a team size (with 1 dynamic deference) is slower
8236   team_size = __kmp_get_team_num_threads(global_tid);
8237   if (team_size == 1) {
8238 
8239     retval = empty_reduce_block;
8240 
8241   } else {
8242 
8243     int atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
8244 
8245 #if KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS64
8246 
8247 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
8248     KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD
8249 
8250     int teamsize_cutoff = 4;
8251 
8252 #if KMP_MIC_SUPPORTED
8253     if (__kmp_mic_type != non_mic) {
8254       teamsize_cutoff = 8;
8255     }
8256 #endif
8257     int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
8258     if (tree_available) {
8259       if (team_size <= teamsize_cutoff) {
8260         if (atomic_available) {
8261           retval = atomic_reduce_block;
8262         }
8263       } else {
8264         retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
8265       }
8266     } else if (atomic_available) {
8267       retval = atomic_reduce_block;
8268     }
8269 #else
8270 #error "Unknown or unsupported OS"
8271 #endif // KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||
8272        // KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD
8273 
8274 #elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS
8275 
8276 #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS || KMP_OS_HURD
8277 
8278     // basic tuning
8279 
8280     if (atomic_available) {
8281       if (num_vars <= 2) { // && ( team_size <= 8 ) due to false-sharing ???
8282         retval = atomic_reduce_block;
8283       }
8284     } // otherwise: use critical section
8285 
8286 #elif KMP_OS_DARWIN
8287 
8288     int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
8289     if (atomic_available && (num_vars <= 3)) {
8290       retval = atomic_reduce_block;
8291     } else if (tree_available) {
8292       if ((reduce_size > (9 * sizeof(kmp_real64))) &&
8293           (reduce_size < (2000 * sizeof(kmp_real64)))) {
8294         retval = TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER;
8295       }
8296     } // otherwise: use critical section
8297 
8298 #else
8299 #error "Unknown or unsupported OS"
8300 #endif
8301 
8302 #else
8303 #error "Unknown or unsupported architecture"
8304 #endif
8305   }
8306 
8307   // KMP_FORCE_REDUCTION
8308 
8309   // If the team is serialized (team_size == 1), ignore the forced reduction
8310   // method and stay with the unsynchronized method (empty_reduce_block)
8311   if (__kmp_force_reduction_method != reduction_method_not_defined &&
8312       team_size != 1) {
8313 
8314     PACKED_REDUCTION_METHOD_T forced_retval = critical_reduce_block;
8315 
8316     int atomic_available, tree_available;
8317 
8318     switch ((forced_retval = __kmp_force_reduction_method)) {
8319     case critical_reduce_block:
8320       KMP_ASSERT(lck); // lck should be != 0
8321       break;
8322 
8323     case atomic_reduce_block:
8324       atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
8325       if (!atomic_available) {
8326         KMP_WARNING(RedMethodNotSupported, "atomic");
8327         forced_retval = critical_reduce_block;
8328       }
8329       break;
8330 
8331     case tree_reduce_block:
8332       tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
8333       if (!tree_available) {
8334         KMP_WARNING(RedMethodNotSupported, "tree");
8335         forced_retval = critical_reduce_block;
8336       } else {
8337 #if KMP_FAST_REDUCTION_BARRIER
8338         forced_retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
8339 #endif
8340       }
8341       break;
8342 
8343     default:
8344       KMP_ASSERT(0); // "unsupported method specified"
8345     }
8346 
8347     retval = forced_retval;
8348   }
8349 
8350   KA_TRACE(10, ("reduction method selected=%08x\n", retval));
8351 
8352 #undef FAST_REDUCTION_TREE_METHOD_GENERATED
8353 #undef FAST_REDUCTION_ATOMIC_METHOD_GENERATED
8354 
8355   return (retval);
8356 }
8357 
8358 // this function is for testing set/get/determine reduce method
8359 kmp_int32 __kmp_get_reduce_method(void) {
8360   return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
8361 }
8362 
8363 #if OMP_50_ENABLED
8364 
8365 // Soft pause sets up threads to ignore blocktime and just go to sleep.
8366 // Spin-wait code checks __kmp_pause_status and reacts accordingly.
8367 void __kmp_soft_pause() { __kmp_pause_status = kmp_soft_paused; }
8368 
8369 // Hard pause shuts down the runtime completely.  Resume happens naturally when
8370 // OpenMP is used subsequently.
8371 void __kmp_hard_pause() {
8372   __kmp_pause_status = kmp_hard_paused;
8373   __kmp_internal_end_thread(-1);
8374 }
8375 
8376 // Soft resume sets __kmp_pause_status, and wakes up all threads.
8377 void __kmp_resume_if_soft_paused() {
8378   if (__kmp_pause_status == kmp_soft_paused) {
8379     __kmp_pause_status = kmp_not_paused;
8380 
8381     for (int gtid = 1; gtid < __kmp_threads_capacity; ++gtid) {
8382       kmp_info_t *thread = __kmp_threads[gtid];
8383       if (thread) { // Wake it if sleeping
8384         kmp_flag_64 fl(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
8385         if (fl.is_sleeping())
8386           fl.resume(gtid);
8387         else if (__kmp_try_suspend_mx(thread)) { // got suspend lock
8388           __kmp_unlock_suspend_mx(thread); // unlock it; it won't sleep
8389         } else { // thread holds the lock and may sleep soon
8390           do { // until either the thread sleeps, or we can get the lock
8391             if (fl.is_sleeping()) {
8392               fl.resume(gtid);
8393               break;
8394             } else if (__kmp_try_suspend_mx(thread)) {
8395               __kmp_unlock_suspend_mx(thread);
8396               break;
8397             }
8398           } while (1);
8399         }
8400       }
8401     }
8402   }
8403 }
8404 
8405 // This function is called via __kmpc_pause_resource. Returns 0 if successful.
8406 // TODO: add warning messages
8407 int __kmp_pause_resource(kmp_pause_status_t level) {
8408   if (level == kmp_not_paused) { // requesting resume
8409     if (__kmp_pause_status == kmp_not_paused) {
8410       // error message about runtime not being paused, so can't resume
8411       return 1;
8412     } else {
8413       KMP_DEBUG_ASSERT(__kmp_pause_status == kmp_soft_paused ||
8414                        __kmp_pause_status == kmp_hard_paused);
8415       __kmp_pause_status = kmp_not_paused;
8416       return 0;
8417     }
8418   } else if (level == kmp_soft_paused) { // requesting soft pause
8419     if (__kmp_pause_status != kmp_not_paused) {
8420       // error message about already being paused
8421       return 1;
8422     } else {
8423       __kmp_soft_pause();
8424       return 0;
8425     }
8426   } else if (level == kmp_hard_paused) { // requesting hard pause
8427     if (__kmp_pause_status != kmp_not_paused) {
8428       // error message about already being paused
8429       return 1;
8430     } else {
8431       __kmp_hard_pause();
8432       return 0;
8433     }
8434   } else {
8435     // error message about invalid level
8436     return 1;
8437   }
8438 }
8439 
8440 #endif // OMP_50_ENABLED
8441