1 /*
2  * kmp_runtime.cpp -- KPTS runtime support library
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_atomic.h"
16 #include "kmp_environment.h"
17 #include "kmp_error.h"
18 #include "kmp_i18n.h"
19 #include "kmp_io.h"
20 #include "kmp_itt.h"
21 #include "kmp_settings.h"
22 #include "kmp_stats.h"
23 #include "kmp_str.h"
24 #include "kmp_wait_release.h"
25 #include "kmp_wrapper_getpid.h"
26 #include "kmp_dispatch.h"
27 #if KMP_USE_HIER_SCHED
28 #include "kmp_dispatch_hier.h"
29 #endif
30 
31 #if OMPT_SUPPORT
32 #include "ompt-specific.h"
33 #endif
34 
35 /* these are temporary issues to be dealt with */
36 #define KMP_USE_PRCTL 0
37 
38 #if KMP_OS_WINDOWS
39 #include <process.h>
40 #endif
41 
42 #include "tsan_annotations.h"
43 
44 #if defined(KMP_GOMP_COMPAT)
45 char const __kmp_version_alt_comp[] =
46     KMP_VERSION_PREFIX "alternative compiler support: yes";
47 #endif /* defined(KMP_GOMP_COMPAT) */
48 
49 char const __kmp_version_omp_api[] =
50     KMP_VERSION_PREFIX "API version: 5.0 (201611)";
51 
52 #ifdef KMP_DEBUG
53 char const __kmp_version_lock[] =
54     KMP_VERSION_PREFIX "lock type: run time selectable";
55 #endif /* KMP_DEBUG */
56 
57 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
58 
59 /* ------------------------------------------------------------------------ */
60 
61 #if KMP_USE_MONITOR
62 kmp_info_t __kmp_monitor;
63 #endif
64 
65 /* Forward declarations */
66 
67 void __kmp_cleanup(void);
68 
69 static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *, int tid,
70                                   int gtid);
71 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
72                                   kmp_internal_control_t *new_icvs,
73                                   ident_t *loc);
74 #if KMP_AFFINITY_SUPPORTED
75 static void __kmp_partition_places(kmp_team_t *team,
76                                    int update_master_only = 0);
77 #endif
78 static void __kmp_do_serial_initialize(void);
79 void __kmp_fork_barrier(int gtid, int tid);
80 void __kmp_join_barrier(int gtid);
81 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
82                           kmp_internal_control_t *new_icvs, ident_t *loc);
83 
84 #ifdef USE_LOAD_BALANCE
85 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc);
86 #endif
87 
88 static int __kmp_expand_threads(int nNeed);
89 #if KMP_OS_WINDOWS
90 static int __kmp_unregister_root_other_thread(int gtid);
91 #endif
92 static void __kmp_unregister_library(void); // called by __kmp_internal_end()
93 static void __kmp_reap_thread(kmp_info_t *thread, int is_root);
94 kmp_info_t *__kmp_thread_pool_insert_pt = NULL;
95 
96 /* Calculate the identifier of the current thread */
97 /* fast (and somewhat portable) way to get unique identifier of executing
98    thread. Returns KMP_GTID_DNE if we haven't been assigned a gtid. */
99 int __kmp_get_global_thread_id() {
100   int i;
101   kmp_info_t **other_threads;
102   size_t stack_data;
103   char *stack_addr;
104   size_t stack_size;
105   char *stack_base;
106 
107   KA_TRACE(
108       1000,
109       ("*** __kmp_get_global_thread_id: entering, nproc=%d  all_nproc=%d\n",
110        __kmp_nth, __kmp_all_nth));
111 
  /* JPH - To handle the case where __kmpc_end(0) is called immediately prior
     to a parallel region, this function returns KMP_GTID_DNE to force
     serial_initialize by the caller. KMP_GTID_DNE must then be handled at all
     call sites, or else __kmp_init_gtid must be guaranteed, for this to
     work. */
116 
117   if (!TCR_4(__kmp_init_gtid))
118     return KMP_GTID_DNE;
119 
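  /* __kmp_gtid_mode selects how the gtid is found: mode >= 3 reads a
     thread-local variable (TDATA), mode >= 2 queries keyed OS TLS, and lower
     modes fall back to the stack-address search implemented below. */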
120 #ifdef KMP_TDATA_GTID
121   if (TCR_4(__kmp_gtid_mode) >= 3) {
122     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using TDATA\n"));
123     return __kmp_gtid;
124   }
125 #endif
126   if (TCR_4(__kmp_gtid_mode) >= 2) {
127     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using keyed TLS\n"));
128     return __kmp_gtid_get_specific();
129   }
130   KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using internal alg.\n"));
131 
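  /* Internal algorithm: the address of a local variable necessarily lies
     within the current thread's stack, so search the registered threads for
     the one whose recorded window [stackbase - stacksize, stackbase] contains
     that address. */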
132   stack_addr = (char *)&stack_data;
133   other_threads = __kmp_threads;
134 
135   /* ATT: The code below is a source of potential bugs due to unsynchronized
136      access to __kmp_threads array. For example:
137      1. Current thread loads other_threads[i] to thr and checks it, it is
138         non-NULL.
139      2. Current thread is suspended by OS.
140      3. Another thread unregisters and finishes (debug versions of free()
141         may fill memory with something like 0xEF).
142      4. Current thread is resumed.
143      5. Current thread reads junk from *thr.
144      TODO: Fix it.  --ln  */
145 
146   for (i = 0; i < __kmp_threads_capacity; i++) {
147 
148     kmp_info_t *thr = (kmp_info_t *)TCR_SYNC_PTR(other_threads[i]);
149     if (!thr)
150       continue;
151 
152     stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
153     stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);
154 
155     /* stack grows down -- search through all of the active threads */
156 
157     if (stack_addr <= stack_base) {
158       size_t stack_diff = stack_base - stack_addr;
159 
160       if (stack_diff <= stack_size) {
        /* The only way we can be closer than the allocated stack size is if
           we are running on this thread. */
163         KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == i);
164         return i;
165       }
166     }
167   }
168 
  /* fall back to keyed TLS (get_specific) to try to determine our gtid */
170   KA_TRACE(1000,
171            ("*** __kmp_get_global_thread_id: internal alg. failed to find "
172             "thread, using TLS\n"));
173   i = __kmp_gtid_get_specific();
174 
175   /*fprintf( stderr, "=== %d\n", i );  */ /* GROO */
176 
  /* if we haven't been assigned a gtid, then return the error code */
178   if (i < 0)
179     return i;
180 
181   /* dynamically updated stack window for uber threads to avoid get_specific
182      call */
183   if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) {
184     KMP_FATAL(StackOverflow, i);
185   }
186 
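  /* TLS did know our gtid, but the thread's recorded stack window did not
     contain stack_addr. Uber threads' initial stack bounds are only estimates,
     so widen the stored window to cover the current address; later lookups can
     then succeed through the stack search alone. */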
187   stack_base = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
188   if (stack_addr > stack_base) {
189     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
190     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
191             other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr -
192                 stack_base);
193   } else {
194     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
195             stack_base - stack_addr);
196   }
197 
198   /* Reprint stack bounds for ubermaster since they have been refined */
199   if (__kmp_storage_map) {
200     char *stack_end = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
201     char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
202     __kmp_print_storage_map_gtid(i, stack_beg, stack_end,
203                                  other_threads[i]->th.th_info.ds.ds_stacksize,
204                                  "th_%d stack (refinement)", i);
205   }
206   return i;
207 }
208 
209 int __kmp_get_global_thread_id_reg() {
210   int gtid;
211 
212   if (!__kmp_init_serial) {
213     gtid = KMP_GTID_DNE;
214   } else
215 #ifdef KMP_TDATA_GTID
216       if (TCR_4(__kmp_gtid_mode) >= 3) {
217     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using TDATA\n"));
218     gtid = __kmp_gtid;
219   } else
220 #endif
221       if (TCR_4(__kmp_gtid_mode) >= 2) {
222     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using keyed TLS\n"));
223     gtid = __kmp_gtid_get_specific();
224   } else {
225     KA_TRACE(1000,
226              ("*** __kmp_get_global_thread_id_reg: using internal alg.\n"));
227     gtid = __kmp_get_global_thread_id();
228   }
229 
230   /* we must be a new uber master sibling thread */
231   if (gtid == KMP_GTID_DNE) {
232     KA_TRACE(10,
233              ("__kmp_get_global_thread_id_reg: Encountered new root thread. "
234               "Registering a new gtid.\n"));
235     __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
236     if (!__kmp_init_serial) {
237       __kmp_do_serial_initialize();
238       gtid = __kmp_gtid_get_specific();
239     } else {
240       gtid = __kmp_register_root(FALSE);
241     }
242     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
243     /*__kmp_printf( "+++ %d\n", gtid ); */ /* GROO */
244   }
245 
246   KMP_DEBUG_ASSERT(gtid >= 0);
247 
248   return gtid;
249 }
250 
251 /* caller must hold forkjoin_lock */
252 void __kmp_check_stack_overlap(kmp_info_t *th) {
253   int f;
254   char *stack_beg = NULL;
255   char *stack_end = NULL;
256   int gtid;
257 
258   KA_TRACE(10, ("__kmp_check_stack_overlap: called\n"));
259   if (__kmp_storage_map) {
260     stack_end = (char *)th->th.th_info.ds.ds_stackbase;
261     stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
262 
263     gtid = __kmp_gtid_from_thread(th);
264 
265     if (gtid == KMP_GTID_MONITOR) {
266       __kmp_print_storage_map_gtid(
267           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
268           "th_%s stack (%s)", "mon",
269           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
270     } else {
271       __kmp_print_storage_map_gtid(
272           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
273           "th_%d stack (%s)", gtid,
274           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
275     }
276   }
277 
278   /* No point in checking ubermaster threads since they use refinement and
279    * cannot overlap */
280   gtid = __kmp_gtid_from_thread(th);
281   if (__kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) {
282     KA_TRACE(10,
283              ("__kmp_check_stack_overlap: performing extensive checking\n"));
284     if (stack_beg == NULL) {
285       stack_end = (char *)th->th.th_info.ds.ds_stackbase;
286       stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
287     }
288 
289     for (f = 0; f < __kmp_threads_capacity; f++) {
290       kmp_info_t *f_th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[f]);
291 
292       if (f_th && f_th != th) {
293         char *other_stack_end =
294             (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase);
295         char *other_stack_beg =
296             other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize);
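        // Interval-overlap test: report a collision if either endpoint of this
        // thread's stack falls strictly inside the other thread's stack range.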
297         if ((stack_beg > other_stack_beg && stack_beg < other_stack_end) ||
298             (stack_end > other_stack_beg && stack_end < other_stack_end)) {
299 
300           /* Print the other stack values before the abort */
301           if (__kmp_storage_map)
302             __kmp_print_storage_map_gtid(
303                 -1, other_stack_beg, other_stack_end,
304                 (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize),
305                 "th_%d stack (overlapped)", __kmp_gtid_from_thread(f_th));
306 
307           __kmp_fatal(KMP_MSG(StackOverlap), KMP_HNT(ChangeStackLimit),
308                       __kmp_msg_null);
309         }
310       }
311     }
312   }
313   KA_TRACE(10, ("__kmp_check_stack_overlap: returning\n"));
314 }
315 
316 /* ------------------------------------------------------------------------ */
317 
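/* Park the calling thread forever, yielding so it does not burn a core. Used
   on abnormal-termination paths where a thread must not return. */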
318 void __kmp_infinite_loop(void) {
319   static int done = FALSE;
320 
321   while (!done) {
322     KMP_YIELD(TRUE);
323   }
324 }
325 
326 #define MAX_MESSAGE 512
327 
328 void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size,
329                                   char const *format, ...) {
330   char buffer[MAX_MESSAGE];
331   va_list ap;
332 
333   va_start(ap, format);
334   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP storage map: %p %p%8lu %s\n", p1,
335                p2, (unsigned long)size, format);
336   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
337   __kmp_vprintf(kmp_err, buffer, ap);
338 #if KMP_PRINT_DATA_PLACEMENT
339   int node;
340   if (gtid >= 0) {
341     if (p1 <= p2 && (char *)p2 - (char *)p1 == size) {
342       if (__kmp_storage_map_verbose) {
343         node = __kmp_get_host_node(p1);
344         if (node < 0) /* doesn't work, so don't try this next time */
345           __kmp_storage_map_verbose = FALSE;
346         else {
347           char *last;
348           int lastNode;
349           int localProc = __kmp_get_cpu_from_gtid(gtid);
350 
351           const int page_size = KMP_GET_PAGE_SIZE();
352 
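          // Round p1 down to the start of its page, and p2 down to the start
          // of the page containing its last byte, so the printout below
          // covers whole pages.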
353           p1 = (void *)((size_t)p1 & ~((size_t)page_size - 1));
354           p2 = (void *)(((size_t)p2 - 1) & ~((size_t)page_size - 1));
355           if (localProc >= 0)
356             __kmp_printf_no_lock("  GTID %d localNode %d\n", gtid,
357                                  localProc >> 1);
358           else
359             __kmp_printf_no_lock("  GTID %d\n", gtid);
360 #if KMP_USE_PRCTL
361           /* The more elaborate format is disabled for now because of the prctl
362            * hanging bug. */
363           do {
364             last = p1;
365             lastNode = node;
366             /* This loop collates adjacent pages with the same host node. */
367             do {
              p1 = (char *)p1 + page_size;
369             } while (p1 <= p2 && (node = __kmp_get_host_node(p1)) == lastNode);
370             __kmp_printf_no_lock("    %p-%p memNode %d\n", last, (char *)p1 - 1,
371                                  lastNode);
372           } while (p1 <= p2);
373 #else
374           __kmp_printf_no_lock("    %p-%p memNode %d\n", p1,
375                                (char *)p1 + (page_size - 1),
376                                __kmp_get_host_node(p1));
377           if (p1 < p2) {
378             __kmp_printf_no_lock("    %p-%p memNode %d\n", p2,
379                                  (char *)p2 + (page_size - 1),
380                                  __kmp_get_host_node(p2));
381           }
382 #endif
383         }
384       }
385     } else
386       __kmp_printf_no_lock("  %s\n", KMP_I18N_STR(StorageMapWarning));
387   }
388 #endif /* KMP_PRINT_DATA_PLACEMENT */
389   __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
390 }
391 
392 void __kmp_warn(char const *format, ...) {
393   char buffer[MAX_MESSAGE];
394   va_list ap;
395 
396   if (__kmp_generate_warnings == kmp_warnings_off) {
397     return;
398   }
399 
400   va_start(ap, format);
401 
402   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP warning: %s\n", format);
403   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
404   __kmp_vprintf(kmp_err, buffer, ap);
405   __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
406 
407   va_end(ap);
408 }
409 
410 void __kmp_abort_process() {
411   // Later threads may stall here, but that's ok because abort() will kill them.
412   __kmp_acquire_bootstrap_lock(&__kmp_exit_lock);
413 
414   if (__kmp_debug_buf) {
415     __kmp_dump_debug_buffer();
416   }
417 
418   if (KMP_OS_WINDOWS) {
419     // Let other threads know of abnormal termination and prevent deadlock
420     // if abort happened during library initialization or shutdown
421     __kmp_global.g.g_abort = SIGABRT;
422 
423     /* On Windows* OS by default abort() causes pop-up error box, which stalls
424        nightly testing. Unfortunately, we cannot reliably suppress pop-up error
425        boxes. _set_abort_behavior() works well, but this function is not
426        available in VS7 (this is not problem for DLL, but it is a problem for
427        static OpenMP RTL). SetErrorMode (and so, timelimit utility) does not
428        help, at least in some versions of MS C RTL.
429 
430        It seems following sequence is the only way to simulate abort() and
431        avoid pop-up error box. */
432     raise(SIGABRT);
433     _exit(3); // Just in case, if signal ignored, exit anyway.
434   } else {
435     abort();
436   }
437 
438   __kmp_infinite_loop();
439   __kmp_release_bootstrap_lock(&__kmp_exit_lock);
440 
441 } // __kmp_abort_process
442 
443 void __kmp_abort_thread(void) {
444   // TODO: Eliminate g_abort global variable and this function.
445   // In case of abort just call abort(), it will kill all the threads.
446   __kmp_infinite_loop();
447 } // __kmp_abort_thread
448 
449 /* Print out the storage map for the major kmp_info_t thread data structures
450    that are allocated together. */
451 
452 static void __kmp_print_thread_storage_map(kmp_info_t *thr, int gtid) {
453   __kmp_print_storage_map_gtid(gtid, thr, thr + 1, sizeof(kmp_info_t), "th_%d",
454                                gtid);
455 
456   __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team,
457                                sizeof(kmp_desc_t), "th_%d.th_info", gtid);
458 
459   __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head,
460                                sizeof(kmp_local_t), "th_%d.th_local", gtid);
461 
462   __kmp_print_storage_map_gtid(
463       gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier],
464       sizeof(kmp_balign_t) * bs_last_barrier, "th_%d.th_bar", gtid);
465 
466   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier],
467                                &thr->th.th_bar[bs_plain_barrier + 1],
468                                sizeof(kmp_balign_t), "th_%d.th_bar[plain]",
469                                gtid);
470 
471   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier],
472                                &thr->th.th_bar[bs_forkjoin_barrier + 1],
473                                sizeof(kmp_balign_t), "th_%d.th_bar[forkjoin]",
474                                gtid);
475 
476 #if KMP_FAST_REDUCTION_BARRIER
477   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier],
478                                &thr->th.th_bar[bs_reduction_barrier + 1],
479                                sizeof(kmp_balign_t), "th_%d.th_bar[reduction]",
480                                gtid);
481 #endif // KMP_FAST_REDUCTION_BARRIER
482 }
483 
484 /* Print out the storage map for the major kmp_team_t team data structures
485    that are allocated together. */
486 
487 static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
488                                          int team_id, int num_thr) {
489   int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
490   __kmp_print_storage_map_gtid(-1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
491                                header, team_id);
492 
493   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
494                                &team->t.t_bar[bs_last_barrier],
495                                sizeof(kmp_balign_team_t) * bs_last_barrier,
496                                "%s_%d.t_bar", header, team_id);
497 
498   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
499                                &team->t.t_bar[bs_plain_barrier + 1],
500                                sizeof(kmp_balign_team_t), "%s_%d.t_bar[plain]",
501                                header, team_id);
502 
503   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
504                                &team->t.t_bar[bs_forkjoin_barrier + 1],
505                                sizeof(kmp_balign_team_t),
506                                "%s_%d.t_bar[forkjoin]", header, team_id);
507 
508 #if KMP_FAST_REDUCTION_BARRIER
509   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
510                                &team->t.t_bar[bs_reduction_barrier + 1],
511                                sizeof(kmp_balign_team_t),
512                                "%s_%d.t_bar[reduction]", header, team_id);
513 #endif // KMP_FAST_REDUCTION_BARRIER
514 
515   __kmp_print_storage_map_gtid(
516       -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
517       sizeof(kmp_disp_t) * num_thr, "%s_%d.t_dispatch", header, team_id);
518 
519   __kmp_print_storage_map_gtid(
520       -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
521       sizeof(kmp_info_t *) * num_thr, "%s_%d.t_threads", header, team_id);
522 
523   __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
524                                &team->t.t_disp_buffer[num_disp_buff],
525                                sizeof(dispatch_shared_info_t) * num_disp_buff,
526                                "%s_%d.t_disp_buffer", header, team_id);
527 }
528 
529 static void __kmp_init_allocator() { __kmp_init_memkind(); }
530 static void __kmp_fini_allocator() { __kmp_fini_memkind(); }
531 
532 /* ------------------------------------------------------------------------ */
533 
534 #if KMP_DYNAMIC_LIB
535 #if KMP_OS_WINDOWS
536 
537 static void __kmp_reset_lock(kmp_bootstrap_lock_t *lck) {
538   // TODO: Change to __kmp_break_bootstrap_lock().
539   __kmp_init_bootstrap_lock(lck); // make the lock released
540 }
541 
542 static void __kmp_reset_locks_on_process_detach(int gtid_req) {
543   int i;
544   int thread_count;
545 
  // PROCESS_DETACH is expected to be called by a thread that executes
  // ProcessExit() or FreeLibrary(). The OS terminates the other threads
  // (except the one calling ProcessExit or FreeLibrary). So it might be safe
  // to access __kmp_threads[] without taking the forkjoin_lock. However, in
  // fact, some threads can still be alive here, although they are about to be
  // terminated. The threads in the array with ds_thread==0 are the most
  // suspicious, so it may not actually be safe to access __kmp_threads[].
553 
554   // TODO: does it make sense to check __kmp_roots[] ?
555 
556   // Let's check that there are no other alive threads registered with the OMP
557   // lib.
558   while (1) {
559     thread_count = 0;
560     for (i = 0; i < __kmp_threads_capacity; ++i) {
561       if (!__kmp_threads)
562         continue;
563       kmp_info_t *th = __kmp_threads[i];
564       if (th == NULL)
565         continue;
566       int gtid = th->th.th_info.ds.ds_gtid;
567       if (gtid == gtid_req)
568         continue;
569       if (gtid < 0)
570         continue;
571       DWORD exit_val;
572       int alive = __kmp_is_thread_alive(th, &exit_val);
573       if (alive) {
574         ++thread_count;
575       }
576     }
577     if (thread_count == 0)
578       break; // success
579   }
580 
581   // Assume that I'm alone. Now it might be safe to check and reset locks.
582   // __kmp_forkjoin_lock and __kmp_stdio_lock are expected to be reset.
583   __kmp_reset_lock(&__kmp_forkjoin_lock);
584 #ifdef KMP_DEBUG
585   __kmp_reset_lock(&__kmp_stdio_lock);
586 #endif // KMP_DEBUG
587 }
588 
589 BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved) {
590   //__kmp_acquire_bootstrap_lock( &__kmp_initz_lock );
591 
592   switch (fdwReason) {
593 
594   case DLL_PROCESS_ATTACH:
595     KA_TRACE(10, ("DllMain: PROCESS_ATTACH\n"));
596 
597     return TRUE;
598 
599   case DLL_PROCESS_DETACH:
600     KA_TRACE(10, ("DllMain: PROCESS_DETACH T#%d\n", __kmp_gtid_get_specific()));
601 
602     if (lpReserved != NULL) {
603       // lpReserved is used for telling the difference:
604       //   lpReserved == NULL when FreeLibrary() was called,
605       //   lpReserved != NULL when the process terminates.
      // When FreeLibrary() is called, worker threads remain alive. So they will
      // release the forkjoin lock by themselves. When the process terminates,
      // worker threads disappear, triggering the problem of an unreleased
      // forkjoin lock as described below.
610 
611       // A worker thread can take the forkjoin lock. The problem comes up if
612       // that worker thread becomes dead before it releases the forkjoin lock.
613       // The forkjoin lock remains taken, while the thread executing
614       // DllMain()->PROCESS_DETACH->__kmp_internal_end_library() below will try
615       // to take the forkjoin lock and will always fail, so that the application
      // will never finish [normally]. This scenario is possible if
      // __kmpc_end() has not been executed. It looks like this is not a corner
      // case, but rather covers common cases:
619       // - the main function was compiled by an alternative compiler;
620       // - the main function was compiled by icl but without /Qopenmp
621       //   (application with plugins);
622       // - application terminates by calling C exit(), Fortran CALL EXIT() or
623       //   Fortran STOP.
624       // - alive foreign thread prevented __kmpc_end from doing cleanup.
625       //
626       // This is a hack to work around the problem.
627       // TODO: !!! figure out something better.
628       __kmp_reset_locks_on_process_detach(__kmp_gtid_get_specific());
629     }
630 
631     __kmp_internal_end_library(__kmp_gtid_get_specific());
632 
633     return TRUE;
634 
635   case DLL_THREAD_ATTACH:
636     KA_TRACE(10, ("DllMain: THREAD_ATTACH\n"));
637 
638     /* if we want to register new siblings all the time here call
639      * __kmp_get_gtid(); */
640     return TRUE;
641 
642   case DLL_THREAD_DETACH:
643     KA_TRACE(10, ("DllMain: THREAD_DETACH T#%d\n", __kmp_gtid_get_specific()));
644 
645     __kmp_internal_end_thread(__kmp_gtid_get_specific());
646     return TRUE;
647   }
648 
649   return TRUE;
650 }
651 
652 #endif /* KMP_OS_WINDOWS */
653 #endif /* KMP_DYNAMIC_LIB */
654 
655 /* __kmp_parallel_deo -- Wait until it's our turn. */
656 void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
657   int gtid = *gtid_ref;
658 #ifdef BUILD_PARALLEL_ORDERED
659   kmp_team_t *team = __kmp_team_from_gtid(gtid);
660 #endif /* BUILD_PARALLEL_ORDERED */
661 
662   if (__kmp_env_consistency_check) {
663     if (__kmp_threads[gtid]->th.th_root->r.r_active)
664 #if KMP_USE_DYNAMIC_LOCK
665       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL, 0);
666 #else
667       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL);
668 #endif
669   }
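  /* Threads enter the ordered section in thread-id order: each one spins until
     the team's ordered counter t_value equals its own tid; __kmp_parallel_dxo
     then advances the counter to (tid + 1) % nproc on exit. */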
670 #ifdef BUILD_PARALLEL_ORDERED
671   if (!team->t.t_serialized) {
672     KMP_MB();
673     KMP_WAIT(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid), KMP_EQ,
674              NULL);
675     KMP_MB();
676   }
677 #endif /* BUILD_PARALLEL_ORDERED */
678 }
679 
680 /* __kmp_parallel_dxo -- Signal the next task. */
681 void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
682   int gtid = *gtid_ref;
683 #ifdef BUILD_PARALLEL_ORDERED
684   int tid = __kmp_tid_from_gtid(gtid);
685   kmp_team_t *team = __kmp_team_from_gtid(gtid);
686 #endif /* BUILD_PARALLEL_ORDERED */
687 
688   if (__kmp_env_consistency_check) {
689     if (__kmp_threads[gtid]->th.th_root->r.r_active)
690       __kmp_pop_sync(gtid, ct_ordered_in_parallel, loc_ref);
691   }
692 #ifdef BUILD_PARALLEL_ORDERED
693   if (!team->t.t_serialized) {
694     KMP_MB(); /* Flush all pending memory write invalidates.  */
695 
696     /* use the tid of the next thread in this team */
697     /* TODO replace with general release procedure */
698     team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
699 
700     KMP_MB(); /* Flush all pending memory write invalidates.  */
701   }
702 #endif /* BUILD_PARALLEL_ORDERED */
703 }
704 
705 /* ------------------------------------------------------------------------ */
706 /* The BARRIER for a SINGLE process section is always explicit   */
707 
708 int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) {
709   int status;
710   kmp_info_t *th;
711   kmp_team_t *team;
712 
713   if (!TCR_4(__kmp_init_parallel))
714     __kmp_parallel_initialize();
715   __kmp_resume_if_soft_paused();
716 
717   th = __kmp_threads[gtid];
718   team = th->th.th_team;
719   status = 0;
720 
721   th->th.th_ident = id_ref;
722 
723   if (team->t.t_serialized) {
724     status = 1;
725   } else {
726     kmp_int32 old_this = th->th.th_local.this_construct;
727 
728     ++th->th.th_local.this_construct;
729     /* try to set team count to thread count--success means thread got the
730        single block */
731     /* TODO: Should this be acquire or release? */
732     if (team->t.t_construct == old_this) {
733       status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
734                                               th->th.th_local.this_construct);
735     }
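    // Exactly one thread observes t_construct == old_this and wins the
    // compare-and-store; the others fall through with status == 0 and skip
    // the single block.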
736 #if USE_ITT_BUILD
737     if (__itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
738         KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL &&
739         team->t.t_active_level ==
740             1) { // Only report metadata by master of active team at level 1
741       __kmp_itt_metadata_single(id_ref);
742     }
743 #endif /* USE_ITT_BUILD */
744   }
745 
746   if (__kmp_env_consistency_check) {
747     if (status && push_ws) {
748       __kmp_push_workshare(gtid, ct_psingle, id_ref);
749     } else {
750       __kmp_check_workshare(gtid, ct_psingle, id_ref);
751     }
752   }
753 #if USE_ITT_BUILD
754   if (status) {
755     __kmp_itt_single_start(gtid);
756   }
757 #endif /* USE_ITT_BUILD */
758   return status;
759 }
760 
761 void __kmp_exit_single(int gtid) {
762 #if USE_ITT_BUILD
763   __kmp_itt_single_end(gtid);
764 #endif /* USE_ITT_BUILD */
765   if (__kmp_env_consistency_check)
766     __kmp_pop_workshare(gtid, ct_psingle, NULL);
767 }
768 
/* Determine if we can go parallel or must use a serialized parallel region,
 * and how many threads we can use.
 * set_nthreads is the number of threads requested for the team.
 * Returns 1 if we should serialize or only use one thread,
 * otherwise the number of threads to use.
 * The forkjoin lock is held by the caller. */
775 static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
776                                  int master_tid, int set_nthreads,
777                                  int enter_teams) {
778   int capacity;
779   int new_nthreads;
780   KMP_DEBUG_ASSERT(__kmp_init_serial);
781   KMP_DEBUG_ASSERT(root && parent_team);
782   kmp_info_t *this_thr = parent_team->t.t_threads[master_tid];
783 
784   // If dyn-var is set, dynamically adjust the number of desired threads,
785   // according to the method specified by dynamic_mode.
786   new_nthreads = set_nthreads;
787   if (!get__dynamic_2(parent_team, master_tid)) {
788     ;
789   }
790 #ifdef USE_LOAD_BALANCE
791   else if (__kmp_global.g.g_dynamic_mode == dynamic_load_balance) {
792     new_nthreads = __kmp_load_balance_nproc(root, set_nthreads);
793     if (new_nthreads == 1) {
794       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
795                     "reservation to 1 thread\n",
796                     master_tid));
797       return 1;
798     }
799     if (new_nthreads < set_nthreads) {
800       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
801                     "reservation to %d threads\n",
802                     master_tid, new_nthreads));
803     }
804   }
805 #endif /* USE_LOAD_BALANCE */
806   else if (__kmp_global.g.g_dynamic_mode == dynamic_thread_limit) {
807     new_nthreads = __kmp_avail_proc - __kmp_nth +
808                    (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
809     if (new_nthreads <= 1) {
810       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
811                     "reservation to 1 thread\n",
812                     master_tid));
813       return 1;
814     }
815     if (new_nthreads < set_nthreads) {
816       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
817                     "reservation to %d threads\n",
818                     master_tid, new_nthreads));
819     } else {
820       new_nthreads = set_nthreads;
821     }
822   } else if (__kmp_global.g.g_dynamic_mode == dynamic_random) {
823     if (set_nthreads > 2) {
824       new_nthreads = __kmp_get_random(parent_team->t.t_threads[master_tid]);
825       new_nthreads = (new_nthreads % set_nthreads) + 1;
826       if (new_nthreads == 1) {
827         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
828                       "reservation to 1 thread\n",
829                       master_tid));
830         return 1;
831       }
832       if (new_nthreads < set_nthreads) {
833         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
834                       "reservation to %d threads\n",
835                       master_tid, new_nthreads));
836       }
837     }
838   } else {
839     KMP_ASSERT(0);
840   }
841 
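  // In the limit checks below, (root->r.r_active ? 1 : hot team size) is
  // subtracted because those threads are already counted in __kmp_nth: an
  // active root's master re-uses its own slot, while an inactive root still
  // has its whole hot team allocated.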
842   // Respect KMP_ALL_THREADS/KMP_DEVICE_THREAD_LIMIT.
843   if (__kmp_nth + new_nthreads -
844           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
845       __kmp_max_nth) {
846     int tl_nthreads = __kmp_max_nth - __kmp_nth +
847                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
848     if (tl_nthreads <= 0) {
849       tl_nthreads = 1;
850     }
851 
852     // If dyn-var is false, emit a 1-time warning.
853     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
854       __kmp_reserve_warn = 1;
855       __kmp_msg(kmp_ms_warning,
856                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
857                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
858     }
859     if (tl_nthreads == 1) {
860       KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT "
861                     "reduced reservation to 1 thread\n",
862                     master_tid));
863       return 1;
864     }
865     KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT reduced "
866                   "reservation to %d threads\n",
867                   master_tid, tl_nthreads));
868     new_nthreads = tl_nthreads;
869   }
870 
871   // Respect OMP_THREAD_LIMIT
872   int cg_nthreads = this_thr->th.th_cg_roots->cg_nthreads;
873   int max_cg_threads = this_thr->th.th_cg_roots->cg_thread_limit;
874   if (cg_nthreads + new_nthreads -
875           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
876       max_cg_threads) {
877     int tl_nthreads = max_cg_threads - cg_nthreads +
878                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
879     if (tl_nthreads <= 0) {
880       tl_nthreads = 1;
881     }
882 
883     // If dyn-var is false, emit a 1-time warning.
884     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
885       __kmp_reserve_warn = 1;
886       __kmp_msg(kmp_ms_warning,
887                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
888                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
889     }
890     if (tl_nthreads == 1) {
891       KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT "
892                     "reduced reservation to 1 thread\n",
893                     master_tid));
894       return 1;
895     }
896     KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT reduced "
897                   "reservation to %d threads\n",
898                   master_tid, tl_nthreads));
899     new_nthreads = tl_nthreads;
900   }
901 
902   // Check if the threads array is large enough, or needs expanding.
903   // See comment in __kmp_register_root() about the adjustment if
904   // __kmp_threads[0] == NULL.
905   capacity = __kmp_threads_capacity;
906   if (TCR_PTR(__kmp_threads[0]) == NULL) {
907     --capacity;
908   }
909   if (__kmp_nth + new_nthreads -
910           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
911       capacity) {
912     // Expand the threads array.
913     int slotsRequired = __kmp_nth + new_nthreads -
914                         (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
915                         capacity;
916     int slotsAdded = __kmp_expand_threads(slotsRequired);
917     if (slotsAdded < slotsRequired) {
918       // The threads array was not expanded enough.
919       new_nthreads -= (slotsRequired - slotsAdded);
920       KMP_ASSERT(new_nthreads >= 1);
921 
922       // If dyn-var is false, emit a 1-time warning.
923       if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
924         __kmp_reserve_warn = 1;
925         if (__kmp_tp_cached) {
926           __kmp_msg(kmp_ms_warning,
927                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
928                     KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
929                     KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
930         } else {
931           __kmp_msg(kmp_ms_warning,
932                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
933                     KMP_HNT(SystemLimitOnThreads), __kmp_msg_null);
934         }
935       }
936     }
937   }
938 
939 #ifdef KMP_DEBUG
940   if (new_nthreads == 1) {
941     KC_TRACE(10,
942              ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
943               "dead roots and rechecking; requested %d threads\n",
944               __kmp_get_gtid(), set_nthreads));
945   } else {
946     KC_TRACE(10, ("__kmp_reserve_threads: T#%d allocating %d threads; requested"
947                   " %d threads\n",
948                   __kmp_get_gtid(), new_nthreads, set_nthreads));
949   }
950 #endif // KMP_DEBUG
951   return new_nthreads;
952 }
953 
/* Allocate threads from the thread pool and assign them to the new team. We are
   assured that there are enough threads available, because we checked on that
   earlier while holding the forkjoin lock. */
957 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
958                                     kmp_info_t *master_th, int master_gtid) {
959   int i;
960   int use_hot_team;
961 
962   KA_TRACE(10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
963   KMP_DEBUG_ASSERT(master_gtid == __kmp_get_gtid());
964   KMP_MB();
965 
966   /* first, let's setup the master thread */
967   master_th->th.th_info.ds.ds_tid = 0;
968   master_th->th.th_team = team;
969   master_th->th.th_team_nproc = team->t.t_nproc;
970   master_th->th.th_team_master = master_th;
971   master_th->th.th_team_serialized = FALSE;
972   master_th->th.th_dispatch = &team->t.t_dispatch[0];
973 
974 /* make sure we are not the optimized hot team */
975 #if KMP_NESTED_HOT_TEAMS
976   use_hot_team = 0;
977   kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
978   if (hot_teams) { // hot teams array is not allocated if
979     // KMP_HOT_TEAMS_MAX_LEVEL=0
980     int level = team->t.t_active_level - 1; // index in array of hot teams
981     if (master_th->th.th_teams_microtask) { // are we inside the teams?
982       if (master_th->th.th_teams_size.nteams > 1) {
983         ++level; // level was not increased in teams construct for
984         // team_of_masters
985       }
986       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
987           master_th->th.th_teams_level == team->t.t_level) {
988         ++level; // level was not increased in teams construct for
989         // team_of_workers before the parallel
990       } // team->t.t_level will be increased inside parallel
991     }
992     if (level < __kmp_hot_teams_max_level) {
993       if (hot_teams[level].hot_team) {
994         // hot team has already been allocated for given level
995         KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
996         use_hot_team = 1; // the team is ready to use
997       } else {
998         use_hot_team = 0; // AC: threads are not allocated yet
999         hot_teams[level].hot_team = team; // remember new hot team
1000         hot_teams[level].hot_team_nth = team->t.t_nproc;
1001       }
1002     } else {
1003       use_hot_team = 0;
1004     }
1005   }
1006 #else
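  // Without nested hot teams, the only team whose threads are re-used in place
  // is the root's hot team.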
1007   use_hot_team = team == root->r.r_hot_team;
1008 #endif
1009   if (!use_hot_team) {
1010 
1011     /* install the master thread */
1012     team->t.t_threads[0] = master_th;
1013     __kmp_initialize_info(master_th, team, 0, master_gtid);
1014 
1015     /* now, install the worker threads */
1016     for (i = 1; i < team->t.t_nproc; i++) {
1017 
1018       /* fork or reallocate a new thread and install it in team */
1019       kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1020       team->t.t_threads[i] = thr;
1021       KMP_DEBUG_ASSERT(thr);
1022       KMP_DEBUG_ASSERT(thr->th.th_team == team);
1023       /* align team and thread arrived states */
1024       KA_TRACE(20, ("__kmp_fork_team_threads: T#%d(%d:%d) init arrived "
1025                     "T#%d(%d:%d) join =%llu, plain=%llu\n",
1026                     __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
1027                     __kmp_gtid_from_tid(i, team), team->t.t_id, i,
1028                     team->t.t_bar[bs_forkjoin_barrier].b_arrived,
1029                     team->t.t_bar[bs_plain_barrier].b_arrived));
1030       thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
1031       thr->th.th_teams_level = master_th->th.th_teams_level;
1032       thr->th.th_teams_size = master_th->th.th_teams_size;
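      // A thread re-used from the pool may carry stale barrier state; align
      // its per-barrier b_arrived counters with the team's so it neither seems
      // to have already passed, nor to lag behind, the next barrier.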
1033       { // Initialize threads' barrier data.
1034         int b;
1035         kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
1036         for (b = 0; b < bs_last_barrier; ++b) {
1037           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
1038           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
1039 #if USE_DEBUGGER
1040           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
1041 #endif
1042         }
1043       }
1044     }
1045 
1046 #if KMP_AFFINITY_SUPPORTED
1047     __kmp_partition_places(team);
1048 #endif
1049   }
1050 
1051   if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
1052     for (i = 0; i < team->t.t_nproc; i++) {
1053       kmp_info_t *thr = team->t.t_threads[i];
1054       if (thr->th.th_prev_num_threads != team->t.t_nproc ||
1055           thr->th.th_prev_level != team->t.t_level) {
1056         team->t.t_display_affinity = 1;
1057         break;
1058       }
1059     }
1060   }
1061 
1062   KMP_MB();
1063 }
1064 
1065 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1066 // Propagate any changes to the floating point control registers out to the team
1067 // We try to avoid unnecessary writes to the relevant cache line in the team
1068 // structure, so we don't make changes unless they are needed.
1069 inline static void propagateFPControl(kmp_team_t *team) {
1070   if (__kmp_inherit_fp_control) {
1071     kmp_int16 x87_fpu_control_word;
1072     kmp_uint32 mxcsr;
1073 
1074     // Get master values of FPU control flags (both X87 and vector)
1075     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1076     __kmp_store_mxcsr(&mxcsr);
1077     mxcsr &= KMP_X86_MXCSR_MASK;
1078 
1079     // There is no point looking at t_fp_control_saved here.
1080     // If it is TRUE, we still have to update the values if they are different
1081     // from those we now have. If it is FALSE we didn't save anything yet, but
1082     // our objective is the same. We have to ensure that the values in the team
1083     // are the same as those we have.
1084     // So, this code achieves what we need whether or not t_fp_control_saved is
1085     // true. By checking whether the value needs updating we avoid unnecessary
1086     // writes that would put the cache-line into a written state, causing all
1087     // threads in the team to have to read it again.
1088     KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
1089     KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
    // Although we don't use this value ourselves, other code in the runtime
    // wants to know whether it should restore the registers. So we must ensure
    // that it is correct.
1092     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
1093   } else {
1094     // Similarly here. Don't write to this cache-line in the team structure
1095     // unless we have to.
1096     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
1097   }
1098 }
1099 
1100 // Do the opposite, setting the hardware registers to the updated values from
1101 // the team.
1102 inline static void updateHWFPControl(kmp_team_t *team) {
1103   if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
    // Only reset the fp control regs if they have been changed in
    // the parallel region that we are exiting.
1106     kmp_int16 x87_fpu_control_word;
1107     kmp_uint32 mxcsr;
1108     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1109     __kmp_store_mxcsr(&mxcsr);
1110     mxcsr &= KMP_X86_MXCSR_MASK;
1111 
1112     if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
1113       __kmp_clear_x87_fpu_status_word();
1114       __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
1115     }
1116 
1117     if (team->t.t_mxcsr != mxcsr) {
1118       __kmp_load_mxcsr(&team->t.t_mxcsr);
1119     }
1120   }
1121 }
1122 #else
1123 #define propagateFPControl(x) ((void)0)
1124 #define updateHWFPControl(x) ((void)0)
1125 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1126 
1127 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
1128                                      int realloc); // forward declaration
1129 
1130 /* Run a parallel region that has been serialized, so runs only in a team of the
1131    single master thread. */
1132 void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
1133   kmp_info_t *this_thr;
1134   kmp_team_t *serial_team;
1135 
1136   KC_TRACE(10, ("__kmpc_serialized_parallel: called by T#%d\n", global_tid));
1137 
1138   /* Skip all this code for autopar serialized loops since it results in
1139      unacceptable overhead */
1140   if (loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR))
1141     return;
1142 
1143   if (!TCR_4(__kmp_init_parallel))
1144     __kmp_parallel_initialize();
1145   __kmp_resume_if_soft_paused();
1146 
1147   this_thr = __kmp_threads[global_tid];
1148   serial_team = this_thr->th.th_serial_team;
1149 
1150   /* utilize the serialized team held by this thread */
1151   KMP_DEBUG_ASSERT(serial_team);
1152   KMP_MB();
1153 
1154   if (__kmp_tasking_mode != tskm_immediate_exec) {
1155     KMP_DEBUG_ASSERT(
1156         this_thr->th.th_task_team ==
1157         this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]);
1158     KMP_DEBUG_ASSERT(serial_team->t.t_task_team[this_thr->th.th_task_state] ==
1159                      NULL);
1160     KA_TRACE(20, ("__kmpc_serialized_parallel: T#%d pushing task_team %p / "
1161                   "team %p, new task_team = NULL\n",
1162                   global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
1163     this_thr->th.th_task_team = NULL;
1164   }
1165 
1166   kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
1167   if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1168     proc_bind = proc_bind_false;
1169   } else if (proc_bind == proc_bind_default) {
1170     // No proc_bind clause was specified, so use the current value
1171     // of proc-bind-var for this parallel region.
1172     proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
1173   }
1174   // Reset for next parallel region
1175   this_thr->th.th_set_proc_bind = proc_bind_default;
1176 
1177 #if OMPT_SUPPORT
1178   ompt_data_t ompt_parallel_data = ompt_data_none;
1179   ompt_data_t *implicit_task_data;
1180   void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
1181   if (ompt_enabled.enabled &&
1182       this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
1183 
1184     ompt_task_info_t *parent_task_info;
1185     parent_task_info = OMPT_CUR_TASK_INFO(this_thr);
1186 
1187     parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1188     if (ompt_enabled.ompt_callback_parallel_begin) {
1189       int team_size = 1;
1190 
1191       ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1192           &(parent_task_info->task_data), &(parent_task_info->frame),
1193           &ompt_parallel_data, team_size,
1194           ompt_parallel_invoker_program | ompt_parallel_team, codeptr);
1195     }
1196   }
1197 #endif // OMPT_SUPPORT
1198 
1199   if (this_thr->th.th_team != serial_team) {
1200     // Nested level will be an index in the nested nthreads array
1201     int level = this_thr->th.th_team->t.t_level;
1202 
1203     if (serial_team->t.t_serialized) {
      /* this serial team was already used
         TODO: increase performance by making these locks more specific */
1206       kmp_team_t *new_team;
1207 
1208       __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1209 
1210       new_team =
1211           __kmp_allocate_team(this_thr->th.th_root, 1, 1,
1212 #if OMPT_SUPPORT
1213                               ompt_parallel_data,
1214 #endif
1215                               proc_bind, &this_thr->th.th_current_task->td_icvs,
1216                               0 USE_NESTED_HOT_ARG(NULL));
1217       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1218       KMP_ASSERT(new_team);
1219 
1220       /* setup new serialized team and install it */
1221       new_team->t.t_threads[0] = this_thr;
1222       new_team->t.t_parent = this_thr->th.th_team;
1223       serial_team = new_team;
1224       this_thr->th.th_serial_team = serial_team;
1225 
1226       KF_TRACE(
1227           10,
1228           ("__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
1229            global_tid, serial_team));
1230 
      /* TODO: the above breaks the requirement that serialized teams must keep
         working even when we run out of resources, since we may need to
         allocate a new one here */
1234     } else {
1235       KF_TRACE(
1236           10,
1237           ("__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
1238            global_tid, serial_team));
1239     }
1240 
1241     /* we have to initialize this serial team */
1242     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1243     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1244     KMP_DEBUG_ASSERT(this_thr->th.th_team != serial_team);
1245     serial_team->t.t_ident = loc;
1246     serial_team->t.t_serialized = 1;
1247     serial_team->t.t_nproc = 1;
1248     serial_team->t.t_parent = this_thr->th.th_team;
1249     serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched;
1250     this_thr->th.th_team = serial_team;
1251     serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
1252 
    KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d curtask=%p\n", global_tid,
1254                   this_thr->th.th_current_task));
1255     KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1);
1256     this_thr->th.th_current_task->td_flags.executing = 0;
1257 
1258     __kmp_push_current_task_to_thread(this_thr, serial_team, 0);
1259 
1260     /* TODO: GEH: do ICVs work for nested serialized teams? Don't we need an
1261        implicit task for each serialized task represented by
1262        team->t.t_serialized? */
1263     copy_icvs(&this_thr->th.th_current_task->td_icvs,
1264               &this_thr->th.th_current_task->td_parent->td_icvs);
1265 
1266     // Thread value exists in the nested nthreads array for the next nested
1267     // level
1268     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1269       this_thr->th.th_current_task->td_icvs.nproc =
1270           __kmp_nested_nth.nth[level + 1];
1271     }
1272 
1273     if (__kmp_nested_proc_bind.used &&
1274         (level + 1 < __kmp_nested_proc_bind.used)) {
1275       this_thr->th.th_current_task->td_icvs.proc_bind =
1276           __kmp_nested_proc_bind.bind_types[level + 1];
1277     }
1278 
1279 #if USE_DEBUGGER
1280     serial_team->t.t_pkfn = (microtask_t)(~0); // For the debugger.
1281 #endif
1282     this_thr->th.th_info.ds.ds_tid = 0;
1283 
1284     /* set thread cache values */
1285     this_thr->th.th_team_nproc = 1;
1286     this_thr->th.th_team_master = this_thr;
1287     this_thr->th.th_team_serialized = 1;
1288 
1289     serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
1290     serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
1291     serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save
1292 
1293     propagateFPControl(serial_team);
1294 
1295     /* check if we need to allocate dispatch buffers stack */
1296     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1297     if (!serial_team->t.t_dispatch->th_disp_buffer) {
1298       serial_team->t.t_dispatch->th_disp_buffer =
1299           (dispatch_private_info_t *)__kmp_allocate(
1300               sizeof(dispatch_private_info_t));
1301     }
1302     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1303 
1304     KMP_MB();
1305 
1306   } else {
1307     /* this serialized team is already being used,
1308      * that's fine, just add another nested level */
1309     KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team);
1310     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1311     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1312     ++serial_team->t.t_serialized;
1313     this_thr->th.th_team_serialized = serial_team->t.t_serialized;
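    // t_serialized counts the nesting depth of serialized regions sharing this
    // team; each level pushes its own dispatch buffer below, and the matching
    // end-of-serialized-parallel path pops it again.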
1314 
1315     // Nested level will be an index in the nested nthreads array
1316     int level = this_thr->th.th_team->t.t_level;
1317     // Thread value exists in the nested nthreads array for the next nested
1318     // level
1319     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1320       this_thr->th.th_current_task->td_icvs.nproc =
1321           __kmp_nested_nth.nth[level + 1];
1322     }
1323     serial_team->t.t_level++;
1324     KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d increasing nesting level "
1325                   "of serial team %p to %d\n",
1326                   global_tid, serial_team, serial_team->t.t_level));
1327 
1328     /* allocate/push dispatch buffers stack */
1329     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1330     {
1331       dispatch_private_info_t *disp_buffer =
1332           (dispatch_private_info_t *)__kmp_allocate(
1333               sizeof(dispatch_private_info_t));
1334       disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer;
1335       serial_team->t.t_dispatch->th_disp_buffer = disp_buffer;
1336     }
1337     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1338 
1339     KMP_MB();
1340   }
1341   KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);
1342 
1343   // Perform the display affinity functionality for
1344   // serialized parallel regions
1345   if (__kmp_display_affinity) {
1346     if (this_thr->th.th_prev_level != serial_team->t.t_level ||
1347         this_thr->th.th_prev_num_threads != 1) {
1348       // NULL means use the affinity-format-var ICV
1349       __kmp_aux_display_affinity(global_tid, NULL);
1350       this_thr->th.th_prev_level = serial_team->t.t_level;
1351       this_thr->th.th_prev_num_threads = 1;
1352     }
1353   }
1354 
1355   if (__kmp_env_consistency_check)
1356     __kmp_push_parallel(global_tid, NULL);
1357 #if OMPT_SUPPORT
1358   serial_team->t.ompt_team_info.master_return_address = codeptr;
1359   if (ompt_enabled.enabled &&
1360       this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
1361     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1362 
1363     ompt_lw_taskteam_t lw_taskteam;
1364     __ompt_lw_taskteam_init(&lw_taskteam, this_thr, global_tid,
1365                             &ompt_parallel_data, codeptr);
1366 
1367     __ompt_lw_taskteam_link(&lw_taskteam, this_thr, 1);
    // don't use lw_taskteam after linking. Its content was swapped.
1369 
1370     /* OMPT implicit task begin */
1371     implicit_task_data = OMPT_CUR_TASK_DATA(this_thr);
1372     if (ompt_enabled.ompt_callback_implicit_task) {
1373       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1374           ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),
1375           OMPT_CUR_TASK_DATA(this_thr), 1, __kmp_tid_from_gtid(global_tid), ompt_task_implicit); // TODO: Can this be ompt_task_initial?
1376       OMPT_CUR_TASK_INFO(this_thr)
1377           ->thread_num = __kmp_tid_from_gtid(global_tid);
1378     }
1379 
1380     /* OMPT state */
1381     this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
1382     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1383   }
1384 #endif
1385 }
1386 
1387 /* most of the work for a fork */
1388 /* return true if we really went parallel, false if serialized */
1389 int __kmp_fork_call(ident_t *loc, int gtid,
1390                     enum fork_context_e call_context, // Intel, GNU, ...
1391                     kmp_int32 argc, microtask_t microtask, launch_t invoker,
1392 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1393 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1394                     va_list *ap
1395 #else
1396                     va_list ap
1397 #endif
1398                     ) {
1399   void **argv;
1400   int i;
1401   int master_tid;
1402   int master_this_cons;
1403   kmp_team_t *team;
1404   kmp_team_t *parent_team;
1405   kmp_info_t *master_th;
1406   kmp_root_t *root;
1407   int nthreads;
1408   int master_active;
1409   int master_set_numthreads;
1410   int level;
1411   int active_level;
1412   int teams_level;
1413 #if KMP_NESTED_HOT_TEAMS
1414   kmp_hot_team_ptr_t **p_hot_teams;
1415 #endif
1416   { // KMP_TIME_BLOCK
1417     KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call);
1418     KMP_COUNT_VALUE(OMP_PARALLEL_args, argc);
1419 
1420     KA_TRACE(20, ("__kmp_fork_call: enter T#%d\n", gtid));
1421     if (__kmp_stkpadding > 0 && __kmp_root[gtid] != NULL) {
      /* Some systems prefer the stack for the root thread(s) to start with
         some gap from the parent stack to prevent false sharing. */
1424       void *dummy = KMP_ALLOCA(__kmp_stkpadding);
1425       /* These 2 lines below are so this does not get optimized out */
1426       if (__kmp_stkpadding > KMP_MAX_STKPADDING)
1427         __kmp_stkpadding += (short)((kmp_int64)dummy);
1428     }
1429 
1430     /* initialize if needed */
1431     KMP_DEBUG_ASSERT(
1432         __kmp_init_serial); // AC: potentially unsafe, not in sync with shutdown
1433     if (!TCR_4(__kmp_init_parallel))
1434       __kmp_parallel_initialize();
1435     __kmp_resume_if_soft_paused();
1436 
1437     /* setup current data */
1438     master_th = __kmp_threads[gtid]; // AC: potentially unsafe, not in sync with
1439     // shutdown
1440     parent_team = master_th->th.th_team;
1441     master_tid = master_th->th.th_info.ds.ds_tid;
1442     master_this_cons = master_th->th.th_local.this_construct;
1443     root = master_th->th.th_root;
1444     master_active = root->r.r_active;
1445     master_set_numthreads = master_th->th.th_set_nproc;
1446 
1447 #if OMPT_SUPPORT
1448     ompt_data_t ompt_parallel_data = ompt_data_none;
1449     ompt_data_t *parent_task_data;
1450     ompt_frame_t *ompt_frame;
1451     ompt_data_t *implicit_task_data;
1452     void *return_address = NULL;
1453 
1454     if (ompt_enabled.enabled) {
1455       __ompt_get_task_info_internal(0, NULL, &parent_task_data, &ompt_frame,
1456                                     NULL, NULL);
1457       return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
1458     }
1459 #endif
1460 
1461     // Nested level will be an index in the nested nthreads array
1462     level = parent_team->t.t_level;
    // used to launch non-serial teams even if nesting is not allowed
1464     active_level = parent_team->t.t_active_level;
1465     // needed to check nesting inside the teams
1466     teams_level = master_th->th.th_teams_level;
1467 #if KMP_NESTED_HOT_TEAMS
1468     p_hot_teams = &master_th->th.th_hot_teams;
1469     if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
1470       *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
1471           sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level);
1472       (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
1473       // it is either actual or not needed (when active_level > 0)
1474       (*p_hot_teams)[0].hot_team_nth = 1;
1475     }
1476 #endif
1477 
1478 #if OMPT_SUPPORT
1479     if (ompt_enabled.enabled) {
1480       if (ompt_enabled.ompt_callback_parallel_begin) {
1481         int team_size = master_set_numthreads
1482                             ? master_set_numthreads
1483                             : get__nproc_2(parent_team, master_tid);
1484         int flags = OMPT_INVOKER(call_context) |
1485                     ((microtask == (microtask_t)__kmp_teams_master)
1486                          ? ompt_parallel_league
1487                          : ompt_parallel_team);
1488         ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1489             parent_task_data, ompt_frame, &ompt_parallel_data, team_size, flags,
1490             return_address);
1491       }
1492       master_th->th.ompt_thread_info.state = ompt_state_overhead;
1493     }
1494 #endif
1495 
1496     master_th->th.th_ident = loc;
1497 
1498     if (master_th->th.th_teams_microtask && ap &&
1499         microtask != (microtask_t)__kmp_teams_master && level == teams_level) {
      // AC: This is the start of a parallel region nested inside a teams
      // construct. The team is actual (hot); all workers are ready at the
      // fork barrier. No lock is needed to do the minimal team
      // initialization and then release the workers.
1503       parent_team->t.t_ident = loc;
1504       __kmp_alloc_argv_entries(argc, parent_team, TRUE);
1505       parent_team->t.t_argc = argc;
1506       argv = (void **)parent_team->t.t_argv;
1507       for (i = argc - 1; i >= 0; --i)
1508 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1509 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1510         *argv++ = va_arg(*ap, void *);
1511 #else
1512         *argv++ = va_arg(ap, void *);
1513 #endif
      // Increment our nested depth level, but do not increase the
      // serialization count
1515       if (parent_team == master_th->th.th_serial_team) {
1516         // AC: we are in serialized parallel
1517         __kmpc_serialized_parallel(loc, gtid);
1518         KMP_DEBUG_ASSERT(parent_team->t.t_serialized > 1);
1519 
1520 #if OMPT_SUPPORT
1521         void *dummy;
1522         void **exit_frame_p;
1523 
1524         ompt_lw_taskteam_t lw_taskteam;
1525 
1526         if (ompt_enabled.enabled) {
1527           __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1528                                   &ompt_parallel_data, return_address);
1529           exit_frame_p = &(lw_taskteam.ompt_task_info.frame.exit_frame.ptr);
1530 
1531           __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
          // don't use lw_taskteam after linking. Contents were swapped.
1533 
1534           /* OMPT implicit task begin */
1535           implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1536           if (ompt_enabled.ompt_callback_implicit_task) {
1537             OMPT_CUR_TASK_INFO(master_th)
1538                 ->thread_num = __kmp_tid_from_gtid(gtid);
1539             ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1540                 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1541                 implicit_task_data, 1,
1542                 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
1543           }
1544 
1545           /* OMPT state */
1546           master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1547         } else {
1548           exit_frame_p = &dummy;
1549         }
1550 #endif
1551         // AC: need to decrement t_serialized for enquiry functions to work
1552         // correctly, will restore at join time
1553         parent_team->t.t_serialized--;
1554 
1555         {
1556           KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1557           KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1558           __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv
1559 #if OMPT_SUPPORT
1560                                  ,
1561                                  exit_frame_p
1562 #endif
1563                                  );
1564         }
1565 
1566 #if OMPT_SUPPORT
1567         if (ompt_enabled.enabled) {
1568           *exit_frame_p = NULL;
1569           OMPT_CUR_TASK_INFO(master_th)->frame.exit_frame = ompt_data_none;
1570           if (ompt_enabled.ompt_callback_implicit_task) {
1571             ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1572                 ompt_scope_end, NULL, implicit_task_data, 1,
1573                 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
1574           }
1575           ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1576           __ompt_lw_taskteam_unlink(master_th);
1577           if (ompt_enabled.ompt_callback_parallel_end) {
1578             ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1579                 &ompt_parallel_data, OMPT_CUR_TASK_DATA(master_th),
1580                 OMPT_INVOKER(call_context) | ompt_parallel_team,
1581                 return_address);
1582           }
1583           master_th->th.ompt_thread_info.state = ompt_state_overhead;
1584         }
1585 #endif
1586         return TRUE;
1587       }
1588 
1589       parent_team->t.t_pkfn = microtask;
1590       parent_team->t.t_invoke = invoker;
1591       KMP_ATOMIC_INC(&root->r.r_in_parallel);
1592       parent_team->t.t_active_level++;
1593       parent_team->t.t_level++;
1594       parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save
1595 
1596 #if OMPT_SUPPORT
1597       if (ompt_enabled.enabled) {
1598         ompt_lw_taskteam_t lw_taskteam;
1599         __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1600                                 &ompt_parallel_data, return_address);
1601         __ompt_lw_taskteam_link(&lw_taskteam, master_th, 1, true);
1602       }
1603 #endif
1604 
1605       /* Change number of threads in the team if requested */
1606       if (master_set_numthreads) { // The parallel has num_threads clause
1607         if (master_set_numthreads < master_th->th.th_teams_size.nth) {
          // AC: the number of threads can only be reduced dynamically here;
          // it cannot be increased
1609           kmp_info_t **other_threads = parent_team->t.t_threads;
1610           parent_team->t.t_nproc = master_set_numthreads;
1611           for (i = 0; i < master_set_numthreads; ++i) {
1612             other_threads[i]->th.th_team_nproc = master_set_numthreads;
1613           }
1614           // Keep extra threads hot in the team for possible next parallels
1615         }
1616         master_th->th.th_set_nproc = 0;
1617       }
1618 
1619 #if USE_DEBUGGER
1620       if (__kmp_debugging) { // Let debugger override number of threads.
1621         int nth = __kmp_omp_num_threads(loc);
1622         if (nth > 0) { // 0 means debugger doesn't want to change num threads
1623           master_set_numthreads = nth;
1624         }
1625       }
1626 #endif
1627 
1628 #if USE_ITT_BUILD
1629       if (((__itt_frame_submit_v3_ptr && __itt_get_timestamp_ptr) ||
1630            KMP_ITT_DEBUG) &&
1631           __kmp_forkjoin_frames_mode == 3 &&
1632           parent_team->t.t_active_level == 1 // only report frames at level 1
1633           && master_th->th.th_teams_size.nteams == 1) {
1634         kmp_uint64 tmp_time = __itt_get_timestamp();
1635         master_th->th.th_frame_time = tmp_time;
1636         parent_team->t.t_region_time = tmp_time;
1637       }
1638       if (__itt_stack_caller_create_ptr) {
1639         // create new stack stitching id before entering fork barrier
1640         parent_team->t.t_stack_id = __kmp_itt_stack_caller_create();
1641       }
1642 #endif /* USE_ITT_BUILD */
1643 
1644       KF_TRACE(10, ("__kmp_fork_call: before internal fork: root=%p, team=%p, "
1645                     "master_th=%p, gtid=%d\n",
1646                     root, parent_team, master_th, gtid));
1647       __kmp_internal_fork(loc, gtid, parent_team);
1648       KF_TRACE(10, ("__kmp_fork_call: after internal fork: root=%p, team=%p, "
1649                     "master_th=%p, gtid=%d\n",
1650                     root, parent_team, master_th, gtid));
1651 
1652       /* Invoke microtask for MASTER thread */
1653       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
1654                     parent_team->t.t_id, parent_team->t.t_pkfn));
1655 
1656       if (!parent_team->t.t_invoke(gtid)) {
1657         KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
1658       }
1659       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
1660                     parent_team->t.t_id, parent_team->t.t_pkfn));
1661       KMP_MB(); /* Flush all pending memory write invalidates.  */
1662 
1663       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
1664 
1665       return TRUE;
1666     } // Parallel closely nested in teams construct
1667 
1668 #if KMP_DEBUG
1669     if (__kmp_tasking_mode != tskm_immediate_exec) {
1670       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
1671                        parent_team->t.t_task_team[master_th->th.th_task_state]);
1672     }
1673 #endif
1674 
1675     if (parent_team->t.t_active_level >=
1676         master_th->th.th_current_task->td_icvs.max_active_levels) {
1677       nthreads = 1;
1678     } else {
1679       int enter_teams = ((ap == NULL && active_level == 0) ||
1680                          (ap && teams_level > 0 && teams_level == level));
1681       nthreads =
1682           master_set_numthreads
1683               ? master_set_numthreads
1684               : get__nproc_2(
1685                     parent_team,
1686                     master_tid); // TODO: get nproc directly from current task
1687 
      // Check whether we need to take the forkjoin lock (there is no need
      // for a serialized parallel outside of a teams construct). This code
      // was moved here from __kmp_reserve_threads() to speed up nested
      // serialized parallels.
1691       if (nthreads > 1) {
1692         if ((get__max_active_levels(master_th) == 1 &&
1693              (root->r.r_in_parallel && !enter_teams)) ||
1694             (__kmp_library == library_serial)) {
1695           KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
1696                         " threads\n",
1697                         gtid, nthreads));
1698           nthreads = 1;
1699         }
1700       }
1701       if (nthreads > 1) {
1702         /* determine how many new threads we can use */
1703         __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1704         /* AC: If we execute teams from parallel region (on host), then teams
1705            should be created but each can only have 1 thread if nesting is
1706            disabled. If teams called from serial region, then teams and their
1707            threads should be created regardless of the nesting setting. */
1708         nthreads = __kmp_reserve_threads(root, parent_team, master_tid,
1709                                          nthreads, enter_teams);
1710         if (nthreads == 1) {
          // Free the lock for single-threaded execution here; for
          // multi-threaded execution it will be freed later, after the team
          // of threads has been created and initialized
1714           __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1715         }
1716       }
1717     }
1718     KMP_DEBUG_ASSERT(nthreads > 0);
1719 
1720     // If we temporarily changed the set number of threads then restore it now
1721     master_th->th.th_set_nproc = 0;
1722 
1723     /* create a serialized parallel region? */
1724     if (nthreads == 1) {
1725 /* josh todo: hypothetical question: what do we do for OS X*? */
1726 #if KMP_OS_LINUX &&                                                            \
1727     (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
1728       void *args[argc];
1729 #else
1730       void **args = (void **)KMP_ALLOCA(argc * sizeof(void *));
1731 #endif /* KMP_OS_LINUX && ( KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || \
1732           KMP_ARCH_AARCH64) */
1733 
1734       KA_TRACE(20,
1735                ("__kmp_fork_call: T#%d serializing parallel region\n", gtid));
1736 
1737       __kmpc_serialized_parallel(loc, gtid);
1738 
1739       if (call_context == fork_context_intel) {
1740         /* TODO this sucks, use the compiler itself to pass args! :) */
1741         master_th->th.th_serial_team->t.t_ident = loc;
1742         if (!ap) {
1743           // revert change made in __kmpc_serialized_parallel()
1744           master_th->th.th_serial_team->t.t_level--;
1745 // Get args from parent team for teams construct
1746 
1747 #if OMPT_SUPPORT
1748           void *dummy;
1749           void **exit_frame_p;
1750           ompt_task_info_t *task_info;
1751 
1752           ompt_lw_taskteam_t lw_taskteam;
1753 
1754           if (ompt_enabled.enabled) {
1755             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1756                                     &ompt_parallel_data, return_address);
1757 
1758             __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
            // don't use lw_taskteam after linking. Contents were swapped.
1760 
1761             task_info = OMPT_CUR_TASK_INFO(master_th);
1762             exit_frame_p = &(task_info->frame.exit_frame.ptr);
1763             if (ompt_enabled.ompt_callback_implicit_task) {
1764               OMPT_CUR_TASK_INFO(master_th)
1765                   ->thread_num = __kmp_tid_from_gtid(gtid);
1766               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1767                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1768                   &(task_info->task_data), 1,
1769                   OMPT_CUR_TASK_INFO(master_th)->thread_num,
1770                   ompt_task_implicit);
1771             }
1772 
1773             /* OMPT state */
1774             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1775           } else {
1776             exit_frame_p = &dummy;
1777           }
1778 #endif
1779 
1780           {
1781             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1782             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1783             __kmp_invoke_microtask(microtask, gtid, 0, argc,
1784                                    parent_team->t.t_argv
1785 #if OMPT_SUPPORT
1786                                    ,
1787                                    exit_frame_p
1788 #endif
1789                                    );
1790           }
1791 
1792 #if OMPT_SUPPORT
1793           if (ompt_enabled.enabled) {
1794             *exit_frame_p = NULL;
1795             if (ompt_enabled.ompt_callback_implicit_task) {
1796               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1797                   ompt_scope_end, NULL, &(task_info->task_data), 1,
1798                   OMPT_CUR_TASK_INFO(master_th)->thread_num,
1799                   ompt_task_implicit);
1800             }
1801             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1802             __ompt_lw_taskteam_unlink(master_th);
1803             if (ompt_enabled.ompt_callback_parallel_end) {
1804               ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1805                   &ompt_parallel_data, parent_task_data,
1806                   OMPT_INVOKER(call_context) | ompt_parallel_team,
1807                   return_address);
1808             }
1809             master_th->th.ompt_thread_info.state = ompt_state_overhead;
1810           }
1811 #endif
1812         } else if (microtask == (microtask_t)__kmp_teams_master) {
1813           KMP_DEBUG_ASSERT(master_th->th.th_team ==
1814                            master_th->th.th_serial_team);
1815           team = master_th->th.th_team;
1816           // team->t.t_pkfn = microtask;
1817           team->t.t_invoke = invoker;
1818           __kmp_alloc_argv_entries(argc, team, TRUE);
1819           team->t.t_argc = argc;
1820           argv = (void **)team->t.t_argv;
1821           if (ap) {
1822             for (i = argc - 1; i >= 0; --i)
1823 // TODO: revert workaround for Intel(R) 64 tracker #96
1824 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1825               *argv++ = va_arg(*ap, void *);
1826 #else
1827               *argv++ = va_arg(ap, void *);
1828 #endif
1829           } else {
1830             for (i = 0; i < argc; ++i)
1831               // Get args from parent team for teams construct
1832               argv[i] = parent_team->t.t_argv[i];
1833           }
1834           // AC: revert change made in __kmpc_serialized_parallel()
1835           //     because initial code in teams should have level=0
1836           team->t.t_level--;
1837           // AC: call special invoker for outer "parallel" of teams construct
1838           invoker(gtid);
1839 #if OMPT_SUPPORT
1840           if (ompt_enabled.enabled) {
1841             ompt_task_info_t *task_info = OMPT_CUR_TASK_INFO(master_th);
1842             if (ompt_enabled.ompt_callback_implicit_task) {
1843               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1844                   ompt_scope_end, NULL, &(task_info->task_data), 0,
1845                   OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_initial);
1846             }
1847             if (ompt_enabled.ompt_callback_parallel_end) {
1848               ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1849                   &ompt_parallel_data, parent_task_data,
1850                   OMPT_INVOKER(call_context) | ompt_parallel_league,
1851                   return_address);
1852             }
1853             master_th->th.ompt_thread_info.state = ompt_state_overhead;
1854           }
1855 #endif
1856         } else {
1857           argv = args;
1858           for (i = argc - 1; i >= 0; --i)
1859 // TODO: revert workaround for Intel(R) 64 tracker #96
1860 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1861             *argv++ = va_arg(*ap, void *);
1862 #else
1863             *argv++ = va_arg(ap, void *);
1864 #endif
1865           KMP_MB();
1866 
1867 #if OMPT_SUPPORT
1868           void *dummy;
1869           void **exit_frame_p;
1870           ompt_task_info_t *task_info;
1871 
1872           ompt_lw_taskteam_t lw_taskteam;
1873 
1874           if (ompt_enabled.enabled) {
1875             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1876                                     &ompt_parallel_data, return_address);
1877             __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
            // don't use lw_taskteam after linking. Contents were swapped.
1879             task_info = OMPT_CUR_TASK_INFO(master_th);
1880             exit_frame_p = &(task_info->frame.exit_frame.ptr);
1881 
1882             /* OMPT implicit task begin */
1883             implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1884             if (ompt_enabled.ompt_callback_implicit_task) {
1885               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1886                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1887                   implicit_task_data, 1, __kmp_tid_from_gtid(gtid),
1888                   ompt_task_implicit);
1889               OMPT_CUR_TASK_INFO(master_th)
1890                   ->thread_num = __kmp_tid_from_gtid(gtid);
1891             }
1892 
1893             /* OMPT state */
1894             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1895           } else {
1896             exit_frame_p = &dummy;
1897           }
1898 #endif
1899 
1900           {
1901             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1902             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1903             __kmp_invoke_microtask(microtask, gtid, 0, argc, args
1904 #if OMPT_SUPPORT
1905                                    ,
1906                                    exit_frame_p
1907 #endif
1908                                    );
1909           }
1910 
1911 #if OMPT_SUPPORT
1912           if (ompt_enabled.enabled) {
1913             *exit_frame_p = NULL;
1914             if (ompt_enabled.ompt_callback_implicit_task) {
1915               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1916                   ompt_scope_end, NULL, &(task_info->task_data), 1,
1917                   OMPT_CUR_TASK_INFO(master_th)->thread_num,
1918                   ompt_task_implicit);
1919             }
1920 
1921             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1922             __ompt_lw_taskteam_unlink(master_th);
1923             if (ompt_enabled.ompt_callback_parallel_end) {
1924               ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1925                   &ompt_parallel_data, parent_task_data,
1926                   OMPT_INVOKER(call_context) | ompt_parallel_team,
1927                   return_address);
1928             }
1929             master_th->th.ompt_thread_info.state = ompt_state_overhead;
1930           }
1931 #endif
1932         }
1933       } else if (call_context == fork_context_gnu) {
1934 #if OMPT_SUPPORT
1935         ompt_lw_taskteam_t lwt;
1936         __ompt_lw_taskteam_init(&lwt, master_th, gtid, &ompt_parallel_data,
1937                                 return_address);
1938 
1939         lwt.ompt_task_info.frame.exit_frame = ompt_data_none;
1940         __ompt_lw_taskteam_link(&lwt, master_th, 1);
// don't use lw_taskteam after linking. Contents were swapped.
1942 #endif
1943 
1944         // we were called from GNU native code
1945         KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1946         return FALSE;
1947       } else {
1948         KMP_ASSERT2(call_context < fork_context_last,
1949                     "__kmp_fork_call: unknown fork_context parameter");
1950       }
1951 
1952       KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1953       KMP_MB();
1954       return FALSE;
1955     } // if (nthreads == 1)
1956 
    // GEH: only modify the executing flag in the case when not serialized;
    //      the serialized case is handled in __kmpc_serialized_parallel
1959     KF_TRACE(10, ("__kmp_fork_call: parent_team_aclevel=%d, master_th=%p, "
1960                   "curtask=%p, curtask_max_aclevel=%d\n",
1961                   parent_team->t.t_active_level, master_th,
1962                   master_th->th.th_current_task,
1963                   master_th->th.th_current_task->td_icvs.max_active_levels));
1964     // TODO: GEH - cannot do this assertion because root thread not set up as
1965     // executing
1966     // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 1 );
1967     master_th->th.th_current_task->td_flags.executing = 0;
1968 
1969     if (!master_th->th.th_teams_microtask || level > teams_level) {
1970       /* Increment our nested depth level */
1971       KMP_ATOMIC_INC(&root->r.r_in_parallel);
1972     }
1973 
1974     // See if we need to make a copy of the ICVs.
1975     int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc;
1976     if ((level + 1 < __kmp_nested_nth.used) &&
1977         (__kmp_nested_nth.nth[level + 1] != nthreads_icv)) {
1978       nthreads_icv = __kmp_nested_nth.nth[level + 1];
1979     } else {
1980       nthreads_icv = 0; // don't update
1981     }
1982 
1983     // Figure out the proc_bind_policy for the new team.
1984     kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
1985     kmp_proc_bind_t proc_bind_icv =
1986         proc_bind_default; // proc_bind_default means don't update
1987     if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1988       proc_bind = proc_bind_false;
1989     } else {
1990       if (proc_bind == proc_bind_default) {
1991         // No proc_bind clause specified; use current proc-bind-var for this
1992         // parallel region
1993         proc_bind = master_th->th.th_current_task->td_icvs.proc_bind;
1994       }
1995       /* else: The proc_bind policy was specified explicitly on parallel clause.
1996          This overrides proc-bind-var for this parallel region, but does not
1997          change proc-bind-var. */
1998       // Figure the value of proc-bind-var for the child threads.
1999       if ((level + 1 < __kmp_nested_proc_bind.used) &&
2000           (__kmp_nested_proc_bind.bind_types[level + 1] !=
2001            master_th->th.th_current_task->td_icvs.proc_bind)) {
2002         proc_bind_icv = __kmp_nested_proc_bind.bind_types[level + 1];
2003       }
2004     }
2005 
2006     // Reset for next parallel region
2007     master_th->th.th_set_proc_bind = proc_bind_default;
2008 
2009     if ((nthreads_icv > 0) || (proc_bind_icv != proc_bind_default)) {
2010       kmp_internal_control_t new_icvs;
2011       copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
2012       new_icvs.next = NULL;
2013       if (nthreads_icv > 0) {
2014         new_icvs.nproc = nthreads_icv;
2015       }
2016       if (proc_bind_icv != proc_bind_default) {
2017         new_icvs.proc_bind = proc_bind_icv;
2018       }
2019 
2020       /* allocate a new parallel team */
2021       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2022       team = __kmp_allocate_team(root, nthreads, nthreads,
2023 #if OMPT_SUPPORT
2024                                  ompt_parallel_data,
2025 #endif
2026                                  proc_bind, &new_icvs,
2027                                  argc USE_NESTED_HOT_ARG(master_th));
2028     } else {
2029       /* allocate a new parallel team */
2030       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2031       team = __kmp_allocate_team(root, nthreads, nthreads,
2032 #if OMPT_SUPPORT
2033                                  ompt_parallel_data,
2034 #endif
2035                                  proc_bind,
2036                                  &master_th->th.th_current_task->td_icvs,
2037                                  argc USE_NESTED_HOT_ARG(master_th));
2038     }
2039     KF_TRACE(
2040         10, ("__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));
2041 
2042     /* setup the new team */
2043     KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
2044     KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
2045     KMP_CHECK_UPDATE(team->t.t_ident, loc);
2046     KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
2047     KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
2048 #if OMPT_SUPPORT
2049     KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
2050                           return_address);
2051 #endif
2052     KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
2053     // TODO: parent_team->t.t_level == INT_MAX ???
2054     if (!master_th->th.th_teams_microtask || level > teams_level) {
2055       int new_level = parent_team->t.t_level + 1;
2056       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2057       new_level = parent_team->t.t_active_level + 1;
2058       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2059     } else {
2060       // AC: Do not increase parallel level at start of the teams construct
2061       int new_level = parent_team->t.t_level;
2062       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2063       new_level = parent_team->t.t_active_level;
2064       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2065     }
2066     kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
2067     // set master's schedule as new run-time schedule
2068     KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
2069 
2070     KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
2071     KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
2072 
2073     // Update the floating point rounding in the team if required.
2074     propagateFPControl(team);
2075 
2076     if (__kmp_tasking_mode != tskm_immediate_exec) {
      // Set the master's task team to the team's task team. Unless this is a
      // hot team, it should be NULL.
2079       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2080                        parent_team->t.t_task_team[master_th->th.th_task_state]);
2081       KA_TRACE(20, ("__kmp_fork_call: Master T#%d pushing task_team %p / team "
2082                     "%p, new task_team %p / team %p\n",
2083                     __kmp_gtid_from_thread(master_th),
2084                     master_th->th.th_task_team, parent_team,
2085                     team->t.t_task_team[master_th->th.th_task_state], team));
2086 
2087       if (active_level || master_th->th.th_task_team) {
        // Save the master's current task_state on the memo stack
2089         KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2090         if (master_th->th.th_task_state_top >=
2091             master_th->th.th_task_state_stack_sz) { // increase size
2092           kmp_uint32 new_size = 2 * master_th->th.th_task_state_stack_sz;
2093           kmp_uint8 *old_stack, *new_stack;
2094           kmp_uint32 i;
2095           new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
2096           for (i = 0; i < master_th->th.th_task_state_stack_sz; ++i) {
2097             new_stack[i] = master_th->th.th_task_state_memo_stack[i];
2098           }
2099           for (i = master_th->th.th_task_state_stack_sz; i < new_size;
2100                ++i) { // zero-init rest of stack
2101             new_stack[i] = 0;
2102           }
2103           old_stack = master_th->th.th_task_state_memo_stack;
2104           master_th->th.th_task_state_memo_stack = new_stack;
2105           master_th->th.th_task_state_stack_sz = new_size;
2106           __kmp_free(old_stack);
2107         }
2108         // Store master's task_state on stack
2109         master_th->th
2110             .th_task_state_memo_stack[master_th->th.th_task_state_top] =
2111             master_th->th.th_task_state;
2112         master_th->th.th_task_state_top++;
2113 #if KMP_NESTED_HOT_TEAMS
2114         if (master_th->th.th_hot_teams &&
2115             active_level < __kmp_hot_teams_max_level &&
2116             team == master_th->th.th_hot_teams[active_level].hot_team) {
2117           // Restore master's nested state if nested hot team
2118           master_th->th.th_task_state =
2119               master_th->th
2120                   .th_task_state_memo_stack[master_th->th.th_task_state_top];
2121         } else {
2122 #endif
2123           master_th->th.th_task_state = 0;
2124 #if KMP_NESTED_HOT_TEAMS
2125         }
2126 #endif
2127       }
2128 #if !KMP_NESTED_HOT_TEAMS
2129       KMP_DEBUG_ASSERT((master_th->th.th_task_team == NULL) ||
2130                        (team == root->r.r_hot_team));
2131 #endif
2132     }
2133 
2134     KA_TRACE(
2135         20,
2136         ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
2137          gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
2138          team->t.t_nproc));
2139     KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2140                      (team->t.t_master_tid == 0 &&
2141                       (team->t.t_parent == root->r.r_root_team ||
2142                        team->t.t_parent->t.t_serialized)));
2143     KMP_MB();
2144 
2145     /* now, setup the arguments */
2146     argv = (void **)team->t.t_argv;
2147     if (ap) {
2148       for (i = argc - 1; i >= 0; --i) {
2149 // TODO: revert workaround for Intel(R) 64 tracker #96
2150 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
2151         void *new_argv = va_arg(*ap, void *);
2152 #else
2153         void *new_argv = va_arg(ap, void *);
2154 #endif
2155         KMP_CHECK_UPDATE(*argv, new_argv);
2156         argv++;
2157       }
2158     } else {
2159       for (i = 0; i < argc; ++i) {
2160         // Get args from parent team for teams construct
2161         KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
2162       }
2163     }
2164 
2165     /* now actually fork the threads */
2166     KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
2167     if (!root->r.r_active) // Only do assignment if it prevents cache ping-pong
2168       root->r.r_active = TRUE;
2169 
2170     __kmp_fork_team_threads(root, team, master_th, gtid);
2171     __kmp_setup_icv_copy(team, nthreads,
2172                          &master_th->th.th_current_task->td_icvs, loc);
2173 
2174 #if OMPT_SUPPORT
2175     master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
2176 #endif
2177 
2178     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2179 
2180 #if USE_ITT_BUILD
2181     if (team->t.t_active_level == 1 // only report frames at level 1
2182         && !master_th->th.th_teams_microtask) { // not in teams construct
2183 #if USE_ITT_NOTIFY
2184       if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2185           (__kmp_forkjoin_frames_mode == 3 ||
2186            __kmp_forkjoin_frames_mode == 1)) {
2187         kmp_uint64 tmp_time = 0;
2188         if (__itt_get_timestamp_ptr)
2189           tmp_time = __itt_get_timestamp();
2190         // Internal fork - report frame begin
2191         master_th->th.th_frame_time = tmp_time;
2192         if (__kmp_forkjoin_frames_mode == 3)
2193           team->t.t_region_time = tmp_time;
2194       } else
2195 // only one notification scheme (either "submit" or "forking/joined", not both)
2196 #endif /* USE_ITT_NOTIFY */
2197           if ((__itt_frame_begin_v3_ptr || KMP_ITT_DEBUG) &&
2198               __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode) {
2199         // Mark start of "parallel" region for Intel(R) VTune(TM) analyzer.
2200         __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
2201       }
2202     }
2203 #endif /* USE_ITT_BUILD */
2204 
2205     /* now go on and do the work */
2206     KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
2207     KMP_MB();
2208     KF_TRACE(10,
2209              ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2210               root, team, master_th, gtid));
2211 
2212 #if USE_ITT_BUILD
2213     if (__itt_stack_caller_create_ptr) {
2214       team->t.t_stack_id =
2215           __kmp_itt_stack_caller_create(); // create new stack stitching id
2216       // before entering fork barrier
2217     }
2218 #endif /* USE_ITT_BUILD */
2219 
    // AC: skip __kmp_internal_fork at the teams construct; let only the
    // master threads execute
2222     if (ap) {
2223       __kmp_internal_fork(loc, gtid, team);
2224       KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
2225                     "master_th=%p, gtid=%d\n",
2226                     root, team, master_th, gtid));
2227     }
2228 
2229     if (call_context == fork_context_gnu) {
2230       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2231       return TRUE;
2232     }
2233 
2234     /* Invoke microtask for MASTER thread */
2235     KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
2236                   team->t.t_id, team->t.t_pkfn));
2237   } // END of timer KMP_fork_call block
2238 
2239 #if KMP_STATS_ENABLED
  // If beginning a teams construct, then change the thread state
2241   stats_state_e previous_state = KMP_GET_THREAD_STATE();
2242   if (!ap) {
2243     KMP_SET_THREAD_STATE(stats_state_e::TEAMS_REGION);
2244   }
2245 #endif
2246 
2247   if (!team->t.t_invoke(gtid)) {
2248     KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
2249   }
2250 
2251 #if KMP_STATS_ENABLED
  // If this was the beginning of a teams construct, then reset the thread
  // state
2253   if (!ap) {
2254     KMP_SET_THREAD_STATE(previous_state);
2255   }
2256 #endif
2257 
2258   KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
2259                 team->t.t_id, team->t.t_pkfn));
2260   KMP_MB(); /* Flush all pending memory write invalidates.  */
2261 
2262   KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2263 
2264 #if OMPT_SUPPORT
2265   if (ompt_enabled.enabled) {
2266     master_th->th.ompt_thread_info.state = ompt_state_overhead;
2267   }
2268 #endif
2269 
2270   return TRUE;
2271 }
2272 
2273 #if OMPT_SUPPORT
2274 static inline void __kmp_join_restore_state(kmp_info_t *thread,
2275                                             kmp_team_t *team) {
2276   // restore state outside the region
2277   thread->th.ompt_thread_info.state =
2278       ((team->t.t_serialized) ? ompt_state_work_serial
2279                               : ompt_state_work_parallel);
2280 }
2281 
2282 static inline void __kmp_join_ompt(int gtid, kmp_info_t *thread,
2283                                    kmp_team_t *team, ompt_data_t *parallel_data,
2284                                    int flags, void *codeptr) {
2285   ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2286   if (ompt_enabled.ompt_callback_parallel_end) {
2287     ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
2288         parallel_data, &(task_info->task_data), flags, codeptr);
2289   }
2290 
2291   task_info->frame.enter_frame = ompt_data_none;
2292   __kmp_join_restore_state(thread, team);
2293 }
2294 #endif
2295 
2296 void __kmp_join_call(ident_t *loc, int gtid
2297 #if OMPT_SUPPORT
2298                      ,
2299                      enum fork_context_e fork_context
2300 #endif
2301                      ,
2302                      int exit_teams) {
2303   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call);
2304   kmp_team_t *team;
2305   kmp_team_t *parent_team;
2306   kmp_info_t *master_th;
2307   kmp_root_t *root;
2308   int master_active;
2309 
2310   KA_TRACE(20, ("__kmp_join_call: enter T#%d\n", gtid));
2311 
2312   /* setup current data */
2313   master_th = __kmp_threads[gtid];
2314   root = master_th->th.th_root;
2315   team = master_th->th.th_team;
2316   parent_team = team->t.t_parent;
2317 
2318   master_th->th.th_ident = loc;
2319 
2320 #if OMPT_SUPPORT
2321   void *team_microtask = (void *)team->t.t_pkfn;
2322   if (ompt_enabled.enabled) {
2323     master_th->th.ompt_thread_info.state = ompt_state_overhead;
2324   }
2325 #endif
2326 
2327 #if KMP_DEBUG
2328   if (__kmp_tasking_mode != tskm_immediate_exec && !exit_teams) {
2329     KA_TRACE(20, ("__kmp_join_call: T#%d, old team = %p old task_team = %p, "
2330                   "th_task_team = %p\n",
2331                   __kmp_gtid_from_thread(master_th), team,
2332                   team->t.t_task_team[master_th->th.th_task_state],
2333                   master_th->th.th_task_team));
2334     KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2335                      team->t.t_task_team[master_th->th.th_task_state]);
2336   }
2337 #endif
2338 
2339   if (team->t.t_serialized) {
2340     if (master_th->th.th_teams_microtask) {
2341       // We are in teams construct
2342       int level = team->t.t_level;
2343       int tlevel = master_th->th.th_teams_level;
2344       if (level == tlevel) {
2345         // AC: we haven't incremented it earlier at start of teams construct,
2346         //     so do it here - at the end of teams construct
2347         team->t.t_level++;
2348       } else if (level == tlevel + 1) {
2349         // AC: we are exiting parallel inside teams, need to increment
2350         // serialization in order to restore it in the next call to
2351         // __kmpc_end_serialized_parallel
2352         team->t.t_serialized++;
2353       }
2354     }
2355     __kmpc_end_serialized_parallel(loc, gtid);
2356 
2357 #if OMPT_SUPPORT
2358     if (ompt_enabled.enabled) {
2359       __kmp_join_restore_state(master_th, parent_team);
2360     }
2361 #endif
2362 
2363     return;
2364   }
2365 
2366   master_active = team->t.t_master_active;
2367 
2368   if (!exit_teams) {
2369     // AC: No barrier for internal teams at exit from teams construct.
2370     //     But there is barrier for external team (league).
2371     __kmp_internal_join(loc, gtid, team);
2372   } else {
    // AC: no tasking in teams (out of any parallel)
    master_th->th.th_task_state = 0;
2375   }
2376 
2377   KMP_MB();
2378 
2379 #if OMPT_SUPPORT
2380   ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
2381   void *codeptr = team->t.ompt_team_info.master_return_address;
2382 #endif
2383 
2384 #if USE_ITT_BUILD
2385   if (__itt_stack_caller_create_ptr) {
2386     // destroy the stack stitching id after join barrier
2387     __kmp_itt_stack_caller_destroy((__itt_caller)team->t.t_stack_id);
2388   }
2389   // Mark end of "parallel" region for Intel(R) VTune(TM) analyzer.
2390   if (team->t.t_active_level == 1 &&
2391       (!master_th->th.th_teams_microtask || /* not in teams construct */
2392        master_th->th.th_teams_size.nteams == 1)) {
2393     master_th->th.th_ident = loc;
2394     // only one notification scheme (either "submit" or "forking/joined", not
2395     // both)
2396     if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2397         __kmp_forkjoin_frames_mode == 3)
2398       __kmp_itt_frame_submit(gtid, team->t.t_region_time,
2399                              master_th->th.th_frame_time, 0, loc,
2400                              master_th->th.th_team_nproc, 1);
2401     else if ((__itt_frame_end_v3_ptr || KMP_ITT_DEBUG) &&
2402              !__kmp_forkjoin_frames_mode && __kmp_forkjoin_frames)
2403       __kmp_itt_region_joined(gtid);
2404   } // active_level == 1
2405 #endif /* USE_ITT_BUILD */
2406 
2407   if (master_th->th.th_teams_microtask && !exit_teams &&
2408       team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
2409       team->t.t_level == master_th->th.th_teams_level + 1) {
2410 // AC: We need to leave the team structure intact at the end of parallel
2411 // inside the teams construct, so that at the next parallel same (hot) team
2412 // works, only adjust nesting levels
2413 #if OMPT_SUPPORT
2414     ompt_data_t ompt_parallel_data = ompt_data_none;
2415     if (ompt_enabled.enabled) {
2416       ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2417       if (ompt_enabled.ompt_callback_implicit_task) {
2418         int ompt_team_size = team->t.t_nproc;
2419         ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
2420             ompt_scope_end, NULL, &(task_info->task_data), ompt_team_size,
2421             OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
2422       }
2423       task_info->frame.exit_frame = ompt_data_none;
2424       task_info->task_data = ompt_data_none;
2425       ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
2426       __ompt_lw_taskteam_unlink(master_th);
2427     }
2428 #endif
2429     /* Decrement our nested depth level */
2430     team->t.t_level--;
2431     team->t.t_active_level--;
2432     KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2433 
2434     // Restore number of threads in the team if needed. This code relies on
2435     // the proper adjustment of th_teams_size.nth after the fork in
2436     // __kmp_teams_master on each teams master in the case that
2437     // __kmp_reserve_threads reduced it.
2438     if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) {
2439       int old_num = master_th->th.th_team_nproc;
2440       int new_num = master_th->th.th_teams_size.nth;
2441       kmp_info_t **other_threads = team->t.t_threads;
2442       team->t.t_nproc = new_num;
2443       for (int i = 0; i < old_num; ++i) {
2444         other_threads[i]->th.th_team_nproc = new_num;
2445       }
      // Adjust the states of the unused threads of the team
2447       for (int i = old_num; i < new_num; ++i) {
2448         // Re-initialize thread's barrier data.
2449         KMP_DEBUG_ASSERT(other_threads[i]);
2450         kmp_balign_t *balign = other_threads[i]->th.th_bar;
2451         for (int b = 0; b < bs_last_barrier; ++b) {
2452           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
2453           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
2454 #if USE_DEBUGGER
2455           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
2456 #endif
2457         }
2458         if (__kmp_tasking_mode != tskm_immediate_exec) {
2459           // Synchronize thread's task state
2460           other_threads[i]->th.th_task_state = master_th->th.th_task_state;
2461         }
2462       }
2463     }
2464 
2465 #if OMPT_SUPPORT
2466     if (ompt_enabled.enabled) {
2467       __kmp_join_ompt(gtid, master_th, parent_team, &ompt_parallel_data,
2468                       OMPT_INVOKER(fork_context) | ompt_parallel_team, codeptr);
2469     }
2470 #endif
2471 
2472     return;
2473   }
2474 
2475   /* do cleanup and restore the parent team */
2476   master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
2477   master_th->th.th_local.this_construct = team->t.t_master_this_cons;
2478 
2479   master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2480 
2481   /* jc: The following lock has instructions with REL and ACQ semantics,
2482      separating the parallel user code called in this parallel region
2483      from the serial user code called after this function returns. */
2484   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2485 
2486   if (!master_th->th.th_teams_microtask ||
2487       team->t.t_level > master_th->th.th_teams_level) {
2488     /* Decrement our nested depth level */
2489     KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2490   }
2491   KMP_DEBUG_ASSERT(root->r.r_in_parallel >= 0);
2492 
2493 #if OMPT_SUPPORT
2494   if (ompt_enabled.enabled) {
2495     ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2496     if (ompt_enabled.ompt_callback_implicit_task) {
2497       int flags = (team_microtask == (void *)__kmp_teams_master)
2498                       ? ompt_task_initial
2499                       : ompt_task_implicit;
2500       int ompt_team_size = (flags == ompt_task_initial) ? 0 : team->t.t_nproc;
2501       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
2502           ompt_scope_end, NULL, &(task_info->task_data), ompt_team_size,
2503           OMPT_CUR_TASK_INFO(master_th)->thread_num, flags);
2504     }
2505     task_info->frame.exit_frame = ompt_data_none;
2506     task_info->task_data = ompt_data_none;
2507   }
2508 #endif
2509 
2510   KF_TRACE(10, ("__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2511                 master_th, team));
2512   __kmp_pop_current_task_from_thread(master_th);
2513 
2514 #if KMP_AFFINITY_SUPPORTED
2515   // Restore master thread's partition.
2516   master_th->th.th_first_place = team->t.t_first_place;
2517   master_th->th.th_last_place = team->t.t_last_place;
2518 #endif // KMP_AFFINITY_SUPPORTED
2519   master_th->th.th_def_allocator = team->t.t_def_allocator;
2520 
2521   updateHWFPControl(team);
2522 
2523   if (root->r.r_active != master_active)
2524     root->r.r_active = master_active;
2525 
2526   __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2527                             master_th)); // this will free worker threads
2528 
  /* This race was fun to find. Make sure the following is in the critical
     region; otherwise assertions may fail occasionally since the old team may
     be reallocated and the hierarchy appears inconsistent. It is actually
     safe to run and won't cause any bugs, but will cause those assertion
     failures. It's only one deref&assign, so we might as well put this in the
     critical region. */
2534   master_th->th.th_team = parent_team;
2535   master_th->th.th_team_nproc = parent_team->t.t_nproc;
2536   master_th->th.th_team_master = parent_team->t.t_threads[0];
2537   master_th->th.th_team_serialized = parent_team->t.t_serialized;
2538 
2539   /* restore serialized team, if need be */
2540   if (parent_team->t.t_serialized &&
2541       parent_team != master_th->th.th_serial_team &&
2542       parent_team != root->r.r_root_team) {
2543     __kmp_free_team(root,
2544                     master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
2545     master_th->th.th_serial_team = parent_team;
2546   }
2547 
2548   if (__kmp_tasking_mode != tskm_immediate_exec) {
2549     if (master_th->th.th_task_state_top >
2550         0) { // Restore task state from memo stack
2551       KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2552       // Remember master's state if we re-use this nested hot team
2553       master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] =
2554           master_th->th.th_task_state;
2555       --master_th->th.th_task_state_top; // pop
2556       // Now restore state at this level
2557       master_th->th.th_task_state =
2558           master_th->th
2559               .th_task_state_memo_stack[master_th->th.th_task_state_top];
2560     }
2561     // Copy the task team from the parent team to the master thread
2562     master_th->th.th_task_team =
2563         parent_team->t.t_task_team[master_th->th.th_task_state];
2564     KA_TRACE(20,
2565              ("__kmp_join_call: Master T#%d restoring task_team %p / team %p\n",
2566               __kmp_gtid_from_thread(master_th), master_th->th.th_task_team,
2567               parent_team));
2568   }
2569 
2570   // TODO: GEH - cannot do this assertion because root thread not set up as
2571   // executing
2572   // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 0 );
2573   master_th->th.th_current_task->td_flags.executing = 1;
2574 
2575   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2576 
2577 #if OMPT_SUPPORT
2578   int flags =
2579       OMPT_INVOKER(fork_context) |
2580       ((team_microtask == (void *)__kmp_teams_master) ? ompt_parallel_league
2581                                                       : ompt_parallel_team);
2582   if (ompt_enabled.enabled) {
2583     __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, flags,
2584                     codeptr);
2585   }
2586 #endif
2587 
2588   KMP_MB();
2589   KA_TRACE(20, ("__kmp_join_call: exit T#%d\n", gtid));
2590 }
2591 
2592 /* Check whether we should push an internal control record onto the
2593    serial team stack.  If so, do it.  */
2594 void __kmp_save_internal_controls(kmp_info_t *thread) {
2595 
2596   if (thread->th.th_team != thread->th.th_serial_team) {
2597     return;
2598   }
2599   if (thread->th.th_team->t.t_serialized > 1) {
2600     int push = 0;
2601 
2602     if (thread->th.th_team->t.t_control_stack_top == NULL) {
2603       push = 1;
2604     } else {
2605       if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
2606           thread->th.th_team->t.t_serialized) {
2607         push = 1;
2608       }
2609     }
2610     if (push) { /* push a record on the serial team's stack */
2611       kmp_internal_control_t *control =
2612           (kmp_internal_control_t *)__kmp_allocate(
2613               sizeof(kmp_internal_control_t));
2614 
2615       copy_icvs(control, &thread->th.th_current_task->td_icvs);
2616 
2617       control->serial_nesting_level = thread->th.th_team->t.t_serialized;
2618 
2619       control->next = thread->th.th_team->t.t_control_stack_top;
2620       thread->th.th_team->t.t_control_stack_top = control;
2621     }
2622   }
2623 }
2624 
2625 /* Changes set_nproc */
2626 void __kmp_set_num_threads(int new_nth, int gtid) {
2627   kmp_info_t *thread;
2628   kmp_root_t *root;
2629 
2630   KF_TRACE(10, ("__kmp_set_num_threads: new __kmp_nth = %d\n", new_nth));
2631   KMP_DEBUG_ASSERT(__kmp_init_serial);
2632 
2633   if (new_nth < 1)
2634     new_nth = 1;
2635   else if (new_nth > __kmp_max_nth)
2636     new_nth = __kmp_max_nth;
2637 
2638   KMP_COUNT_VALUE(OMP_set_numthreads, new_nth);
2639   thread = __kmp_threads[gtid];
2640   if (thread->th.th_current_task->td_icvs.nproc == new_nth)
2641     return; // nothing to do
2642 
2643   __kmp_save_internal_controls(thread);
2644 
2645   set__nproc(thread, new_nth);
2646 
2647   // If this omp_set_num_threads() call will cause the hot team size to be
2648   // reduced (in the absence of a num_threads clause), then reduce it now,
2649   // rather than waiting for the next parallel region.
2650   root = thread->th.th_root;
2651   if (__kmp_init_parallel && (!root->r.r_active) &&
2652       (root->r.r_hot_team->t.t_nproc > new_nth)
2653 #if KMP_NESTED_HOT_TEAMS
2654       && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode
2655 #endif
2656       ) {
2657     kmp_team_t *hot_team = root->r.r_hot_team;
2658     int f;
2659 
2660     __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2661 
2662     // Release the extra threads we don't need any more.
2663     for (f = new_nth; f < hot_team->t.t_nproc; f++) {
2664       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2665       if (__kmp_tasking_mode != tskm_immediate_exec) {
2666         // When decreasing team size, threads no longer in the team should unref
2667         // task team.
2668         hot_team->t.t_threads[f]->th.th_task_team = NULL;
2669       }
2670       __kmp_free_thread(hot_team->t.t_threads[f]);
2671       hot_team->t.t_threads[f] = NULL;
2672     }
2673     hot_team->t.t_nproc = new_nth;
2674 #if KMP_NESTED_HOT_TEAMS
2675     if (thread->th.th_hot_teams) {
2676       KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team);
2677       thread->th.th_hot_teams[0].hot_team_nth = new_nth;
2678     }
2679 #endif
2680 
2681     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2682 
2683     // Update the t_nproc field in the threads that are still active.
2684     for (f = 0; f < new_nth; f++) {
2685       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2686       hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
2687     }
    // Special flag to mark that the team size was changed via an
    // omp_set_num_threads() call
2689     hot_team->t.t_size_changed = -1;
2690   }
2691 }
2692 
2693 /* Changes max_active_levels */
2694 void __kmp_set_max_active_levels(int gtid, int max_active_levels) {
2695   kmp_info_t *thread;
2696 
2697   KF_TRACE(10, ("__kmp_set_max_active_levels: new max_active_levels for thread "
2698                 "%d = (%d)\n",
2699                 gtid, max_active_levels));
2700   KMP_DEBUG_ASSERT(__kmp_init_serial);
2701 
2702   // validate max_active_levels
2703   if (max_active_levels < 0) {
2704     KMP_WARNING(ActiveLevelsNegative, max_active_levels);
2705     // We ignore this call if the user has specified a negative value.
2706     // The current setting won't be changed. The last valid setting will be
2707     // used. A warning will be issued (if warnings are allowed as controlled by
2708     // the KMP_WARNINGS env var).
2709     KF_TRACE(10, ("__kmp_set_max_active_levels: the call is ignored: new "
2710                   "max_active_levels for thread %d = (%d)\n",
2711                   gtid, max_active_levels));
2712     return;
2713   }
  if (max_active_levels <= KMP_MAX_ACTIVE_LEVELS_LIMIT) {
    // OK: max_active_levels is within the valid range
    // [0; KMP_MAX_ACTIVE_LEVELS_LIMIT]. A zero value is allowed
    // (implementation-defined behavior).
  } else {
    KMP_WARNING(ActiveLevelsExceedLimit, max_active_levels,
                KMP_MAX_ACTIVE_LEVELS_LIMIT);
    max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT;
    // If the input exceeds the upper limit, we clamp it to the upper limit
    // (implementation-defined behavior). Since the current upper limit is
    // MAX_INT, the flow should never actually reach this branch.
  }
2727   KF_TRACE(10, ("__kmp_set_max_active_levels: after validation: new "
2728                 "max_active_levels for thread %d = (%d)\n",
2729                 gtid, max_active_levels));
2730 
2731   thread = __kmp_threads[gtid];
2732 
2733   __kmp_save_internal_controls(thread);
2734 
2735   set__max_active_levels(thread, max_active_levels);
2736 }
2737 
2738 /* Gets max_active_levels */
2739 int __kmp_get_max_active_levels(int gtid) {
2740   kmp_info_t *thread;
2741 
2742   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d\n", gtid));
2743   KMP_DEBUG_ASSERT(__kmp_init_serial);
2744 
2745   thread = __kmp_threads[gtid];
2746   KMP_DEBUG_ASSERT(thread->th.th_current_task);
2747   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d, curtask=%p, "
2748                 "curtask_maxaclevel=%d\n",
2749                 gtid, thread->th.th_current_task,
2750                 thread->th.th_current_task->td_icvs.max_active_levels));
2751   return thread->th.th_current_task->td_icvs.max_active_levels;
2752 }
2753 
2754 KMP_BUILD_ASSERT(sizeof(kmp_sched_t) == sizeof(int));
2755 KMP_BUILD_ASSERT(sizeof(enum sched_type) == sizeof(int));
2756 
2757 /* Changes def_sched_var ICV values (run-time schedule kind and chunk) */
2758 void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) {
2759   kmp_info_t *thread;
2760   kmp_sched_t orig_kind;
2761   //    kmp_team_t *team;
2762 
2763   KF_TRACE(10, ("__kmp_set_schedule: new schedule for thread %d = (%d, %d)\n",
2764                 gtid, (int)kind, chunk));
2765   KMP_DEBUG_ASSERT(__kmp_init_serial);
2766 
2767   // Check if the kind parameter is valid, correct if needed.
2768   // Valid parameters should fit in one of two intervals - standard or extended:
2769   //       <lower>, <valid>, <upper_std>, <lower_ext>, <valid>, <upper>
2770   // 2008-01-25: 0,  1 - 4,       5,         100,     101 - 102, 103
2771   orig_kind = kind;
2772   kind = __kmp_sched_without_mods(kind);
2773 
2774   if (kind <= kmp_sched_lower || kind >= kmp_sched_upper ||
2775       (kind <= kmp_sched_lower_ext && kind >= kmp_sched_upper_std)) {
2776     // TODO: Hint needs attention in case we change the default schedule.
2777     __kmp_msg(kmp_ms_warning, KMP_MSG(ScheduleKindOutOfRange, kind),
2778               KMP_HNT(DefaultScheduleKindUsed, "static, no chunk"),
2779               __kmp_msg_null);
2780     kind = kmp_sched_default;
2781     chunk = 0; // ignore chunk value in case of bad kind
2782   }
2783 
2784   thread = __kmp_threads[gtid];
2785 
2786   __kmp_save_internal_controls(thread);
2787 
2788   if (kind < kmp_sched_upper_std) {
2789     if (kind == kmp_sched_static && chunk < KMP_DEFAULT_CHUNK) {
      // differentiate static chunked vs. unchunked: an invalid chunk value
      // indicates the unchunked schedule (which is the default)
2792       thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static;
2793     } else {
2794       thread->th.th_current_task->td_icvs.sched.r_sched_type =
2795           __kmp_sch_map[kind - kmp_sched_lower - 1];
2796     }
2797   } else {
2798     //    __kmp_sch_map[ kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2799     //    kmp_sched_lower - 2 ];
2800     thread->th.th_current_task->td_icvs.sched.r_sched_type =
2801         __kmp_sch_map[kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2802                       kmp_sched_lower - 2];
2803   }
2804   __kmp_sched_apply_mods_intkind(
2805       orig_kind, &(thread->th.th_current_task->td_icvs.sched.r_sched_type));
2806   if (kind == kmp_sched_auto || chunk < 1) {
2807     // ignore parameter chunk for schedule auto
2808     thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK;
2809   } else {
2810     thread->th.th_current_task->td_icvs.sched.chunk = chunk;
2811   }
2812 }
2813 
2814 /* Gets def_sched_var ICV values */
2815 void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) {
2816   kmp_info_t *thread;
2817   enum sched_type th_type;
2818 
2819   KF_TRACE(10, ("__kmp_get_schedule: thread %d\n", gtid));
2820   KMP_DEBUG_ASSERT(__kmp_init_serial);
2821 
2822   thread = __kmp_threads[gtid];
2823 
2824   th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type;
2825   switch (SCHEDULE_WITHOUT_MODIFIERS(th_type)) {
2826   case kmp_sch_static:
2827   case kmp_sch_static_greedy:
2828   case kmp_sch_static_balanced:
2829     *kind = kmp_sched_static;
2830     __kmp_sched_apply_mods_stdkind(kind, th_type);
    *chunk = 0; // chunk was not set; signal this fact with a zero value
2832     return;
2833   case kmp_sch_static_chunked:
2834     *kind = kmp_sched_static;
2835     break;
2836   case kmp_sch_dynamic_chunked:
2837     *kind = kmp_sched_dynamic;
2838     break;
2839   case kmp_sch_guided_chunked:
2840   case kmp_sch_guided_iterative_chunked:
2841   case kmp_sch_guided_analytical_chunked:
2842     *kind = kmp_sched_guided;
2843     break;
2844   case kmp_sch_auto:
2845     *kind = kmp_sched_auto;
2846     break;
2847   case kmp_sch_trapezoidal:
2848     *kind = kmp_sched_trapezoidal;
2849     break;
2850 #if KMP_STATIC_STEAL_ENABLED
2851   case kmp_sch_static_steal:
2852     *kind = kmp_sched_static_steal;
2853     break;
2854 #endif
2855   default:
2856     KMP_FATAL(UnknownSchedulingType, th_type);
2857   }
2858 
2859   __kmp_sched_apply_mods_stdkind(kind, th_type);
2860   *chunk = thread->th.th_current_task->td_icvs.sched.chunk;
2861 }
2862 
2863 int __kmp_get_ancestor_thread_num(int gtid, int level) {
2864 
2865   int ii, dd;
2866   kmp_team_t *team;
2867   kmp_info_t *thr;
2868 
2869   KF_TRACE(10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level));
2870   KMP_DEBUG_ASSERT(__kmp_init_serial);
2871 
2872   // validate level
2873   if (level == 0)
2874     return 0;
2875   if (level < 0)
2876     return -1;
2877   thr = __kmp_threads[gtid];
2878   team = thr->th.th_team;
2879   ii = team->t.t_level;
2880   if (level > ii)
2881     return -1;
2882 
2883   if (thr->th.th_teams_microtask) {
2884     // AC: we are in teams region where multiple nested teams have same level
2885     int tlevel = thr->th.th_teams_level; // the level of the teams construct
2886     if (level <=
2887         tlevel) { // otherwise usual algorithm works (will not touch the teams)
2888       KMP_DEBUG_ASSERT(ii >= tlevel);
2889       // AC: As we need to pass by the teams league, we need to artificially
2890       // increase ii
2891       if (ii == tlevel) {
2892         ii += 2; // three teams have same level
2893       } else {
2894         ii++; // two teams have same level
2895       }
2896     }
2897   }
2898 
2899   if (ii == level)
2900     return __kmp_tid_from_gtid(gtid);
2901 
2902   dd = team->t.t_serialized;
2903   level++;
2904   while (ii > level) {
2905     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2906     }
2907     if ((team->t.t_serialized) && (!dd)) {
2908       team = team->t.t_parent;
2909       continue;
2910     }
2911     if (ii > level) {
2912       team = team->t.t_parent;
2913       dd = team->t.t_serialized;
2914       ii--;
2915     }
2916   }
2917 
2918   return (dd > 1) ? (0) : (team->t.t_master_tid);
2919 }
2920 
2921 int __kmp_get_team_size(int gtid, int level) {
2922 
2923   int ii, dd;
2924   kmp_team_t *team;
2925   kmp_info_t *thr;
2926 
2927   KF_TRACE(10, ("__kmp_get_team_size: thread %d %d\n", gtid, level));
2928   KMP_DEBUG_ASSERT(__kmp_init_serial);
2929 
2930   // validate level
2931   if (level == 0)
2932     return 1;
2933   if (level < 0)
2934     return -1;
2935   thr = __kmp_threads[gtid];
2936   team = thr->th.th_team;
2937   ii = team->t.t_level;
2938   if (level > ii)
2939     return -1;
2940 
2941   if (thr->th.th_teams_microtask) {
2942     // AC: we are in teams region where multiple nested teams have same level
2943     int tlevel = thr->th.th_teams_level; // the level of the teams construct
2944     if (level <=
2945         tlevel) { // otherwise usual algorithm works (will not touch the teams)
2946       KMP_DEBUG_ASSERT(ii >= tlevel);
2947       // AC: As we need to pass by the teams league, we need to artificially
2948       // increase ii
2949       if (ii == tlevel) {
2950         ii += 2; // three teams have same level
2951       } else {
2952         ii++; // two teams have same level
2953       }
2954     }
2955   }
2956 
2957   while (ii > level) {
2958     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2959     }
2960     if (team->t.t_serialized && (!dd)) {
2961       team = team->t.t_parent;
2962       continue;
2963     }
2964     if (ii > level) {
2965       team = team->t.t_parent;
2966       ii--;
2967     }
2968   }
2969 
2970   return team->t.t_nproc;
2971 }
2972 
2973 kmp_r_sched_t __kmp_get_schedule_global() {
  // This routine was created because the pairs (__kmp_sched, __kmp_chunk) and
  // (__kmp_static, __kmp_guided) may be changed by kmp_set_defaults
  // independently, so the up-to-date schedule can be obtained here.
2977 
2978   kmp_r_sched_t r_sched;
2979 
2980   // create schedule from 4 globals: __kmp_sched, __kmp_chunk, __kmp_static,
2981   // __kmp_guided. __kmp_sched should keep original value, so that user can set
2982   // KMP_SCHEDULE multiple times, and thus have different run-time schedules in
2983   // different roots (even in OMP 2.5)
2984   enum sched_type s = SCHEDULE_WITHOUT_MODIFIERS(__kmp_sched);
2985   enum sched_type sched_modifiers = SCHEDULE_GET_MODIFIERS(__kmp_sched);
2986   if (s == kmp_sch_static) {
2987     // replace STATIC with more detailed schedule (balanced or greedy)
2988     r_sched.r_sched_type = __kmp_static;
2989   } else if (s == kmp_sch_guided_chunked) {
2990     // replace GUIDED with more detailed schedule (iterative or analytical)
2991     r_sched.r_sched_type = __kmp_guided;
2992   } else { // (STATIC_CHUNKED), or (DYNAMIC_CHUNKED), or other
2993     r_sched.r_sched_type = __kmp_sched;
2994   }
2995   SCHEDULE_SET_MODIFIERS(r_sched.r_sched_type, sched_modifiers);
2996 
2997   if (__kmp_chunk < KMP_DEFAULT_CHUNK) {
2998     // __kmp_chunk may be wrong here (if it was not ever set)
2999     r_sched.chunk = KMP_DEFAULT_CHUNK;
3000   } else {
3001     r_sched.chunk = __kmp_chunk;
3002   }
3003 
3004   return r_sched;
3005 }
3006 
/* Allocate (realloc == FALSE) or reallocate (realloc == TRUE)
   at least argc *t_argv entries for the requested team. */
3009 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {
3010 
3011   KMP_DEBUG_ASSERT(team);
3012   if (!realloc || argc > team->t.t_max_argc) {
3013 
3014     KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
3015                    "current entries=%d\n",
3016                    team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
3017     /* if previously allocated heap space for args, free them */
3018     if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
3019       __kmp_free((void *)team->t.t_argv);
3020 
3021     if (argc <= KMP_INLINE_ARGV_ENTRIES) {
3022       /* use unused space in the cache line for arguments */
3023       team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
3024       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
3025                      "argv entries\n",
3026                      team->t.t_id, team->t.t_max_argc));
3027       team->t.t_argv = &team->t.t_inline_argv[0];
3028       if (__kmp_storage_map) {
3029         __kmp_print_storage_map_gtid(
3030             -1, &team->t.t_inline_argv[0],
3031             &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
3032             (sizeof(void *) * KMP_INLINE_ARGV_ENTRIES), "team_%d.t_inline_argv",
3033             team->t.t_id);
3034       }
3035     } else {
3036       /* allocate space for arguments in the heap */
3037       team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
3038                                ? KMP_MIN_MALLOC_ARGV_ENTRIES
3039                                : 2 * argc;
3040       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
3041                      "argv entries\n",
3042                      team->t.t_id, team->t.t_max_argc));
3043       team->t.t_argv =
3044           (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
3045       if (__kmp_storage_map) {
3046         __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
3047                                      &team->t.t_argv[team->t.t_max_argc],
3048                                      sizeof(void *) * team->t.t_max_argc,
3049                                      "team_%d.t_argv", team->t.t_id);
3050       }
3051     }
3052   }
3053 }
3054 
3055 static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
3056   int i;
3057   int num_disp_buff = max_nth > 1 ? __kmp_dispatch_num_buffers : 2;
3058   team->t.t_threads =
3059       (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth);
3060   team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
3061       sizeof(dispatch_shared_info_t) * num_disp_buff);
3062   team->t.t_dispatch =
3063       (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth);
3064   team->t.t_implicit_task_taskdata =
3065       (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth);
3066   team->t.t_max_nproc = max_nth;
3067 
3068   /* setup dispatch buffers */
3069   for (i = 0; i < num_disp_buff; ++i) {
3070     team->t.t_disp_buffer[i].buffer_index = i;
3071     team->t.t_disp_buffer[i].doacross_buf_idx = i;
3072   }
3073 }
3074 
3075 static void __kmp_free_team_arrays(kmp_team_t *team) {
3076   /* Note: this does not free the threads in t_threads (__kmp_free_threads) */
3077   int i;
3078   for (i = 0; i < team->t.t_max_nproc; ++i) {
3079     if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
3080       __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
3081       team->t.t_dispatch[i].th_disp_buffer = NULL;
3082     }
3083   }
3084 #if KMP_USE_HIER_SCHED
3085   __kmp_dispatch_free_hierarchies(team);
3086 #endif
3087   __kmp_free(team->t.t_threads);
3088   __kmp_free(team->t.t_disp_buffer);
3089   __kmp_free(team->t.t_dispatch);
3090   __kmp_free(team->t.t_implicit_task_taskdata);
3091   team->t.t_threads = NULL;
3092   team->t.t_disp_buffer = NULL;
3093   team->t.t_dispatch = NULL;
3094   team->t.t_implicit_task_taskdata = 0;
3095 }
3096 
3097 static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
3098   kmp_info_t **oldThreads = team->t.t_threads;
3099 
3100   __kmp_free(team->t.t_disp_buffer);
3101   __kmp_free(team->t.t_dispatch);
3102   __kmp_free(team->t.t_implicit_task_taskdata);
3103   __kmp_allocate_team_arrays(team, max_nth);
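  // Only the thread pointers survive the reallocation: the KMP_MEMCPY below
  // copies them into the fresh array, while the dispatch buffers and implicit
  // task data freed above are rebuilt from scratch.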
3104 
3105   KMP_MEMCPY(team->t.t_threads, oldThreads,
3106              team->t.t_nproc * sizeof(kmp_info_t *));
3107 
3108   __kmp_free(oldThreads);
3109 }
3110 
3111 static kmp_internal_control_t __kmp_get_global_icvs(void) {
3112 
3113   kmp_r_sched_t r_sched =
3114       __kmp_get_schedule_global(); // get current state of scheduling globals
3115 
3116   KMP_DEBUG_ASSERT(__kmp_nested_proc_bind.used > 0);
3117 
3118   kmp_internal_control_t g_icvs = {
3119     0, // int serial_nesting_level; //corresponds to value of th_team_serialized
3120     (kmp_int8)__kmp_global.g.g_dynamic, // internal control for dynamic
3121     // adjustment of threads (per thread)
3122     (kmp_int8)__kmp_env_blocktime, // int bt_set; //internal control for
3123     // whether blocktime is explicitly set
3124     __kmp_dflt_blocktime, // int blocktime; //internal control for blocktime
3125 #if KMP_USE_MONITOR
3126     __kmp_bt_intervals, // int bt_intervals; //internal control for blocktime
3127 // intervals
3128 #endif
3129     __kmp_dflt_team_nth, // int nproc; //internal control for # of threads for
3130     // next parallel region (per thread)
3131     // (use a max ub on value if __kmp_parallel_initialize not called yet)
3132     __kmp_cg_max_nth, // int thread_limit;
3133     __kmp_dflt_max_active_levels, // int max_active_levels; //internal control
3134     // for max_active_levels
3135     r_sched, // kmp_r_sched_t sched; //internal control for runtime schedule
3136     // {sched,chunk} pair
3137     __kmp_nested_proc_bind.bind_types[0],
3138     __kmp_default_device,
3139     NULL // struct kmp_internal_control *next;
3140   };
3141 
3142   return g_icvs;
3143 }
3144 
3145 static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {
3146 
3147   kmp_internal_control_t gx_icvs;
3148   gx_icvs.serial_nesting_level =
3149       0; // probably =team->t.t_serial like in save_inter_controls
3150   copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
3151   gx_icvs.next = NULL;
3152 
3153   return gx_icvs;
3154 }
3155 
3156 static void __kmp_initialize_root(kmp_root_t *root) {
3157   int f;
3158   kmp_team_t *root_team;
3159   kmp_team_t *hot_team;
3160   int hot_team_max_nth;
3161   kmp_r_sched_t r_sched =
3162       __kmp_get_schedule_global(); // get current state of scheduling globals
3163   kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3164   KMP_DEBUG_ASSERT(root);
3165   KMP_ASSERT(!root->r.r_begin);
3166 
3167   /* setup the root state structure */
3168   __kmp_init_lock(&root->r.r_begin_lock);
3169   root->r.r_begin = FALSE;
3170   root->r.r_active = FALSE;
3171   root->r.r_in_parallel = 0;
3172   root->r.r_blocktime = __kmp_dflt_blocktime;
3173 
3174   /* setup the root team for this task */
3175   /* allocate the root team structure */
3176   KF_TRACE(10, ("__kmp_initialize_root: before root_team\n"));
3177 
3178   root_team =
3179       __kmp_allocate_team(root,
3180                           1, // new_nproc
3181                           1, // max_nproc
3182 #if OMPT_SUPPORT
3183                           ompt_data_none, // root parallel id
3184 #endif
3185                           __kmp_nested_proc_bind.bind_types[0], &r_icvs,
3186                           0 // argc
3187                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3188                           );
3189 #if USE_DEBUGGER
3190   // Non-NULL value should be assigned to make the debugger display the root
3191   // team.
3192   TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)(~0));
3193 #endif
3194 
3195   KF_TRACE(10, ("__kmp_initialize_root: after root_team = %p\n", root_team));
3196 
3197   root->r.r_root_team = root_team;
3198   root_team->t.t_control_stack_top = NULL;
3199 
3200   /* initialize root team */
3201   root_team->t.t_threads[0] = NULL;
3202   root_team->t.t_nproc = 1;
3203   root_team->t.t_serialized = 1;
3204   // TODO???: root_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3205   root_team->t.t_sched.sched = r_sched.sched;
3206   KA_TRACE(
3207       20,
3208       ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3209        root_team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
3210 
3211   /* setup the  hot team for this task */
3212   /* allocate the hot team structure */
3213   KF_TRACE(10, ("__kmp_initialize_root: before hot_team\n"));
3214 
3215   hot_team =
3216       __kmp_allocate_team(root,
3217                           1, // new_nproc
3218                           __kmp_dflt_team_nth_ub * 2, // max_nproc
3219 #if OMPT_SUPPORT
3220                           ompt_data_none, // root parallel id
3221 #endif
3222                           __kmp_nested_proc_bind.bind_types[0], &r_icvs,
3223                           0 // argc
3224                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3225                           );
3226   KF_TRACE(10, ("__kmp_initialize_root: after hot_team = %p\n", hot_team));
3227 
3228   root->r.r_hot_team = hot_team;
3229   root_team->t.t_control_stack_top = NULL;
3230 
3231   /* first-time initialization */
3232   hot_team->t.t_parent = root_team;
3233 
3234   /* initialize hot team */
3235   hot_team_max_nth = hot_team->t.t_max_nproc;
3236   for (f = 0; f < hot_team_max_nth; ++f) {
3237     hot_team->t.t_threads[f] = NULL;
3238   }
3239   hot_team->t.t_nproc = 1;
3240   // TODO???: hot_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3241   hot_team->t.t_sched.sched = r_sched.sched;
3242   hot_team->t.t_size_changed = 0;
3243 }
3244 
3245 #ifdef KMP_DEBUG
3246 
3247 typedef struct kmp_team_list_item {
3248   kmp_team_p const *entry;
3249   struct kmp_team_list_item *next;
3250 } kmp_team_list_item_t;
3251 typedef kmp_team_list_item_t *kmp_team_list_t;
3252 
3253 static void __kmp_print_structure_team_accum( // Add team to list of teams.
3254     kmp_team_list_t list, // List of teams.
3255     kmp_team_p const *team // Team to add.
3256     ) {
3257 
3258   // List must terminate with item where both entry and next are NULL.
3259   // Team is added to the list only once.
3260   // List is sorted in ascending order by team id.
3261   // Team id is *not* a key.
3262 
3263   kmp_team_list_t l;
3264 
3265   KMP_DEBUG_ASSERT(list != NULL);
3266   if (team == NULL) {
3267     return;
3268   }
3269 
3270   __kmp_print_structure_team_accum(list, team->t.t_parent);
3271   __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3272 
3273   // Search list for the team.
3274   l = list;
3275   while (l->next != NULL && l->entry != team) {
3276     l = l->next;
3277   }
3278   if (l->next != NULL) {
3279     return; // Team has been added before, exit.
3280   }
3281 
3282   // Team is not found. Search list again for insertion point.
3283   l = list;
3284   while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3285     l = l->next;
3286   }
3287 
3288   // Insert team.
3289   {
3290     kmp_team_list_item_t *item = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
3291         sizeof(kmp_team_list_item_t));
3292     *item = *l;
3293     l->entry = team;
3294     l->next = item;
3295   }
3296 }
3297 
static void __kmp_print_structure_team(char const *title,
                                       kmp_team_p const *team) {
3301   __kmp_printf("%s", title);
3302   if (team != NULL) {
3303     __kmp_printf("%2x %p\n", team->t.t_id, team);
3304   } else {
3305     __kmp_printf(" - (nil)\n");
3306   }
3307 }
3308 
3309 static void __kmp_print_structure_thread(char const *title,
3310                                          kmp_info_p const *thread) {
3311   __kmp_printf("%s", title);
3312   if (thread != NULL) {
3313     __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread);
3314   } else {
3315     __kmp_printf(" - (nil)\n");
3316   }
3317 }
3318 
3319 void __kmp_print_structure(void) {
3320 
3321   kmp_team_list_t list;
3322 
3323   // Initialize list of teams.
3324   list =
3325       (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(sizeof(kmp_team_list_item_t));
3326   list->entry = NULL;
3327   list->next = NULL;
3328 
3329   __kmp_printf("\n------------------------------\nGlobal Thread "
3330                "Table\n------------------------------\n");
3331   {
3332     int gtid;
3333     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3334       __kmp_printf("%2d", gtid);
3335       if (__kmp_threads != NULL) {
3336         __kmp_printf(" %p", __kmp_threads[gtid]);
3337       }
3338       if (__kmp_root != NULL) {
3339         __kmp_printf(" %p", __kmp_root[gtid]);
3340       }
3341       __kmp_printf("\n");
3342     }
3343   }
3344 
3345   // Print out __kmp_threads array.
3346   __kmp_printf("\n------------------------------\nThreads\n--------------------"
3347                "----------\n");
3348   if (__kmp_threads != NULL) {
3349     int gtid;
3350     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3351       kmp_info_t const *thread = __kmp_threads[gtid];
3352       if (thread != NULL) {
3353         __kmp_printf("GTID %2d %p:\n", gtid, thread);
3354         __kmp_printf("    Our Root:        %p\n", thread->th.th_root);
3355         __kmp_print_structure_team("    Our Team:     ", thread->th.th_team);
3356         __kmp_print_structure_team("    Serial Team:  ",
3357                                    thread->th.th_serial_team);
3358         __kmp_printf("    Threads:      %2d\n", thread->th.th_team_nproc);
3359         __kmp_print_structure_thread("    Master:       ",
3360                                      thread->th.th_team_master);
3361         __kmp_printf("    Serialized?:  %2d\n", thread->th.th_team_serialized);
3362         __kmp_printf("    Set NProc:    %2d\n", thread->th.th_set_nproc);
3363         __kmp_printf("    Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
3364         __kmp_print_structure_thread("    Next in pool: ",
3365                                      thread->th.th_next_pool);
3366         __kmp_printf("\n");
3367         __kmp_print_structure_team_accum(list, thread->th.th_team);
3368         __kmp_print_structure_team_accum(list, thread->th.th_serial_team);
3369       }
3370     }
3371   } else {
3372     __kmp_printf("Threads array is not allocated.\n");
3373   }
3374 
3375   // Print out __kmp_root array.
3376   __kmp_printf("\n------------------------------\nUbers\n----------------------"
3377                "--------\n");
3378   if (__kmp_root != NULL) {
3379     int gtid;
3380     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3381       kmp_root_t const *root = __kmp_root[gtid];
3382       if (root != NULL) {
3383         __kmp_printf("GTID %2d %p:\n", gtid, root);
3384         __kmp_print_structure_team("    Root Team:    ", root->r.r_root_team);
3385         __kmp_print_structure_team("    Hot Team:     ", root->r.r_hot_team);
3386         __kmp_print_structure_thread("    Uber Thread:  ",
3387                                      root->r.r_uber_thread);
3388         __kmp_printf("    Active?:      %2d\n", root->r.r_active);
3389         __kmp_printf("    In Parallel:  %2d\n",
3390                      KMP_ATOMIC_LD_RLX(&root->r.r_in_parallel));
3391         __kmp_printf("\n");
3392         __kmp_print_structure_team_accum(list, root->r.r_root_team);
3393         __kmp_print_structure_team_accum(list, root->r.r_hot_team);
3394       }
3395     }
3396   } else {
3397     __kmp_printf("Ubers array is not allocated.\n");
3398   }
3399 
3400   __kmp_printf("\n------------------------------\nTeams\n----------------------"
3401                "--------\n");
3402   while (list->next != NULL) {
3403     kmp_team_p const *team = list->entry;
3404     int i;
3405     __kmp_printf("Team %2x %p:\n", team->t.t_id, team);
3406     __kmp_print_structure_team("    Parent Team:      ", team->t.t_parent);
3407     __kmp_printf("    Master TID:       %2d\n", team->t.t_master_tid);
3408     __kmp_printf("    Max threads:      %2d\n", team->t.t_max_nproc);
3409     __kmp_printf("    Levels of serial: %2d\n", team->t.t_serialized);
3410     __kmp_printf("    Number threads:   %2d\n", team->t.t_nproc);
3411     for (i = 0; i < team->t.t_nproc; ++i) {
3412       __kmp_printf("    Thread %2d:      ", i);
3413       __kmp_print_structure_thread("", team->t.t_threads[i]);
3414     }
3415     __kmp_print_structure_team("    Next in pool:     ", team->t.t_next_pool);
3416     __kmp_printf("\n");
3417     list = list->next;
3418   }
3419 
3420   // Print out __kmp_thread_pool and __kmp_team_pool.
3421   __kmp_printf("\n------------------------------\nPools\n----------------------"
3422                "--------\n");
3423   __kmp_print_structure_thread("Thread pool:          ",
3424                                CCAST(kmp_info_t *, __kmp_thread_pool));
3425   __kmp_print_structure_team("Team pool:            ",
3426                              CCAST(kmp_team_t *, __kmp_team_pool));
3427   __kmp_printf("\n");
3428 
3429   // Free team list.
3430   while (list != NULL) {
3431     kmp_team_list_item_t *item = list;
3432     list = list->next;
3433     KMP_INTERNAL_FREE(item);
3434   }
3435 }
3436 
3437 #endif
3438 
3439 //---------------------------------------------------------------------------
3440 //  Stuff for per-thread fast random number generator
3441 //  Table of primes
3442 static const unsigned __kmp_primes[] = {
3443     0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, 0xba5703f5, 0xb495a877,
3444     0xe1626741, 0x79695e6b, 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
3445     0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, 0xbe4d6fe9, 0x5f15e201,
3446     0x99afc3fd, 0xf3f16801, 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
3447     0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, 0x085a3d61, 0x46eb5ea7,
3448     0x3d9910ed, 0x2e687b5b, 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
3449     0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, 0x54581edb, 0xf2480f45,
3450     0x0bb9288f, 0xef1affc7, 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
3451     0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, 0xfc411073, 0xc3749363,
3452     0xb892d829, 0x3549366b, 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
3453     0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f};
3454 
3455 //---------------------------------------------------------------------------
3456 //  __kmp_get_random: Get a random number using a linear congruential method.
3457 unsigned short __kmp_get_random(kmp_info_t *thread) {
3458   unsigned x = thread->th.th_x;
3459   unsigned short r = x >> 16;
3460 
3461   thread->th.th_x = x * thread->th.th_a + 1;
3462 
3463   KA_TRACE(30, ("__kmp_get_random: THREAD: %d, RETURN: %u\n",
3464                 thread->th.th_info.ds.ds_tid, r));
3465 
3466   return r;
3467 }
3468 //--------------------------------------------------------
3469 // __kmp_init_random: Initialize a random number generator
3470 void __kmp_init_random(kmp_info_t *thread) {
3471   unsigned seed = thread->th.th_info.ds.ds_tid;
3472 
3473   thread->th.th_a =
3474       __kmp_primes[seed % (sizeof(__kmp_primes) / sizeof(__kmp_primes[0]))];
3475   thread->th.th_x = (seed + 1) * thread->th.th_a + 1;
3476   KA_TRACE(30,
3477            ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a));
3478 }
3479 
3480 #if KMP_OS_WINDOWS
3481 /* reclaim array entries for root threads that are already dead, returns number
3482  * reclaimed */
3483 static int __kmp_reclaim_dead_roots(void) {
3484   int i, r = 0;
3485 
3486   for (i = 0; i < __kmp_threads_capacity; ++i) {
3487     if (KMP_UBER_GTID(i) &&
3488         !__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])) &&
3489         !__kmp_root[i]
3490              ->r.r_active) { // AC: reclaim only roots died in non-active state
3491       r += __kmp_unregister_root_other_thread(i);
3492     }
3493   }
3494   return r;
3495 }
3496 #endif
3497 
3498 /* This function attempts to create free entries in __kmp_threads and
3499    __kmp_root, and returns the number of free entries generated.
3500 
3501    For Windows* OS static library, the first mechanism used is to reclaim array
3502    entries for root threads that are already dead.
3503 
   On all platforms, expansion is attempted on the arrays __kmp_threads and
   __kmp_root, with appropriate update to __kmp_threads_capacity. Array
3506    capacity is increased by doubling with clipping to __kmp_tp_capacity, if
3507    threadprivate cache array has been created. Synchronization with
3508    __kmpc_threadprivate_cached is done using __kmp_tp_cached_lock.
3509 
3510    After any dead root reclamation, if the clipping value allows array expansion
3511    to result in the generation of a total of nNeed free slots, the function does
3512    that expansion. If not, nothing is done beyond the possible initial root
3513    thread reclamation.
3514 
3515    If any argument is negative, the behavior is undefined. */
3516 static int __kmp_expand_threads(int nNeed) {
3517   int added = 0;
3518   int minimumRequiredCapacity;
3519   int newCapacity;
3520   kmp_info_t **newThreads;
3521   kmp_root_t **newRoot;
3522 
3523 // All calls to __kmp_expand_threads should be under __kmp_forkjoin_lock, so
3524 // resizing __kmp_threads does not need additional protection if foreign
3525 // threads are present
3526 
3527 #if KMP_OS_WINDOWS && !KMP_DYNAMIC_LIB
3528   /* only for Windows static library */
3529   /* reclaim array entries for root threads that are already dead */
3530   added = __kmp_reclaim_dead_roots();
3531 
3532   if (nNeed) {
3533     nNeed -= added;
3534     if (nNeed < 0)
3535       nNeed = 0;
3536   }
3537 #endif
3538   if (nNeed <= 0)
3539     return added;
3540 
3541   // Note that __kmp_threads_capacity is not bounded by __kmp_max_nth. If
3542   // __kmp_max_nth is set to some value less than __kmp_sys_max_nth by the
3543   // user via KMP_DEVICE_THREAD_LIMIT, then __kmp_threads_capacity may become
3544   // > __kmp_max_nth in one of two ways:
3545   //
3546   // 1) The initialization thread (gtid = 0) exits.  __kmp_threads[0]
3547   //    may not be reused by another thread, so we may need to increase
3548   //    __kmp_threads_capacity to __kmp_max_nth + 1.
3549   //
3550   // 2) New foreign root(s) are encountered.  We always register new foreign
3551   //    roots. This may cause a smaller # of threads to be allocated at
3552   //    subsequent parallel regions, but the worker threads hang around (and
3553   //    eventually go to sleep) and need slots in the __kmp_threads[] array.
3554   //
3555   // Anyway, that is the reason for moving the check to see if
3556   // __kmp_max_nth was exceeded into __kmp_reserve_threads()
3557   // instead of having it performed here. -BB
3558 
3559   KMP_DEBUG_ASSERT(__kmp_sys_max_nth >= __kmp_threads_capacity);
3560 
3561   /* compute expansion headroom to check if we can expand */
3562   if (__kmp_sys_max_nth - __kmp_threads_capacity < nNeed) {
3563     /* possible expansion too small -- give up */
3564     return added;
3565   }
3566   minimumRequiredCapacity = __kmp_threads_capacity + nNeed;
3567 
3568   newCapacity = __kmp_threads_capacity;
3569   do {
3570     newCapacity = newCapacity <= (__kmp_sys_max_nth >> 1) ? (newCapacity << 1)
3571                                                           : __kmp_sys_max_nth;
3572   } while (newCapacity < minimumRequiredCapacity);
3573   newThreads = (kmp_info_t **)__kmp_allocate(
3574       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * newCapacity + CACHE_LINE);
3575   newRoot =
3576       (kmp_root_t **)((char *)newThreads + sizeof(kmp_info_t *) * newCapacity);
3577   KMP_MEMCPY(newThreads, __kmp_threads,
3578              __kmp_threads_capacity * sizeof(kmp_info_t *));
3579   KMP_MEMCPY(newRoot, __kmp_root,
3580              __kmp_threads_capacity * sizeof(kmp_root_t *));
3581 
3582   kmp_info_t **temp_threads = __kmp_threads;
3583   *(kmp_info_t * *volatile *)&__kmp_threads = newThreads;
3584   *(kmp_root_t * *volatile *)&__kmp_root = newRoot;
3585   __kmp_free(temp_threads);
3586   added += newCapacity - __kmp_threads_capacity;
3587   *(volatile int *)&__kmp_threads_capacity = newCapacity;
3588 
3589   if (newCapacity > __kmp_tp_capacity) {
3590     __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
3591     if (__kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
3592       __kmp_threadprivate_resize_cache(newCapacity);
3593     } else { // increase __kmp_tp_capacity to correspond with kmp_threads size
3594       *(volatile int *)&__kmp_tp_capacity = newCapacity;
3595     }
3596     __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
3597   }
3598 
3599   return added;
3600 }
3601 
/* Register the current thread as a root thread and obtain our gtid. We must
   have the __kmp_initz_lock held at this point. The argument is TRUE only if
   we are the thread that calls this from __kmp_do_serial_initialize(). */
3605 int __kmp_register_root(int initial_thread) {
3606   kmp_info_t *root_thread;
3607   kmp_root_t *root;
3608   int gtid;
3609   int capacity;
3610   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3611   KA_TRACE(20, ("__kmp_register_root: entered\n"));
3612   KMP_MB();
3613 
3614   /* 2007-03-02:
3615      If initial thread did not invoke OpenMP RTL yet, and this thread is not an
3616      initial one, "__kmp_all_nth >= __kmp_threads_capacity" condition does not
3617      work as expected -- it may return false (that means there is at least one
3618      empty slot in __kmp_threads array), but it is possible the only free slot
3619      is #0, which is reserved for initial thread and so cannot be used for this
3620      one. Following code workarounds this bug.
3621 
3622      However, right solution seems to be not reserving slot #0 for initial
3623      thread because:
3624      (1) there is no magic in slot #0,
3625      (2) we cannot detect initial thread reliably (the first thread which does
3626         serial initialization may be not a real initial thread).
3627   */
3628   capacity = __kmp_threads_capacity;
3629   if (!initial_thread && TCR_PTR(__kmp_threads[0]) == NULL) {
3630     --capacity;
3631   }
3632 
3633   /* see if there are too many threads */
3634   if (__kmp_all_nth >= capacity && !__kmp_expand_threads(1)) {
3635     if (__kmp_tp_cached) {
3636       __kmp_fatal(KMP_MSG(CantRegisterNewThread),
3637                   KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
3638                   KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
3639     } else {
3640       __kmp_fatal(KMP_MSG(CantRegisterNewThread), KMP_HNT(SystemLimitOnThreads),
3641                   __kmp_msg_null);
3642     }
3643   }
3644 
3645   /* find an available thread slot */
  /* Don't reassign the zero slot, since it must be used only by the initial
     thread */
3648   for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
3649        gtid++)
3650     ;
3651   KA_TRACE(1,
3652            ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
3653   KMP_ASSERT(gtid < __kmp_threads_capacity);
3654 
3655   /* update global accounting */
3656   __kmp_all_nth++;
3657   TCW_4(__kmp_nth, __kmp_nth + 1);
3658 
  // If __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for
  // low numbers of procs, and method #2 (keyed API call) for higher numbers.
3661   if (__kmp_adjust_gtid_mode) {
3662     if (__kmp_all_nth >= __kmp_tls_gtid_min) {
3663       if (TCR_4(__kmp_gtid_mode) != 2) {
3664         TCW_4(__kmp_gtid_mode, 2);
3665       }
3666     } else {
3667       if (TCR_4(__kmp_gtid_mode) != 1) {
3668         TCW_4(__kmp_gtid_mode, 1);
3669       }
3670     }
3671   }
3672 
3673 #ifdef KMP_ADJUST_BLOCKTIME
  /* Adjust blocktime to zero if necessary. Middle initialization might not
     have occurred yet. */
3676   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
3677     if (__kmp_nth > __kmp_avail_proc) {
3678       __kmp_zero_bt = TRUE;
3679     }
3680   }
3681 #endif /* KMP_ADJUST_BLOCKTIME */
3682 
3683   /* setup this new hierarchy */
3684   if (!(root = __kmp_root[gtid])) {
3685     root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(sizeof(kmp_root_t));
3686     KMP_DEBUG_ASSERT(!root->r.r_root_team);
3687   }
3688 
3689 #if KMP_STATS_ENABLED
3690   // Initialize stats as soon as possible (right after gtid assignment).
3691   __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid);
3692   __kmp_stats_thread_ptr->startLife();
3693   KMP_SET_THREAD_STATE(SERIAL_REGION);
3694   KMP_INIT_PARTITIONED_TIMERS(OMP_serial);
3695 #endif
3696   __kmp_initialize_root(root);
3697 
3698   /* setup new root thread structure */
3699   if (root->r.r_uber_thread) {
3700     root_thread = root->r.r_uber_thread;
3701   } else {
3702     root_thread = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
3703     if (__kmp_storage_map) {
3704       __kmp_print_thread_storage_map(root_thread, gtid);
3705     }
3706     root_thread->th.th_info.ds.ds_gtid = gtid;
3707 #if OMPT_SUPPORT
3708     root_thread->th.ompt_thread_info.thread_data = ompt_data_none;
3709 #endif
3710     root_thread->th.th_root = root;
3711     if (__kmp_env_consistency_check) {
3712       root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid);
3713     }
3714 #if USE_FAST_MEMORY
3715     __kmp_initialize_fast_memory(root_thread);
3716 #endif /* USE_FAST_MEMORY */
3717 
3718 #if KMP_USE_BGET
3719     KMP_DEBUG_ASSERT(root_thread->th.th_local.bget_data == NULL);
3720     __kmp_initialize_bget(root_thread);
3721 #endif
3722     __kmp_init_random(root_thread); // Initialize random number generator
3723   }
3724 
3725   /* setup the serial team held in reserve by the root thread */
3726   if (!root_thread->th.th_serial_team) {
3727     kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3728     KF_TRACE(10, ("__kmp_register_root: before serial_team\n"));
3729     root_thread->th.th_serial_team = __kmp_allocate_team(
3730         root, 1, 1,
3731 #if OMPT_SUPPORT
3732         ompt_data_none, // root parallel id
3733 #endif
3734         proc_bind_default, &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
3735   }
3736   KMP_ASSERT(root_thread->th.th_serial_team);
3737   KF_TRACE(10, ("__kmp_register_root: after serial_team = %p\n",
3738                 root_thread->th.th_serial_team));
3739 
3740   /* drop root_thread into place */
3741   TCW_SYNC_PTR(__kmp_threads[gtid], root_thread);
3742 
3743   root->r.r_root_team->t.t_threads[0] = root_thread;
3744   root->r.r_hot_team->t.t_threads[0] = root_thread;
3745   root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
3746   // AC: the team created in reserve, not for execution (it is unused for now).
3747   root_thread->th.th_serial_team->t.t_serialized = 0;
3748   root->r.r_uber_thread = root_thread;
3749 
3750   /* initialize the thread, get it ready to go */
3751   __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid);
3752   TCW_4(__kmp_init_gtid, TRUE);
3753 
3754   /* prepare the master thread for get_gtid() */
3755   __kmp_gtid_set_specific(gtid);
3756 
3757 #if USE_ITT_BUILD
3758   __kmp_itt_thread_name(gtid);
3759 #endif /* USE_ITT_BUILD */
3760 
3761 #ifdef KMP_TDATA_GTID
3762   __kmp_gtid = gtid;
3763 #endif
3764   __kmp_create_worker(gtid, root_thread, __kmp_stksize);
3765   KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == gtid);
3766 
3767   KA_TRACE(20, ("__kmp_register_root: T#%d init T#%d(%d:%d) arrived: join=%u, "
3768                 "plain=%u\n",
3769                 gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team),
3770                 root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE,
3771                 KMP_INIT_BARRIER_STATE));
3772   { // Initialize barrier data.
3773     int b;
3774     for (b = 0; b < bs_last_barrier; ++b) {
3775       root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE;
3776 #if USE_DEBUGGER
3777       root_thread->th.th_bar[b].bb.b_worker_arrived = 0;
3778 #endif
3779     }
3780   }
3781   KMP_DEBUG_ASSERT(root->r.r_hot_team->t.t_bar[bs_forkjoin_barrier].b_arrived ==
3782                    KMP_INIT_BARRIER_STATE);
3783 
3784 #if KMP_AFFINITY_SUPPORTED
3785   root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
3786   root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
3787   root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
3788   root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
3789   if (TCR_4(__kmp_init_middle)) {
3790     __kmp_affinity_set_init_mask(gtid, TRUE);
3791   }
3792 #endif /* KMP_AFFINITY_SUPPORTED */
3793   root_thread->th.th_def_allocator = __kmp_def_allocator;
3794   root_thread->th.th_prev_level = 0;
3795   root_thread->th.th_prev_num_threads = 1;
3796 
3797   kmp_cg_root_t *tmp = (kmp_cg_root_t *)__kmp_allocate(sizeof(kmp_cg_root_t));
3798   tmp->cg_root = root_thread;
3799   tmp->cg_thread_limit = __kmp_cg_max_nth;
3800   tmp->cg_nthreads = 1;
3801   KA_TRACE(100, ("__kmp_register_root: Thread %p created node %p with"
3802                  " cg_nthreads init to 1\n",
3803                  root_thread, tmp));
3804   tmp->up = NULL;
3805   root_thread->th.th_cg_roots = tmp;
3806 
3807   __kmp_root_counter++;
3808 
3809 #if OMPT_SUPPORT
3810   if (!initial_thread && ompt_enabled.enabled) {
3811 
3812     kmp_info_t *root_thread = ompt_get_thread();
3813 
3814     ompt_set_thread_state(root_thread, ompt_state_overhead);
3815 
3816     if (ompt_enabled.ompt_callback_thread_begin) {
3817       ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
3818           ompt_thread_initial, __ompt_get_thread_data_internal());
3819     }
3820     ompt_data_t *task_data;
3821     ompt_data_t *parallel_data;
3822     __ompt_get_task_info_internal(0, NULL, &task_data, NULL, &parallel_data, NULL);
3823     if (ompt_enabled.ompt_callback_implicit_task) {
3824       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
3825           ompt_scope_begin, parallel_data, task_data, 1, 1, ompt_task_initial);
3826     }
3827 
3828     ompt_set_thread_state(root_thread, ompt_state_work_serial);
3829   }
3830 #endif
3831 
3832   KMP_MB();
3833   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3834 
3835   return gtid;
3836 }
3837 
3838 #if KMP_NESTED_HOT_TEAMS
3839 static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
3840                                 const int max_level) {
3841   int i, n, nth;
3842   kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
3843   if (!hot_teams || !hot_teams[level].hot_team) {
3844     return 0;
3845   }
3846   KMP_DEBUG_ASSERT(level < max_level);
3847   kmp_team_t *team = hot_teams[level].hot_team;
3848   nth = hot_teams[level].hot_team_nth;
3849   n = nth - 1; // master is not freed
3850   if (level < max_level - 1) {
3851     for (i = 0; i < nth; ++i) {
3852       kmp_info_t *th = team->t.t_threads[i];
3853       n += __kmp_free_hot_teams(root, th, level + 1, max_level);
3854       if (i > 0 && th->th.th_hot_teams) {
3855         __kmp_free(th->th.th_hot_teams);
3856         th->th.th_hot_teams = NULL;
3857       }
3858     }
3859   }
3860   __kmp_free_team(root, team, NULL);
3861   return n;
3862 }
3863 #endif
3864 
// Resets a root thread and clears its root and hot teams.
// Returns the number of __kmp_threads entries directly and indirectly freed.
3867 static int __kmp_reset_root(int gtid, kmp_root_t *root) {
3868   kmp_team_t *root_team = root->r.r_root_team;
3869   kmp_team_t *hot_team = root->r.r_hot_team;
3870   int n = hot_team->t.t_nproc;
3871   int i;
3872 
3873   KMP_DEBUG_ASSERT(!root->r.r_active);
3874 
3875   root->r.r_root_team = NULL;
3876   root->r.r_hot_team = NULL;
3877   // __kmp_free_team() does not free hot teams, so we have to clear r_hot_team
3878   // before call to __kmp_free_team().
3879   __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
3880 #if KMP_NESTED_HOT_TEAMS
3881   if (__kmp_hot_teams_max_level >
3882       0) { // need to free nested hot teams and their threads if any
3883     for (i = 0; i < hot_team->t.t_nproc; ++i) {
3884       kmp_info_t *th = hot_team->t.t_threads[i];
3885       if (__kmp_hot_teams_max_level > 1) {
3886         n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level);
3887       }
3888       if (th->th.th_hot_teams) {
3889         __kmp_free(th->th.th_hot_teams);
3890         th->th.th_hot_teams = NULL;
3891       }
3892     }
3893   }
3894 #endif
3895   __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
3896 
3897   // Before we can reap the thread, we need to make certain that all other
3898   // threads in the teams that had this root as ancestor have stopped trying to
3899   // steal tasks.
3900   if (__kmp_tasking_mode != tskm_immediate_exec) {
3901     __kmp_wait_to_unref_task_teams();
3902   }
3903 
3904 #if KMP_OS_WINDOWS
3905   /* Close Handle of root duplicated in __kmp_create_worker (tr #62919) */
3906   KA_TRACE(
3907       10, ("__kmp_reset_root: free handle, th = %p, handle = %" KMP_UINTPTR_SPEC
3908            "\n",
3909            (LPVOID) & (root->r.r_uber_thread->th),
3910            root->r.r_uber_thread->th.th_info.ds.ds_thread));
3911   __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread);
3912 #endif /* KMP_OS_WINDOWS */
3913 
3914 #if OMPT_SUPPORT
3915   ompt_data_t *task_data;
3916   ompt_data_t *parallel_data;
3917   __ompt_get_task_info_internal(0, NULL, &task_data, NULL, &parallel_data, NULL);
3918   if (ompt_enabled.ompt_callback_implicit_task) {
3919     ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
3920         ompt_scope_end, parallel_data, task_data, 0, 1, ompt_task_initial);
3921   }
3922   if (ompt_enabled.ompt_callback_thread_end) {
3923     ompt_callbacks.ompt_callback(ompt_callback_thread_end)(
3924         &(root->r.r_uber_thread->th.ompt_thread_info.thread_data));
3925   }
3926 #endif
3927 
3928   TCW_4(__kmp_nth,
3929         __kmp_nth - 1); // __kmp_reap_thread will decrement __kmp_all_nth.
3930   i = root->r.r_uber_thread->th.th_cg_roots->cg_nthreads--;
3931   KA_TRACE(100, ("__kmp_reset_root: Thread %p decrement cg_nthreads on node %p"
3932                  " to %d\n",
3933                  root->r.r_uber_thread, root->r.r_uber_thread->th.th_cg_roots,
3934                  root->r.r_uber_thread->th.th_cg_roots->cg_nthreads));
3935   if (i == 1) {
3936     // need to free contention group structure
3937     KMP_DEBUG_ASSERT(root->r.r_uber_thread ==
3938                      root->r.r_uber_thread->th.th_cg_roots->cg_root);
3939     KMP_DEBUG_ASSERT(root->r.r_uber_thread->th.th_cg_roots->up == NULL);
3940     __kmp_free(root->r.r_uber_thread->th.th_cg_roots);
3941     root->r.r_uber_thread->th.th_cg_roots = NULL;
3942   }
3943   __kmp_reap_thread(root->r.r_uber_thread, 1);
3944 
  // We cannot put the root thread into __kmp_thread_pool, so we have to reap
  // it instead of freeing it.
3947   root->r.r_uber_thread = NULL;
3948   /* mark root as no longer in use */
3949   root->r.r_begin = FALSE;
3950 
3951   return n;
3952 }
3953 
3954 void __kmp_unregister_root_current_thread(int gtid) {
3955   KA_TRACE(1, ("__kmp_unregister_root_current_thread: enter T#%d\n", gtid));
  /* This lock should be OK, since unregister_root_current_thread is never
     called during an abort, only during a normal close. Furthermore, if you
     have the forkjoin lock, you should never try to get the initz lock. */
3959   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3960   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
3961     KC_TRACE(10, ("__kmp_unregister_root_current_thread: already finished, "
3962                   "exiting T#%d\n",
3963                   gtid));
3964     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3965     return;
3966   }
3967   kmp_root_t *root = __kmp_root[gtid];
3968 
3969   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
3970   KMP_ASSERT(KMP_UBER_GTID(gtid));
3971   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
3972   KMP_ASSERT(root->r.r_active == FALSE);
3973 
3974   KMP_MB();
3975 
3976   kmp_info_t *thread = __kmp_threads[gtid];
3977   kmp_team_t *team = thread->th.th_team;
3978   kmp_task_team_t *task_team = thread->th.th_task_team;
3979 
3980   // we need to wait for the proxy tasks before finishing the thread
3981   if (task_team != NULL && task_team->tt.tt_found_proxy_tasks) {
3982 #if OMPT_SUPPORT
3983     // the runtime is shutting down so we won't report any events
3984     thread->th.ompt_thread_info.state = ompt_state_undefined;
3985 #endif
3986     __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
3987   }
3988 
3989   __kmp_reset_root(gtid, root);
3990 
3991   /* free up this thread slot */
3992   __kmp_gtid_set_specific(KMP_GTID_DNE);
3993 #ifdef KMP_TDATA_GTID
3994   __kmp_gtid = KMP_GTID_DNE;
3995 #endif
3996 
3997   KMP_MB();
3998   KC_TRACE(10,
3999            ("__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid));
4000 
4001   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
4002 }
4003 
4004 #if KMP_OS_WINDOWS
4005 /* __kmp_forkjoin_lock must be already held
4006    Unregisters a root thread that is not the current thread.  Returns the number
4007    of __kmp_threads entries freed as a result. */
4008 static int __kmp_unregister_root_other_thread(int gtid) {
4009   kmp_root_t *root = __kmp_root[gtid];
4010   int r;
4011 
4012   KA_TRACE(1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid));
4013   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
4014   KMP_ASSERT(KMP_UBER_GTID(gtid));
4015   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
4016   KMP_ASSERT(root->r.r_active == FALSE);
4017 
4018   r = __kmp_reset_root(gtid, root);
4019   KC_TRACE(10,
4020            ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid));
4021   return r;
4022 }
4023 #endif
4024 
4025 #if KMP_DEBUG
4026 void __kmp_task_info() {
4027 
4028   kmp_int32 gtid = __kmp_entry_gtid();
4029   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
4030   kmp_info_t *this_thr = __kmp_threads[gtid];
4031   kmp_team_t *steam = this_thr->th.th_serial_team;
4032   kmp_team_t *team = this_thr->th.th_team;
4033 
4034   __kmp_printf(
4035       "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p "
4036       "ptask=%p\n",
4037       gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
4038       team->t.t_implicit_task_taskdata[tid].td_parent);
4039 }
4040 #endif // KMP_DEBUG
4041 
4042 /* TODO optimize with one big memclr, take out what isn't needed, split
4043    responsibility to workers as much as possible, and delay initialization of
4044    features as much as possible  */
4045 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
4046                                   int tid, int gtid) {
4047   /* this_thr->th.th_info.ds.ds_gtid is setup in
4048      kmp_allocate_thread/create_worker.
4049      this_thr->th.th_serial_team is setup in __kmp_allocate_thread */
4050   kmp_info_t *master = team->t.t_threads[0];
4051   KMP_DEBUG_ASSERT(this_thr != NULL);
4052   KMP_DEBUG_ASSERT(this_thr->th.th_serial_team);
4053   KMP_DEBUG_ASSERT(team);
4054   KMP_DEBUG_ASSERT(team->t.t_threads);
4055   KMP_DEBUG_ASSERT(team->t.t_dispatch);
4056   KMP_DEBUG_ASSERT(master);
4057   KMP_DEBUG_ASSERT(master->th.th_root);
4058 
4059   KMP_MB();
4060 
4061   TCW_SYNC_PTR(this_thr->th.th_team, team);
4062 
4063   this_thr->th.th_info.ds.ds_tid = tid;
4064   this_thr->th.th_set_nproc = 0;
4065   if (__kmp_tasking_mode != tskm_immediate_exec)
4066     // When tasking is possible, threads are not safe to reap until they are
4067     // done tasking; this will be set when tasking code is exited in wait
4068     this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
4069   else // no tasking --> always safe to reap
4070     this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
  this_thr->th.th_set_proc_bind = proc_bind_default;
#if KMP_AFFINITY_SUPPORTED
  this_thr->th.th_new_place = this_thr->th.th_current_place;
#endif
  this_thr->th.th_root = master->th.th_root;

  /* set up the thread's cache of the team structure */
  this_thr->th.th_team_nproc = team->t.t_nproc;
  this_thr->th.th_team_master = master;
  this_thr->th.th_team_serialized = team->t.t_serialized;
  TCW_PTR(this_thr->th.th_sleep_loc, NULL);

  KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);

  KF_TRACE(10, ("__kmp_initialize_info1: T#%d:%d this_thread=%p curtask=%p\n",
                tid, gtid, this_thr, this_thr->th.th_current_task));

  __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr,
                           team, tid, TRUE);

  KF_TRACE(10, ("__kmp_initialize_info2: T#%d:%d this_thread=%p curtask=%p\n",
                tid, gtid, this_thr, this_thr->th.th_current_task));
  // TODO: Initialize ICVs from parent; GEH - isn't that already done in
  // __kmp_initialize_team()?

  /* TODO no worksharing in speculative threads */
  this_thr->th.th_dispatch = &team->t.t_dispatch[tid];

  this_thr->th.th_local.this_construct = 0;

  if (!this_thr->th.th_pri_common) {
    this_thr->th.th_pri_common =
        (struct common_table *)__kmp_allocate(sizeof(struct common_table));
    if (__kmp_storage_map) {
      __kmp_print_storage_map_gtid(
          gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
          sizeof(struct common_table), "th_%d.th_pri_common\n", gtid);
    }
    this_thr->th.th_pri_head = NULL;
  }
  if (this_thr != master && // Master's CG root is initialized elsewhere
      this_thr->th.th_cg_roots != master->th.th_cg_roots) { // CG root not set
    // Make new thread's CG root same as master's
    KMP_DEBUG_ASSERT(master->th.th_cg_roots);
    kmp_cg_root_t *tmp = this_thr->th.th_cg_roots;
    if (tmp) {
      // worker changes CG, need to check if old CG should be freed
      int i = tmp->cg_nthreads--;
      KA_TRACE(100, ("__kmp_initialize_info: Thread %p decrement cg_nthreads"
                     " on node %p of thread %p to %d\n",
                     this_thr, tmp, tmp->cg_root, tmp->cg_nthreads));
      if (i == 1) {
        __kmp_free(tmp); // last thread left CG --> free it
      }
    }
    this_thr->th.th_cg_roots = master->th.th_cg_roots;
    // Increment new thread's CG root's counter to add the new thread
    this_thr->th.th_cg_roots->cg_nthreads++;
    KA_TRACE(100, ("__kmp_initialize_info: Thread %p increment cg_nthreads on"
                   " node %p of thread %p to %d\n",
                   this_thr, this_thr->th.th_cg_roots,
                   this_thr->th.th_cg_roots->cg_root,
                   this_thr->th.th_cg_roots->cg_nthreads));
    this_thr->th.th_current_task->td_icvs.thread_limit =
        this_thr->th.th_cg_roots->cg_thread_limit;
  }
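  // Illustrative note on the reference counting above (numbers are made up):
  // if the worker's old CG node had cg_nthreads == 3, the post-decrement
  // returns 3, so the node stays alive at 2; had it returned 1, the worker
  // was the last member and the node is freed. Either way the worker then
  // joins the master's CG node, increments its cg_nthreads, and adopts that
  // node's cg_thread_limit as its thread-limit ICV.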

  /* Initialize dynamic dispatch */
  {
    volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
    // Use team max_nproc since this will never change for the team.
    size_t disp_size =
        sizeof(dispatch_private_info_t) *
        (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
    KD_TRACE(10, ("__kmp_initialize_info: T#%d max_nproc: %d\n", gtid,
                  team->t.t_max_nproc));
    KMP_ASSERT(dispatch);
    KMP_DEBUG_ASSERT(team->t.t_dispatch);
    KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);

    dispatch->th_disp_index = 0;
    dispatch->th_doacross_buf_idx = 0;
    if (!dispatch->th_disp_buffer) {
      dispatch->th_disp_buffer =
          (dispatch_private_info_t *)__kmp_allocate(disp_size);

      if (__kmp_storage_map) {
        __kmp_print_storage_map_gtid(
            gtid, &dispatch->th_disp_buffer[0],
            &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
                                          ? 1
                                          : __kmp_dispatch_num_buffers],
            disp_size, "th_%d.th_dispatch.th_disp_buffer "
                       "(team_%d.t_dispatch[%d].th_disp_buffer)",
            gtid, team->t.t_id, gtid);
      }
    } else {
      memset(&dispatch->th_disp_buffer[0], '\0', disp_size);
    }

    dispatch->th_dispatch_pr_current = 0;
    dispatch->th_dispatch_sh_current = 0;

    dispatch->th_deo_fcn = 0; /* ORDERED     */
    dispatch->th_dxo_fcn = 0; /* END ORDERED */
  }

  this_thr->th.th_next_pool = NULL;

  if (!this_thr->th.th_task_state_memo_stack) {
    size_t i;
    this_thr->th.th_task_state_memo_stack =
        (kmp_uint8 *)__kmp_allocate(4 * sizeof(kmp_uint8));
    this_thr->th.th_task_state_top = 0;
    this_thr->th.th_task_state_stack_sz = 4;
    for (i = 0; i < this_thr->th.th_task_state_stack_sz;
         ++i) // zero init the stack
      this_thr->th.th_task_state_memo_stack[i] = 0;
  }

  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);

  KMP_MB();
}

/* Allocate a new thread for the requesting team. This is only called from
   within a forkjoin critical section. We will first try to get an available
   thread from the thread pool. If none is available, we will fork a new one,
   assuming we are able to create one. This should be assured, as the caller
   is expected to check on this first. */
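/* A minimal sketch of the expected calling pattern (hypothetical caller code,
   for illustration only; the real call sites live in the fork path and in
   __kmp_allocate_team() below):

     __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
     if (__kmp_nth < __kmp_threads_capacity || __kmp_expand_threads(1)) {
       kmp_info_t *thr = __kmp_allocate_thread(root, team, tid);
       // ... wire thr into team->t.t_threads[tid] ...
     }
     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
*/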
kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
                                  int new_tid) {
  kmp_team_t *serial_team;
  kmp_info_t *new_thr;
  int new_gtid;

  KA_TRACE(20, ("__kmp_allocate_thread: T#%d\n", __kmp_get_gtid()));
  KMP_DEBUG_ASSERT(root && team);
#if !KMP_NESTED_HOT_TEAMS
  KMP_DEBUG_ASSERT(KMP_MASTER_GTID(__kmp_get_gtid()));
#endif
  KMP_MB();

  /* first, try to get one from the thread pool */
  if (__kmp_thread_pool) {
    new_thr = CCAST(kmp_info_t *, __kmp_thread_pool);
    __kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool;
    if (new_thr == __kmp_thread_pool_insert_pt) {
      __kmp_thread_pool_insert_pt = NULL;
    }
    TCW_4(new_thr->th.th_in_pool, FALSE);
    __kmp_suspend_initialize_thread(new_thr);
    __kmp_lock_suspend_mx(new_thr);
    if (new_thr->th.th_active_in_pool == TRUE) {
      KMP_DEBUG_ASSERT(new_thr->th.th_active == TRUE);
      KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
      new_thr->th.th_active_in_pool = FALSE;
    }
    __kmp_unlock_suspend_mx(new_thr);

    KA_TRACE(20, ("__kmp_allocate_thread: T#%d using thread T#%d\n",
                  __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid));
    KMP_ASSERT(!new_thr->th.th_team);
    KMP_DEBUG_ASSERT(__kmp_nth < __kmp_threads_capacity);

    /* set up the thread structure */
    __kmp_initialize_info(new_thr, team, new_tid,
                          new_thr->th.th_info.ds.ds_gtid);
    KMP_DEBUG_ASSERT(new_thr->th.th_serial_team);

    TCW_4(__kmp_nth, __kmp_nth + 1);

    new_thr->th.th_task_state = 0;
    new_thr->th.th_task_state_top = 0;
    new_thr->th.th_task_state_stack_sz = 4;

#ifdef KMP_ADJUST_BLOCKTIME
    /* Adjust blocktime back to zero if necessary */
    /* Middle initialization might not have occurred yet */
    if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
      if (__kmp_nth > __kmp_avail_proc) {
        __kmp_zero_bt = TRUE;
      }
    }
#endif /* KMP_ADJUST_BLOCKTIME */

#if KMP_DEBUG
    // If the thread entered the pool via __kmp_free_thread, wait_flag should
    // not be KMP_BARRIER_PARENT_FLAG.
    int b;
    kmp_balign_t *balign = new_thr->th.th_bar;
    for (b = 0; b < bs_last_barrier; ++b)
      KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
#endif

    KF_TRACE(10, ("__kmp_allocate_thread: T#%d using thread %p T#%d\n",
                  __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid));

    KMP_MB();
    return new_thr;
  }

  /* no, we'll fork a new one */
  KMP_ASSERT(__kmp_nth == __kmp_all_nth);
  KMP_ASSERT(__kmp_all_nth < __kmp_threads_capacity);

#if KMP_USE_MONITOR
  // If this is the first worker thread the RTL is creating, then also
  // launch the monitor thread.  We try to do this as early as possible.
  if (!TCR_4(__kmp_init_monitor)) {
    __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
    if (!TCR_4(__kmp_init_monitor)) {
      KF_TRACE(10, ("before __kmp_create_monitor\n"));
      TCW_4(__kmp_init_monitor, 1);
      __kmp_create_monitor(&__kmp_monitor);
      KF_TRACE(10, ("after __kmp_create_monitor\n"));
#if KMP_OS_WINDOWS
      // AC: wait until the monitor has started. This is a fix for CQ232808.
      // If the library is loaded/unloaded in a loop with small (parallel)
      // work in between, there is a high probability that the monitor thread
      // starts only after the library has shut down. At shutdown it is too
      // late to cope with the problem, because when the master is in DllMain
      // (process detach) the monitor has no chance to start (it is blocked),
      // and the master has no means to inform the monitor that the library is
      // gone, because all the memory the monitor could access is about to be
      // released/reset.
      while (TCR_4(__kmp_init_monitor) < 2) {
        KMP_YIELD(TRUE);
      }
      KF_TRACE(10, ("after monitor thread has started\n"));
#endif
    }
    __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
  }
#endif

  KMP_MB();
  for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
    KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
  }

  /* allocate space for it. */
  new_thr = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));

  TCW_SYNC_PTR(__kmp_threads[new_gtid], new_thr);

#if USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG
  // Suppress detection of race conditions on synchronization flags in debug
  // mode; this helps to analyze library internals by eliminating false
  // positives.
  __itt_suppress_mark_range(
      __itt_suppress_range, __itt_suppress_threading_errors,
      &new_thr->th.th_sleep_loc, sizeof(new_thr->th.th_sleep_loc));
  __itt_suppress_mark_range(
      __itt_suppress_range, __itt_suppress_threading_errors,
      &new_thr->th.th_reap_state, sizeof(new_thr->th.th_reap_state));
#if KMP_OS_WINDOWS
  __itt_suppress_mark_range(
      __itt_suppress_range, __itt_suppress_threading_errors,
      &new_thr->th.th_suspend_init, sizeof(new_thr->th.th_suspend_init));
#else
  __itt_suppress_mark_range(__itt_suppress_range,
                            __itt_suppress_threading_errors,
                            &new_thr->th.th_suspend_init_count,
                            sizeof(new_thr->th.th_suspend_init_count));
#endif
  // TODO: check if we need to also suppress b_arrived flags
  __itt_suppress_mark_range(__itt_suppress_range,
                            __itt_suppress_threading_errors,
                            CCAST(kmp_uint64 *, &new_thr->th.th_bar[0].bb.b_go),
                            sizeof(new_thr->th.th_bar[0].bb.b_go));
  __itt_suppress_mark_range(__itt_suppress_range,
                            __itt_suppress_threading_errors,
                            CCAST(kmp_uint64 *, &new_thr->th.th_bar[1].bb.b_go),
                            sizeof(new_thr->th.th_bar[1].bb.b_go));
  __itt_suppress_mark_range(__itt_suppress_range,
                            __itt_suppress_threading_errors,
                            CCAST(kmp_uint64 *, &new_thr->th.th_bar[2].bb.b_go),
                            sizeof(new_thr->th.th_bar[2].bb.b_go));
#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG */
  if (__kmp_storage_map) {
    __kmp_print_thread_storage_map(new_thr, new_gtid);
  }

  // add the reserve serialized team, initialized from the team's master thread
  {
    kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
    KF_TRACE(10, ("__kmp_allocate_thread: before th_serial/serial_team\n"));
    new_thr->th.th_serial_team = serial_team =
        (kmp_team_t *)__kmp_allocate_team(root, 1, 1,
#if OMPT_SUPPORT
                                          ompt_data_none, // root parallel id
#endif
                                          proc_bind_default, &r_icvs,
                                          0 USE_NESTED_HOT_ARG(NULL));
  }
  KMP_ASSERT(serial_team);
  serial_team->t.t_serialized = 0; // AC: the team is created in reserve, not
  // for execution (it is unused for now).
  serial_team->t.t_threads[0] = new_thr;
  KF_TRACE(10,
           ("__kmp_allocate_thread: after th_serial/serial_team : new_thr=%p\n",
            new_thr));

  /* set up the thread structures */
  __kmp_initialize_info(new_thr, team, new_tid, new_gtid);

#if USE_FAST_MEMORY
  __kmp_initialize_fast_memory(new_thr);
#endif /* USE_FAST_MEMORY */

#if KMP_USE_BGET
  KMP_DEBUG_ASSERT(new_thr->th.th_local.bget_data == NULL);
  __kmp_initialize_bget(new_thr);
#endif

  __kmp_init_random(new_thr); // Initialize random number generator

  /* Initialize these only once when thread is grabbed for a team allocation */
  KA_TRACE(20,
           ("__kmp_allocate_thread: T#%d init go fork=%u, plain=%u\n",
            __kmp_get_gtid(), KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));

  int b;
  kmp_balign_t *balign = new_thr->th.th_bar;
  for (b = 0; b < bs_last_barrier; ++b) {
    balign[b].bb.b_go = KMP_INIT_BARRIER_STATE;
    balign[b].bb.team = NULL;
    balign[b].bb.wait_flag = KMP_BARRIER_NOT_WAITING;
    balign[b].bb.use_oncore_barrier = 0;
  }

  new_thr->th.th_spin_here = FALSE;
  new_thr->th.th_next_waiting = 0;
#if KMP_OS_UNIX
  new_thr->th.th_blocking = false;
#endif

#if KMP_AFFINITY_SUPPORTED
  new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
  new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
  new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
  new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
#endif
  new_thr->th.th_def_allocator = __kmp_def_allocator;
  new_thr->th.th_prev_level = 0;
  new_thr->th.th_prev_num_threads = 1;

  TCW_4(new_thr->th.th_in_pool, FALSE);
  new_thr->th.th_active_in_pool = FALSE;
  TCW_4(new_thr->th.th_active, TRUE);

  /* adjust the global counters */
  __kmp_all_nth++;
  __kmp_nth++;

  // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for low
  // numbers of procs, and method #2 (keyed API call) for higher numbers.
  if (__kmp_adjust_gtid_mode) {
    if (__kmp_all_nth >= __kmp_tls_gtid_min) {
      if (TCR_4(__kmp_gtid_mode) != 2) {
        TCW_4(__kmp_gtid_mode, 2);
      }
    } else {
      if (TCR_4(__kmp_gtid_mode) != 1) {
        TCW_4(__kmp_gtid_mode, 1);
      }
    }
  }
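  // For illustration (the threshold value is configuration-dependent): if
  // __kmp_tls_gtid_min were 20, then creating the 20th thread overall would
  // flip __kmp_gtid_mode from 1 (stack-address search) to 2 (keyed TLS API).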

#ifdef KMP_ADJUST_BLOCKTIME
  /* Adjust blocktime back to zero if necessary */
  /* Middle initialization might not have occurred yet */
  if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
    if (__kmp_nth > __kmp_avail_proc) {
      __kmp_zero_bt = TRUE;
    }
  }
#endif /* KMP_ADJUST_BLOCKTIME */
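  // For illustration (values are made up): with __kmp_avail_proc == 8,
  // creating a 9th thread oversubscribes the machine, so __kmp_zero_bt makes
  // the effective blocktime zero and idle threads yield the CPU promptly.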

  /* actually fork it and create the new worker thread */
  KF_TRACE(
      10, ("__kmp_allocate_thread: before __kmp_create_worker: %p\n", new_thr));
  __kmp_create_worker(new_gtid, new_thr, __kmp_stksize);
  KF_TRACE(10,
           ("__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr));

  KA_TRACE(20, ("__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(),
                new_gtid));
  KMP_MB();
  return new_thr;
}

/* Reinitialize team for reuse.
   The hot team code calls this routine at every fork barrier, so EPCC barrier
   tests are extremely sensitive to changes in it, esp. writes to the team
   struct, which cause a cache invalidation in all threads.
   IF YOU TOUCH THIS ROUTINE, RUN EPCC C SYNCBENCH ON A BIG-IRON MACHINE!!! */
static void __kmp_reinitialize_team(kmp_team_t *team,
                                    kmp_internal_control_t *new_icvs,
                                    ident_t *loc) {
  KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
                team->t.t_threads[0], team));
  KMP_DEBUG_ASSERT(team && new_icvs);
  KMP_DEBUG_ASSERT((!TCR_4(__kmp_init_parallel)) || new_icvs->nproc);
  KMP_CHECK_UPDATE(team->t.t_ident, loc);

  KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
  // Copy ICVs to the master thread's implicit taskdata
  __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
  copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);

  KF_TRACE(10, ("__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
                team->t.t_threads[0], team));
}

/* Initialize the team data structure.
   This assumes the t_threads and t_max_nproc are already set.
   Also, we don't touch the arguments */
static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
                                  kmp_internal_control_t *new_icvs,
                                  ident_t *loc) {
  KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));

  /* verify */
  KMP_DEBUG_ASSERT(team);
  KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
  KMP_DEBUG_ASSERT(team->t.t_threads);
  KMP_MB();

  team->t.t_master_tid = 0; /* not needed */
  /* team->t.t_master_bar;        not needed */
  team->t.t_serialized = new_nproc > 1 ? 0 : 1;
  team->t.t_nproc = new_nproc;

  /* team->t.t_parent     = NULL; TODO not needed & would mess up hot team */
  team->t.t_next_pool = NULL;
  /* memset( team->t.t_threads, 0, sizeof(kmp_info_t*)*new_nproc ); would mess
   * up hot team */

  TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
  team->t.t_invoke = NULL; /* not needed */

  // TODO???: team->t.t_max_active_levels       = new_max_active_levels;
  team->t.t_sched.sched = new_icvs->sched.sched;

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  team->t.t_fp_control_saved = FALSE; /* not needed */
  team->t.t_x87_fpu_control_word = 0; /* not needed */
  team->t.t_mxcsr = 0; /* not needed */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  team->t.t_construct = 0;

  team->t.t_ordered.dt.t_value = 0;
  team->t.t_master_active = FALSE;

#ifdef KMP_DEBUG
  team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */
#endif
#if KMP_OS_WINDOWS
  team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */
#endif

  team->t.t_control_stack_top = NULL;

  __kmp_reinitialize_team(team, new_icvs, loc);

  KMP_MB();
  KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
}

#if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
/* Sets the full mask for the thread and returns the old mask via old_mask;
   makes no changes to the affinity structures. */
static void
__kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
  if (KMP_AFFINITY_CAPABLE()) {
    int status;
    if (old_mask != NULL) {
      status = __kmp_get_system_affinity(old_mask, TRUE);
      int error = errno;
      if (status != 0) {
        __kmp_fatal(KMP_MSG(ChangeThreadAffMaskError), KMP_ERR(error),
                    __kmp_msg_null);
      }
    }
    __kmp_set_system_affinity(__kmp_affin_fullMask, TRUE);
  }
}
#endif

#if KMP_AFFINITY_SUPPORTED

// __kmp_partition_places() is the heart of the OpenMP 4.0 affinity mechanism.
// It calculates the master and worker threads' partitions based upon the
// parent thread's partition, and binds each worker to a place within its
// partition. The master thread's partition should already include its current
// binding.
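// Illustrative example (values are made up): on a machine with 8 places and a
// master bound to place 2 with partition [0,7], proc_bind_close for a team of
// 4 leaves the master on place 2 and binds the workers to places 3, 4 and 5,
// while proc_bind_spread instead carves [0,7] into four sub-partitions and
// binds one thread to the first place of each.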
static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
  // Copy the master thread's place partition to the team struct
  kmp_info_t *master_th = team->t.t_threads[0];
  KMP_DEBUG_ASSERT(master_th != NULL);
  kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
  int first_place = master_th->th.th_first_place;
  int last_place = master_th->th.th_last_place;
  int masters_place = master_th->th.th_current_place;
  team->t.t_first_place = first_place;
  team->t.t_last_place = last_place;

  KA_TRACE(20, ("__kmp_partition_places: enter: proc_bind = %d T#%d(%d:0) "
                "bound to place %d partition = [%d,%d]\n",
                proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
                team->t.t_id, masters_place, first_place, last_place));

  switch (proc_bind) {

  case proc_bind_default:
    // Serial teams might have the proc_bind policy set to proc_bind_default;
    // that is fine, since we never rebind the master thread for any proc_bind
    // policy.
    KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
    break;

  case proc_bind_master: {
    int f;
    int n_th = team->t.t_nproc;
    for (f = 1; f < n_th; f++) {
      kmp_info_t *th = team->t.t_threads[f];
      KMP_DEBUG_ASSERT(th != NULL);
      th->th.th_first_place = first_place;
      th->th.th_last_place = last_place;
      th->th.th_new_place = masters_place;
      if (__kmp_display_affinity && masters_place != th->th.th_current_place &&
          team->t.t_display_affinity != 1) {
        team->t.t_display_affinity = 1;
      }

      KA_TRACE(100, ("__kmp_partition_places: master: T#%d(%d:%d) place %d "
                     "partition = [%d,%d]\n",
                     __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
                     f, masters_place, first_place, last_place));
    }
  } break;

  case proc_bind_close: {
    int f;
    int n_th = team->t.t_nproc;
    int n_places;
    if (first_place <= last_place) {
      n_places = last_place - first_place + 1;
    } else {
      n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
    }
    if (n_th <= n_places) {
      int place = masters_place;
      for (f = 1; f < n_th; f++) {
        kmp_info_t *th = team->t.t_threads[f];
        KMP_DEBUG_ASSERT(th != NULL);

        if (place == last_place) {
          place = first_place;
        } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
          place = 0;
        } else {
          place++;
        }
        th->th.th_first_place = first_place;
        th->th.th_last_place = last_place;
        th->th.th_new_place = place;
        if (__kmp_display_affinity && place != th->th.th_current_place &&
            team->t.t_display_affinity != 1) {
          team->t.t_display_affinity = 1;
        }

        KA_TRACE(100, ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
                       "partition = [%d,%d]\n",
                       __kmp_gtid_from_thread(team->t.t_threads[f]),
                       team->t.t_id, f, place, first_place, last_place));
      }
    } else {
      int S, rem, gap, s_count;
      S = n_th / n_places;
      s_count = 0;
      rem = n_th - (S * n_places);
      gap = rem > 0 ? n_places / rem : n_places;
      int place = masters_place;
      int gap_ct = gap;
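      // Illustrative arithmetic for the blocking scheme above (values are
      // made up): n_th = 10 threads over n_places = 4 places gives S = 2,
      // rem = 2, gap = 2, so the places receive 3, 2, 3 and 2 threads; the
      // two "extra" threads land gap places apart, and place wraps around to
      // masters_place by the end of the loop.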
      for (f = 0; f < n_th; f++) {
        kmp_info_t *th = team->t.t_threads[f];
        KMP_DEBUG_ASSERT(th != NULL);

        th->th.th_first_place = first_place;
        th->th.th_last_place = last_place;
        th->th.th_new_place = place;
        if (__kmp_display_affinity && place != th->th.th_current_place &&
            team->t.t_display_affinity != 1) {
          team->t.t_display_affinity = 1;
        }
        s_count++;

        if ((s_count == S) && rem && (gap_ct == gap)) {
          // do nothing: an extra thread will be added to this place on the
          // next iteration
        } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
          // we added an extra thread to this place; move to the next place
          if (place == last_place) {
            place = first_place;
          } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
            place = 0;
          } else {
            place++;
          }
          s_count = 0;
          gap_ct = 1;
          rem--;
        } else if (s_count == S) { // place is full; don't add an extra thread
          if (place == last_place) {
            place = first_place;
          } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
            place = 0;
          } else {
            place++;
          }
          gap_ct++;
          s_count = 0;
        }

        KA_TRACE(100,
                 ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
                  "partition = [%d,%d]\n",
                  __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
                  th->th.th_new_place, first_place, last_place));
      }
      KMP_DEBUG_ASSERT(place == masters_place);
    }
  } break;

  case proc_bind_spread: {
    int f;
    int n_th = team->t.t_nproc;
    int n_places;
    int thidx;
    if (first_place <= last_place) {
      n_places = last_place - first_place + 1;
    } else {
      n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
    }
    if (n_th <= n_places) {
      int place = -1;

      if (n_places != static_cast<int>(__kmp_affinity_num_masks)) {
        int S = n_places / n_th;
        int s_count, rem, gap, gap_ct;

        place = masters_place;
        rem = n_places - n_th * S;
        gap = rem ? n_th / rem : 1;
        gap_ct = gap;
        thidx = n_th;
        if (update_master_only == 1)
          thidx = 1;
        for (f = 0; f < thidx; f++) {
          kmp_info_t *th = team->t.t_threads[f];
          KMP_DEBUG_ASSERT(th != NULL);

          th->th.th_first_place = place;
          th->th.th_new_place = place;
          if (__kmp_display_affinity && place != th->th.th_current_place &&
              team->t.t_display_affinity != 1) {
            team->t.t_display_affinity = 1;
          }
          s_count = 1;
          while (s_count < S) {
            if (place == last_place) {
              place = first_place;
            } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
              place = 0;
            } else {
              place++;
            }
            s_count++;
          }
          if (rem && (gap_ct == gap)) {
            if (place == last_place) {
              place = first_place;
            } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
              place = 0;
            } else {
              place++;
            }
            rem--;
            gap_ct = 0;
          }
          th->th.th_last_place = place;
          gap_ct++;

          if (place == last_place) {
            place = first_place;
          } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
            place = 0;
          } else {
            place++;
          }

          KA_TRACE(100,
                   ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
                    "partition = [%d,%d], __kmp_affinity_num_masks: %u\n",
                    __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
                    f, th->th.th_new_place, th->th.th_first_place,
                    th->th.th_last_place, __kmp_affinity_num_masks));
        }
      } else {
        /* Given a uniform space of available computation places, we can create
           T partitions of round(P/T) size and put a thread into the first
           place of each partition. */
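        /* Illustrative arithmetic (values are made up): n_th = 3 threads over
           n_places = 9 places with masters_place = 0 gives spacing =
           (9 + 1) / 3 ~= 3.33, yielding partitions [0,2], [3,5] and [6,8],
           with each thread bound to the first place of its partition. */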
        double current = static_cast<double>(masters_place);
        double spacing =
            (static_cast<double>(n_places + 1) / static_cast<double>(n_th));
        int first, last;
        kmp_info_t *th;

        thidx = n_th + 1;
        if (update_master_only == 1)
          thidx = 1;
        for (f = 0; f < thidx; f++) {
          first = static_cast<int>(current);
          last = static_cast<int>(current + spacing) - 1;
          KMP_DEBUG_ASSERT(last >= first);
          if (first >= n_places) {
            if (masters_place) {
              first -= n_places;
              last -= n_places;
              if (first == (masters_place + 1)) {
                KMP_DEBUG_ASSERT(f == n_th);
                first--;
              }
              if (last == masters_place) {
                KMP_DEBUG_ASSERT(f == (n_th - 1));
                last--;
              }
            } else {
              KMP_DEBUG_ASSERT(f == n_th);
              first = 0;
              last = 0;
            }
          }
          if (last >= n_places) {
            last = (n_places - 1);
          }
          place = first;
          current += spacing;
          if (f < n_th) {
            KMP_DEBUG_ASSERT(0 <= first);
            KMP_DEBUG_ASSERT(n_places > first);
            KMP_DEBUG_ASSERT(0 <= last);
            KMP_DEBUG_ASSERT(n_places > last);
            KMP_DEBUG_ASSERT(last_place >= first_place);
            th = team->t.t_threads[f];
            KMP_DEBUG_ASSERT(th);
            th->th.th_first_place = first;
            th->th.th_new_place = place;
            th->th.th_last_place = last;
            if (__kmp_display_affinity && place != th->th.th_current_place &&
                team->t.t_display_affinity != 1) {
              team->t.t_display_affinity = 1;
            }
            KA_TRACE(100,
                     ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
                      "partition = [%d,%d], spacing = %.4f\n",
                      __kmp_gtid_from_thread(team->t.t_threads[f]),
                      team->t.t_id, f, th->th.th_new_place,
                      th->th.th_first_place, th->th.th_last_place, spacing));
          }
        }
      }
      KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
    } else {
      int S, rem, gap, s_count;
      S = n_th / n_places;
      s_count = 0;
      rem = n_th - (S * n_places);
      gap = rem > 0 ? n_places / rem : n_places;
      int place = masters_place;
      int gap_ct = gap;
      thidx = n_th;
      if (update_master_only == 1)
        thidx = 1;
      for (f = 0; f < thidx; f++) {
        kmp_info_t *th = team->t.t_threads[f];
        KMP_DEBUG_ASSERT(th != NULL);

        th->th.th_first_place = place;
        th->th.th_last_place = place;
        th->th.th_new_place = place;
        if (__kmp_display_affinity && place != th->th.th_current_place &&
            team->t.t_display_affinity != 1) {
          team->t.t_display_affinity = 1;
        }
        s_count++;

        if ((s_count == S) && rem && (gap_ct == gap)) {
          // do nothing: an extra thread will be added to this place on the
          // next iteration
        } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
          // we added an extra thread to this place; move on to the next place
          if (place == last_place) {
            place = first_place;
          } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
            place = 0;
          } else {
            place++;
          }
          s_count = 0;
          gap_ct = 1;
          rem--;
        } else if (s_count == S) { // place is full; don't add an extra thread
          if (place == last_place) {
            place = first_place;
          } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
            place = 0;
          } else {
            place++;
          }
          gap_ct++;
          s_count = 0;
        }

        KA_TRACE(100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
                       "partition = [%d,%d]\n",
                       __kmp_gtid_from_thread(team->t.t_threads[f]),
                       team->t.t_id, f, th->th.th_new_place,
                       th->th.th_first_place, th->th.th_last_place));
      }
      KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
    }
  } break;

  default:
    break;
  }

  KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
}

#endif // KMP_AFFINITY_SUPPORTED

/* Allocate a new team data structure to use; take one off of the free pool if
   available. */
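/* A summary of the selection order implemented below: (1) if the root is not
   active (or a nested hot team already exists for this level), reuse the hot
   team, resizing it if the requested thread count changed; (2) otherwise take
   the first team from __kmp_team_pool whose t_max_nproc is large enough,
   reaping undersized teams along the way; (3) otherwise allocate and
   initialize a fresh kmp_team_t. */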
kmp_team_t *
__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
#if OMPT_SUPPORT
                    ompt_data_t ompt_parallel_data,
#endif
                    kmp_proc_bind_t new_proc_bind,
                    kmp_internal_control_t *new_icvs,
                    int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
  int f;
  kmp_team_t *team;
  int use_hot_team = !root->r.r_active;
  int level = 0;

  KA_TRACE(20, ("__kmp_allocate_team: called\n"));
  KMP_DEBUG_ASSERT(new_nproc >= 1 && argc >= 0);
  KMP_DEBUG_ASSERT(max_nproc >= new_nproc);
  KMP_MB();

#if KMP_NESTED_HOT_TEAMS
  kmp_hot_team_ptr_t *hot_teams;
  if (master) {
    team = master->th.th_team;
    level = team->t.t_active_level;
    if (master->th.th_teams_microtask) { // in teams construct?
      if (master->th.th_teams_size.nteams > 1 &&
          ( // #teams > 1
              team->t.t_pkfn ==
                  (microtask_t)__kmp_teams_master || // inner fork of the teams
              master->th.th_teams_level <
                  team->t.t_level)) { // or nested parallel inside the teams
        ++level; // don't increment if #teams==1, or for the outer fork of the
        // teams; increment otherwise
      }
    }
    hot_teams = master->th.th_hot_teams;
    if (level < __kmp_hot_teams_max_level && hot_teams &&
        hot_teams[level]
            .hot_team) { // hot team has already been allocated for given level
      use_hot_team = 1;
    } else {
      use_hot_team = 0;
    }
  }
#endif
  // Optimization to use a "hot" team
  if (use_hot_team && new_nproc > 1) {
    KMP_DEBUG_ASSERT(new_nproc <= max_nproc);
#if KMP_NESTED_HOT_TEAMS
    team = hot_teams[level].hot_team;
#else
    team = root->r.r_hot_team;
#endif
#if KMP_DEBUG
    if (__kmp_tasking_mode != tskm_immediate_exec) {
      KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
                    "task_team[1] = %p before reinit\n",
                    team->t.t_task_team[0], team->t.t_task_team[1]));
    }
#endif

    // Has the number of threads changed?
    /* Let's assume the most common case is that the number of threads is
       unchanged, and put that case first. */
    if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
      KA_TRACE(20, ("__kmp_allocate_team: reusing hot team\n"));
      // This case can mean that omp_set_num_threads() was called and the hot
      // team size was already reduced, so we check the special flag
      if (team->t.t_size_changed == -1) {
        team->t.t_size_changed = 1;
      } else {
        KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
      }

      // TODO???: team->t.t_max_active_levels = new_max_active_levels;
      kmp_r_sched_t new_sched = new_icvs->sched;
      // set master's schedule as new run-time schedule
      KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);

      __kmp_reinitialize_team(team, new_icvs,
                              root->r.r_uber_thread->th.th_ident);

      KF_TRACE(10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
                    team->t.t_threads[0], team));
      __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);

#if KMP_AFFINITY_SUPPORTED
      if ((team->t.t_size_changed == 0) &&
          (team->t.t_proc_bind == new_proc_bind)) {
        if (new_proc_bind == proc_bind_spread) {
          __kmp_partition_places(
              team, 1); // add flag to update only master for spread
        }
        KA_TRACE(200, ("__kmp_allocate_team: reusing hot team #%d bindings: "
                       "proc_bind = %d, partition = [%d,%d]\n",
                       team->t.t_id, new_proc_bind, team->t.t_first_place,
                       team->t.t_last_place));
      } else {
        KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
        __kmp_partition_places(team);
      }
#else
      KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
#endif /* KMP_AFFINITY_SUPPORTED */
    } else if (team->t.t_nproc > new_nproc) {
      KA_TRACE(20,
               ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
                new_nproc));

      team->t.t_size_changed = 1;
#if KMP_NESTED_HOT_TEAMS
      if (__kmp_hot_teams_mode == 0) {
        // AC: the saved number of threads should correspond to the team's
        // value in this mode; it can be bigger in mode 1, when the hot team
        // keeps threads in reserve
        KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
        hot_teams[level].hot_team_nth = new_nproc;
#endif // KMP_NESTED_HOT_TEAMS
        /* release the extra threads we don't need any more */
        for (f = new_nproc; f < team->t.t_nproc; f++) {
          KMP_DEBUG_ASSERT(team->t.t_threads[f]);
          if (__kmp_tasking_mode != tskm_immediate_exec) {
            // When decreasing team size, threads no longer in the team should
            // unref task team.
            team->t.t_threads[f]->th.th_task_team = NULL;
          }
          __kmp_free_thread(team->t.t_threads[f]);
          team->t.t_threads[f] = NULL;
        }
#if KMP_NESTED_HOT_TEAMS
      } // (__kmp_hot_teams_mode == 0)
      else {
        // When keeping extra threads in team, switch threads to wait on own
        // b_go flag
        for (f = new_nproc; f < team->t.t_nproc; ++f) {
          KMP_DEBUG_ASSERT(team->t.t_threads[f]);
          kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
          for (int b = 0; b < bs_last_barrier; ++b) {
            if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG) {
              balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
            }
            KMP_CHECK_UPDATE(balign[b].bb.leaf_kids, 0);
          }
        }
      }
#endif // KMP_NESTED_HOT_TEAMS
      team->t.t_nproc = new_nproc;
      // TODO???: team->t.t_max_active_levels = new_max_active_levels;
      KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
      __kmp_reinitialize_team(team, new_icvs,
                              root->r.r_uber_thread->th.th_ident);

      // Update remaining threads
      for (f = 0; f < new_nproc; ++f) {
        team->t.t_threads[f]->th.th_team_nproc = new_nproc;
      }

      // restore the current task state of the master thread: should be the
      // implicit task
      KF_TRACE(10, ("__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
                    team->t.t_threads[0], team));

      __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);

#ifdef KMP_DEBUG
      for (f = 0; f < team->t.t_nproc; f++) {
        KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
                         team->t.t_threads[f]->th.th_team_nproc ==
                             team->t.t_nproc);
      }
#endif

      KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
#if KMP_AFFINITY_SUPPORTED
      __kmp_partition_places(team);
#endif
    } else { // team->t.t_nproc < new_nproc
#if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
      kmp_affin_mask_t *old_mask;
      if (KMP_AFFINITY_CAPABLE()) {
        KMP_CPU_ALLOC(old_mask);
      }
#endif

      KA_TRACE(20,
               ("__kmp_allocate_team: increasing hot team thread count to %d\n",
                new_nproc));

      team->t.t_size_changed = 1;

#if KMP_NESTED_HOT_TEAMS
      int avail_threads = hot_teams[level].hot_team_nth;
      if (new_nproc < avail_threads)
        avail_threads = new_nproc;
      kmp_info_t **other_threads = team->t.t_threads;
      for (f = team->t.t_nproc; f < avail_threads; ++f) {
        // Adjust barrier data of reserved threads (if any) of the team
        // Other data will be set in __kmp_initialize_info() below.
        int b;
        kmp_balign_t *balign = other_threads[f]->th.th_bar;
        for (b = 0; b < bs_last_barrier; ++b) {
          balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
          KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
#if USE_DEBUGGER
          balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
#endif
        }
      }
      if (hot_teams[level].hot_team_nth >= new_nproc) {
        // we have all needed threads in reserve, no need to allocate any;
        // this is only possible in mode 1, since mode 0 cannot have reserved
        // threads
        KMP_DEBUG_ASSERT(__kmp_hot_teams_mode == 1);
        team->t.t_nproc = new_nproc; // just get reserved threads involved
      } else {
        // we may have some threads in reserve, but not enough
        team->t.t_nproc =
            hot_teams[level]
                .hot_team_nth; // get reserved threads involved if any
        hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
#endif // KMP_NESTED_HOT_TEAMS
        if (team->t.t_max_nproc < new_nproc) {
          /* reallocate larger arrays */
          __kmp_reallocate_team_arrays(team, new_nproc);
          __kmp_reinitialize_team(team, new_icvs, NULL);
        }

#if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
        /* Temporarily set the full mask for the master thread before creating
           the workers. The reason is that workers inherit the affinity from
           the master, so if many workers are created on a single core quickly,
           they don't get a chance to set their own affinity for a long time. */
        __kmp_set_thread_affinity_mask_full_tmp(old_mask);
#endif

        /* allocate new threads for the hot team */
        for (f = team->t.t_nproc; f < new_nproc; f++) {
          kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
          KMP_DEBUG_ASSERT(new_worker);
          team->t.t_threads[f] = new_worker;

          KA_TRACE(20,
                   ("__kmp_allocate_team: team %d init T#%d arrived: "
                    "join=%llu, plain=%llu\n",
                    team->t.t_id, __kmp_gtid_from_tid(f, team),
                    team->t.t_bar[bs_forkjoin_barrier].b_arrived,
                    team->t.t_bar[bs_plain_barrier].b_arrived));

          { // Initialize barrier data for new threads.
            int b;
            kmp_balign_t *balign = new_worker->th.th_bar;
            for (b = 0; b < bs_last_barrier; ++b) {
              balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
              KMP_DEBUG_ASSERT(balign[b].bb.wait_flag !=
                               KMP_BARRIER_PARENT_FLAG);
#if USE_DEBUGGER
              balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
#endif
            }
          }
        }

#if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
        if (KMP_AFFINITY_CAPABLE()) {
          /* Restore initial master thread's affinity mask */
          __kmp_set_system_affinity(old_mask, TRUE);
          KMP_CPU_FREE(old_mask);
        }
#endif
#if KMP_NESTED_HOT_TEAMS
      } // end of check of t_nproc vs. new_nproc vs. hot_team_nth
#endif // KMP_NESTED_HOT_TEAMS
      /* make sure everyone is synchronized */
      int old_nproc = team->t.t_nproc; // save old value and use to update only
      // new threads below
      __kmp_initialize_team(team, new_nproc, new_icvs,
                            root->r.r_uber_thread->th.th_ident);

      /* reinitialize the threads */
      KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
      for (f = 0; f < team->t.t_nproc; ++f)
        __kmp_initialize_info(team->t.t_threads[f], team, f,
                              __kmp_gtid_from_tid(f, team));

      if (level) { // set th_task_state for new threads in nested hot team
        // __kmp_initialize_info() no longer zeroes th_task_state, so we only
        // need to set th_task_state for the new threads. th_task_state for the
        // master thread is not accurate until after this point in
        // __kmp_fork_call(), so we look to the master's memo_stack to get the
        // correct value.
        for (f = old_nproc; f < team->t.t_nproc; ++f)
          team->t.t_threads[f]->th.th_task_state =
              team->t.t_threads[0]->th.th_task_state_memo_stack[level];
      } else { // set th_task_state for new threads in non-nested hot team
        int old_state =
            team->t.t_threads[0]->th.th_task_state; // copy master's state
        for (f = old_nproc; f < team->t.t_nproc; ++f)
          team->t.t_threads[f]->th.th_task_state = old_state;
      }

#ifdef KMP_DEBUG
      for (f = 0; f < team->t.t_nproc; ++f) {
        KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
                         team->t.t_threads[f]->th.th_team_nproc ==
                             team->t.t_nproc);
      }
#endif

      KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
#if KMP_AFFINITY_SUPPORTED
      __kmp_partition_places(team);
#endif
    } // Check changes in number of threads

    kmp_info_t *master = team->t.t_threads[0];
    if (master->th.th_teams_microtask) {
      for (f = 1; f < new_nproc; ++f) {
        // propagate teams construct specific info to workers
        kmp_info_t *thr = team->t.t_threads[f];
        thr->th.th_teams_microtask = master->th.th_teams_microtask;
        thr->th.th_teams_level = master->th.th_teams_level;
        thr->th.th_teams_size = master->th.th_teams_size;
      }
    }
#if KMP_NESTED_HOT_TEAMS
    if (level) {
      // Sync barrier state for nested hot teams, not needed for outermost hot
      // team.
      for (f = 1; f < new_nproc; ++f) {
        kmp_info_t *thr = team->t.t_threads[f];
        int b;
        kmp_balign_t *balign = thr->th.th_bar;
        for (b = 0; b < bs_last_barrier; ++b) {
          balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
          KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
#if USE_DEBUGGER
          balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
#endif
        }
      }
    }
#endif // KMP_NESTED_HOT_TEAMS

    /* reallocate space for arguments if necessary */
    __kmp_alloc_argv_entries(argc, team, TRUE);
    KMP_CHECK_UPDATE(team->t.t_argc, argc);
    // The hot team re-uses the previous task team,
    // if untouched during the previous release->gather phase.

    KF_TRACE(10, (" hot_team = %p\n", team));

#if KMP_DEBUG
    if (__kmp_tasking_mode != tskm_immediate_exec) {
      KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
                    "task_team[1] = %p after reinit\n",
                    team->t.t_task_team[0], team->t.t_task_team[1]));
    }
#endif

#if OMPT_SUPPORT
    __ompt_team_assign_id(team, ompt_parallel_data);
#endif

    KMP_MB();

    return team;
  }

  /* next, let's try to take one from the team pool */
  KMP_MB();
  for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
    /* TODO: consider resizing undersized teams instead of reaping them, now
       that we have a resizing mechanism */
    if (team->t.t_max_nproc >= max_nproc) {
      /* take this team from the team pool */
      __kmp_team_pool = team->t.t_next_pool;

      /* set up the team for fresh use */
      __kmp_initialize_team(team, new_nproc, new_icvs, NULL);

      KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and "
                    "task_team[1] %p to NULL\n",
                    &team->t.t_task_team[0], &team->t.t_task_team[1]));
      team->t.t_task_team[0] = NULL;
      team->t.t_task_team[1] = NULL;

      /* reallocate space for arguments if necessary */
      __kmp_alloc_argv_entries(argc, team, TRUE);
      KMP_CHECK_UPDATE(team->t.t_argc, argc);

      KA_TRACE(
          20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
               team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
      { // Initialize barrier data.
        int b;
        for (b = 0; b < bs_last_barrier; ++b) {
          team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
#if USE_DEBUGGER
          team->t.t_bar[b].b_master_arrived = 0;
          team->t.t_bar[b].b_team_arrived = 0;
#endif
        }
      }

      team->t.t_proc_bind = new_proc_bind;

      KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
                    team->t.t_id));

#if OMPT_SUPPORT
      __ompt_team_assign_id(team, ompt_parallel_data);
#endif

      KMP_MB();

      return team;
    }

    /* reap team if it is too small, then loop back and check the next one */
    // not sure if this is wise, but it will be redone during the hot-teams
    // rewrite.
    /* TODO: Use a technique to find the right-sized hot team; don't reap them */
    team = __kmp_reap_team(team);
    __kmp_team_pool = team;
  }

  /* nothing available in the pool, no matter, make a new team! */
  KMP_MB();
  team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));

  /* and set it up */
  team->t.t_max_nproc = max_nproc;
  /* NOTE: for some reason, allocating one big buffer and dividing it up seems
     to really hurt performance on the P4, so let's not do that here */
  __kmp_allocate_team_arrays(team, max_nproc);

  KA_TRACE(20, ("__kmp_allocate_team: making a new team\n"));
  __kmp_initialize_team(team, new_nproc, new_icvs, NULL);

  KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and task_team[1] "
                "%p to NULL\n",
                &team->t.t_task_team[0], &team->t.t_task_team[1]));
  team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes
  // memory, no need to duplicate
  team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes
  // memory, no need to duplicate

  if (__kmp_storage_map) {
    __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
  }

  /* allocate space for arguments */
  __kmp_alloc_argv_entries(argc, team, FALSE);
  team->t.t_argc = argc;

  KA_TRACE(20,
           ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
            team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
  { // Initialize barrier data.
    int b;
    for (b = 0; b < bs_last_barrier; ++b) {
      team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
#if USE_DEBUGGER
      team->t.t_bar[b].b_master_arrived = 0;
      team->t.t_bar[b].b_team_arrived = 0;
#endif
    }
  }

  team->t.t_proc_bind = new_proc_bind;

#if OMPT_SUPPORT
  __ompt_team_assign_id(team, ompt_parallel_data);
  team->t.ompt_serialized_team_info = NULL;
#endif

  KMP_MB();

  KA_TRACE(20, ("__kmp_allocate_team: done creating a new team %d.\n",
                team->t.t_id));

  return team;
}

/* TODO implement hot-teams at all levels */
/* TODO implement lazy thread release on demand (disband request) */

/* Free the team. Return it to the team pool. Release all the threads
   associated with it. */
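/* Teardown order implemented below: mark the team as no longer working
   (t_pkfn = NULL); then, for non-hot teams only, wait for workers to reach a
   reapable state (resuming any that sleep), detach and free the task teams,
   return the workers to the thread pool, and finally push the team onto
   __kmp_team_pool. */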
void __kmp_free_team(kmp_root_t *root,
                     kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
  int f;
  KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
                team->t.t_id));

  /* verify state */
  KMP_DEBUG_ASSERT(root);
  KMP_DEBUG_ASSERT(team);
  KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
  KMP_DEBUG_ASSERT(team->t.t_threads);

  int use_hot_team = team == root->r.r_hot_team;
#if KMP_NESTED_HOT_TEAMS
  int level;
  kmp_hot_team_ptr_t *hot_teams;
  if (master) {
    level = team->t.t_active_level - 1;
    if (master->th.th_teams_microtask) { // in teams construct?
      if (master->th.th_teams_size.nteams > 1) {
        ++level; // level was not increased in teams construct for
        // team_of_masters
      }
      if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
          master->th.th_teams_level == team->t.t_level) {
        ++level; // level was not increased in teams construct for
        // team_of_workers before the parallel
      } // team->t.t_level will be increased inside parallel
    }
    hot_teams = master->th.th_hot_teams;
    if (level < __kmp_hot_teams_max_level) {
      KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
      use_hot_team = 1;
    }
  }
#endif // KMP_NESTED_HOT_TEAMS

  /* team is done working */
  TCW_SYNC_PTR(team->t.t_pkfn,
               NULL); // Important for Debugging Support Library.
#if KMP_OS_WINDOWS
  team->t.t_copyin_counter = 0; // init counter for possible reuse
#endif
  // Do not reset pointer to parent team to NULL for hot teams.

  /* if this is a non-hot team, release its threads */
5449   if (!use_hot_team) {
5450     if (__kmp_tasking_mode != tskm_immediate_exec) {
5451       // Wait for threads to reach reapable state
5452       for (f = 1; f < team->t.t_nproc; ++f) {
5453         KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5454         kmp_info_t *th = team->t.t_threads[f];
5455         volatile kmp_uint32 *state = &th->th.th_reap_state;
5456         while (*state != KMP_SAFE_TO_REAP) {
5457 #if KMP_OS_WINDOWS
5458           // On Windows a thread can be killed at any time, check this
5459           DWORD ecode;
5460           if (!__kmp_is_thread_alive(th, &ecode)) {
5461             *state = KMP_SAFE_TO_REAP; // reset the flag for dead thread
5462             break;
5463           }
5464 #endif
5465           // first check if thread is sleeping
5466           kmp_flag_64 fl(&th->th.th_bar[bs_forkjoin_barrier].bb.b_go, th);
5467           if (fl.is_sleeping())
5468             fl.resume(__kmp_gtid_from_thread(th));
5469           KMP_CPU_PAUSE();
5470         }
5471       }
5472 
5473       // Delete task teams
5474       int tt_idx;
5475       for (tt_idx = 0; tt_idx < 2; ++tt_idx) {
5476         kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
5477         if (task_team != NULL) {
5478           for (f = 0; f < team->t.t_nproc; ++f) { // threads unref task teams
5479             KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5480             team->t.t_threads[f]->th.th_task_team = NULL;
5481           }
5482           KA_TRACE(
5483               20,
5484               ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
5485                __kmp_get_gtid(), task_team, team->t.t_id));
5486 #if KMP_NESTED_HOT_TEAMS
5487           __kmp_free_task_team(master, task_team);
5488 #endif
5489           team->t.t_task_team[tt_idx] = NULL;
5490         }
5491       }
5492     }
5493 
5494     // Reset pointer to parent team only for non-hot teams.
5495     team->t.t_parent = NULL;
5496     team->t.t_level = 0;
5497     team->t.t_active_level = 0;
5498 
5499     /* free the worker threads */
5500     for (f = 1; f < team->t.t_nproc; ++f) {
5501       KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5502       __kmp_free_thread(team->t.t_threads[f]);
5503       team->t.t_threads[f] = NULL;
5504     }
5505 
5506     /* put the team back in the team pool */
5507     /* TODO limit size of team pool, call reap_team if pool too large */
5508     team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
5509     __kmp_team_pool = (volatile kmp_team_t *)team;
5510   } else { // Check if team was created for the masters in a teams construct
5511     // See if first worker is a CG root
5512     KMP_DEBUG_ASSERT(team->t.t_threads[1] &&
5513                      team->t.t_threads[1]->th.th_cg_roots);
5514     if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) {
5515       // Clean up the CG root nodes on workers so that this team can be re-used
5516       for (f = 1; f < team->t.t_nproc; ++f) {
5517         kmp_info_t *thr = team->t.t_threads[f];
5518         KMP_DEBUG_ASSERT(thr && thr->th.th_cg_roots &&
5519                          thr->th.th_cg_roots->cg_root == thr);
5520         // Pop current CG root off list
5521         kmp_cg_root_t *tmp = thr->th.th_cg_roots;
5522         thr->th.th_cg_roots = tmp->up;
5523         KA_TRACE(100, ("__kmp_free_team: Thread %p popping node %p and moving"
5524                        " up to node %p. cg_nthreads was %d\n",
5525                        thr, tmp, thr->th.th_cg_roots, tmp->cg_nthreads));
5526         int i = tmp->cg_nthreads--;
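        // Note: i holds the pre-decrement count, so i == 1 means this thread
        // was the last member of the contention group.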
5527         if (i == 1) {
5528           __kmp_free(tmp); // free CG if we are the last thread in it
5529         }
5530         // Restore current task's thread_limit from CG root
5531         if (thr->th.th_cg_roots)
5532           thr->th.th_current_task->td_icvs.thread_limit =
5533               thr->th.th_cg_roots->cg_thread_limit;
5534       }
5535     }
5536   }
5537 
5538   KMP_MB();
5539 }
5540 
5541 /* reap the team.  destroy it, reclaim all its resources and free its memory */
5542 kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
5543   kmp_team_t *next_pool = team->t.t_next_pool;
5544 
5545   KMP_DEBUG_ASSERT(team);
5546   KMP_DEBUG_ASSERT(team->t.t_dispatch);
5547   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
5548   KMP_DEBUG_ASSERT(team->t.t_threads);
5549   KMP_DEBUG_ASSERT(team->t.t_argv);
5550 
5551   /* TODO clean the threads that are a part of this? */
5552 
5553   /* free stuff */
5554   __kmp_free_team_arrays(team);
5555   if (team->t.t_argv != &team->t.t_inline_argv[0])
5556     __kmp_free((void *)team->t.t_argv);
5557   __kmp_free(team);
5558 
5559   KMP_MB();
5560   return next_pool;
5561 }
5562 
5563 // Free the thread.  Don't reap it, just place it on the pool of available
5564 // threads.
5565 //
5566 // Changes for Quad issue 527845: We need a predictable OMP tid <-> gtid
5567 // binding for the affinity mechanism to be useful.
5568 //
5569 // Now, we always keep the free list (__kmp_thread_pool) sorted by gtid.
5570 // However, we want to avoid a potential performance problem by always
5571 // scanning through the list to find the correct point at which to insert
5572 // the thread (potential N**2 behavior).  To do this we keep track of the
5573 // last place a thread struct was inserted (__kmp_thread_pool_insert_pt).
5574 // With single-level parallelism, threads will always be added to the tail
5575 // of the list, kept track of by __kmp_thread_pool_insert_pt.  With nested
5576 // parallelism, all bets are off and we may need to scan through the entire
5577 // free list.
5578 //
// This change also has a potentially large performance benefit, for some
// applications.  Previously, as threads were freed from the hot team, they
// would be placed back on the free list in inverse order.  If the hot team
// grew back to its original size, then the freed threads would be placed
// back on the hot team in reverse order.  This could cause bad cache
// locality problems in programs where the size of the hot team regularly
// grew and shrunk.
5586 //
5587 // Now, for single-level parallelism, the OMP tid is always == gtid.
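//
// Illustrative walk-through (hypothetical gtids, for exposition only): if the
// pool holds threads with gtids {2, 5, 9} and __kmp_thread_pool_insert_pt
// points at gtid 5, freeing gtid 7 starts the scan at 5's th_next_pool link
// and inserts between 5 and 9.  Freeing gtid 3 instead resets
// __kmp_thread_pool_insert_pt to NULL (since 5 > 3) and rescans from the head
// of the list; either way the list stays sorted by gtid.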
5588 void __kmp_free_thread(kmp_info_t *this_th) {
5589   int gtid;
5590   kmp_info_t **scan;
5591 
5592   KA_TRACE(20, ("__kmp_free_thread: T#%d putting T#%d back on free pool.\n",
5593                 __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid));
5594 
5595   KMP_DEBUG_ASSERT(this_th);
5596 
  // When moving a thread to the pool, switch it to wait on its own b_go flag
  // and mark its team as uninitialized (NULL).
5599   int b;
5600   kmp_balign_t *balign = this_th->th.th_bar;
5601   for (b = 0; b < bs_last_barrier; ++b) {
5602     if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG)
5603       balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5604     balign[b].bb.team = NULL;
5605     balign[b].bb.leaf_kids = 0;
5606   }
5607   this_th->th.th_task_state = 0;
5608   this_th->th.th_reap_state = KMP_SAFE_TO_REAP;
5609 
5610   /* put thread back on the free pool */
5611   TCW_PTR(this_th->th.th_team, NULL);
5612   TCW_PTR(this_th->th.th_root, NULL);
5613   TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */
5614 
5615   while (this_th->th.th_cg_roots) {
5616     this_th->th.th_cg_roots->cg_nthreads--;
5617     KA_TRACE(100, ("__kmp_free_thread: Thread %p decrement cg_nthreads on node"
5618                    " %p of thread  %p to %d\n",
5619                    this_th, this_th->th.th_cg_roots,
5620                    this_th->th.th_cg_roots->cg_root,
5621                    this_th->th.th_cg_roots->cg_nthreads));
5622     kmp_cg_root_t *tmp = this_th->th.th_cg_roots;
5623     if (tmp->cg_root == this_th) { // Thread is a cg_root
5624       KMP_DEBUG_ASSERT(tmp->cg_nthreads == 0);
5625       KA_TRACE(
5626           5, ("__kmp_free_thread: Thread %p freeing node %p\n", this_th, tmp));
5627       this_th->th.th_cg_roots = tmp->up;
5628       __kmp_free(tmp);
5629     } else { // Worker thread
5630       if (tmp->cg_nthreads == 0) { // last thread leaves contention group
5631         __kmp_free(tmp);
5632       }
5633       this_th->th.th_cg_roots = NULL;
5634       break;
5635     }
5636   }
5637 
  /* If the implicit task assigned to this thread can be used by other threads,
   * then multiple threads can share the data and try to free the task in
   * __kmp_reap_thread at exit. This duplicate use of the task data can happen
   * with higher probability when the hot team is disabled, but can occur even
   * when the hot team is enabled. */
5643   __kmp_free_implicit_task(this_th);
5644   this_th->th.th_current_task = NULL;
5645 
5646   // If the __kmp_thread_pool_insert_pt is already past the new insert
5647   // point, then we need to re-scan the entire list.
5648   gtid = this_th->th.th_info.ds.ds_gtid;
5649   if (__kmp_thread_pool_insert_pt != NULL) {
5650     KMP_DEBUG_ASSERT(__kmp_thread_pool != NULL);
5651     if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) {
5652       __kmp_thread_pool_insert_pt = NULL;
5653     }
5654   }
5655 
5656   // Scan down the list to find the place to insert the thread.
5657   // scan is the address of a link in the list, possibly the address of
5658   // __kmp_thread_pool itself.
5659   //
5660   // In the absence of nested parallelism, the for loop will have 0 iterations.
5661   if (__kmp_thread_pool_insert_pt != NULL) {
5662     scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
5663   } else {
5664     scan = CCAST(kmp_info_t **, &__kmp_thread_pool);
5665   }
5666   for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid);
5667        scan = &((*scan)->th.th_next_pool))
5668     ;
5669 
5670   // Insert the new element on the list, and set __kmp_thread_pool_insert_pt
5671   // to its address.
5672   TCW_PTR(this_th->th.th_next_pool, *scan);
5673   __kmp_thread_pool_insert_pt = *scan = this_th;
5674   KMP_DEBUG_ASSERT((this_th->th.th_next_pool == NULL) ||
5675                    (this_th->th.th_info.ds.ds_gtid <
5676                     this_th->th.th_next_pool->th.th_info.ds.ds_gtid));
5677   TCW_4(this_th->th.th_in_pool, TRUE);
5678   __kmp_suspend_initialize_thread(this_th);
5679   __kmp_lock_suspend_mx(this_th);
5680   if (this_th->th.th_active == TRUE) {
5681     KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
5682     this_th->th.th_active_in_pool = TRUE;
5683   }
5684 #if KMP_DEBUG
5685   else {
5686     KMP_DEBUG_ASSERT(this_th->th.th_active_in_pool == FALSE);
5687   }
5688 #endif
5689   __kmp_unlock_suspend_mx(this_th);
5690 
5691   TCW_4(__kmp_nth, __kmp_nth - 1);
5692 
5693 #ifdef KMP_ADJUST_BLOCKTIME
5694   /* Adjust blocktime back to user setting or default if necessary */
5695   /* Middle initialization might never have occurred                */
5696   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5697     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5698     if (__kmp_nth <= __kmp_avail_proc) {
5699       __kmp_zero_bt = FALSE;
5700     }
5701   }
5702 #endif /* KMP_ADJUST_BLOCKTIME */
5703 
5704   KMP_MB();
5705 }
5706 
5707 /* ------------------------------------------------------------------------ */
5708 
5709 void *__kmp_launch_thread(kmp_info_t *this_thr) {
5710   int gtid = this_thr->th.th_info.ds.ds_gtid;
5712   kmp_team_t **volatile pteam;
5713 
5714   KMP_MB();
5715   KA_TRACE(10, ("__kmp_launch_thread: T#%d start\n", gtid));
5716 
5717   if (__kmp_env_consistency_check) {
5718     this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak?
5719   }
5720 
5721 #if OMPT_SUPPORT
5722   ompt_data_t *thread_data;
5723   if (ompt_enabled.enabled) {
5724     thread_data = &(this_thr->th.ompt_thread_info.thread_data);
5725     *thread_data = ompt_data_none;
5726 
5727     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5728     this_thr->th.ompt_thread_info.wait_id = 0;
5729     this_thr->th.ompt_thread_info.idle_frame = OMPT_GET_FRAME_ADDRESS(0);
5730     this_thr->th.ompt_thread_info.parallel_flags = 0;
5731     if (ompt_enabled.ompt_callback_thread_begin) {
5732       ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
5733           ompt_thread_worker, thread_data);
5734     }
5735     this_thr->th.ompt_thread_info.state = ompt_state_idle;
5736   }
5737 #endif
5738 
5739   /* This is the place where threads wait for work */
5740   while (!TCR_4(__kmp_global.g.g_done)) {
5741     KMP_DEBUG_ASSERT(this_thr == __kmp_threads[gtid]);
5742     KMP_MB();
5743 
5744     /* wait for work to do */
5745     KA_TRACE(20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid));
5746 
5747     /* No tid yet since not part of a team */
5748     __kmp_fork_barrier(gtid, KMP_GTID_DNE);
5749 
5750 #if OMPT_SUPPORT
5751     if (ompt_enabled.enabled) {
5752       this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5753     }
5754 #endif
5755 
5756     pteam = &this_thr->th.th_team;
5757 
5758     /* have we been allocated? */
5759     if (TCR_SYNC_PTR(*pteam) && !TCR_4(__kmp_global.g.g_done)) {
5760       /* we were just woken up, so run our new task */
5761       if (TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL) {
5762         int rc;
5763         KA_TRACE(20,
5764                  ("__kmp_launch_thread: T#%d(%d:%d) invoke microtask = %p\n",
5765                   gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5766                   (*pteam)->t.t_pkfn));
5767 
5768         updateHWFPControl(*pteam);
5769 
5770 #if OMPT_SUPPORT
5771         if (ompt_enabled.enabled) {
5772           this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
5773         }
5774 #endif
5775 
5776         rc = (*pteam)->t.t_invoke(gtid);
5777         KMP_ASSERT(rc);
5778 
5779         KMP_MB();
5780         KA_TRACE(20, ("__kmp_launch_thread: T#%d(%d:%d) done microtask = %p\n",
5781                       gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5782                       (*pteam)->t.t_pkfn));
5783       }
5784 #if OMPT_SUPPORT
5785       if (ompt_enabled.enabled) {
5786         /* no frame set while outside task */
5787         __ompt_get_task_info_object(0)->frame.exit_frame = ompt_data_none;
5788 
5789         this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5790       }
5791 #endif
5792       /* join barrier after parallel region */
5793       __kmp_join_barrier(gtid);
5794     }
5795   }
5796   TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);
5797 
5798 #if OMPT_SUPPORT
5799   if (ompt_enabled.ompt_callback_thread_end) {
5800     ompt_callbacks.ompt_callback(ompt_callback_thread_end)(thread_data);
5801   }
5802 #endif
5803 
5804   this_thr->th.th_task_team = NULL;
5805   /* run the destructors for the threadprivate data for this thread */
5806   __kmp_common_destroy_gtid(gtid);
5807 
5808   KA_TRACE(10, ("__kmp_launch_thread: T#%d done\n", gtid));
5809   KMP_MB();
5810   return this_thr;
5811 }
5812 
5813 /* ------------------------------------------------------------------------ */
5814 
5815 void __kmp_internal_end_dest(void *specific_gtid) {
5816 #if KMP_COMPILER_ICC
5817 #pragma warning(push)
5818 #pragma warning(disable : 810) // conversion from "void *" to "int" may lose
5819 // significant bits
5820 #endif
5821   // Make sure no significant bits are lost
5822   int gtid = (kmp_intptr_t)specific_gtid - 1;
5823 #if KMP_COMPILER_ICC
5824 #pragma warning(pop)
5825 #endif
5826 
5827   KA_TRACE(30, ("__kmp_internal_end_dest: T#%d\n", gtid));
  /* NOTE: the gtid is stored as gtid+1 in the thread-local-storage
   * because 0 is reserved for the nothing-stored case */
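  /* For example, a stored value of 1 decodes to gtid 0 above; a stored value
   * of 0 means "nothing stored", and pthreads never runs a key destructor for
   * a NULL value in the first place. */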
5830 
5831   /* josh: One reason for setting the gtid specific data even when it is being
5832      destroyed by pthread is to allow gtid lookup through thread specific data
5833      (__kmp_gtid_get_specific).  Some of the code, especially stat code,
5834      that gets executed in the call to __kmp_internal_end_thread, actually
5835      gets the gtid through the thread specific data.  Setting it here seems
5836      rather inelegant and perhaps wrong, but allows __kmp_internal_end_thread
5837      to run smoothly.
5838      todo: get rid of this after we remove the dependence on
5839      __kmp_gtid_get_specific  */
5840   if (gtid >= 0 && KMP_UBER_GTID(gtid))
5841     __kmp_gtid_set_specific(gtid);
5842 #ifdef KMP_TDATA_GTID
5843   __kmp_gtid = gtid;
5844 #endif
5845   __kmp_internal_end_thread(gtid);
5846 }
5847 
5848 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
5849 
5850 __attribute__((destructor)) void __kmp_internal_end_dtor(void) {
5851   __kmp_internal_end_atexit();
5852 }
5853 
5854 #endif
5855 
5856 /* [Windows] josh: when the atexit handler is called, there may still be more
5857    than one thread alive */
5858 void __kmp_internal_end_atexit(void) {
5859   KA_TRACE(30, ("__kmp_internal_end_atexit\n"));
  /* [Windows]
     josh: ideally, we want to completely shut down the library in this atexit
     handler, but stat code that depends on thread specific data for gtid fails
     because that data becomes unavailable at some point during the shutdown, so
     we call __kmp_internal_end_thread instead. We should eventually remove the
     dependency on __kmp_get_specific_gtid in the stat code and use
     __kmp_internal_end_library to cleanly shut down the library.
5867 
5868      // TODO: Can some of this comment about GVS be removed?
5869      I suspect that the offending stat code is executed when the calling thread
5870      tries to clean up a dead root thread's data structures, resulting in GVS
5871      code trying to close the GVS structures for that thread, but since the stat
     code uses __kmp_get_specific_gtid to get the gtid with the assumption that
     the calling thread is cleaning up itself instead of another thread, it gets
     confused. This happens because allowing a thread to unregister and clean up
     another thread is a recent modification for addressing an issue.
     Based on the current design (20050722), a thread may end up
     trying to unregister another thread only if thread death does not trigger
     the calling of __kmp_internal_end_thread.  For Linux* OS, there is the
     thread specific data destructor function to detect thread death. For
     Windows dynamic, there is DllMain(THREAD_DETACH). For Windows static, there
     is nothing.  Thus, the workaround is applicable only to the Windows static
     stat library. */
5883   __kmp_internal_end_library(-1);
5884 #if KMP_OS_WINDOWS
5885   __kmp_close_console();
5886 #endif
5887 }
5888 
5889 static void __kmp_reap_thread(kmp_info_t *thread, int is_root) {
5890   // It is assumed __kmp_forkjoin_lock is acquired.
5891 
5892   int gtid;
5893 
5894   KMP_DEBUG_ASSERT(thread != NULL);
5895 
5896   gtid = thread->th.th_info.ds.ds_gtid;
5897 
5898   if (!is_root) {
5899     if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
5900       /* Assume the threads are at the fork barrier here */
5901       KA_TRACE(
5902           20, ("__kmp_reap_thread: releasing T#%d from fork barrier for reap\n",
5903                gtid));
5904       /* Need release fence here to prevent seg faults for tree forkjoin barrier
5905        * (GEH) */
5906       ANNOTATE_HAPPENS_BEFORE(thread);
5907       kmp_flag_64 flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
5908       __kmp_release_64(&flag);
5909     }
5910 
5911     // Terminate OS thread.
5912     __kmp_reap_worker(thread);
5913 
5914     // The thread was killed asynchronously.  If it was actively
5915     // spinning in the thread pool, decrement the global count.
5916     //
    // There is a small timing hole here - if the worker thread was just waking
    // up after sleeping in the pool, had reset its th_active_in_pool flag but
    // not yet decremented the global counter __kmp_thread_pool_active_nth, then
    // the global counter might not get updated.
5921     //
5922     // Currently, this can only happen as the library is unloaded,
5923     // so there are no harmful side effects.
5924     if (thread->th.th_active_in_pool) {
5925       thread->th.th_active_in_pool = FALSE;
5926       KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
5927       KMP_DEBUG_ASSERT(__kmp_thread_pool_active_nth >= 0);
5928     }
5929   }
5930 
5931   __kmp_free_implicit_task(thread);
5932 
5933 // Free the fast memory for tasking
5934 #if USE_FAST_MEMORY
5935   __kmp_free_fast_memory(thread);
5936 #endif /* USE_FAST_MEMORY */
5937 
5938   __kmp_suspend_uninitialize_thread(thread);
5939 
5940   KMP_DEBUG_ASSERT(__kmp_threads[gtid] == thread);
5941   TCW_SYNC_PTR(__kmp_threads[gtid], NULL);
5942 
5943   --__kmp_all_nth;
// __kmp_nth was decremented when the thread was added to the pool.
5945 
5946 #ifdef KMP_ADJUST_BLOCKTIME
5947   /* Adjust blocktime back to user setting or default if necessary */
5948   /* Middle initialization might never have occurred                */
5949   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5950     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5951     if (__kmp_nth <= __kmp_avail_proc) {
5952       __kmp_zero_bt = FALSE;
5953     }
5954   }
5955 #endif /* KMP_ADJUST_BLOCKTIME */
5956 
5957   /* free the memory being used */
5958   if (__kmp_env_consistency_check) {
5959     if (thread->th.th_cons) {
5960       __kmp_free_cons_stack(thread->th.th_cons);
5961       thread->th.th_cons = NULL;
5962     }
5963   }
5964 
5965   if (thread->th.th_pri_common != NULL) {
5966     __kmp_free(thread->th.th_pri_common);
5967     thread->th.th_pri_common = NULL;
5968   }
5969 
5970   if (thread->th.th_task_state_memo_stack != NULL) {
5971     __kmp_free(thread->th.th_task_state_memo_stack);
5972     thread->th.th_task_state_memo_stack = NULL;
5973   }
5974 
5975 #if KMP_USE_BGET
5976   if (thread->th.th_local.bget_data != NULL) {
5977     __kmp_finalize_bget(thread);
5978   }
5979 #endif
5980 
5981 #if KMP_AFFINITY_SUPPORTED
5982   if (thread->th.th_affin_mask != NULL) {
5983     KMP_CPU_FREE(thread->th.th_affin_mask);
5984     thread->th.th_affin_mask = NULL;
5985   }
5986 #endif /* KMP_AFFINITY_SUPPORTED */
5987 
5988 #if KMP_USE_HIER_SCHED
5989   if (thread->th.th_hier_bar_data != NULL) {
5990     __kmp_free(thread->th.th_hier_bar_data);
5991     thread->th.th_hier_bar_data = NULL;
5992   }
5993 #endif
5994 
5995   __kmp_reap_team(thread->th.th_serial_team);
5996   thread->th.th_serial_team = NULL;
5997   __kmp_free(thread);
5998 
5999   KMP_MB();
6000 
6001 } // __kmp_reap_thread
6002 
6003 static void __kmp_internal_end(void) {
6004   int i;
6005 
6006   /* First, unregister the library */
6007   __kmp_unregister_library();
6008 
6009 #if KMP_OS_WINDOWS
  /* In the Windows static library, we can't tell when a root actually dies, so
     we reclaim the data structures for any root threads that have died but not
     unregistered themselves, in order to shut down cleanly.
     In the Windows dynamic library we also can't tell when a thread dies.  */
6014   __kmp_reclaim_dead_roots(); // AC: moved here to always clean resources of
6015 // dead roots
6016 #endif
6017 
6018   for (i = 0; i < __kmp_threads_capacity; i++)
6019     if (__kmp_root[i])
6020       if (__kmp_root[i]->r.r_active)
6021         break;
6022   KMP_MB(); /* Flush all pending memory write invalidates.  */
6023   TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6024 
6025   if (i < __kmp_threads_capacity) {
6026 #if KMP_USE_MONITOR
6027     // 2009-09-08 (lev): Other alive roots found. Why do we kill the monitor??
6028     KMP_MB(); /* Flush all pending memory write invalidates.  */
6029 
    // Need to check that monitor was initialized before reaping it. If we are
    // called from __kmp_atfork_child (which sets __kmp_init_parallel = 0), then
    // __kmp_monitor will appear to contain valid data, but it is only valid in
    // the parent process, not the child.
6034     // New behavior (201008): instead of keying off of the flag
6035     // __kmp_init_parallel, the monitor thread creation is keyed off
6036     // of the new flag __kmp_init_monitor.
6037     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
6038     if (TCR_4(__kmp_init_monitor)) {
6039       __kmp_reap_monitor(&__kmp_monitor);
6040       TCW_4(__kmp_init_monitor, 0);
6041     }
6042     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
6043     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
6044 #endif // KMP_USE_MONITOR
6045   } else {
6046 /* TODO move this to cleanup code */
6047 #ifdef KMP_DEBUG
6048     /* make sure that everything has properly ended */
6049     for (i = 0; i < __kmp_threads_capacity; i++) {
6050       if (__kmp_root[i]) {
        // KMP_ASSERT(!KMP_UBER_GTID(i)); // AC: there can be uber threads
        // alive here
        KMP_ASSERT(!__kmp_root[i]->r.r_active); // TODO: can they be active?
6054       }
6055     }
6056 #endif
6057 
6058     KMP_MB();
6059 
6060     // Reap the worker threads.
6061     // This is valid for now, but be careful if threads are reaped sooner.
    while (__kmp_thread_pool != NULL) { // Loop through all threads in the pool.
6063       // Get the next thread from the pool.
6064       kmp_info_t *thread = CCAST(kmp_info_t *, __kmp_thread_pool);
6065       __kmp_thread_pool = thread->th.th_next_pool;
6066       // Reap it.
6067       KMP_DEBUG_ASSERT(thread->th.th_reap_state == KMP_SAFE_TO_REAP);
6068       thread->th.th_next_pool = NULL;
6069       thread->th.th_in_pool = FALSE;
6070       __kmp_reap_thread(thread, 0);
6071     }
6072     __kmp_thread_pool_insert_pt = NULL;
6073 
6074     // Reap teams.
    while (__kmp_team_pool != NULL) { // Loop through all teams in the pool.
6076       // Get the next team from the pool.
6077       kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
6078       __kmp_team_pool = team->t.t_next_pool;
6079       // Reap it.
6080       team->t.t_next_pool = NULL;
6081       __kmp_reap_team(team);
6082     }
6083 
6084     __kmp_reap_task_teams();
6085 
6086 #if KMP_OS_UNIX
6087     // Threads that are not reaped should not access any resources since they
6088     // are going to be deallocated soon, so the shutdown sequence should wait
6089     // until all threads either exit the final spin-waiting loop or begin
6090     // sleeping after the given blocktime.
6091     for (i = 0; i < __kmp_threads_capacity; i++) {
6092       kmp_info_t *thr = __kmp_threads[i];
6093       while (thr && KMP_ATOMIC_LD_ACQ(&thr->th.th_blocking))
6094         KMP_CPU_PAUSE();
6095     }
6096 #endif
6097 
6098     for (i = 0; i < __kmp_threads_capacity; ++i) {
6099       // TBD: Add some checking...
6100       // Something like KMP_DEBUG_ASSERT( __kmp_thread[ i ] == NULL );
6101     }
6102 
6103     /* Make sure all threadprivate destructors get run by joining with all
6104        worker threads before resetting this flag */
6105     TCW_SYNC_4(__kmp_init_common, FALSE);
6106 
6107     KA_TRACE(10, ("__kmp_internal_end: all workers reaped\n"));
6108     KMP_MB();
6109 
6110 #if KMP_USE_MONITOR
6111     // See note above: One of the possible fixes for CQ138434 / CQ140126
6112     //
6113     // FIXME: push both code fragments down and CSE them?
6114     // push them into __kmp_cleanup() ?
6115     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
6116     if (TCR_4(__kmp_init_monitor)) {
6117       __kmp_reap_monitor(&__kmp_monitor);
6118       TCW_4(__kmp_init_monitor, 0);
6119     }
6120     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
6121     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
6122 #endif
6123   } /* else !__kmp_global.t_active */
6124   TCW_4(__kmp_init_gtid, FALSE);
6125   KMP_MB(); /* Flush all pending memory write invalidates.  */
6126 
6127   __kmp_cleanup();
6128 #if OMPT_SUPPORT
6129   ompt_fini();
6130 #endif
6131 }
6132 
6133 void __kmp_internal_end_library(int gtid_req) {
6134   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
  /* this shouldn't be a race condition because __kmp_internal_end() is the
     only place to clear __kmp_init_serial */
6137   /* we'll check this later too, after we get the lock */
6138   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
6139   // redundant, because the next check will work in any case.
6140   if (__kmp_global.g.g_abort) {
6141     KA_TRACE(11, ("__kmp_internal_end_library: abort, exiting\n"));
6142     /* TODO abort? */
6143     return;
6144   }
6145   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6146     KA_TRACE(10, ("__kmp_internal_end_library: already finished\n"));
6147     return;
6148   }
6149 
6150   KMP_MB(); /* Flush all pending memory write invalidates.  */
6151 
6152   /* find out who we are and what we should do */
6153   {
6154     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6155     KA_TRACE(
6156         10, ("__kmp_internal_end_library: enter T#%d  (%d)\n", gtid, gtid_req));
6157     if (gtid == KMP_GTID_SHUTDOWN) {
6158       KA_TRACE(10, ("__kmp_internal_end_library: !__kmp_init_runtime, system "
6159                     "already shutdown\n"));
6160       return;
6161     } else if (gtid == KMP_GTID_MONITOR) {
6162       KA_TRACE(10, ("__kmp_internal_end_library: monitor thread, gtid not "
6163                     "registered, or system shutdown\n"));
6164       return;
6165     } else if (gtid == KMP_GTID_DNE) {
6166       KA_TRACE(10, ("__kmp_internal_end_library: gtid not registered or system "
6167                     "shutdown\n"));
      /* we don't know who we are, but we may still shut down the library */
6169     } else if (KMP_UBER_GTID(gtid)) {
6170       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6171       if (__kmp_root[gtid]->r.r_active) {
6172         __kmp_global.g.g_abort = -1;
6173         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6174         KA_TRACE(10,
6175                  ("__kmp_internal_end_library: root still active, abort T#%d\n",
6176                   gtid));
6177         return;
6178       } else {
6179         KA_TRACE(
6180             10,
6181             ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid));
6182         __kmp_unregister_root_current_thread(gtid);
6183       }
6184     } else {
6185 /* worker threads may call this function through the atexit handler, if they
6186  * call exit() */
6187 /* For now, skip the usual subsequent processing and just dump the debug buffer.
6188    TODO: do a thorough shutdown instead */
6189 #ifdef DUMP_DEBUG_ON_EXIT
6190       if (__kmp_debug_buf)
6191         __kmp_dump_debug_buffer();
6192 #endif
6193       return;
6194     }
6195   }
6196   /* synchronize the termination process */
6197   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6198 
6199   /* have we already finished */
6200   if (__kmp_global.g.g_abort) {
6201     KA_TRACE(10, ("__kmp_internal_end_library: abort, exiting\n"));
6202     /* TODO abort? */
6203     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6204     return;
6205   }
6206   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6207     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6208     return;
6209   }
6210 
  /* We need this lock to enforce mutual exclusion between this reading of
     __kmp_threads_capacity and the writing by __kmp_register_root.
     Alternatively, we can use a counter of roots that is atomically updated by
     __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
     __kmp_internal_end_*.  */
6216   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6217 
6218   /* now we can safely conduct the actual termination */
6219   __kmp_internal_end();
6220 
6221   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6222   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6223 
6224   KA_TRACE(10, ("__kmp_internal_end_library: exit\n"));
6225 
6226 #ifdef DUMP_DEBUG_ON_EXIT
6227   if (__kmp_debug_buf)
6228     __kmp_dump_debug_buffer();
6229 #endif
6230 
6231 #if KMP_OS_WINDOWS
6232   __kmp_close_console();
6233 #endif
6234 
6235   __kmp_fini_allocator();
6236 
6237 } // __kmp_internal_end_library
6238 
6239 void __kmp_internal_end_thread(int gtid_req) {
6240   int i;
6241 
6242   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
  /* this shouldn't be a race condition because __kmp_internal_end() is the
   * only place to clear __kmp_init_serial */
6245   /* we'll check this later too, after we get the lock */
6246   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
6247   // redundant, because the next check will work in any case.
6248   if (__kmp_global.g.g_abort) {
6249     KA_TRACE(11, ("__kmp_internal_end_thread: abort, exiting\n"));
6250     /* TODO abort? */
6251     return;
6252   }
6253   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6254     KA_TRACE(10, ("__kmp_internal_end_thread: already finished\n"));
6255     return;
6256   }
6257 
6258   KMP_MB(); /* Flush all pending memory write invalidates.  */
6259 
6260   /* find out who we are and what we should do */
6261   {
6262     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6263     KA_TRACE(10,
6264              ("__kmp_internal_end_thread: enter T#%d  (%d)\n", gtid, gtid_req));
6265     if (gtid == KMP_GTID_SHUTDOWN) {
6266       KA_TRACE(10, ("__kmp_internal_end_thread: !__kmp_init_runtime, system "
6267                     "already shutdown\n"));
6268       return;
6269     } else if (gtid == KMP_GTID_MONITOR) {
6270       KA_TRACE(10, ("__kmp_internal_end_thread: monitor thread, gtid not "
6271                     "registered, or system shutdown\n"));
6272       return;
6273     } else if (gtid == KMP_GTID_DNE) {
6274       KA_TRACE(10, ("__kmp_internal_end_thread: gtid not registered or system "
6275                     "shutdown\n"));
6276       return;
6277       /* we don't know who we are */
6278     } else if (KMP_UBER_GTID(gtid)) {
6279       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6280       if (__kmp_root[gtid]->r.r_active) {
6281         __kmp_global.g.g_abort = -1;
6282         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6283         KA_TRACE(10,
6284                  ("__kmp_internal_end_thread: root still active, abort T#%d\n",
6285                   gtid));
6286         return;
6287       } else {
6288         KA_TRACE(10, ("__kmp_internal_end_thread: unregistering sibling T#%d\n",
6289                       gtid));
6290         __kmp_unregister_root_current_thread(gtid);
6291       }
6292     } else {
6293       /* just a worker thread, let's leave */
6294       KA_TRACE(10, ("__kmp_internal_end_thread: worker thread T#%d\n", gtid));
6295 
6296       if (gtid >= 0) {
6297         __kmp_threads[gtid]->th.th_task_team = NULL;
6298       }
6299 
6300       KA_TRACE(10,
6301                ("__kmp_internal_end_thread: worker thread done, exiting T#%d\n",
6302                 gtid));
6303       return;
6304     }
6305   }
6306 #if KMP_DYNAMIC_LIB
6307   if (__kmp_pause_status != kmp_hard_paused)
  // AC: let's not shut down the dynamic library at the exit of an uber thread,
  // because it is better to shut down later in the library destructor.
6310   {
6311     KA_TRACE(10, ("__kmp_internal_end_thread: exiting T#%d\n", gtid_req));
6312     return;
6313   }
6314 #endif
6315   /* synchronize the termination process */
6316   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6317 
6318   /* have we already finished */
6319   if (__kmp_global.g.g_abort) {
6320     KA_TRACE(10, ("__kmp_internal_end_thread: abort, exiting\n"));
6321     /* TODO abort? */
6322     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6323     return;
6324   }
6325   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6326     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6327     return;
6328   }
6329 
  /* We need this lock to enforce mutual exclusion between this reading of
     __kmp_threads_capacity and the writing by __kmp_register_root.
     Alternatively, we can use a counter of roots that is atomically updated by
     __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
     __kmp_internal_end_*.  */
6335 
6336   /* should we finish the run-time?  are all siblings done? */
6337   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6338 
6339   for (i = 0; i < __kmp_threads_capacity; ++i) {
6340     if (KMP_UBER_GTID(i)) {
6341       KA_TRACE(
6342           10,
6343           ("__kmp_internal_end_thread: remaining sibling task: gtid==%d\n", i));
6344       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6345       __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6346       return;
6347     }
6348   }
6349 
6350   /* now we can safely conduct the actual termination */
6351 
6352   __kmp_internal_end();
6353 
6354   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6355   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6356 
6357   KA_TRACE(10, ("__kmp_internal_end_thread: exit T#%d\n", gtid_req));
6358 
6359 #ifdef DUMP_DEBUG_ON_EXIT
6360   if (__kmp_debug_buf)
6361     __kmp_dump_debug_buffer();
6362 #endif
6363 } // __kmp_internal_end_thread
6364 
6365 // -----------------------------------------------------------------------------
6366 // Library registration stuff.
6367 
6368 static long __kmp_registration_flag = 0;
6369 // Random value used to indicate library initialization.
6370 static char *__kmp_registration_str = NULL;
6371 // Value to be saved in env var __KMP_REGISTERED_LIB_<pid>.
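// The registered value has the form "<flag address>-<flag value>-<library
// file>", e.g. "0x7f0a2c001234-cafe5678-libomp.so" (address, value, and file
// name here are illustrative only).  A starting copy of the runtime writes
// this string to __KMP_REGISTERED_LIB_<pid> and reads it back; if a different
// value is already present, it parses that value and probes the encoded flag
// address to decide whether the other copy is still alive (see
// __kmp_register_library_startup below).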
6372 
6373 static inline char *__kmp_reg_status_name() {
  /* On RHEL 3u5, if linked statically, getpid() returns different values in
     each thread. If registration and unregistration happen in different threads
     (omp_misc_other_root_exit.cpp test case), the registered_lib_env env var
     cannot be found, because its name will contain a different pid. */
  return __kmp_str_format("__KMP_REGISTERED_LIB_%d", (int)getpid());
} // __kmp_reg_status_name
6380 
6381 void __kmp_register_library_startup(void) {
6382 
6383   char *name = __kmp_reg_status_name(); // Name of the environment variable.
6384   int done = 0;
6385   union {
6386     double dtime;
6387     long ltime;
6388   } time;
6389 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6390   __kmp_initialize_system_tick();
6391 #endif
6392   __kmp_read_system_time(&time.dtime);
6393   __kmp_registration_flag = 0xCAFE0000L | (time.ltime & 0x0000FFFFL);
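  // e.g., if the low 16 bits of ltime happen to be 0x5678, the flag becomes
  // 0xCAFE5678 (an illustrative value; only the low 16 bits vary per run).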
6394   __kmp_registration_str =
6395       __kmp_str_format("%p-%lx-%s", &__kmp_registration_flag,
6396                        __kmp_registration_flag, KMP_LIBRARY_FILE);
6397 
6398   KA_TRACE(50, ("__kmp_register_library_startup: %s=\"%s\"\n", name,
6399                 __kmp_registration_str));
6400 
6401   while (!done) {
6402 
6403     char *value = NULL; // Actual value of the environment variable.
6404 
    // Set the environment variable, but do not overwrite it if it already
    // exists.
    __kmp_env_set(name, __kmp_registration_str, 0);
    // Check that the variable was written.
6408     value = __kmp_env_get(name);
6409     if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6410 
6411       done = 1; // Ok, environment variable set successfully, exit the loop.
6412 
6413     } else {
6414 
      // Oops. Write failed. Another copy of the OpenMP RTL is in memory.
      // Check whether it is alive or dead.
6417       int neighbor = 0; // 0 -- unknown status, 1 -- alive, 2 -- dead.
6418       char *tail = value;
6419       char *flag_addr_str = NULL;
6420       char *flag_val_str = NULL;
6421       char const *file_name = NULL;
6422       __kmp_str_split(tail, '-', &flag_addr_str, &tail);
6423       __kmp_str_split(tail, '-', &flag_val_str, &tail);
6424       file_name = tail;
6425       if (tail != NULL) {
6426         long *flag_addr = 0;
6427         long flag_val = 0;
6428         KMP_SSCANF(flag_addr_str, "%p", RCAST(void**, &flag_addr));
6429         KMP_SSCANF(flag_val_str, "%lx", &flag_val);
6430         if (flag_addr != 0 && flag_val != 0 && strcmp(file_name, "") != 0) {
6431           // First, check whether environment-encoded address is mapped into
6432           // addr space.
6433           // If so, dereference it to see if it still has the right value.
6434           if (__kmp_is_address_mapped(flag_addr) && *flag_addr == flag_val) {
6435             neighbor = 1;
6436           } else {
6437             // If not, then we know the other copy of the library is no longer
6438             // running.
6439             neighbor = 2;
6440           }
6441         }
6442       }
6443       switch (neighbor) {
      case 0: // Cannot parse environment variable -- neighbor status unknown.
        // Assume it is an incompatible format from a future version of the
        // library. Assume the other library is alive.
        // WARN( ... ); // TODO: Issue a warning.
        file_name = "unknown library";
        KMP_FALLTHROUGH();
      // Attention! Falling through to the next case. That's intentional.
6451       case 1: { // Neighbor is alive.
        // Check whether that is allowed.
6453         char *duplicate_ok = __kmp_env_get("KMP_DUPLICATE_LIB_OK");
6454         if (!__kmp_str_match_true(duplicate_ok)) {
6455           // That's not allowed. Issue fatal error.
6456           __kmp_fatal(KMP_MSG(DuplicateLibrary, KMP_LIBRARY_FILE, file_name),
6457                       KMP_HNT(DuplicateLibrary), __kmp_msg_null);
6458         }
6459         KMP_INTERNAL_FREE(duplicate_ok);
6460         __kmp_duplicate_library_ok = 1;
6461         done = 1; // Exit the loop.
6462       } break;
6463       case 2: { // Neighbor is dead.
6464         // Clear the variable and try to register library again.
6465         __kmp_env_unset(name);
6466       } break;
6467       default: { KMP_DEBUG_ASSERT(0); } break;
6468       }
6469     }
6470     KMP_INTERNAL_FREE((void *)value);
6471   }
6472   KMP_INTERNAL_FREE((void *)name);
6473 
6474 } // func __kmp_register_library_startup
6475 
6476 void __kmp_unregister_library(void) {
6477 
6478   char *name = __kmp_reg_status_name();
6479   char *value = __kmp_env_get(name);
6480 
6481   KMP_DEBUG_ASSERT(__kmp_registration_flag != 0);
6482   KMP_DEBUG_ASSERT(__kmp_registration_str != NULL);
6483   if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6484     // Ok, this is our variable. Delete it.
6485     __kmp_env_unset(name);
6486   }
6487 
6488   KMP_INTERNAL_FREE(__kmp_registration_str);
6489   KMP_INTERNAL_FREE(value);
6490   KMP_INTERNAL_FREE(name);
6491 
6492   __kmp_registration_flag = 0;
6493   __kmp_registration_str = NULL;
6494 
6495 } // __kmp_unregister_library
6496 
6497 // End of Library registration stuff.
6498 // -----------------------------------------------------------------------------
6499 
6500 #if KMP_MIC_SUPPORTED
6501 
6502 static void __kmp_check_mic_type() {
6503   kmp_cpuid_t cpuid_state = {0};
6504   kmp_cpuid_t *cs_p = &cpuid_state;
6505   __kmp_x86_cpuid(1, 0, cs_p);
6506   // We don't support mic1 at the moment
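  // cpuid leaf 1: EAX encodes stepping[3:0], model[7:4], family[11:8], and
  // extended model[19:16].  Thus (eax & 0xff0) == 0xB10 matches family 0x0B,
  // model 1 (KNC), and (eax & 0xf0ff0) == 0x50670 matches family 6, model
  // 0x57 (KNL).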
6507   if ((cs_p->eax & 0xff0) == 0xB10) {
6508     __kmp_mic_type = mic2;
6509   } else if ((cs_p->eax & 0xf0ff0) == 0x50670) {
6510     __kmp_mic_type = mic3;
6511   } else {
6512     __kmp_mic_type = non_mic;
6513   }
6514 }
6515 
6516 #endif /* KMP_MIC_SUPPORTED */
6517 
6518 static void __kmp_do_serial_initialize(void) {
6519   int i, gtid;
6520   int size;
6521 
6522   KA_TRACE(10, ("__kmp_do_serial_initialize: enter\n"));
6523 
6524   KMP_DEBUG_ASSERT(sizeof(kmp_int32) == 4);
6525   KMP_DEBUG_ASSERT(sizeof(kmp_uint32) == 4);
6526   KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);
6527   KMP_DEBUG_ASSERT(sizeof(kmp_uint64) == 8);
6528   KMP_DEBUG_ASSERT(sizeof(kmp_intptr_t) == sizeof(void *));
6529 
6530 #if OMPT_SUPPORT
6531   ompt_pre_init();
6532 #endif
6533 
6534   __kmp_validate_locks();
6535 
6536   /* Initialize internal memory allocator */
6537   __kmp_init_allocator();
6538 
6539   /* Register the library startup via an environment variable and check to see
6540      whether another copy of the library is already registered. */
6541 
6542   __kmp_register_library_startup();
6543 
6544   /* TODO reinitialization of library */
6545   if (TCR_4(__kmp_global.g.g_done)) {
6546     KA_TRACE(10, ("__kmp_do_serial_initialize: reinitialization of library\n"));
6547   }
6548 
6549   __kmp_global.g.g_abort = 0;
6550   TCW_SYNC_4(__kmp_global.g.g_done, FALSE);
6551 
6552 /* initialize the locks */
6553 #if KMP_USE_ADAPTIVE_LOCKS
6554 #if KMP_DEBUG_ADAPTIVE_LOCKS
6555   __kmp_init_speculative_stats();
6556 #endif
6557 #endif
6558 #if KMP_STATS_ENABLED
6559   __kmp_stats_init();
6560 #endif
6561   __kmp_init_lock(&__kmp_global_lock);
6562   __kmp_init_queuing_lock(&__kmp_dispatch_lock);
6563   __kmp_init_lock(&__kmp_debug_lock);
6564   __kmp_init_atomic_lock(&__kmp_atomic_lock);
6565   __kmp_init_atomic_lock(&__kmp_atomic_lock_1i);
6566   __kmp_init_atomic_lock(&__kmp_atomic_lock_2i);
6567   __kmp_init_atomic_lock(&__kmp_atomic_lock_4i);
6568   __kmp_init_atomic_lock(&__kmp_atomic_lock_4r);
6569   __kmp_init_atomic_lock(&__kmp_atomic_lock_8i);
6570   __kmp_init_atomic_lock(&__kmp_atomic_lock_8r);
6571   __kmp_init_atomic_lock(&__kmp_atomic_lock_8c);
6572   __kmp_init_atomic_lock(&__kmp_atomic_lock_10r);
6573   __kmp_init_atomic_lock(&__kmp_atomic_lock_16r);
6574   __kmp_init_atomic_lock(&__kmp_atomic_lock_16c);
6575   __kmp_init_atomic_lock(&__kmp_atomic_lock_20c);
6576   __kmp_init_atomic_lock(&__kmp_atomic_lock_32c);
6577   __kmp_init_bootstrap_lock(&__kmp_forkjoin_lock);
6578   __kmp_init_bootstrap_lock(&__kmp_exit_lock);
6579 #if KMP_USE_MONITOR
6580   __kmp_init_bootstrap_lock(&__kmp_monitor_lock);
6581 #endif
6582   __kmp_init_bootstrap_lock(&__kmp_tp_cached_lock);
6583 
6584   /* conduct initialization and initial setup of configuration */
6585 
6586   __kmp_runtime_initialize();
6587 
6588 #if KMP_MIC_SUPPORTED
6589   __kmp_check_mic_type();
6590 #endif
6591 
6592 // Some global variable initialization moved here from kmp_env_initialize()
6593 #ifdef KMP_DEBUG
6594   kmp_diag = 0;
6595 #endif
6596   __kmp_abort_delay = 0;
6597 
6598   // From __kmp_init_dflt_team_nth()
6599   /* assume the entire machine will be used */
6600   __kmp_dflt_team_nth_ub = __kmp_xproc;
6601   if (__kmp_dflt_team_nth_ub < KMP_MIN_NTH) {
6602     __kmp_dflt_team_nth_ub = KMP_MIN_NTH;
6603   }
6604   if (__kmp_dflt_team_nth_ub > __kmp_sys_max_nth) {
6605     __kmp_dflt_team_nth_ub = __kmp_sys_max_nth;
6606   }
6607   __kmp_max_nth = __kmp_sys_max_nth;
6608   __kmp_cg_max_nth = __kmp_sys_max_nth;
6609   __kmp_teams_max_nth = __kmp_xproc; // set a "reasonable" default
6610   if (__kmp_teams_max_nth > __kmp_sys_max_nth) {
6611     __kmp_teams_max_nth = __kmp_sys_max_nth;
6612   }
6613 
6614   // Three vars below moved here from __kmp_env_initialize() "KMP_BLOCKTIME"
6615   // part
6616   __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
6617 #if KMP_USE_MONITOR
6618   __kmp_monitor_wakeups =
6619       KMP_WAKEUPS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6620   __kmp_bt_intervals =
6621       KMP_INTERVALS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6622 #endif
6623   // From "KMP_LIBRARY" part of __kmp_env_initialize()
6624   __kmp_library = library_throughput;
6625   // From KMP_SCHEDULE initialization
6626   __kmp_static = kmp_sch_static_balanced;
// AC: do not use analytical here, because it is non-monotonic
6628 //__kmp_guided = kmp_sch_guided_iterative_chunked;
6629 //__kmp_auto = kmp_sch_guided_analytical_chunked; // AC: it is the default, no
6630 // need to repeat assignment
6631 // Barrier initialization. Moved here from __kmp_env_initialize() Barrier branch
6632 // bit control and barrier method control parts
6633 #if KMP_FAST_REDUCTION_BARRIER
6634 #define kmp_reduction_barrier_gather_bb ((int)1)
6635 #define kmp_reduction_barrier_release_bb ((int)1)
6636 #define kmp_reduction_barrier_gather_pat bp_hyper_bar
6637 #define kmp_reduction_barrier_release_pat bp_hyper_bar
6638 #endif // KMP_FAST_REDUCTION_BARRIER
6639   for (i = bs_plain_barrier; i < bs_last_barrier; i++) {
6640     __kmp_barrier_gather_branch_bits[i] = __kmp_barrier_gather_bb_dflt;
6641     __kmp_barrier_release_branch_bits[i] = __kmp_barrier_release_bb_dflt;
6642     __kmp_barrier_gather_pattern[i] = __kmp_barrier_gather_pat_dflt;
6643     __kmp_barrier_release_pattern[i] = __kmp_barrier_release_pat_dflt;
6644 #if KMP_FAST_REDUCTION_BARRIER
    if (i == bs_reduction_barrier) { // tested and confirmed on ALTIX only
      // (lin_64): hyper,1
6647       __kmp_barrier_gather_branch_bits[i] = kmp_reduction_barrier_gather_bb;
6648       __kmp_barrier_release_branch_bits[i] = kmp_reduction_barrier_release_bb;
6649       __kmp_barrier_gather_pattern[i] = kmp_reduction_barrier_gather_pat;
6650       __kmp_barrier_release_pattern[i] = kmp_reduction_barrier_release_pat;
6651     }
6652 #endif // KMP_FAST_REDUCTION_BARRIER
6653   }
6654 #if KMP_FAST_REDUCTION_BARRIER
6655 #undef kmp_reduction_barrier_release_pat
6656 #undef kmp_reduction_barrier_gather_pat
6657 #undef kmp_reduction_barrier_release_bb
6658 #undef kmp_reduction_barrier_gather_bb
6659 #endif // KMP_FAST_REDUCTION_BARRIER
6660 #if KMP_MIC_SUPPORTED
6661   if (__kmp_mic_type == mic2) { // KNC
6662     // AC: plane=3,2, forkjoin=2,1 are optimal for 240 threads on KNC
6663     __kmp_barrier_gather_branch_bits[bs_plain_barrier] = 3; // plain gather
6664     __kmp_barrier_release_branch_bits[bs_forkjoin_barrier] =
6665         1; // forkjoin release
6666     __kmp_barrier_gather_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6667     __kmp_barrier_release_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6668   }
6669 #if KMP_FAST_REDUCTION_BARRIER
6670   if (__kmp_mic_type == mic2) { // KNC
6671     __kmp_barrier_gather_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6672     __kmp_barrier_release_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6673   }
6674 #endif // KMP_FAST_REDUCTION_BARRIER
6675 #endif // KMP_MIC_SUPPORTED
6676 
6677 // From KMP_CHECKS initialization
6678 #ifdef KMP_DEBUG
6679   __kmp_env_checks = TRUE; /* development versions have the extra checks */
6680 #else
6681   __kmp_env_checks = FALSE; /* port versions do not have the extra checks */
6682 #endif
6683 
6684   // From "KMP_FOREIGN_THREADS_THREADPRIVATE" initialization
6685   __kmp_foreign_tp = TRUE;
6686 
6687   __kmp_global.g.g_dynamic = FALSE;
6688   __kmp_global.g.g_dynamic_mode = dynamic_default;
6689 
6690   __kmp_env_initialize(NULL);
6691 
6692 // Print all messages in message catalog for testing purposes.
6693 #ifdef KMP_DEBUG
6694   char const *val = __kmp_env_get("KMP_DUMP_CATALOG");
6695   if (__kmp_str_match_true(val)) {
6696     kmp_str_buf_t buffer;
6697     __kmp_str_buf_init(&buffer);
6698     __kmp_i18n_dump_catalog(&buffer);
6699     __kmp_printf("%s", buffer.str);
6700     __kmp_str_buf_free(&buffer);
6701   }
6702   __kmp_env_free(&val);
6703 #endif
6704 
6705   __kmp_threads_capacity =
6706       __kmp_initial_threads_capacity(__kmp_dflt_team_nth_ub);
6707   // Moved here from __kmp_env_initialize() "KMP_ALL_THREADPRIVATE" part
6708   __kmp_tp_capacity = __kmp_default_tp_capacity(
6709       __kmp_dflt_team_nth_ub, __kmp_max_nth, __kmp_allThreadsSpecified);
6710 
6711   // If the library is shut down properly, both pools must be NULL. Just in
6712   // case, set them to NULL -- some memory may leak, but subsequent code will
6713   // work even if pools are not freed.
6714   KMP_DEBUG_ASSERT(__kmp_thread_pool == NULL);
6715   KMP_DEBUG_ASSERT(__kmp_thread_pool_insert_pt == NULL);
6716   KMP_DEBUG_ASSERT(__kmp_team_pool == NULL);
6717   __kmp_thread_pool = NULL;
6718   __kmp_thread_pool_insert_pt = NULL;
6719   __kmp_team_pool = NULL;
6720 
6721   /* Allocate all of the variable sized records */
6722   /* NOTE: __kmp_threads_capacity entries are allocated, but the arrays are
6723    * expandable */
6724   /* Since allocation is cache-aligned, just add extra padding at the end */
6725   size =
6726       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * __kmp_threads_capacity +
6727       CACHE_LINE;
6728   __kmp_threads = (kmp_info_t **)__kmp_allocate(size);
6729   __kmp_root = (kmp_root_t **)((char *)__kmp_threads +
6730                                sizeof(kmp_info_t *) * __kmp_threads_capacity);
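  // Layout of the single cache-aligned block allocated above (sketch):
  //   [ __kmp_threads: __kmp_threads_capacity * sizeof(kmp_info_t *) ]
  //   [ __kmp_root:    __kmp_threads_capacity * sizeof(kmp_root_t *) ]
  //   [ up to CACHE_LINE bytes of trailing padding ]
  // Both arrays therefore live in one allocation.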
6731 
6732   /* init thread counts */
6733   KMP_DEBUG_ASSERT(__kmp_all_nth ==
6734                    0); // Asserts fail if the library is reinitializing and
6735   KMP_DEBUG_ASSERT(__kmp_nth == 0); // something was wrong in termination.
6736   __kmp_all_nth = 0;
6737   __kmp_nth = 0;
6738 
6739   /* setup the uber master thread and hierarchy */
6740   gtid = __kmp_register_root(TRUE);
6741   KA_TRACE(10, ("__kmp_do_serial_initialize  T#%d\n", gtid));
6742   KMP_ASSERT(KMP_UBER_GTID(gtid));
6743   KMP_ASSERT(KMP_INITIAL_GTID(gtid));
6744 
6745   KMP_MB(); /* Flush all pending memory write invalidates.  */
6746 
6747   __kmp_common_initialize();
6748 
6749 #if KMP_OS_UNIX
6750   /* invoke the child fork handler */
6751   __kmp_register_atfork();
6752 #endif
6753 
6754 #if !KMP_DYNAMIC_LIB
6755   {
6756     /* Invoke the exit handler when the program finishes, only for static
6757        library. For dynamic library, we already have _fini and DllMain. */
6758     int rc = atexit(__kmp_internal_end_atexit);
6759     if (rc != 0) {
6760       __kmp_fatal(KMP_MSG(FunctionError, "atexit()"), KMP_ERR(rc),
6761                   __kmp_msg_null);
6762     }
6763   }
6764 #endif
6765 
6766 #if KMP_HANDLE_SIGNALS
6767 #if KMP_OS_UNIX
  /* NOTE: make sure that this is called before the user installs their own
     signal handlers so that the user handlers are called first. This way they
     can return false, not call our handler, avoid terminating the library, and
     continue execution where they left off. */
6772   __kmp_install_signals(FALSE);
6773 #endif /* KMP_OS_UNIX */
6774 #if KMP_OS_WINDOWS
6775   __kmp_install_signals(TRUE);
6776 #endif /* KMP_OS_WINDOWS */
6777 #endif
6778 
6779   /* we have finished the serial initialization */
6780   __kmp_init_counter++;
6781 
6782   __kmp_init_serial = TRUE;
6783 
6784   if (__kmp_settings) {
6785     __kmp_env_print();
6786   }
6787 
6788   if (__kmp_display_env || __kmp_display_env_verbose) {
6789     __kmp_env_print_2();
6790   }
6791 
6792 #if OMPT_SUPPORT
6793   ompt_post_init();
6794 #endif
6795 
6796   KMP_MB();
6797 
6798   KA_TRACE(10, ("__kmp_do_serial_initialize: exit\n"));
6799 }
6800 
6801 void __kmp_serial_initialize(void) {
6802   if (__kmp_init_serial) {
6803     return;
6804   }
6805   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6806   if (__kmp_init_serial) {
6807     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6808     return;
6809   }
6810   __kmp_do_serial_initialize();
6811   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6812 }
6813 
6814 static void __kmp_do_middle_initialize(void) {
6815   int i, j;
6816   int prev_dflt_team_nth;
6817 
6818   if (!__kmp_init_serial) {
6819     __kmp_do_serial_initialize();
6820   }
6821 
6822   KA_TRACE(10, ("__kmp_middle_initialize: enter\n"));
6823 
6824   // Save the previous value for the __kmp_dflt_team_nth so that
6825   // we can avoid some reinitialization if it hasn't changed.
6826   prev_dflt_team_nth = __kmp_dflt_team_nth;
6827 
6828 #if KMP_AFFINITY_SUPPORTED
6829   // __kmp_affinity_initialize() will try to set __kmp_ncores to the
6830   // number of cores on the machine.
6831   __kmp_affinity_initialize();
6832 
6833   // Run through the __kmp_threads array and set the affinity mask
6834   // for each root thread that is currently registered with the RTL.
6835   for (i = 0; i < __kmp_threads_capacity; i++) {
6836     if (TCR_PTR(__kmp_threads[i]) != NULL) {
6837       __kmp_affinity_set_init_mask(i, TRUE);
6838     }
6839   }
6840 #endif /* KMP_AFFINITY_SUPPORTED */
6841 
6842   KMP_ASSERT(__kmp_xproc > 0);
6843   if (__kmp_avail_proc == 0) {
6844     __kmp_avail_proc = __kmp_xproc;
6845   }
6846 
6847   // If there were empty places in num_threads list (OMP_NUM_THREADS=,,2,3),
6848   // correct them now
6849   j = 0;
6850   while ((j < __kmp_nested_nth.used) && !__kmp_nested_nth.nth[j]) {
6851     __kmp_nested_nth.nth[j] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub =
6852         __kmp_avail_proc;
6853     j++;
6854   }
6855 
6856   if (__kmp_dflt_team_nth == 0) {
6857 #ifdef KMP_DFLT_NTH_CORES
6858     // Default #threads = #cores
6859     __kmp_dflt_team_nth = __kmp_ncores;
6860     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6861                   "__kmp_ncores (%d)\n",
6862                   __kmp_dflt_team_nth));
6863 #else
6864     // Default #threads = #available OS procs
6865     __kmp_dflt_team_nth = __kmp_avail_proc;
6866     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6867                   "__kmp_avail_proc(%d)\n",
6868                   __kmp_dflt_team_nth));
6869 #endif /* KMP_DFLT_NTH_CORES */
6870   }
6871 
6872   if (__kmp_dflt_team_nth < KMP_MIN_NTH) {
6873     __kmp_dflt_team_nth = KMP_MIN_NTH;
6874   }
6875   if (__kmp_dflt_team_nth > __kmp_sys_max_nth) {
6876     __kmp_dflt_team_nth = __kmp_sys_max_nth;
6877   }
6878 
6879   // There's no harm in continuing if the following check fails,
6880   // but it indicates an error in the previous logic.
6881   KMP_DEBUG_ASSERT(__kmp_dflt_team_nth <= __kmp_dflt_team_nth_ub);
6882 
6883   if (__kmp_dflt_team_nth != prev_dflt_team_nth) {
6884     // Run through the __kmp_threads array and set the num threads icv for each
6885     // root thread that is currently registered with the RTL (which has not
6886     // already explicitly set its nthreads-var with a call to
6887     // omp_set_num_threads()).
6888     for (i = 0; i < __kmp_threads_capacity; i++) {
6889       kmp_info_t *thread = __kmp_threads[i];
6890       if (thread == NULL)
6891         continue;
6892       if (thread->th.th_current_task->td_icvs.nproc != 0)
6893         continue;
6894 
6895       set__nproc(__kmp_threads[i], __kmp_dflt_team_nth);
6896     }
6897   }
  KA_TRACE(20, ("__kmp_do_middle_initialize: final value for "
                "__kmp_dflt_team_nth = %d\n",
                __kmp_dflt_team_nth));
6902 
6903 #ifdef KMP_ADJUST_BLOCKTIME
6904   /* Adjust blocktime to zero if necessary  now that __kmp_avail_proc is set */
6905   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
6906     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
6907     if (__kmp_nth > __kmp_avail_proc) {
6908       __kmp_zero_bt = TRUE;
6909     }
6910   }
6911 #endif /* KMP_ADJUST_BLOCKTIME */
6912 
6913   /* we have finished middle initialization */
6914   TCW_SYNC_4(__kmp_init_middle, TRUE);
6915 
6916   KA_TRACE(10, ("__kmp_do_middle_initialize: exit\n"));
6917 }
6918 
6919 void __kmp_middle_initialize(void) {
6920   if (__kmp_init_middle) {
6921     return;
6922   }
6923   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6924   if (__kmp_init_middle) {
6925     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6926     return;
6927   }
6928   __kmp_do_middle_initialize();
6929   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6930 }
6931 
6932 void __kmp_parallel_initialize(void) {
6933   int gtid = __kmp_entry_gtid(); // this might be a new root
6934 
6935   /* synchronize parallel initialization (for sibling) */
6936   if (TCR_4(__kmp_init_parallel))
6937     return;
6938   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6939   if (TCR_4(__kmp_init_parallel)) {
6940     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6941     return;
6942   }
6943 
6944   /* TODO reinitialization after we have already shut down */
6945   if (TCR_4(__kmp_global.g.g_done)) {
6946     KA_TRACE(
6947         10,
6948         ("__kmp_parallel_initialize: attempt to init while shutting down\n"));
6949     __kmp_infinite_loop();
6950   }
6951 
  /* jc: The lock __kmp_initz_lock is already held, so calling
     __kmp_middle_initialize would cause a deadlock. So we call
     __kmp_do_middle_initialize directly. */
6955   if (!__kmp_init_middle) {
6956     __kmp_do_middle_initialize();
6957   }
6958   __kmp_resume_if_hard_paused();
6959 
6960   /* begin initialization */
6961   KA_TRACE(10, ("__kmp_parallel_initialize: enter\n"));
6962   KMP_ASSERT(KMP_UBER_GTID(gtid));
6963 
6964 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6965   // Save the FP control regs.
6966   // Worker threads will set theirs to these values at thread startup.
6967   __kmp_store_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
6968   __kmp_store_mxcsr(&__kmp_init_mxcsr);
6969   __kmp_init_mxcsr &= KMP_X86_MXCSR_MASK;
6970 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
6971 
6972 #if KMP_OS_UNIX
6973 #if KMP_HANDLE_SIGNALS
6974   /*  must be after __kmp_serial_initialize  */
6975   __kmp_install_signals(TRUE);
6976 #endif
6977 #endif
6978 
6979   __kmp_suspend_initialize();
6980 
6981 #if defined(USE_LOAD_BALANCE)
6982   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6983     __kmp_global.g.g_dynamic_mode = dynamic_load_balance;
6984   }
6985 #else
6986   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6987     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
6988   }
6989 #endif
6990 
6991   if (__kmp_version) {
6992     __kmp_print_version_2();
6993   }
6994 
6995   /* we have finished parallel initialization */
6996   TCW_SYNC_4(__kmp_init_parallel, TRUE);
6997 
6998   KMP_MB();
6999   KA_TRACE(10, ("__kmp_parallel_initialize: exit\n"));
7000 
7001   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7002 }
7003 
7004 /* ------------------------------------------------------------------------ */
7005 
7006 void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
7007                                    kmp_team_t *team) {
7008   kmp_disp_t *dispatch;
7009 
7010   KMP_MB();
7011 
7012   /* none of the threads have encountered any constructs, yet. */
7013   this_thr->th.th_local.this_construct = 0;
7014 #if KMP_CACHE_MANAGE
7015   KMP_CACHE_PREFETCH(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_arrived);
7016 #endif /* KMP_CACHE_MANAGE */
7017   dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch);
7018   KMP_DEBUG_ASSERT(dispatch);
7019   KMP_DEBUG_ASSERT(team->t.t_dispatch);
7020   // KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[
7021   // this_thr->th.th_info.ds.ds_tid ] );
7022 
7023   dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */
7024   dispatch->th_doacross_buf_idx = 0; // reset doacross dispatch buffer counter
7025   if (__kmp_env_consistency_check)
7026     __kmp_push_parallel(gtid, team->t.t_ident);
7027 
7028   KMP_MB(); /* Flush all pending memory write invalidates.  */
7029 }
7030 
7031 void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
7032                                   kmp_team_t *team) {
7033   if (__kmp_env_consistency_check)
7034     __kmp_pop_parallel(gtid, team->t.t_ident);
7035 
7036   __kmp_finish_implicit_task(this_thr);
7037 }
7038 
7039 int __kmp_invoke_task_func(int gtid) {
7040   int rc;
7041   int tid = __kmp_tid_from_gtid(gtid);
7042   kmp_info_t *this_thr = __kmp_threads[gtid];
7043   kmp_team_t *team = this_thr->th.th_team;
7044 
7045   __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
7046 #if USE_ITT_BUILD
7047   if (__itt_stack_caller_create_ptr) {
7048     __kmp_itt_stack_callee_enter(
7049         (__itt_caller)
7050             team->t.t_stack_id); // inform ittnotify about entering user's code
7051   }
7052 #endif /* USE_ITT_BUILD */
7053 #if INCLUDE_SSC_MARKS
7054   SSC_MARK_INVOKING();
7055 #endif
7056 
7057 #if OMPT_SUPPORT
7058   void *dummy;
7059   void **exit_frame_p;
7060   ompt_data_t *my_task_data;
7061   ompt_data_t *my_parallel_data;
7062   int ompt_team_size;
7063 
7064   if (ompt_enabled.enabled) {
7065     exit_frame_p = &(
7066         team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame.exit_frame.ptr);
7067   } else {
7068     exit_frame_p = &dummy;
7069   }
7070 
7071   my_task_data =
7072       &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
7073   my_parallel_data = &(team->t.ompt_team_info.parallel_data);
7074   if (ompt_enabled.ompt_callback_implicit_task) {
7075     ompt_team_size = team->t.t_nproc;
7076     ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7077         ompt_scope_begin, my_parallel_data, my_task_data, ompt_team_size,
7078         __kmp_tid_from_gtid(gtid), ompt_task_implicit);
7079     OMPT_CUR_TASK_INFO(this_thr)->thread_num = __kmp_tid_from_gtid(gtid);
7080   }
7081 #endif
7082 
7083 #if KMP_STATS_ENABLED
7084   stats_state_e previous_state = KMP_GET_THREAD_STATE();
7085   if (previous_state == stats_state_e::TEAMS_REGION) {
7086     KMP_PUSH_PARTITIONED_TIMER(OMP_teams);
7087   } else {
7088     KMP_PUSH_PARTITIONED_TIMER(OMP_parallel);
7089   }
7090   KMP_SET_THREAD_STATE(IMPLICIT_TASK);
7091 #endif
7092 
7093   rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
7094                               tid, (int)team->t.t_argc, (void **)team->t.t_argv
7095 #if OMPT_SUPPORT
7096                               ,
7097                               exit_frame_p
7098 #endif
7099                               );
7100 #if OMPT_SUPPORT
7101   *exit_frame_p = NULL;
  this_thr->th.ompt_thread_info.parallel_flags |= ompt_parallel_team;
7103 #endif
7104 
7105 #if KMP_STATS_ENABLED
7106   if (previous_state == stats_state_e::TEAMS_REGION) {
7107     KMP_SET_THREAD_STATE(previous_state);
7108   }
7109   KMP_POP_PARTITIONED_TIMER();
7110 #endif
7111 
7112 #if USE_ITT_BUILD
7113   if (__itt_stack_caller_create_ptr) {
7114     __kmp_itt_stack_callee_leave(
7115         (__itt_caller)
7116             team->t.t_stack_id); // inform ittnotify about leaving user's code
7117   }
7118 #endif /* USE_ITT_BUILD */
7119   __kmp_run_after_invoked_task(gtid, tid, this_thr, team);
7120 
7121   return rc;
7122 }
7123 
7124 void __kmp_teams_master(int gtid) {
  // This routine is called by all master threads in a teams construct
7126   kmp_info_t *thr = __kmp_threads[gtid];
7127   kmp_team_t *team = thr->th.th_team;
7128   ident_t *loc = team->t.t_ident;
7129   thr->th.th_set_nproc = thr->th.th_teams_size.nth;
7130   KMP_DEBUG_ASSERT(thr->th.th_teams_microtask);
7131   KMP_DEBUG_ASSERT(thr->th.th_set_nproc);
7132   KA_TRACE(20, ("__kmp_teams_master: T#%d, Tid %d, microtask %p\n", gtid,
7133                 __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask));
7134 
7135   // This thread is a new CG root.  Set up the proper variables.
7136   kmp_cg_root_t *tmp = (kmp_cg_root_t *)__kmp_allocate(sizeof(kmp_cg_root_t));
7137   tmp->cg_root = thr; // Make thr the CG root
7138   // Init to thread limit that was stored when league masters were forked
7139   tmp->cg_thread_limit = thr->th.th_current_task->td_icvs.thread_limit;
7140   tmp->cg_nthreads = 1; // Init counter to one active thread, this one
7141   KA_TRACE(100, ("__kmp_teams_master: Thread %p created node %p and init"
7142                  " cg_nthreads to 1\n",
7143                  thr, tmp));
7144   tmp->up = thr->th.th_cg_roots;
7145   thr->th.th_cg_roots = tmp;
7146 
// Launch the league of teams now, but do not let workers execute
// (they hang on the fork barrier until the next parallel region)
7149 #if INCLUDE_SSC_MARKS
7150   SSC_MARK_FORKING();
7151 #endif
7152   __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
7153                   (microtask_t)thr->th.th_teams_microtask, // "wrapped" task
7154                   VOLATILE_CAST(launch_t) __kmp_invoke_task_func, NULL);
7155 #if INCLUDE_SSC_MARKS
7156   SSC_MARK_JOINING();
7157 #endif
7158   // If the team size was reduced from the limit, set it to the new size
7159   if (thr->th.th_team_nproc < thr->th.th_teams_size.nth)
7160     thr->th.th_teams_size.nth = thr->th.th_team_nproc;
  // AC: the last parameter "1" eliminates the join barrier, which won't work
  // because worker threads are in a fork barrier waiting for more parallel
  // regions
7163   __kmp_join_call(loc, gtid
7164 #if OMPT_SUPPORT
7165                   ,
7166                   fork_context_intel
7167 #endif
7168                   ,
7169                   1);
7170 }
7171 
7172 int __kmp_invoke_teams_master(int gtid) {
7173   kmp_info_t *this_thr = __kmp_threads[gtid];
7174   kmp_team_t *team = this_thr->th.th_team;
7175 #if KMP_DEBUG
7176   if (!__kmp_threads[gtid]->th.th_team->t.t_serialized)
7177     KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn ==
7178                      (void *)__kmp_teams_master);
7179 #endif
7180   __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
7181 #if OMPT_SUPPORT
7182   int tid = __kmp_tid_from_gtid(gtid);
7183   ompt_data_t *task_data =
7184       &team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data;
7185   ompt_data_t *parallel_data = &team->t.ompt_team_info.parallel_data;
7186   if (ompt_enabled.ompt_callback_implicit_task) {
7187     ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7188         ompt_scope_begin, parallel_data, task_data, team->t.t_nproc, tid,
7189         ompt_task_initial);
7190     OMPT_CUR_TASK_INFO(this_thr)->thread_num = tid;
7191   }
7192 #endif
7193   __kmp_teams_master(gtid);
7194 #if OMPT_SUPPORT
7195   this_thr->th.ompt_thread_info.parallel_flags |= ompt_parallel_league;
7196 #endif
7197   __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
7198   return 1;
7199 }
7200 
/* This sets the requested number of threads for the next parallel region
   encountered by this team. Since this should be enclosed in the fork/join
   critical section, it should avoid race conditions with asymmetrical nested
   parallelism. */
7205 
7206 void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) {
7207   kmp_info_t *thr = __kmp_threads[gtid];
7208 
7209   if (num_threads > 0)
7210     thr->th.th_set_nproc = num_threads;
7211 }
7212 
/* This sets the requested number of teams for the teams region and/or
   the number of threads for the next parallel region encountered. */
7215 void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
7216                           int num_threads) {
7217   kmp_info_t *thr = __kmp_threads[gtid];
7218   KMP_DEBUG_ASSERT(num_teams >= 0);
7219   KMP_DEBUG_ASSERT(num_threads >= 0);
7220 
7221   if (num_teams == 0)
7222     num_teams = 1; // default number of teams is 1.
  if (num_teams > __kmp_teams_max_nth) { // too many teams requested?
7224     if (!__kmp_reserve_warn) {
7225       __kmp_reserve_warn = 1;
7226       __kmp_msg(kmp_ms_warning,
7227                 KMP_MSG(CantFormThrTeam, num_teams, __kmp_teams_max_nth),
7228                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7229     }
7230     num_teams = __kmp_teams_max_nth;
7231   }
7232   // Set number of teams (number of threads in the outer "parallel" of the
7233   // teams)
7234   thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;
7235 
7236   // Remember the number of threads for inner parallel regions
7237   if (!TCR_4(__kmp_init_middle))
7238     __kmp_middle_initialize(); // get internal globals calculated
7239   KMP_DEBUG_ASSERT(__kmp_avail_proc);
7240   KMP_DEBUG_ASSERT(__kmp_dflt_team_nth);
7241   if (num_threads == 0) {
7242     num_threads = __kmp_avail_proc / num_teams;
7243     // adjust num_threads w/o warning as it is not user setting
7244     // num_threads = min(num_threads, nthreads-var, thread-limit-var)
7245     // no thread_limit clause specified -  do not change thread-limit-var ICV
7246     if (num_threads > __kmp_dflt_team_nth) {
7247       num_threads = __kmp_dflt_team_nth; // honor nthreads-var ICV
7248     }
7249     if (num_threads > thr->th.th_current_task->td_icvs.thread_limit) {
7250       num_threads = thr->th.th_current_task->td_icvs.thread_limit;
    } // prevent team size from exceeding thread-limit-var
7252     if (num_teams * num_threads > __kmp_teams_max_nth) {
7253       num_threads = __kmp_teams_max_nth / num_teams;
7254     }
7255   } else {
7256     // This thread will be the master of the league masters
7257     // Store new thread limit; old limit is saved in th_cg_roots list
7258     thr->th.th_current_task->td_icvs.thread_limit = num_threads;
7259     // num_threads = min(num_threads, nthreads-var)
7260     if (num_threads > __kmp_dflt_team_nth) {
7261       num_threads = __kmp_dflt_team_nth; // honor nthreads-var ICV
7262     }
7263     if (num_teams * num_threads > __kmp_teams_max_nth) {
7264       int new_threads = __kmp_teams_max_nth / num_teams;
7265       if (!__kmp_reserve_warn) { // user asked for too many threads
7266         __kmp_reserve_warn = 1; // conflicts with KMP_TEAMS_THREAD_LIMIT
7267         __kmp_msg(kmp_ms_warning,
7268                   KMP_MSG(CantFormThrTeam, num_threads, new_threads),
7269                   KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7270       }
7271       num_threads = new_threads;
7272     }
7273   }
7274   thr->th.th_teams_size.nth = num_threads;
7275 }
7276 
7277 // Set the proc_bind var to use in the following parallel region.
7278 void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) {
7279   kmp_info_t *thr = __kmp_threads[gtid];
7280   thr->th.th_set_proc_bind = proc_bind;
7281 }
7282 
7283 /* Launch the worker threads into the microtask. */
7284 
7285 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
7286   kmp_info_t *this_thr = __kmp_threads[gtid];
7287 
7288 #ifdef KMP_DEBUG
7289   int f;
7290 #endif /* KMP_DEBUG */
7291 
7292   KMP_DEBUG_ASSERT(team);
7293   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7294   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7295   KMP_MB(); /* Flush all pending memory write invalidates.  */
7296 
7297   team->t.t_construct = 0; /* no single directives seen yet */
7298   team->t.t_ordered.dt.t_value =
7299       0; /* thread 0 enters the ordered section first */
7300 
7301   /* Reset the identifiers on the dispatch buffer */
7302   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
7303   if (team->t.t_max_nproc > 1) {
7304     int i;
7305     for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
7306       team->t.t_disp_buffer[i].buffer_index = i;
7307       team->t.t_disp_buffer[i].doacross_buf_idx = i;
7308     }
7309   } else {
7310     team->t.t_disp_buffer[0].buffer_index = 0;
7311     team->t.t_disp_buffer[0].doacross_buf_idx = 0;
7312   }
7313 
7314   KMP_MB(); /* Flush all pending memory write invalidates.  */
7315   KMP_ASSERT(this_thr->th.th_team == team);
7316 
7317 #ifdef KMP_DEBUG
7318   for (f = 0; f < team->t.t_nproc; f++) {
7319     KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
7320                      team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
7321   }
7322 #endif /* KMP_DEBUG */
7323 
7324   /* release the worker threads so they may begin working */
7325   __kmp_fork_barrier(gtid, 0);
7326 }
7327 
7328 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
7329   kmp_info_t *this_thr = __kmp_threads[gtid];
7330 
7331   KMP_DEBUG_ASSERT(team);
7332   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7333   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7334   KMP_MB(); /* Flush all pending memory write invalidates.  */
7335 
7336 /* Join barrier after fork */
7337 
7338 #ifdef KMP_DEBUG
7339   if (__kmp_threads[gtid] &&
7340       __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
7341     __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n", gtid, gtid,
7342                  __kmp_threads[gtid]);
7343     __kmp_printf("__kmp_threads[%d]->th.th_team_nproc=%d, TEAM: %p, "
7344                  "team->t.t_nproc=%d\n",
7345                  gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
7346                  team->t.t_nproc);
7347     __kmp_print_structure();
7348   }
7349   KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
7350                    __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
7351 #endif /* KMP_DEBUG */
7352 
7353   __kmp_join_barrier(gtid); /* wait for everyone */
7354 #if OMPT_SUPPORT
7355   if (ompt_enabled.enabled &&
7356       this_thr->th.ompt_thread_info.state == ompt_state_wait_barrier_implicit) {
7357     int ds_tid = this_thr->th.th_info.ds.ds_tid;
7358     ompt_data_t *task_data = OMPT_CUR_TASK_DATA(this_thr);
7359     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
7360 #if OMPT_OPTIONAL
7361     void *codeptr = NULL;
7362     if (KMP_MASTER_TID(ds_tid) &&
7363         (ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait) ||
7364          ompt_callbacks.ompt_callback(ompt_callback_sync_region)))
7365       codeptr = OMPT_CUR_TEAM_INFO(this_thr)->master_return_address;
7366 
7367     if (ompt_enabled.ompt_callback_sync_region_wait) {
7368       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
7369           ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
7370           codeptr);
7371     }
7372     if (ompt_enabled.ompt_callback_sync_region) {
7373       ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
7374           ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
7375           codeptr);
7376     }
7377 #endif
7378     if (!KMP_MASTER_TID(ds_tid) && ompt_enabled.ompt_callback_implicit_task) {
      // TODO: Can this be ompt_task_initial?
      ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
          ompt_scope_end, NULL, task_data, 0, ds_tid, ompt_task_implicit);
7381     }
7382   }
7383 #endif
7384 
7385   KMP_MB(); /* Flush all pending memory write invalidates.  */
7386   KMP_ASSERT(this_thr->th.th_team == team);
7387 }
7388 
7389 /* ------------------------------------------------------------------------ */
7390 
7391 #ifdef USE_LOAD_BALANCE
7392 
// Return the number of worker threads actively spinning in the hot team, if
// we are at the outermost level of parallelism. Otherwise, return 0.
7395 static int __kmp_active_hot_team_nproc(kmp_root_t *root) {
7396   int i;
7397   int retval;
7398   kmp_team_t *hot_team;
7399 
7400   if (root->r.r_active) {
7401     return 0;
7402   }
7403   hot_team = root->r.r_hot_team;
7404   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
7405     return hot_team->t.t_nproc - 1; // Don't count master thread
7406   }
7407 
7408   // Skip the master thread - it is accounted for elsewhere.
7409   retval = 0;
7410   for (i = 1; i < hot_team->t.t_nproc; i++) {
7411     if (hot_team->t.t_threads[i]->th.th_active) {
7412       retval++;
7413     }
7414   }
7415   return retval;
7416 }
7417 
7418 // Perform an automatic adjustment to the number of
7419 // threads used by the next parallel region.
7420 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) {
7421   int retval;
7422   int pool_active;
7423   int hot_team_active;
7424   int team_curr_active;
7425   int system_active;
7426 
7427   KB_TRACE(20, ("__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", root,
7428                 set_nproc));
7429   KMP_DEBUG_ASSERT(root);
7430   KMP_DEBUG_ASSERT(root->r.r_root_team->t.t_threads[0]
7431                        ->th.th_current_task->td_icvs.dynamic == TRUE);
  KMP_DEBUG_ASSERT(set_nproc >= 1); // set_nproc == 1 is handled just below
7433 
7434   if (set_nproc == 1) {
7435     KB_TRACE(20, ("__kmp_load_balance_nproc: serial execution.\n"));
7436     return 1;
7437   }
7438 
7439   // Threads that are active in the thread pool, active in the hot team for this
7440   // particular root (if we are at the outer par level), and the currently
7441   // executing thread (to become the master) are available to add to the new
7442   // team, but are currently contributing to the system load, and must be
7443   // accounted for.
7444   pool_active = __kmp_thread_pool_active_nth;
7445   hot_team_active = __kmp_active_hot_team_nproc(root);
7446   team_curr_active = pool_active + hot_team_active + 1;
7447 
7448   // Check the system load.
7449   system_active = __kmp_get_load_balance(__kmp_avail_proc + team_curr_active);
7450   KB_TRACE(30, ("__kmp_load_balance_nproc: system active = %d pool active = %d "
7451                 "hot team active = %d\n",
7452                 system_active, pool_active, hot_team_active));
7453 
7454   if (system_active < 0) {
7455     // There was an error reading the necessary info from /proc, so use the
7456     // thread limit algorithm instead. Once we set __kmp_global.g.g_dynamic_mode
7457     // = dynamic_thread_limit, we shouldn't wind up getting back here.
7458     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
7459     KMP_WARNING(CantLoadBalUsing, "KMP_DYNAMIC_MODE=thread limit");
7460 
7461     // Make this call behave like the thread limit algorithm.
7462     retval = __kmp_avail_proc - __kmp_nth +
7463              (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
7464     if (retval > set_nproc) {
7465       retval = set_nproc;
7466     }
7467     if (retval < KMP_MIN_NTH) {
7468       retval = KMP_MIN_NTH;
7469     }
7470 
7471     KB_TRACE(20, ("__kmp_load_balance_nproc: thread limit exit. retval:%d\n",
7472                   retval));
7473     return retval;
7474   }
7475 
  // There is a slight delay in the load balance algorithm in detecting new
  // running procs. The real system load at this instant should be at least as
  // large as the number of active OMP threads available to add to the team.
7479   if (system_active < team_curr_active) {
7480     system_active = team_curr_active;
7481   }
7482   retval = __kmp_avail_proc - system_active + team_curr_active;
7483   if (retval > set_nproc) {
7484     retval = set_nproc;
7485   }
7486   if (retval < KMP_MIN_NTH) {
7487     retval = KMP_MIN_NTH;
7488   }
7489 
7490   KB_TRACE(20, ("__kmp_load_balance_nproc: exit. retval:%d\n", retval));
7491   return retval;
7492 } // __kmp_load_balance_nproc()
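
// Worked example: with __kmp_avail_proc == 8, pool_active == 2 and
// hot_team_active == 1, team_curr_active == 2 + 1 + 1 == 4 (this thread
// included). If __kmp_get_load_balance() reports system_active == 6, then
// retval = 8 - 6 + 4 == 6, which is then clamped to [KMP_MIN_NTH, set_nproc].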
7493 
7494 #endif /* USE_LOAD_BALANCE */
7495 
7496 /* ------------------------------------------------------------------------ */
7497 
7498 /* NOTE: this is called with the __kmp_init_lock held */
7499 void __kmp_cleanup(void) {
7500   int f;
7501 
7502   KA_TRACE(10, ("__kmp_cleanup: enter\n"));
7503 
7504   if (TCR_4(__kmp_init_parallel)) {
7505 #if KMP_HANDLE_SIGNALS
7506     __kmp_remove_signals();
7507 #endif
7508     TCW_4(__kmp_init_parallel, FALSE);
7509   }
7510 
7511   if (TCR_4(__kmp_init_middle)) {
7512 #if KMP_AFFINITY_SUPPORTED
7513     __kmp_affinity_uninitialize();
7514 #endif /* KMP_AFFINITY_SUPPORTED */
7515     __kmp_cleanup_hierarchy();
7516     TCW_4(__kmp_init_middle, FALSE);
7517   }
7518 
7519   KA_TRACE(10, ("__kmp_cleanup: go serial cleanup\n"));
7520 
7521   if (__kmp_init_serial) {
7522     __kmp_runtime_destroy();
7523     __kmp_init_serial = FALSE;
7524   }
7525 
7526   __kmp_cleanup_threadprivate_caches();
7527 
7528   for (f = 0; f < __kmp_threads_capacity; f++) {
7529     if (__kmp_root[f] != NULL) {
7530       __kmp_free(__kmp_root[f]);
7531       __kmp_root[f] = NULL;
7532     }
7533   }
7534   __kmp_free(__kmp_threads);
  // __kmp_threads and __kmp_root were allocated at once, as a single block, so
  // freeing __kmp_threads frees both; __kmp_root must not be freed separately.
7537   __kmp_threads = NULL;
7538   __kmp_root = NULL;
7539   __kmp_threads_capacity = 0;
7540 
7541 #if KMP_USE_DYNAMIC_LOCK
7542   __kmp_cleanup_indirect_user_locks();
7543 #else
7544   __kmp_cleanup_user_locks();
7545 #endif
7546 
7547 #if KMP_AFFINITY_SUPPORTED
7548   KMP_INTERNAL_FREE(CCAST(char *, __kmp_cpuinfo_file));
7549   __kmp_cpuinfo_file = NULL;
7550 #endif /* KMP_AFFINITY_SUPPORTED */
7551 
7552 #if KMP_USE_ADAPTIVE_LOCKS
7553 #if KMP_DEBUG_ADAPTIVE_LOCKS
7554   __kmp_print_speculative_stats();
7555 #endif
7556 #endif
7557   KMP_INTERNAL_FREE(__kmp_nested_nth.nth);
7558   __kmp_nested_nth.nth = NULL;
7559   __kmp_nested_nth.size = 0;
7560   __kmp_nested_nth.used = 0;
7561   KMP_INTERNAL_FREE(__kmp_nested_proc_bind.bind_types);
7562   __kmp_nested_proc_bind.bind_types = NULL;
7563   __kmp_nested_proc_bind.size = 0;
7564   __kmp_nested_proc_bind.used = 0;
7565   if (__kmp_affinity_format) {
7566     KMP_INTERNAL_FREE(__kmp_affinity_format);
7567     __kmp_affinity_format = NULL;
7568   }
7569 
7570   __kmp_i18n_catclose();
7571 
7572 #if KMP_USE_HIER_SCHED
7573   __kmp_hier_scheds.deallocate();
7574 #endif
7575 
7576 #if KMP_STATS_ENABLED
7577   __kmp_stats_fini();
7578 #endif
7579 
7580   KA_TRACE(10, ("__kmp_cleanup: exit\n"));
7581 }
7582 
7583 /* ------------------------------------------------------------------------ */
7584 
7585 int __kmp_ignore_mppbeg(void) {
7586   char *env;
7587 
7588   if ((env = getenv("KMP_IGNORE_MPPBEG")) != NULL) {
7589     if (__kmp_str_match_false(env))
7590       return FALSE;
7591   }
  // By default, __kmpc_begin() is a no-op.
7593   return TRUE;
7594 }
7595 
7596 int __kmp_ignore_mppend(void) {
7597   char *env;
7598 
7599   if ((env = getenv("KMP_IGNORE_MPPEND")) != NULL) {
7600     if (__kmp_str_match_false(env))
7601       return FALSE;
7602   }
  // By default, __kmpc_end() is a no-op.
7604   return TRUE;
7605 }
7606 
7607 void __kmp_internal_begin(void) {
7608   int gtid;
7609   kmp_root_t *root;
7610 
  /* This is a very important step, as it registers new sibling threads and
     assigns these new uber threads a new gtid */
7613   gtid = __kmp_entry_gtid();
7614   root = __kmp_threads[gtid]->th.th_root;
7615   KMP_ASSERT(KMP_UBER_GTID(gtid));
7616 
7617   if (root->r.r_begin)
7618     return;
7619   __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
7620   if (root->r.r_begin) {
7621     __kmp_release_lock(&root->r.r_begin_lock, gtid);
7622     return;
7623   }
7624 
7625   root->r.r_begin = TRUE;
7626 
7627   __kmp_release_lock(&root->r.r_begin_lock, gtid);
7628 }
7629 
7630 /* ------------------------------------------------------------------------ */
7631 
7632 void __kmp_user_set_library(enum library_type arg) {
7633   int gtid;
7634   kmp_root_t *root;
7635   kmp_info_t *thread;
7636 
7637   /* first, make sure we are initialized so we can get our gtid */
7638 
7639   gtid = __kmp_entry_gtid();
7640   thread = __kmp_threads[gtid];
7641 
7642   root = thread->th.th_root;
7643 
7644   KA_TRACE(20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg,
7645                 library_serial));
7646   if (root->r.r_in_parallel) { /* Must be called in serial section of top-level
7647                                   thread */
7648     KMP_WARNING(SetLibraryIncorrectCall);
7649     return;
7650   }
7651 
7652   switch (arg) {
7653   case library_serial:
7654     thread->th.th_set_nproc = 0;
7655     set__nproc(thread, 1);
7656     break;
7657   case library_turnaround:
7658     thread->th.th_set_nproc = 0;
7659     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7660                                            : __kmp_dflt_team_nth_ub);
7661     break;
7662   case library_throughput:
7663     thread->th.th_set_nproc = 0;
7664     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7665                                            : __kmp_dflt_team_nth_ub);
7666     break;
7667   default:
7668     KMP_FATAL(UnknownLibraryType, arg);
7669   }
7670 
7671   __kmp_aux_set_library(arg);
7672 }
7673 
7674 void __kmp_aux_set_stacksize(size_t arg) {
7675   if (!__kmp_init_serial)
7676     __kmp_serial_initialize();
7677 
7678 #if KMP_OS_DARWIN
7679   if (arg & (0x1000 - 1)) {
7680     arg &= ~(0x1000 - 1);
7681     if (arg + 0x1000) /* check for overflow if we round up */
7682       arg += 0x1000;
7683   }
7684 #endif
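  // For example, arg == 0x1801 has low bits set, so it is truncated to 0x1000
  // and then bumped to 0x2000, i.e. rounded up to the next 4 KiB page (the
  // `arg + 0x1000` test only guards against wrapping to zero).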
7685   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
7686 
7687   /* only change the default stacksize before the first parallel region */
7688   if (!TCR_4(__kmp_init_parallel)) {
7689     size_t value = arg; /* argument is in bytes */
7690 
7691     if (value < __kmp_sys_min_stksize)
7692       value = __kmp_sys_min_stksize;
7693     else if (value > KMP_MAX_STKSIZE)
7694       value = KMP_MAX_STKSIZE;
7695 
7696     __kmp_stksize = value;
7697 
7698     __kmp_env_stksize = TRUE; /* was KMP_STACKSIZE specified? */
7699   }
7700 
7701   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7702 }
7703 
/* Set the behavior of the runtime library */
/* TODO this can cause some odd behavior with sibling parallelism... */
7706 void __kmp_aux_set_library(enum library_type arg) {
7707   __kmp_library = arg;
7708 
7709   switch (__kmp_library) {
7710   case library_serial: {
7711     KMP_INFORM(LibraryIsSerial);
7712   } break;
7713   case library_turnaround:
7714     if (__kmp_use_yield == 1 && !__kmp_use_yield_exp_set)
7715       __kmp_use_yield = 2; // only yield when oversubscribed
7716     break;
7717   case library_throughput:
7718     if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME)
7719       __kmp_dflt_blocktime = 200;
7720     break;
7721   default:
7722     KMP_FATAL(UnknownLibraryType, arg);
7723   }
7724 }
7725 
/* Get team information common to all teams-construct API calls */
// Returns NULL if not in a teams construct
7728 static kmp_team_t *__kmp_aux_get_team_info(int &teams_serialized) {
7729   kmp_info_t *thr = __kmp_entry_thread();
7730   teams_serialized = 0;
7731   if (thr->th.th_teams_microtask) {
7732     kmp_team_t *team = thr->th.th_team;
7733     int tlevel = thr->th.th_teams_level; // the level of the teams construct
7734     int ii = team->t.t_level;
7735     teams_serialized = team->t.t_serialized;
7736     int level = tlevel + 1;
7737     KMP_DEBUG_ASSERT(ii >= tlevel);
7738     while (ii > level) {
7739       for (teams_serialized = team->t.t_serialized;
7740            (teams_serialized > 0) && (ii > level); teams_serialized--, ii--) {
7741       }
7742       if (team->t.t_serialized && (!teams_serialized)) {
7743         team = team->t.t_parent;
7744         continue;
7745       }
7746       if (ii > level) {
7747         team = team->t.t_parent;
7748         ii--;
7749       }
7750     }
7751     return team;
7752   }
7753   return NULL;
7754 }
7755 
7756 int __kmp_aux_get_team_num() {
7757   int serialized;
7758   kmp_team_t *team = __kmp_aux_get_team_info(serialized);
7759   if (team) {
7760     if (serialized > 1) {
7761       return 0; // teams region is serialized ( 1 team of 1 thread ).
7762     } else {
7763       return team->t.t_master_tid;
7764     }
7765   }
7766   return 0;
7767 }
7768 
7769 int __kmp_aux_get_num_teams() {
7770   int serialized;
7771   kmp_team_t *team = __kmp_aux_get_team_info(serialized);
7772   if (team) {
7773     if (serialized > 1) {
7774       return 1;
7775     } else {
7776       return team->t.t_parent->t.t_nproc;
7777     }
7778   }
7779   return 1;
7780 }
7781 
7782 /* ------------------------------------------------------------------------ */
7783 
7784 /*
7785  * Affinity Format Parser
7786  *
7787  * Field is in form of: %[[[0].]size]type
7788  * % and type are required (%% means print a literal '%')
7789  * type is either single char or long name surrounded by {},
7790  * e.g., N or {num_threads}
7791  * 0 => leading zeros
7792  * . => right justified when size is specified
7793  * by default output is left justified
7794  * size is the *minimum* field length
7795  * All other characters are printed as is
7796  *
7797  * Available field types:
7798  * L {thread_level}      - omp_get_level()
7799  * n {thread_num}        - omp_get_thread_num()
7800  * h {host}              - name of host machine
7801  * P {process_id}        - process id (integer)
7802  * T {thread_identifier} - native thread identifier (integer)
7803  * N {num_threads}       - omp_get_num_threads()
7804  * A {ancestor_tnum}     - omp_get_ancestor_thread_num(omp_get_level()-1)
7805  * a {thread_affinity}   - comma separated list of integers or integer ranges
7806  *                         (values of affinity mask)
7807  *
7808  * Implementation-specific field types can be added
7809  * If a type is unknown, print "undefined"
7810 */
7811 
7812 // Structure holding the short name, long name, and corresponding data type
7813 // for snprintf.  A table of these will represent the entire valid keyword
7814 // field types.
7815 typedef struct kmp_affinity_format_field_t {
7816   char short_name; // from spec e.g., L -> thread level
7817   const char *long_name; // from spec thread_level -> thread level
  char field_format; // data type for snprintf (typically 'd' for integers
  // or 's' for strings)
7820 } kmp_affinity_format_field_t;
7821 
7822 static const kmp_affinity_format_field_t __kmp_affinity_format_table[] = {
7823 #if KMP_AFFINITY_SUPPORTED
7824     {'A', "thread_affinity", 's'},
7825 #endif
7826     {'t', "team_num", 'd'},
7827     {'T', "num_teams", 'd'},
7828     {'L', "nesting_level", 'd'},
7829     {'n', "thread_num", 'd'},
7830     {'N', "num_threads", 'd'},
7831     {'a', "ancestor_tnum", 'd'},
7832     {'H', "host", 's'},
7833     {'P', "process_id", 'd'},
7834     {'i', "native_thread_id", 'd'}};
7835 
// Returns the number of characters needed to hold the field
7837 static int __kmp_aux_capture_affinity_field(int gtid, const kmp_info_t *th,
7838                                             const char **ptr,
7839                                             kmp_str_buf_t *field_buffer) {
7840   int rc, format_index, field_value;
7841   const char *width_left, *width_right;
7842   bool pad_zeros, right_justify, parse_long_name, found_valid_name;
7843   static const int FORMAT_SIZE = 20;
7844   char format[FORMAT_SIZE] = {0};
7845   char absolute_short_name = 0;
7846 
7847   KMP_DEBUG_ASSERT(gtid >= 0);
7848   KMP_DEBUG_ASSERT(th);
7849   KMP_DEBUG_ASSERT(**ptr == '%');
7850   KMP_DEBUG_ASSERT(field_buffer);
7851 
7852   __kmp_str_buf_clear(field_buffer);
7853 
7854   // Skip the initial %
7855   (*ptr)++;
7856 
7857   // Check for %% first
7858   if (**ptr == '%') {
7859     __kmp_str_buf_cat(field_buffer, "%", 1);
7860     (*ptr)++; // skip over the second %
7861     return 1;
7862   }
7863 
7864   // Parse field modifiers if they are present
7865   pad_zeros = false;
7866   if (**ptr == '0') {
7867     pad_zeros = true;
7868     (*ptr)++; // skip over 0
7869   }
7870   right_justify = false;
7871   if (**ptr == '.') {
7872     right_justify = true;
7873     (*ptr)++; // skip over .
7874   }
7875   // Parse width of field: [width_left, width_right)
7876   width_left = width_right = NULL;
7877   if (**ptr >= '0' && **ptr <= '9') {
7878     width_left = *ptr;
7879     SKIP_DIGITS(*ptr);
7880     width_right = *ptr;
7881   }
7882 
7883   // Create the format for KMP_SNPRINTF based on flags parsed above
7884   format_index = 0;
7885   format[format_index++] = '%';
7886   if (!right_justify)
7887     format[format_index++] = '-';
7888   if (pad_zeros)
7889     format[format_index++] = '0';
7890   if (width_left && width_right) {
7891     int i = 0;
    // Only allow widths of up to 8 digits.
    // This also prevents overflowing the format buffer
7894     while (i < 8 && width_left < width_right) {
7895       format[format_index++] = *width_left;
7896       width_left++;
7897       i++;
7898     }
7899   }
7900 
7901   // Parse a name (long or short)
7902   // Canonicalize the name into absolute_short_name
7903   found_valid_name = false;
7904   parse_long_name = (**ptr == '{');
7905   if (parse_long_name)
7906     (*ptr)++; // skip initial left brace
7907   for (size_t i = 0; i < sizeof(__kmp_affinity_format_table) /
7908                              sizeof(__kmp_affinity_format_table[0]);
7909        ++i) {
7910     char short_name = __kmp_affinity_format_table[i].short_name;
7911     const char *long_name = __kmp_affinity_format_table[i].long_name;
7912     char field_format = __kmp_affinity_format_table[i].field_format;
7913     if (parse_long_name) {
7914       int length = KMP_STRLEN(long_name);
7915       if (strncmp(*ptr, long_name, length) == 0) {
7916         found_valid_name = true;
7917         (*ptr) += length; // skip the long name
7918       }
7919     } else if (**ptr == short_name) {
7920       found_valid_name = true;
7921       (*ptr)++; // skip the short name
7922     }
7923     if (found_valid_name) {
7924       format[format_index++] = field_format;
7925       format[format_index++] = '\0';
7926       absolute_short_name = short_name;
7927       break;
7928     }
7929   }
7930   if (parse_long_name) {
7931     if (**ptr != '}') {
7932       absolute_short_name = 0;
7933     } else {
7934       (*ptr)++; // skip over the right brace
7935     }
7936   }
7937 
7938   // Attempt to fill the buffer with the requested
7939   // value using snprintf within __kmp_str_buf_print()
7940   switch (absolute_short_name) {
7941   case 't':
7942     rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_team_num());
7943     break;
7944   case 'T':
7945     rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_num_teams());
7946     break;
7947   case 'L':
7948     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_level);
7949     break;
7950   case 'n':
7951     rc = __kmp_str_buf_print(field_buffer, format, __kmp_tid_from_gtid(gtid));
7952     break;
7953   case 'H': {
7954     static const int BUFFER_SIZE = 256;
7955     char buf[BUFFER_SIZE];
7956     __kmp_expand_host_name(buf, BUFFER_SIZE);
7957     rc = __kmp_str_buf_print(field_buffer, format, buf);
7958   } break;
7959   case 'P':
7960     rc = __kmp_str_buf_print(field_buffer, format, getpid());
7961     break;
7962   case 'i':
7963     rc = __kmp_str_buf_print(field_buffer, format, __kmp_gettid());
7964     break;
7965   case 'N':
7966     rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_nproc);
7967     break;
7968   case 'a':
7969     field_value =
7970         __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1);
7971     rc = __kmp_str_buf_print(field_buffer, format, field_value);
7972     break;
7973 #if KMP_AFFINITY_SUPPORTED
7974   case 'A': {
7975     kmp_str_buf_t buf;
7976     __kmp_str_buf_init(&buf);
7977     __kmp_affinity_str_buf_mask(&buf, th->th.th_affin_mask);
7978     rc = __kmp_str_buf_print(field_buffer, format, buf.str);
7979     __kmp_str_buf_free(&buf);
7980   } break;
7981 #endif
7982   default:
    // According to the spec, if an implementation has no info for a field
    // type, then "undefined" is printed
7985     rc = __kmp_str_buf_print(field_buffer, "%s", "undefined");
7986     // Skip the field
7987     if (parse_long_name) {
7988       SKIP_TOKEN(*ptr);
7989       if (**ptr == '}')
7990         (*ptr)++;
7991     } else {
7992       (*ptr)++;
7993     }
7994   }
7995 
7996   KMP_ASSERT(format_index <= FORMAT_SIZE);
7997   return rc;
7998 }
7999 
8000 /*
8001  * Return number of characters needed to hold the affinity string
8002  * (not including null byte character)
8003  * The resultant string is printed to buffer, which the caller can then
8004  * handle afterwards
8005 */
8006 size_t __kmp_aux_capture_affinity(int gtid, const char *format,
8007                                   kmp_str_buf_t *buffer) {
8008   const char *parse_ptr;
8009   size_t retval;
8010   const kmp_info_t *th;
8011   kmp_str_buf_t field;
8012 
8013   KMP_DEBUG_ASSERT(buffer);
8014   KMP_DEBUG_ASSERT(gtid >= 0);
8015 
8016   __kmp_str_buf_init(&field);
8017   __kmp_str_buf_clear(buffer);
8018 
8019   th = __kmp_threads[gtid];
8020   retval = 0;
8021 
8022   // If format is NULL or zero-length string, then we use
8023   // affinity-format-var ICV
8024   parse_ptr = format;
8025   if (parse_ptr == NULL || *parse_ptr == '\0') {
8026     parse_ptr = __kmp_affinity_format;
8027   }
8028   KMP_DEBUG_ASSERT(parse_ptr);
8029 
8030   while (*parse_ptr != '\0') {
8031     // Parse a field
8032     if (*parse_ptr == '%') {
8033       // Put field in the buffer
8034       int rc = __kmp_aux_capture_affinity_field(gtid, th, &parse_ptr, &field);
8035       __kmp_str_buf_catbuf(buffer, &field);
8036       retval += rc;
8037     } else {
8038       // Put literal character in buffer
8039       __kmp_str_buf_cat(buffer, parse_ptr, 1);
8040       retval++;
8041       parse_ptr++;
8042     }
8043   }
8044   __kmp_str_buf_free(&field);
8045   return retval;
8046 }
8047 
8048 // Displays the affinity string to stdout
8049 void __kmp_aux_display_affinity(int gtid, const char *format) {
8050   kmp_str_buf_t buf;
8051   __kmp_str_buf_init(&buf);
8052   __kmp_aux_capture_affinity(gtid, format, &buf);
8053   __kmp_fprintf(kmp_out, "%s" KMP_END_OF_LINE, buf.str);
8054   __kmp_str_buf_free(&buf);
8055 }
8056 
8057 /* ------------------------------------------------------------------------ */
8058 
8059 void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid) {
8060   int blocktime = arg; /* argument is in milliseconds */
8061 #if KMP_USE_MONITOR
8062   int bt_intervals;
8063 #endif
8064   int bt_set;
8065 
8066   __kmp_save_internal_controls(thread);
8067 
8068   /* Normalize and set blocktime for the teams */
8069   if (blocktime < KMP_MIN_BLOCKTIME)
8070     blocktime = KMP_MIN_BLOCKTIME;
8071   else if (blocktime > KMP_MAX_BLOCKTIME)
8072     blocktime = KMP_MAX_BLOCKTIME;
8073 
8074   set__blocktime_team(thread->th.th_team, tid, blocktime);
8075   set__blocktime_team(thread->th.th_serial_team, 0, blocktime);
8076 
8077 #if KMP_USE_MONITOR
8078   /* Calculate and set blocktime intervals for the teams */
8079   bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups);
8080 
8081   set__bt_intervals_team(thread->th.th_team, tid, bt_intervals);
8082   set__bt_intervals_team(thread->th.th_serial_team, 0, bt_intervals);
8083 #endif
8084 
  /* Record that blocktime was explicitly set for these teams */
8086   bt_set = TRUE;
8087 
8088   set__bt_set_team(thread->th.th_team, tid, bt_set);
8089   set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
8090 #if KMP_USE_MONITOR
8091   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, "
8092                 "bt_intervals=%d, monitor_updates=%d\n",
8093                 __kmp_gtid_from_tid(tid, thread->th.th_team),
8094                 thread->th.th_team->t.t_id, tid, blocktime, bt_intervals,
8095                 __kmp_monitor_wakeups));
8096 #else
8097   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d\n",
8098                 __kmp_gtid_from_tid(tid, thread->th.th_team),
8099                 thread->th.th_team->t.t_id, tid, blocktime));
8100 #endif
8101 }
8102 
8103 void __kmp_aux_set_defaults(char const *str, int len) {
8104   if (!__kmp_init_serial) {
8105     __kmp_serial_initialize();
8106   }
8107   __kmp_env_initialize(str);
8108 
8109   if (__kmp_settings || __kmp_display_env || __kmp_display_env_verbose) {
8110     __kmp_env_print();
8111   }
8112 } // __kmp_aux_set_defaults
8113 
8114 /* ------------------------------------------------------------------------ */
8115 /* internal fast reduction routines */
8116 
8117 PACKED_REDUCTION_METHOD_T
8118 __kmp_determine_reduction_method(
8119     ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
8120     void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
8121     kmp_critical_name *lck) {
8122 
  // Default reduction method: critical construct (lck != NULL, as in current
  // PAROPT).
  // If (reduce_data != NULL && reduce_func != NULL): the tree-reduction method
  // can be selected by the RTL.
  // If loc->flags contains KMP_IDENT_ATOMIC_REDUCE, the atomic reduce method
  // can be selected by the RTL.
  // Finally, it is up to the OpenMP RTL to decide which of the methods
  // generated by PAROPT to select.
8131 
8132   PACKED_REDUCTION_METHOD_T retval;
8133 
8134   int team_size;
8135 
8136   KMP_DEBUG_ASSERT(loc); // it would be nice to test ( loc != 0 )
8137   KMP_DEBUG_ASSERT(lck); // it would be nice to test ( lck != 0 )
8138 
8139 #define FAST_REDUCTION_ATOMIC_METHOD_GENERATED                                 \
8140   ((loc->flags & (KMP_IDENT_ATOMIC_REDUCE)) == (KMP_IDENT_ATOMIC_REDUCE))
8141 #define FAST_REDUCTION_TREE_METHOD_GENERATED ((reduce_data) && (reduce_func))
8142 
8143   retval = critical_reduce_block;
8144 
  // An alternative way of getting the team size (with one dynamic
  // dereference) is slower
8146   team_size = __kmp_get_team_num_threads(global_tid);
8147   if (team_size == 1) {
8148 
8149     retval = empty_reduce_block;
8150 
8151   } else {
8152 
8153     int atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
8154 
8155 #if KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                   \
8156     KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64
8157 
8158 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
8159     KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD
8160 
8161     int teamsize_cutoff = 4;
8162 
8163 #if KMP_MIC_SUPPORTED
8164     if (__kmp_mic_type != non_mic) {
8165       teamsize_cutoff = 8;
8166     }
8167 #endif
8168     int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
8169     if (tree_available) {
8170       if (team_size <= teamsize_cutoff) {
8171         if (atomic_available) {
8172           retval = atomic_reduce_block;
8173         }
8174       } else {
8175         retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
8176       }
8177     } else if (atomic_available) {
8178       retval = atomic_reduce_block;
8179     }
8180 #else
8181 #error "Unknown or unsupported OS"
8182 #endif // KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||
8183        // KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD
8184 
8185 #elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS
8186 
8187 #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS || KMP_OS_HURD
8188 
8189     // basic tuning
8190 
8191     if (atomic_available) {
8192       if (num_vars <= 2) { // && ( team_size <= 8 ) due to false-sharing ???
8193         retval = atomic_reduce_block;
8194       }
8195     } // otherwise: use critical section
8196 
8197 #elif KMP_OS_DARWIN
8198 
8199     int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
8200     if (atomic_available && (num_vars <= 3)) {
8201       retval = atomic_reduce_block;
8202     } else if (tree_available) {
8203       if ((reduce_size > (9 * sizeof(kmp_real64))) &&
8204           (reduce_size < (2000 * sizeof(kmp_real64)))) {
8205         retval = TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER;
8206       }
8207     } // otherwise: use critical section
8208 
8209 #else
8210 #error "Unknown or unsupported OS"
8211 #endif
8212 
8213 #else
8214 #error "Unknown or unsupported architecture"
8215 #endif
8216   }
8217 
8218   // KMP_FORCE_REDUCTION
8219 
8220   // If the team is serialized (team_size == 1), ignore the forced reduction
8221   // method and stay with the unsynchronized method (empty_reduce_block)
8222   if (__kmp_force_reduction_method != reduction_method_not_defined &&
8223       team_size != 1) {
8224 
8225     PACKED_REDUCTION_METHOD_T forced_retval = critical_reduce_block;
8226 
8227     int atomic_available, tree_available;
8228 
8229     switch ((forced_retval = __kmp_force_reduction_method)) {
8230     case critical_reduce_block:
8231       KMP_ASSERT(lck); // lck should be != 0
8232       break;
8233 
8234     case atomic_reduce_block:
8235       atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
8236       if (!atomic_available) {
8237         KMP_WARNING(RedMethodNotSupported, "atomic");
8238         forced_retval = critical_reduce_block;
8239       }
8240       break;
8241 
8242     case tree_reduce_block:
8243       tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
8244       if (!tree_available) {
8245         KMP_WARNING(RedMethodNotSupported, "tree");
8246         forced_retval = critical_reduce_block;
8247       } else {
8248 #if KMP_FAST_REDUCTION_BARRIER
8249         forced_retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
8250 #endif
8251       }
8252       break;
8253 
8254     default:
8255       KMP_ASSERT(0); // "unsupported method specified"
8256     }
8257 
8258     retval = forced_retval;
8259   }
8260 
8261   KA_TRACE(10, ("reduction method selected=%08x\n", retval));
8262 
8263 #undef FAST_REDUCTION_TREE_METHOD_GENERATED
8264 #undef FAST_REDUCTION_ATOMIC_METHOD_GENERATED
8265 
8266   return (retval);
8267 }
// This function is for testing the set/get/determine reduction method
8269 kmp_int32 __kmp_get_reduce_method(void) {
8270   return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
8271 }
8272 
8273 // Soft pause sets up threads to ignore blocktime and just go to sleep.
8274 // Spin-wait code checks __kmp_pause_status and reacts accordingly.
8275 void __kmp_soft_pause() { __kmp_pause_status = kmp_soft_paused; }
8276 
8277 // Hard pause shuts down the runtime completely.  Resume happens naturally when
8278 // OpenMP is used subsequently.
8279 void __kmp_hard_pause() {
8280   __kmp_pause_status = kmp_hard_paused;
8281   __kmp_internal_end_thread(-1);
8282 }
8283 
8284 // Soft resume sets __kmp_pause_status, and wakes up all threads.
8285 void __kmp_resume_if_soft_paused() {
8286   if (__kmp_pause_status == kmp_soft_paused) {
8287     __kmp_pause_status = kmp_not_paused;
8288 
8289     for (int gtid = 1; gtid < __kmp_threads_capacity; ++gtid) {
8290       kmp_info_t *thread = __kmp_threads[gtid];
8291       if (thread) { // Wake it if sleeping
8292         kmp_flag_64 fl(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
8293         if (fl.is_sleeping())
8294           fl.resume(gtid);
8295         else if (__kmp_try_suspend_mx(thread)) { // got suspend lock
8296           __kmp_unlock_suspend_mx(thread); // unlock it; it won't sleep
8297         } else { // thread holds the lock and may sleep soon
8298           do { // until either the thread sleeps, or we can get the lock
8299             if (fl.is_sleeping()) {
8300               fl.resume(gtid);
8301               break;
8302             } else if (__kmp_try_suspend_mx(thread)) {
8303               __kmp_unlock_suspend_mx(thread);
8304               break;
8305             }
8306           } while (1);
8307         }
8308       }
8309     }
8310   }
8311 }
8312 
8313 // This function is called via __kmpc_pause_resource. Returns 0 if successful.
8314 // TODO: add warning messages
8315 int __kmp_pause_resource(kmp_pause_status_t level) {
8316   if (level == kmp_not_paused) { // requesting resume
8317     if (__kmp_pause_status == kmp_not_paused) {
8318       // error message about runtime not being paused, so can't resume
8319       return 1;
8320     } else {
8321       KMP_DEBUG_ASSERT(__kmp_pause_status == kmp_soft_paused ||
8322                        __kmp_pause_status == kmp_hard_paused);
8323       __kmp_pause_status = kmp_not_paused;
8324       return 0;
8325     }
8326   } else if (level == kmp_soft_paused) { // requesting soft pause
8327     if (__kmp_pause_status != kmp_not_paused) {
8328       // error message about already being paused
8329       return 1;
8330     } else {
8331       __kmp_soft_pause();
8332       return 0;
8333     }
8334   } else if (level == kmp_hard_paused) { // requesting hard pause
8335     if (__kmp_pause_status != kmp_not_paused) {
8336       // error message about already being paused
8337       return 1;
8338     } else {
8339       __kmp_hard_pause();
8340       return 0;
8341     }
8342   } else {
8343     // error message about invalid level
8344     return 1;
8345   }
8346 }
8347 
8349 void __kmp_omp_display_env(int verbose) {
8350   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
8351   if (__kmp_init_serial == 0)
8352     __kmp_do_serial_initialize();
8353   __kmp_display_env_impl(!verbose, verbose);
8354   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
8355 }
8356