1 /*
2  * kmp_runtime.cpp -- KPTS runtime support library
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 //                     The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "kmp.h"
15 #include "kmp_affinity.h"
16 #include "kmp_atomic.h"
17 #include "kmp_environment.h"
18 #include "kmp_error.h"
19 #include "kmp_i18n.h"
20 #include "kmp_io.h"
21 #include "kmp_itt.h"
22 #include "kmp_settings.h"
23 #include "kmp_stats.h"
24 #include "kmp_str.h"
25 #include "kmp_wait_release.h"
26 #include "kmp_wrapper_getpid.h"
27 #include "kmp_dispatch.h"
28 #if KMP_USE_HIER_SCHED
29 #include "kmp_dispatch_hier.h"
30 #endif
31 
32 #if OMPT_SUPPORT
33 #include "ompt-specific.h"
34 #endif
35 
36 /* these are temporary issues to be dealt with */
37 #define KMP_USE_PRCTL 0
38 
39 #if KMP_OS_WINDOWS
40 #include <process.h>
41 #endif
42 
43 #include "tsan_annotations.h"
44 
45 #if defined(KMP_GOMP_COMPAT)
46 char const __kmp_version_alt_comp[] =
47     KMP_VERSION_PREFIX "alternative compiler support: yes";
48 #endif /* defined(KMP_GOMP_COMPAT) */
49 
50 char const __kmp_version_omp_api[] = KMP_VERSION_PREFIX "API version: "
51 #if OMP_50_ENABLED
52                                                         "5.0 (201611)";
53 #elif OMP_45_ENABLED
54                                                         "4.5 (201511)";
55 #elif OMP_40_ENABLED
56                                                         "4.0 (201307)";
57 #else
58                                                         "3.1 (201107)";
59 #endif
60 
61 #ifdef KMP_DEBUG
62 char const __kmp_version_lock[] =
63     KMP_VERSION_PREFIX "lock type: run time selectable";
64 #endif /* KMP_DEBUG */
65 
66 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
67 
68 /* ------------------------------------------------------------------------ */
69 
70 #if KMP_USE_MONITOR
71 kmp_info_t __kmp_monitor;
72 #endif
73 
74 /* Forward declarations */
75 
76 void __kmp_cleanup(void);
77 
78 static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *, int tid,
79                                   int gtid);
80 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
81                                   kmp_internal_control_t *new_icvs,
82                                   ident_t *loc);
83 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
84 static void __kmp_partition_places(kmp_team_t *team,
85                                    int update_master_only = 0);
86 #endif
87 static void __kmp_do_serial_initialize(void);
88 void __kmp_fork_barrier(int gtid, int tid);
89 void __kmp_join_barrier(int gtid);
90 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
91                           kmp_internal_control_t *new_icvs, ident_t *loc);
92 
93 #ifdef USE_LOAD_BALANCE
94 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc);
95 #endif
96 
97 static int __kmp_expand_threads(int nNeed);
98 #if KMP_OS_WINDOWS
99 static int __kmp_unregister_root_other_thread(int gtid);
100 #endif
101 static void __kmp_unregister_library(void); // called by __kmp_internal_end()
102 static void __kmp_reap_thread(kmp_info_t *thread, int is_root);
103 kmp_info_t *__kmp_thread_pool_insert_pt = NULL;
104 
105 /* Calculate the identifier of the current thread */
106 /* Fast (and somewhat portable) way to get a unique identifier for the
107    executing thread. Returns KMP_GTID_DNE if we haven't been assigned a gtid. */
108 int __kmp_get_global_thread_id() {
109   int i;
110   kmp_info_t **other_threads;
111   size_t stack_data;
112   char *stack_addr;
113   size_t stack_size;
114   char *stack_base;
115 
116   KA_TRACE(
117       1000,
118       ("*** __kmp_get_global_thread_id: entering, nproc=%d  all_nproc=%d\n",
119        __kmp_nth, __kmp_all_nth));
120 
121   /* JPH - to handle the case where __kmpc_end(0) is called immediately prior
122      to a parallel region, this returns KMP_GTID_DNE to force the caller to run
123      serial_initialize. KMP_GTID_DNE then has to be handled at all call sites,
124      or else __kmp_init_gtid must be guaranteed, for this to work. */
125 
126   if (!TCR_4(__kmp_init_gtid))
127     return KMP_GTID_DNE;
128 
129 #ifdef KMP_TDATA_GTID
130   if (TCR_4(__kmp_gtid_mode) >= 3) {
131     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using TDATA\n"));
132     return __kmp_gtid;
133   }
134 #endif
135   if (TCR_4(__kmp_gtid_mode) >= 2) {
136     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using keyed TLS\n"));
137     return __kmp_gtid_get_specific();
138   }
139   KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using internal alg.\n"));
140 
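  /* Fall back to the internal algorithm: use the address of a local variable
     as the current stack position and search the registered threads for the
     stack that contains it. */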
141   stack_addr = (char *)&stack_data;
142   other_threads = __kmp_threads;
143 
144   /* ATT: The code below is a source of potential bugs due to unsynchronized
145      access to __kmp_threads array. For example:
146      1. Current thread loads other_threads[i] to thr and checks it, it is
147         non-NULL.
148      2. Current thread is suspended by OS.
149      3. Another thread unregisters and finishes (debug versions of free()
150         may fill memory with something like 0xEF).
151      4. Current thread is resumed.
152      5. Current thread reads junk from *thr.
153      TODO: Fix it.  --ln  */
154 
155   for (i = 0; i < __kmp_threads_capacity; i++) {
156 
157     kmp_info_t *thr = (kmp_info_t *)TCR_SYNC_PTR(other_threads[i]);
158     if (!thr)
159       continue;
160 
161     stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
162     stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);
163 
164     /* stack grows down -- search through all of the active threads */
165 
166     if (stack_addr <= stack_base) {
167       size_t stack_diff = stack_base - stack_addr;
168 
169       if (stack_diff <= stack_size) {
170         /* The only way we can be closer than the allocated */
171         /* stack size is if we are running on this thread. */
172         KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == i);
173         return i;
174       }
175     }
176   }
177 
178   /* use __kmp_gtid_get_specific() to try to determine our gtid */
179   KA_TRACE(1000,
180            ("*** __kmp_get_global_thread_id: internal alg. failed to find "
181             "thread, using TLS\n"));
182   i = __kmp_gtid_get_specific();
183 
184   /*fprintf( stderr, "=== %d\n", i );  */ /* GROO */
185 
186   /* if we haven't been assigned a gtid, then return the error code */
187   if (i < 0)
188     return i;
189 
190   /* dynamically updated stack window for uber threads to avoid get_specific
191      call */
192   if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) {
193     KMP_FATAL(StackOverflow, i);
194   }
195 
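  /* Refine the recorded stack window of this uber thread so that it covers the
     current stack address: raise the recorded base if we are above it,
     otherwise extend the recorded size downward to reach the current address. */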
196   stack_base = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
197   if (stack_addr > stack_base) {
198     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
199     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
200             other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr -
201                 stack_base);
202   } else {
203     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
204             stack_base - stack_addr);
205   }
206 
207   /* Reprint stack bounds for ubermaster since they have been refined */
208   if (__kmp_storage_map) {
209     char *stack_end = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
210     char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
211     __kmp_print_storage_map_gtid(i, stack_beg, stack_end,
212                                  other_threads[i]->th.th_info.ds.ds_stacksize,
213                                  "th_%d stack (refinement)", i);
214   }
215   return i;
216 }
217 
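/* Like __kmp_get_global_thread_id(), but if the calling thread has not been
   assigned a gtid yet, register it as a new root (performing serial
   initialization first if necessary) so that a valid gtid can be returned. */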
218 int __kmp_get_global_thread_id_reg() {
219   int gtid;
220 
221   if (!__kmp_init_serial) {
222     gtid = KMP_GTID_DNE;
223   } else
224 #ifdef KMP_TDATA_GTID
225       if (TCR_4(__kmp_gtid_mode) >= 3) {
226     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using TDATA\n"));
227     gtid = __kmp_gtid;
228   } else
229 #endif
230       if (TCR_4(__kmp_gtid_mode) >= 2) {
231     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using keyed TLS\n"));
232     gtid = __kmp_gtid_get_specific();
233   } else {
234     KA_TRACE(1000,
235              ("*** __kmp_get_global_thread_id_reg: using internal alg.\n"));
236     gtid = __kmp_get_global_thread_id();
237   }
238 
239   /* we must be a new uber master sibling thread */
240   if (gtid == KMP_GTID_DNE) {
241     KA_TRACE(10,
242              ("__kmp_get_global_thread_id_reg: Encountered new root thread. "
243               "Registering a new gtid.\n"));
244     __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
245     if (!__kmp_init_serial) {
246       __kmp_do_serial_initialize();
247       gtid = __kmp_gtid_get_specific();
248     } else {
249       gtid = __kmp_register_root(FALSE);
250     }
251     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
252     /*__kmp_printf( "+++ %d\n", gtid ); */ /* GROO */
253   }
254 
255   KMP_DEBUG_ASSERT(gtid >= 0);
256 
257   return gtid;
258 }
259 
260 /* caller must hold forkjoin_lock */
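/* When extensive checking is enabled, verify that the stack of thread 'th'
   does not overlap the stack of any other registered thread; abort with a
   fatal error if it does. */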
261 void __kmp_check_stack_overlap(kmp_info_t *th) {
262   int f;
263   char *stack_beg = NULL;
264   char *stack_end = NULL;
265   int gtid;
266 
267   KA_TRACE(10, ("__kmp_check_stack_overlap: called\n"));
268   if (__kmp_storage_map) {
269     stack_end = (char *)th->th.th_info.ds.ds_stackbase;
270     stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
271 
272     gtid = __kmp_gtid_from_thread(th);
273 
274     if (gtid == KMP_GTID_MONITOR) {
275       __kmp_print_storage_map_gtid(
276           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
277           "th_%s stack (%s)", "mon",
278           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
279     } else {
280       __kmp_print_storage_map_gtid(
281           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
282           "th_%d stack (%s)", gtid,
283           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
284     }
285   }
286 
287   /* No point in checking ubermaster threads since they use refinement and
288    * cannot overlap */
289   gtid = __kmp_gtid_from_thread(th);
290   if (__kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) {
291     KA_TRACE(10,
292              ("__kmp_check_stack_overlap: performing extensive checking\n"));
293     if (stack_beg == NULL) {
294       stack_end = (char *)th->th.th_info.ds.ds_stackbase;
295       stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
296     }
297 
298     for (f = 0; f < __kmp_threads_capacity; f++) {
299       kmp_info_t *f_th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[f]);
300 
301       if (f_th && f_th != th) {
302         char *other_stack_end =
303             (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase);
304         char *other_stack_beg =
305             other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize);
306         if ((stack_beg > other_stack_beg && stack_beg < other_stack_end) ||
307             (stack_end > other_stack_beg && stack_end < other_stack_end)) {
308 
309           /* Print the other stack values before the abort */
310           if (__kmp_storage_map)
311             __kmp_print_storage_map_gtid(
312                 -1, other_stack_beg, other_stack_end,
313                 (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize),
314                 "th_%d stack (overlapped)", __kmp_gtid_from_thread(f_th));
315 
316           __kmp_fatal(KMP_MSG(StackOverlap), KMP_HNT(ChangeStackLimit),
317                       __kmp_msg_null);
318         }
319       }
320     }
321   }
322   KA_TRACE(10, ("__kmp_check_stack_overlap: returning\n"));
323 }
324 
325 /* ------------------------------------------------------------------------ */
326 
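/* Park the calling thread in a yield loop; 'done' is never set by the library,
   so this effectively never returns. Used on the abort paths below. */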
327 void __kmp_infinite_loop(void) {
328   static int done = FALSE;
329 
330   while (!done) {
331     KMP_YIELD(1);
332   }
333 }
334 
335 #define MAX_MESSAGE 512
336 
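/* Print a single storage-map line describing the address range [p1, p2] of the
   given size, tagged with the printf-style format and gtid. When
   KMP_PRINT_DATA_PLACEMENT is enabled, also report the memory node(s) backing
   the range. */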
337 void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size,
338                                   char const *format, ...) {
339   char buffer[MAX_MESSAGE];
340   va_list ap;
341 
342   va_start(ap, format);
343   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP storage map: %p %p%8lu %s\n", p1,
344                p2, (unsigned long)size, format);
345   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
346   __kmp_vprintf(kmp_err, buffer, ap);
347 #if KMP_PRINT_DATA_PLACEMENT
348   int node;
349   if (gtid >= 0) {
350     if (p1 <= p2 && (char *)p2 - (char *)p1 == size) {
351       if (__kmp_storage_map_verbose) {
352         node = __kmp_get_host_node(p1);
353         if (node < 0) /* doesn't work, so don't try this next time */
354           __kmp_storage_map_verbose = FALSE;
355         else {
356           char *last;
357           int lastNode;
358           int localProc = __kmp_get_cpu_from_gtid(gtid);
359 
360           const int page_size = KMP_GET_PAGE_SIZE();
361 
362           p1 = (void *)((size_t)p1 & ~((size_t)page_size - 1));
363           p2 = (void *)(((size_t)p2 - 1) & ~((size_t)page_size - 1));
364           if (localProc >= 0)
365             __kmp_printf_no_lock("  GTID %d localNode %d\n", gtid,
366                                  localProc >> 1);
367           else
368             __kmp_printf_no_lock("  GTID %d\n", gtid);
369 #if KMP_USE_PRCTL
370           /* The more elaborate format is disabled for now because of the prctl
371            * hanging bug. */
372           do {
373             last = p1;
374             lastNode = node;
375             /* This loop collates adjacent pages with the same host node. */
376             do {
377               (char *)p1 += page_size;
378             } while (p1 <= p2 && (node = __kmp_get_host_node(p1)) == lastNode);
379             __kmp_printf_no_lock("    %p-%p memNode %d\n", last, (char *)p1 - 1,
380                                  lastNode);
381           } while (p1 <= p2);
382 #else
383           __kmp_printf_no_lock("    %p-%p memNode %d\n", p1,
384                                (char *)p1 + (page_size - 1),
385                                __kmp_get_host_node(p1));
386           if (p1 < p2) {
387             __kmp_printf_no_lock("    %p-%p memNode %d\n", p2,
388                                  (char *)p2 + (page_size - 1),
389                                  __kmp_get_host_node(p2));
390           }
391 #endif
392         }
393       }
394     } else
395       __kmp_printf_no_lock("  %s\n", KMP_I18N_STR(StorageMapWarning));
396   }
397 #endif /* KMP_PRINT_DATA_PLACEMENT */
398   __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
399 }
400 
401 void __kmp_warn(char const *format, ...) {
402   char buffer[MAX_MESSAGE];
403   va_list ap;
404 
405   if (__kmp_generate_warnings == kmp_warnings_off) {
406     return;
407   }
408 
409   va_start(ap, format);
410 
411   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP warning: %s\n", format);
412   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
413   __kmp_vprintf(kmp_err, buffer, ap);
414   __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
415 
416   va_end(ap);
417 }
418 
419 void __kmp_abort_process() {
420   // Later threads may stall here, but that's ok because abort() will kill them.
421   __kmp_acquire_bootstrap_lock(&__kmp_exit_lock);
422 
423   if (__kmp_debug_buf) {
424     __kmp_dump_debug_buffer();
425   }
426 
427   if (KMP_OS_WINDOWS) {
428     // Let other threads know of abnormal termination and prevent deadlock
429     // if abort happened during library initialization or shutdown
430     __kmp_global.g.g_abort = SIGABRT;
431 
432     /* On Windows* OS, abort() by default causes a pop-up error box, which
433        stalls nightly testing. Unfortunately, we cannot reliably suppress pop-up
434        error boxes. _set_abort_behavior() works well, but this function is not
435        available in VS7 (this is not a problem for the DLL, but it is a problem
436        for the static OpenMP RTL). SetErrorMode (and so, the timelimit utility)
437        does not help, at least in some versions of the MS C RTL.
438 
439        It seems the following sequence is the only way to simulate abort() and
440        avoid the pop-up error box. */
441     raise(SIGABRT);
442     _exit(3); // Just in case, if signal ignored, exit anyway.
443   } else {
444     abort();
445   }
446 
447   __kmp_infinite_loop();
448   __kmp_release_bootstrap_lock(&__kmp_exit_lock);
449 
450 } // __kmp_abort_process
451 
452 void __kmp_abort_thread(void) {
453   // TODO: Eliminate g_abort global variable and this function.
454   // In case of abort just call abort(), it will kill all the threads.
455   __kmp_infinite_loop();
456 } // __kmp_abort_thread
457 
458 /* Print out the storage map for the major kmp_info_t thread data structures
459    that are allocated together. */
460 
461 static void __kmp_print_thread_storage_map(kmp_info_t *thr, int gtid) {
462   __kmp_print_storage_map_gtid(gtid, thr, thr + 1, sizeof(kmp_info_t), "th_%d",
463                                gtid);
464 
465   __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team,
466                                sizeof(kmp_desc_t), "th_%d.th_info", gtid);
467 
468   __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head,
469                                sizeof(kmp_local_t), "th_%d.th_local", gtid);
470 
471   __kmp_print_storage_map_gtid(
472       gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier],
473       sizeof(kmp_balign_t) * bs_last_barrier, "th_%d.th_bar", gtid);
474 
475   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier],
476                                &thr->th.th_bar[bs_plain_barrier + 1],
477                                sizeof(kmp_balign_t), "th_%d.th_bar[plain]",
478                                gtid);
479 
480   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier],
481                                &thr->th.th_bar[bs_forkjoin_barrier + 1],
482                                sizeof(kmp_balign_t), "th_%d.th_bar[forkjoin]",
483                                gtid);
484 
485 #if KMP_FAST_REDUCTION_BARRIER
486   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier],
487                                &thr->th.th_bar[bs_reduction_barrier + 1],
488                                sizeof(kmp_balign_t), "th_%d.th_bar[reduction]",
489                                gtid);
490 #endif // KMP_FAST_REDUCTION_BARRIER
491 }
492 
493 /* Print out the storage map for the major kmp_team_t team data structures
494    that are allocated together. */
495 
496 static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
497                                          int team_id, int num_thr) {
498   int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
499   __kmp_print_storage_map_gtid(-1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
500                                header, team_id);
501 
502   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
503                                &team->t.t_bar[bs_last_barrier],
504                                sizeof(kmp_balign_team_t) * bs_last_barrier,
505                                "%s_%d.t_bar", header, team_id);
506 
507   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
508                                &team->t.t_bar[bs_plain_barrier + 1],
509                                sizeof(kmp_balign_team_t), "%s_%d.t_bar[plain]",
510                                header, team_id);
511 
512   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
513                                &team->t.t_bar[bs_forkjoin_barrier + 1],
514                                sizeof(kmp_balign_team_t),
515                                "%s_%d.t_bar[forkjoin]", header, team_id);
516 
517 #if KMP_FAST_REDUCTION_BARRIER
518   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
519                                &team->t.t_bar[bs_reduction_barrier + 1],
520                                sizeof(kmp_balign_team_t),
521                                "%s_%d.t_bar[reduction]", header, team_id);
522 #endif // KMP_FAST_REDUCTION_BARRIER
523 
524   __kmp_print_storage_map_gtid(
525       -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
526       sizeof(kmp_disp_t) * num_thr, "%s_%d.t_dispatch", header, team_id);
527 
528   __kmp_print_storage_map_gtid(
529       -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
530       sizeof(kmp_info_t *) * num_thr, "%s_%d.t_threads", header, team_id);
531 
532   __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
533                                &team->t.t_disp_buffer[num_disp_buff],
534                                sizeof(dispatch_shared_info_t) * num_disp_buff,
535                                "%s_%d.t_disp_buffer", header, team_id);
536 
537   __kmp_print_storage_map_gtid(-1, &team->t.t_taskq, &team->t.t_copypriv_data,
538                                sizeof(kmp_taskq_t), "%s_%d.t_taskq", header,
539                                team_id);
540 }
541 
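/* Set up and tear down allocator support used when OMP 5.0 is enabled. */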
542 static void __kmp_init_allocator() {
543 #if OMP_50_ENABLED
544   __kmp_init_memkind();
545 #endif
546 }
547 static void __kmp_fini_allocator() {
548 #if OMP_50_ENABLED
549   __kmp_fini_memkind();
550 #endif
551 }
552 
553 /* ------------------------------------------------------------------------ */
554 
555 #if KMP_DYNAMIC_LIB
556 #if KMP_OS_WINDOWS
557 
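/* On Windows, the DLL entry point below must cope with threads that the OS has
   already terminated during process detach; any bootstrap locks they left held
   are forcibly reset here. */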
558 static void __kmp_reset_lock(kmp_bootstrap_lock_t *lck) {
559   // TODO: Change to __kmp_break_bootstrap_lock().
560   __kmp_init_bootstrap_lock(lck); // put the lock into the released state
561 }
562 
563 static void __kmp_reset_locks_on_process_detach(int gtid_req) {
564   int i;
565   int thread_count;
566 
567   // PROCESS_DETACH is expected to be called by a thread that executes
568   // ProcessExit() or FreeLibrary(). The OS terminates the other threads (except
569   // the one calling ProcessExit or FreeLibrary), so it might seem safe to
570   // access __kmp_threads[] without taking the forkjoin_lock. In fact, however,
571   // some threads can still be alive here, although they are about to be
572   // terminated. The entries in the array with ds_thread==0 are the most
573   // suspicious, so accessing __kmp_threads[] may not be safe after all.
574 
575   // TODO: does it make sense to check __kmp_roots[] ?
576 
577   // Let's check that there are no other alive threads registered with the OMP
578   // lib.
579   while (1) {
580     thread_count = 0;
581     for (i = 0; i < __kmp_threads_capacity; ++i) {
582       if (!__kmp_threads)
583         continue;
584       kmp_info_t *th = __kmp_threads[i];
585       if (th == NULL)
586         continue;
587       int gtid = th->th.th_info.ds.ds_gtid;
588       if (gtid == gtid_req)
589         continue;
590       if (gtid < 0)
591         continue;
592       DWORD exit_val;
593       int alive = __kmp_is_thread_alive(th, &exit_val);
594       if (alive) {
595         ++thread_count;
596       }
597     }
598     if (thread_count == 0)
599       break; // success
600   }
601 
602   // Assume that I'm alone. Now it might be safe to check and reset locks.
603   // __kmp_forkjoin_lock and __kmp_stdio_lock are expected to be reset.
604   __kmp_reset_lock(&__kmp_forkjoin_lock);
605 #ifdef KMP_DEBUG
606   __kmp_reset_lock(&__kmp_stdio_lock);
607 #endif // KMP_DEBUG
608 }
609 
610 BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved) {
611   //__kmp_acquire_bootstrap_lock( &__kmp_initz_lock );
612 
613   switch (fdwReason) {
614 
615   case DLL_PROCESS_ATTACH:
616     KA_TRACE(10, ("DllMain: PROCESS_ATTACH\n"));
617 
618     return TRUE;
619 
620   case DLL_PROCESS_DETACH:
621     KA_TRACE(10, ("DllMain: PROCESS_DETACH T#%d\n", __kmp_gtid_get_specific()));
622 
623     if (lpReserved != NULL) {
624       // lpReserved is used for telling the difference:
625       //   lpReserved == NULL when FreeLibrary() was called,
626       //   lpReserved != NULL when the process terminates.
627       // When FreeLibrary() is called, worker threads remain alive. So they will
628       // release the forkjoin lock by themselves. When the process terminates,
629       // worker threads disappear triggering the problem of unreleased forkjoin
630       // lock as described below.
631 
632       // A worker thread can take the forkjoin lock. The problem comes up if
633       // that worker thread becomes dead before it releases the forkjoin lock.
634       // The forkjoin lock remains taken, while the thread executing
635       // DllMain()->PROCESS_DETACH->__kmp_internal_end_library() below will try
636       // to take the forkjoin lock and will always fail, so that the application
637       // will never finish [normally]. This scenario is possible if
638       // __kmpc_end() has not been executed. This does not look like a corner
639       // case; common cases include:
640       // - the main function was compiled by an alternative compiler;
641       // - the main function was compiled by icl but without /Qopenmp
642       //   (application with plugins);
643       // - application terminates by calling C exit(), Fortran CALL EXIT() or
644       //   Fortran STOP.
645       // - alive foreign thread prevented __kmpc_end from doing cleanup.
646       //
647       // This is a hack to work around the problem.
648       // TODO: !!! figure out something better.
649       __kmp_reset_locks_on_process_detach(__kmp_gtid_get_specific());
650     }
651 
652     __kmp_internal_end_library(__kmp_gtid_get_specific());
653 
654     return TRUE;
655 
656   case DLL_THREAD_ATTACH:
657     KA_TRACE(10, ("DllMain: THREAD_ATTACH\n"));
658 
659     /* if we wanted to register new sibling threads here every time, we would
660      * call __kmp_get_gtid(); */
661     return TRUE;
662 
663   case DLL_THREAD_DETACH:
664     KA_TRACE(10, ("DllMain: THREAD_DETACH T#%d\n", __kmp_gtid_get_specific()));
665 
666     __kmp_internal_end_thread(__kmp_gtid_get_specific());
667     return TRUE;
668   }
669 
670   return TRUE;
671 }
672 
673 #endif /* KMP_OS_WINDOWS */
674 #endif /* KMP_DYNAMIC_LIB */
675 
676 /* Change the library type to "status" and return the old type */
677 /* called from within initialization routines where __kmp_initz_lock is held */
678 int __kmp_change_library(int status) {
679   int old_status;
680 
681   old_status = __kmp_yield_init &
682                1; // check whether KMP_LIBRARY=throughput (even init count)
683 
684   if (status) {
685     __kmp_yield_init |= 1; // throughput => turnaround (odd init count)
686   } else {
687     __kmp_yield_init &= ~1; // turnaround => throughput (even init count)
688   }
689 
690   return old_status; // return previous setting of whether
691   // KMP_LIBRARY=throughput
692 }
693 
694 /* __kmp_parallel_deo -- Wait until it's our turn. */
695 void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
696   int gtid = *gtid_ref;
697 #ifdef BUILD_PARALLEL_ORDERED
698   kmp_team_t *team = __kmp_team_from_gtid(gtid);
699 #endif /* BUILD_PARALLEL_ORDERED */
700 
701   if (__kmp_env_consistency_check) {
702     if (__kmp_threads[gtid]->th.th_root->r.r_active)
703 #if KMP_USE_DYNAMIC_LOCK
704       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL, 0);
705 #else
706       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL);
707 #endif
708   }
709 #ifdef BUILD_PARALLEL_ORDERED
710   if (!team->t.t_serialized) {
711     KMP_MB();
712     KMP_WAIT_YIELD(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid),
713                    KMP_EQ, NULL);
714     KMP_MB();
715   }
716 #endif /* BUILD_PARALLEL_ORDERED */
717 }
718 
719 /* __kmp_parallel_dxo -- Signal the next task. */
720 void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
721   int gtid = *gtid_ref;
722 #ifdef BUILD_PARALLEL_ORDERED
723   int tid = __kmp_tid_from_gtid(gtid);
724   kmp_team_t *team = __kmp_team_from_gtid(gtid);
725 #endif /* BUILD_PARALLEL_ORDERED */
726 
727   if (__kmp_env_consistency_check) {
728     if (__kmp_threads[gtid]->th.th_root->r.r_active)
729       __kmp_pop_sync(gtid, ct_ordered_in_parallel, loc_ref);
730   }
731 #ifdef BUILD_PARALLEL_ORDERED
732   if (!team->t.t_serialized) {
733     KMP_MB(); /* Flush all pending memory write invalidates.  */
734 
735     /* use the tid of the next thread in this team */
736     /* TODO replace with general release procedure */
737     team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
738 
739     KMP_MB(); /* Flush all pending memory write invalidates.  */
740   }
741 #endif /* BUILD_PARALLEL_ORDERED */
742 }
743 
744 /* ------------------------------------------------------------------------ */
745 /* The BARRIER for a SINGLE process section is always explicit   */
746 
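/* Returns nonzero if the calling thread should execute the SINGLE block, i.e.
   it won the race to claim the construct or the team is serialized. */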
747 int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) {
748   int status;
749   kmp_info_t *th;
750   kmp_team_t *team;
751 
752   if (!TCR_4(__kmp_init_parallel))
753     __kmp_parallel_initialize();
754 
755   th = __kmp_threads[gtid];
756   team = th->th.th_team;
757   status = 0;
758 
759   th->th.th_ident = id_ref;
760 
761   if (team->t.t_serialized) {
762     status = 1;
763   } else {
764     kmp_int32 old_this = th->th.th_local.this_construct;
765 
766     ++th->th.th_local.this_construct;
767     /* try to set team count to thread count--success means thread got the
768        single block */
769     /* TODO: Should this be acquire or release? */
770     if (team->t.t_construct == old_this) {
771       status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
772                                               th->th.th_local.this_construct);
773     }
774 #if USE_ITT_BUILD
775     if (__itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
776         KMP_MASTER_GTID(gtid) &&
777 #if OMP_40_ENABLED
778         th->th.th_teams_microtask == NULL &&
779 #endif
780         team->t.t_active_level ==
781             1) { // Only report metadata by master of active team at level 1
782       __kmp_itt_metadata_single(id_ref);
783     }
784 #endif /* USE_ITT_BUILD */
785   }
786 
787   if (__kmp_env_consistency_check) {
788     if (status && push_ws) {
789       __kmp_push_workshare(gtid, ct_psingle, id_ref);
790     } else {
791       __kmp_check_workshare(gtid, ct_psingle, id_ref);
792     }
793   }
794 #if USE_ITT_BUILD
795   if (status) {
796     __kmp_itt_single_start(gtid);
797   }
798 #endif /* USE_ITT_BUILD */
799   return status;
800 }
801 
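/* Bookkeeping for leaving a SINGLE block (ITT notification and consistency
   checks). */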
802 void __kmp_exit_single(int gtid) {
803 #if USE_ITT_BUILD
804   __kmp_itt_single_end(gtid);
805 #endif /* USE_ITT_BUILD */
806   if (__kmp_env_consistency_check)
807     __kmp_pop_workshare(gtid, ct_psingle, NULL);
808 }
809 
810 /* Determine whether we can go parallel or must use a serialized parallel
811  * region, and how many threads we can use.
812  * set_nthreads is the number of threads requested for the team.
813  * Returns 1 if we should serialize or only use one thread,
814  * otherwise the number of threads to use.
815  * The forkjoin lock is held by the caller. */
816 static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
817                                  int master_tid, int set_nthreads
818 #if OMP_40_ENABLED
819                                  ,
820                                  int enter_teams
821 #endif /* OMP_40_ENABLED */
822                                  ) {
823   int capacity;
824   int new_nthreads;
825   KMP_DEBUG_ASSERT(__kmp_init_serial);
826   KMP_DEBUG_ASSERT(root && parent_team);
827 
828   // If dyn-var is set, dynamically adjust the number of desired threads,
829   // according to the method specified by dynamic_mode.
830   new_nthreads = set_nthreads;
831   if (!get__dynamic_2(parent_team, master_tid)) {
832     ;
833   }
834 #ifdef USE_LOAD_BALANCE
835   else if (__kmp_global.g.g_dynamic_mode == dynamic_load_balance) {
836     new_nthreads = __kmp_load_balance_nproc(root, set_nthreads);
837     if (new_nthreads == 1) {
838       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
839                     "reservation to 1 thread\n",
840                     master_tid));
841       return 1;
842     }
843     if (new_nthreads < set_nthreads) {
844       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
845                     "reservation to %d threads\n",
846                     master_tid, new_nthreads));
847     }
848   }
849 #endif /* USE_LOAD_BALANCE */
850   else if (__kmp_global.g.g_dynamic_mode == dynamic_thread_limit) {
851     new_nthreads = __kmp_avail_proc - __kmp_nth +
852                    (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
853     if (new_nthreads <= 1) {
854       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
855                     "reservation to 1 thread\n",
856                     master_tid));
857       return 1;
858     }
859     if (new_nthreads < set_nthreads) {
860       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
861                     "reservation to %d threads\n",
862                     master_tid, new_nthreads));
863     } else {
864       new_nthreads = set_nthreads;
865     }
866   } else if (__kmp_global.g.g_dynamic_mode == dynamic_random) {
867     if (set_nthreads > 2) {
868       new_nthreads = __kmp_get_random(parent_team->t.t_threads[master_tid]);
869       new_nthreads = (new_nthreads % set_nthreads) + 1;
870       if (new_nthreads == 1) {
871         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
872                       "reservation to 1 thread\n",
873                       master_tid));
874         return 1;
875       }
876       if (new_nthreads < set_nthreads) {
877         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
878                       "reservation to %d threads\n",
879                       master_tid, new_nthreads));
880       }
881     }
882   } else {
883     KMP_ASSERT(0);
884   }
885 
886   // Respect KMP_ALL_THREADS/KMP_DEVICE_THREAD_LIMIT.
887   if (__kmp_nth + new_nthreads -
888           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
889       __kmp_max_nth) {
890     int tl_nthreads = __kmp_max_nth - __kmp_nth +
891                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
892     if (tl_nthreads <= 0) {
893       tl_nthreads = 1;
894     }
895 
896     // If dyn-var is false, emit a 1-time warning.
897     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
898       __kmp_reserve_warn = 1;
899       __kmp_msg(kmp_ms_warning,
900                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
901                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
902     }
903     if (tl_nthreads == 1) {
904       KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT "
905                     "reduced reservation to 1 thread\n",
906                     master_tid));
907       return 1;
908     }
909     KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT reduced "
910                   "reservation to %d threads\n",
911                   master_tid, tl_nthreads));
912     new_nthreads = tl_nthreads;
913   }
914 
915   // Respect OMP_THREAD_LIMIT
916   if (root->r.r_cg_nthreads + new_nthreads -
917           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
918       __kmp_cg_max_nth) {
919     int tl_nthreads = __kmp_cg_max_nth - root->r.r_cg_nthreads +
920                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
921     if (tl_nthreads <= 0) {
922       tl_nthreads = 1;
923     }
924 
925     // If dyn-var is false, emit a 1-time warning.
926     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
927       __kmp_reserve_warn = 1;
928       __kmp_msg(kmp_ms_warning,
929                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
930                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
931     }
932     if (tl_nthreads == 1) {
933       KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT "
934                     "reduced reservation to 1 thread\n",
935                     master_tid));
936       return 1;
937     }
938     KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT reduced "
939                   "reservation to %d threads\n",
940                   master_tid, tl_nthreads));
941     new_nthreads = tl_nthreads;
942   }
943 
944   // Check if the threads array is large enough, or needs expanding.
945   // See comment in __kmp_register_root() about the adjustment if
946   // __kmp_threads[0] == NULL.
947   capacity = __kmp_threads_capacity;
948   if (TCR_PTR(__kmp_threads[0]) == NULL) {
949     --capacity;
950   }
951   if (__kmp_nth + new_nthreads -
952           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
953       capacity) {
954     // Expand the threads array.
955     int slotsRequired = __kmp_nth + new_nthreads -
956                         (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
957                         capacity;
958     int slotsAdded = __kmp_expand_threads(slotsRequired);
959     if (slotsAdded < slotsRequired) {
960       // The threads array was not expanded enough.
961       new_nthreads -= (slotsRequired - slotsAdded);
962       KMP_ASSERT(new_nthreads >= 1);
963 
964       // If dyn-var is false, emit a 1-time warning.
965       if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
966         __kmp_reserve_warn = 1;
967         if (__kmp_tp_cached) {
968           __kmp_msg(kmp_ms_warning,
969                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
970                     KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
971                     KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
972         } else {
973           __kmp_msg(kmp_ms_warning,
974                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
975                     KMP_HNT(SystemLimitOnThreads), __kmp_msg_null);
976         }
977       }
978     }
979   }
980 
981 #ifdef KMP_DEBUG
982   if (new_nthreads == 1) {
983     KC_TRACE(10,
984              ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
985               "dead roots and rechecking; requested %d threads\n",
986               __kmp_get_gtid(), set_nthreads));
987   } else {
988     KC_TRACE(10, ("__kmp_reserve_threads: T#%d allocating %d threads; requested"
989                   " %d threads\n",
990                   __kmp_get_gtid(), new_nthreads, set_nthreads));
991   }
992 #endif // KMP_DEBUG
993   return new_nthreads;
994 }
995 
996 /* Allocate threads from the thread pool and assign them to the new team. We
997    are assured that there are enough threads available, because we checked on
998    that earlier while holding the forkjoin lock. */
999 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
1000                                     kmp_info_t *master_th, int master_gtid) {
1001   int i;
1002   int use_hot_team;
1003 
1004   KA_TRACE(10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
1005   KMP_DEBUG_ASSERT(master_gtid == __kmp_get_gtid());
1006   KMP_MB();
1007 
1008   /* first, let's setup the master thread */
1009   master_th->th.th_info.ds.ds_tid = 0;
1010   master_th->th.th_team = team;
1011   master_th->th.th_team_nproc = team->t.t_nproc;
1012   master_th->th.th_team_master = master_th;
1013   master_th->th.th_team_serialized = FALSE;
1014   master_th->th.th_dispatch = &team->t.t_dispatch[0];
1015 
1016 /* make sure we are not the optimized hot team */
1017 #if KMP_NESTED_HOT_TEAMS
1018   use_hot_team = 0;
1019   kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
1020   if (hot_teams) { // hot teams array is not allocated if
1021     // KMP_HOT_TEAMS_MAX_LEVEL=0
1022     int level = team->t.t_active_level - 1; // index in array of hot teams
1023     if (master_th->th.th_teams_microtask) { // are we inside the teams?
1024       if (master_th->th.th_teams_size.nteams > 1) {
1025         ++level; // level was not increased in teams construct for
1026         // team_of_masters
1027       }
1028       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
1029           master_th->th.th_teams_level == team->t.t_level) {
1030         ++level; // level was not increased in teams construct for
1031         // team_of_workers before the parallel
1032       } // team->t.t_level will be increased inside parallel
1033     }
1034     if (level < __kmp_hot_teams_max_level) {
1035       if (hot_teams[level].hot_team) {
1036         // hot team has already been allocated for given level
1037         KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
1038         use_hot_team = 1; // the team is ready to use
1039       } else {
1040         use_hot_team = 0; // AC: threads are not allocated yet
1041         hot_teams[level].hot_team = team; // remember new hot team
1042         hot_teams[level].hot_team_nth = team->t.t_nproc;
1043       }
1044     } else {
1045       use_hot_team = 0;
1046     }
1047   }
1048 #else
1049   use_hot_team = team == root->r.r_hot_team;
1050 #endif
1051   if (!use_hot_team) {
1052 
1053     /* install the master thread */
1054     team->t.t_threads[0] = master_th;
1055     __kmp_initialize_info(master_th, team, 0, master_gtid);
1056 
1057     /* now, install the worker threads */
1058     for (i = 1; i < team->t.t_nproc; i++) {
1059 
1060       /* fork or reallocate a new thread and install it in team */
1061       kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1062       team->t.t_threads[i] = thr;
1063       KMP_DEBUG_ASSERT(thr);
1064       KMP_DEBUG_ASSERT(thr->th.th_team == team);
1065       /* align team and thread arrived states */
1066       KA_TRACE(20, ("__kmp_fork_team_threads: T#%d(%d:%d) init arrived "
1067                     "T#%d(%d:%d) join =%llu, plain=%llu\n",
1068                     __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
1069                     __kmp_gtid_from_tid(i, team), team->t.t_id, i,
1070                     team->t.t_bar[bs_forkjoin_barrier].b_arrived,
1071                     team->t.t_bar[bs_plain_barrier].b_arrived));
1072 #if OMP_40_ENABLED
1073       thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
1074       thr->th.th_teams_level = master_th->th.th_teams_level;
1075       thr->th.th_teams_size = master_th->th.th_teams_size;
1076 #endif
1077       { // Initialize threads' barrier data.
1078         int b;
1079         kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
1080         for (b = 0; b < bs_last_barrier; ++b) {
1081           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
1082           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
1083 #if USE_DEBUGGER
1084           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
1085 #endif
1086         }
1087       }
1088     }
1089 
1090 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
1091     __kmp_partition_places(team);
1092 #endif
1093   }
1094 
1095   KMP_MB();
1096 }
1097 
1098 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1099 // Propagate any changes to the floating point control registers out to the
1100 // team. We try to avoid unnecessary writes to the relevant cache line in the
1101 // team structure, so we don't make changes unless they are needed.
1102 inline static void propagateFPControl(kmp_team_t *team) {
1103   if (__kmp_inherit_fp_control) {
1104     kmp_int16 x87_fpu_control_word;
1105     kmp_uint32 mxcsr;
1106 
1107     // Get master values of FPU control flags (both X87 and vector)
1108     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1109     __kmp_store_mxcsr(&mxcsr);
1110     mxcsr &= KMP_X86_MXCSR_MASK;
1111 
1112     // There is no point looking at t_fp_control_saved here.
1113     // If it is TRUE, we still have to update the values if they are different
1114     // from those we now have. If it is FALSE we didn't save anything yet, but
1115     // our objective is the same. We have to ensure that the values in the team
1116     // are the same as those we have.
1117     // So, this code achieves what we need whether or not t_fp_control_saved is
1118     // true. By checking whether the value needs updating we avoid unnecessary
1119     // writes that would put the cache-line into a written state, causing all
1120     // threads in the team to have to read it again.
1121     KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
1122     KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
1123     // Although we don't use this value, other code in the runtime wants to know
1124     // whether it should restore them. So we must ensure it is correct.
1125     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
1126   } else {
1127     // Similarly here. Don't write to this cache-line in the team structure
1128     // unless we have to.
1129     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
1130   }
1131 }
1132 
1133 // Do the opposite, setting the hardware registers to the updated values from
1134 // the team.
1135 inline static void updateHWFPControl(kmp_team_t *team) {
1136   if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
1137     // Only reset the fp control regs if they have been changed in the team
1138     // during the parallel region that we are exiting.
1139     kmp_int16 x87_fpu_control_word;
1140     kmp_uint32 mxcsr;
1141     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1142     __kmp_store_mxcsr(&mxcsr);
1143     mxcsr &= KMP_X86_MXCSR_MASK;
1144 
1145     if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
1146       __kmp_clear_x87_fpu_status_word();
1147       __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
1148     }
1149 
1150     if (team->t.t_mxcsr != mxcsr) {
1151       __kmp_load_mxcsr(&team->t.t_mxcsr);
1152     }
1153   }
1154 }
1155 #else
1156 #define propagateFPControl(x) ((void)0)
1157 #define updateHWFPControl(x) ((void)0)
1158 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1159 
1160 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
1161                                      int realloc); // forward declaration
1162 
1163 /* Run a parallel region that has been serialized, so it runs only in a team
1164    consisting of the single master thread. */
1165 void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
1166   kmp_info_t *this_thr;
1167   kmp_team_t *serial_team;
1168 
1169   KC_TRACE(10, ("__kmpc_serialized_parallel: called by T#%d\n", global_tid));
1170 
1171   /* Skip all this code for autopar serialized loops since it results in
1172      unacceptable overhead */
1173   if (loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR))
1174     return;
1175 
1176   if (!TCR_4(__kmp_init_parallel))
1177     __kmp_parallel_initialize();
1178 
1179   this_thr = __kmp_threads[global_tid];
1180   serial_team = this_thr->th.th_serial_team;
1181 
1182   /* utilize the serialized team held by this thread */
1183   KMP_DEBUG_ASSERT(serial_team);
1184   KMP_MB();
1185 
1186   if (__kmp_tasking_mode != tskm_immediate_exec) {
1187     KMP_DEBUG_ASSERT(
1188         this_thr->th.th_task_team ==
1189         this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]);
1190     KMP_DEBUG_ASSERT(serial_team->t.t_task_team[this_thr->th.th_task_state] ==
1191                      NULL);
1192     KA_TRACE(20, ("__kmpc_serialized_parallel: T#%d pushing task_team %p / "
1193                   "team %p, new task_team = NULL\n",
1194                   global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
1195     this_thr->th.th_task_team = NULL;
1196   }
1197 
1198 #if OMP_40_ENABLED
1199   kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
1200   if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1201     proc_bind = proc_bind_false;
1202   } else if (proc_bind == proc_bind_default) {
1203     // No proc_bind clause was specified, so use the current value
1204     // of proc-bind-var for this parallel region.
1205     proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
1206   }
1207   // Reset for next parallel region
1208   this_thr->th.th_set_proc_bind = proc_bind_default;
1209 #endif /* OMP_40_ENABLED */
1210 
1211 #if OMPT_SUPPORT
1212   ompt_data_t ompt_parallel_data = ompt_data_none;
1213   ompt_data_t *implicit_task_data;
1214   void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
1215   if (ompt_enabled.enabled &&
1216       this_thr->th.ompt_thread_info.state != omp_state_overhead) {
1217 
1218     ompt_task_info_t *parent_task_info;
1219     parent_task_info = OMPT_CUR_TASK_INFO(this_thr);
1220 
1221     parent_task_info->frame.enter_frame = OMPT_GET_FRAME_ADDRESS(1);
1222     if (ompt_enabled.ompt_callback_parallel_begin) {
1223       int team_size = 1;
1224 
1225       ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1226           &(parent_task_info->task_data), &(parent_task_info->frame),
1227           &ompt_parallel_data, team_size, ompt_parallel_invoker_program,
1228           codeptr);
1229     }
1230   }
1231 #endif // OMPT_SUPPORT
1232 
1233   if (this_thr->th.th_team != serial_team) {
1234     // Nested level will be an index in the nested nthreads array
1235     int level = this_thr->th.th_team->t.t_level;
1236 
1237     if (serial_team->t.t_serialized) {
1238       /* this serial team was already used
1239          TODO increase performance by making these locks more specific */
1240       kmp_team_t *new_team;
1241 
1242       __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1243 
1244       new_team = __kmp_allocate_team(this_thr->th.th_root, 1, 1,
1245 #if OMPT_SUPPORT
1246                                      ompt_parallel_data,
1247 #endif
1248 #if OMP_40_ENABLED
1249                                      proc_bind,
1250 #endif
1251                                      &this_thr->th.th_current_task->td_icvs,
1252                                      0 USE_NESTED_HOT_ARG(NULL));
1253       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1254       KMP_ASSERT(new_team);
1255 
1256       /* setup new serialized team and install it */
1257       new_team->t.t_threads[0] = this_thr;
1258       new_team->t.t_parent = this_thr->th.th_team;
1259       serial_team = new_team;
1260       this_thr->th.th_serial_team = serial_team;
1261 
1262       KF_TRACE(
1263           10,
1264           ("__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
1265            global_tid, serial_team));
1266 
1267       /* TODO the above breaks the requirement that if we run out of resources,
1268          then we can still guarantee that serialized teams are ok, since we may
1269          need to allocate a new one */
1270     } else {
1271       KF_TRACE(
1272           10,
1273           ("__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
1274            global_tid, serial_team));
1275     }
1276 
1277     /* we have to initialize this serial team */
1278     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1279     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1280     KMP_DEBUG_ASSERT(this_thr->th.th_team != serial_team);
1281     serial_team->t.t_ident = loc;
1282     serial_team->t.t_serialized = 1;
1283     serial_team->t.t_nproc = 1;
1284     serial_team->t.t_parent = this_thr->th.th_team;
1285     serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched;
1286     this_thr->th.th_team = serial_team;
1287     serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
1288 
1289     KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d curtask=%p\n", global_tid,
1290                   this_thr->th.th_current_task));
1291     KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1);
1292     this_thr->th.th_current_task->td_flags.executing = 0;
1293 
1294     __kmp_push_current_task_to_thread(this_thr, serial_team, 0);
1295 
1296     /* TODO: GEH: do ICVs work for nested serialized teams? Don't we need an
1297        implicit task for each serialized task represented by
1298        team->t.t_serialized? */
1299     copy_icvs(&this_thr->th.th_current_task->td_icvs,
1300               &this_thr->th.th_current_task->td_parent->td_icvs);
1301 
1302     // Thread value exists in the nested nthreads array for the next nested
1303     // level
1304     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1305       this_thr->th.th_current_task->td_icvs.nproc =
1306           __kmp_nested_nth.nth[level + 1];
1307     }
1308 
1309 #if OMP_40_ENABLED
1310     if (__kmp_nested_proc_bind.used &&
1311         (level + 1 < __kmp_nested_proc_bind.used)) {
1312       this_thr->th.th_current_task->td_icvs.proc_bind =
1313           __kmp_nested_proc_bind.bind_types[level + 1];
1314     }
1315 #endif /* OMP_40_ENABLED */
1316 
1317 #if USE_DEBUGGER
1318     serial_team->t.t_pkfn = (microtask_t)(~0); // For the debugger.
1319 #endif
1320     this_thr->th.th_info.ds.ds_tid = 0;
1321 
1322     /* set thread cache values */
1323     this_thr->th.th_team_nproc = 1;
1324     this_thr->th.th_team_master = this_thr;
1325     this_thr->th.th_team_serialized = 1;
1326 
1327     serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
1328     serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
1329 #if OMP_50_ENABLED
1330     serial_team->t.t_def_allocator = this_thr->th.th_def_allocator; // save
1331 #endif
1332 
1333     propagateFPControl(serial_team);
1334 
1335     /* check if we need to allocate dispatch buffers stack */
1336     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1337     if (!serial_team->t.t_dispatch->th_disp_buffer) {
1338       serial_team->t.t_dispatch->th_disp_buffer =
1339           (dispatch_private_info_t *)__kmp_allocate(
1340               sizeof(dispatch_private_info_t));
1341     }
1342     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1343 
1344     KMP_MB();
1345 
1346   } else {
1347     /* this serialized team is already being used,
1348      * that's fine, just add another nested level */
1349     KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team);
1350     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1351     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1352     ++serial_team->t.t_serialized;
1353     this_thr->th.th_team_serialized = serial_team->t.t_serialized;
1354 
1355     // Nested level will be an index in the nested nthreads array
1356     int level = this_thr->th.th_team->t.t_level;
1357     // Thread value exists in the nested nthreads array for the next nested
1358     // level
1359     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1360       this_thr->th.th_current_task->td_icvs.nproc =
1361           __kmp_nested_nth.nth[level + 1];
1362     }
1363     serial_team->t.t_level++;
1364     KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d increasing nesting level "
1365                   "of serial team %p to %d\n",
1366                   global_tid, serial_team, serial_team->t.t_level));
1367 
1368     /* allocate/push dispatch buffers stack */
1369     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1370     {
1371       dispatch_private_info_t *disp_buffer =
1372           (dispatch_private_info_t *)__kmp_allocate(
1373               sizeof(dispatch_private_info_t));
1374       disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer;
1375       serial_team->t.t_dispatch->th_disp_buffer = disp_buffer;
1376     }
1377     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1378 
1379     KMP_MB();
1380   }
1381 #if OMP_40_ENABLED
1382   KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);
1383 #endif
1384 
1385   if (__kmp_env_consistency_check)
1386     __kmp_push_parallel(global_tid, NULL);
1387 #if OMPT_SUPPORT
1388   serial_team->t.ompt_team_info.master_return_address = codeptr;
1389   if (ompt_enabled.enabled &&
1390       this_thr->th.ompt_thread_info.state != omp_state_overhead) {
1391     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame = OMPT_GET_FRAME_ADDRESS(1);
1392 
1393     ompt_lw_taskteam_t lw_taskteam;
1394     __ompt_lw_taskteam_init(&lw_taskteam, this_thr, global_tid,
1395                             &ompt_parallel_data, codeptr);
1396 
1397     __ompt_lw_taskteam_link(&lw_taskteam, this_thr, 1);
1398     // don't use lw_taskteam after linking. Its content was swapped.
1399 
1400     /* OMPT implicit task begin */
1401     implicit_task_data = OMPT_CUR_TASK_DATA(this_thr);
1402     if (ompt_enabled.ompt_callback_implicit_task) {
1403       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1404           ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),
1405           OMPT_CUR_TASK_DATA(this_thr), 1, __kmp_tid_from_gtid(global_tid));
1406       OMPT_CUR_TASK_INFO(this_thr)
1407           ->thread_num = __kmp_tid_from_gtid(global_tid);
1408     }
1409 
1410     /* OMPT state */
1411     this_thr->th.ompt_thread_info.state = omp_state_work_parallel;
1412     OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame = OMPT_GET_FRAME_ADDRESS(1);
1413   }
1414 #endif
1415 }
1416 
1417 /* most of the work for a fork */
1418 /* return true if we really went parallel, false if serialized */
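/* Note (assumption based on the usual libomp layering): this is reached from
   compiler-generated entry points such as __kmpc_fork_call and from the GNU
   compatibility layer, which pass the outlined microtask and its varargs. */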
1419 int __kmp_fork_call(ident_t *loc, int gtid,
1420                     enum fork_context_e call_context, // Intel, GNU, ...
1421                     kmp_int32 argc, microtask_t microtask, launch_t invoker,
1422 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1423 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1424                     va_list *ap
1425 #else
1426                     va_list ap
1427 #endif
1428                     ) {
1429   void **argv;
1430   int i;
1431   int master_tid;
1432   int master_this_cons;
1433   kmp_team_t *team;
1434   kmp_team_t *parent_team;
1435   kmp_info_t *master_th;
1436   kmp_root_t *root;
1437   int nthreads;
1438   int master_active;
1439   int master_set_numthreads;
1440   int level;
1441 #if OMP_40_ENABLED
1442   int active_level;
1443   int teams_level;
1444 #endif
1445 #if KMP_NESTED_HOT_TEAMS
1446   kmp_hot_team_ptr_t **p_hot_teams;
1447 #endif
1448   { // KMP_TIME_BLOCK
1449     KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call);
1450     KMP_COUNT_VALUE(OMP_PARALLEL_args, argc);
1451 
1452     KA_TRACE(20, ("__kmp_fork_call: enter T#%d\n", gtid));
1453     if (__kmp_stkpadding > 0 && __kmp_root[gtid] != NULL) {
1454       /* Some systems prefer the stack for the root thread(s) to start with */
1455       /* some gap from the parent stack to prevent false sharing. */
1456       void *dummy = KMP_ALLOCA(__kmp_stkpadding);
1457       /* The two lines below keep the allocation above from being optimized out */
1458       if (__kmp_stkpadding > KMP_MAX_STKPADDING)
1459         __kmp_stkpadding += (short)((kmp_int64)dummy);
1460     }
1461 
1462     /* initialize if needed */
1463     KMP_DEBUG_ASSERT(
1464         __kmp_init_serial); // AC: potentially unsafe, not in sync with shutdown
1465     if (!TCR_4(__kmp_init_parallel))
1466       __kmp_parallel_initialize();
1467 
1468     /* setup current data */
1469     master_th = __kmp_threads[gtid]; // AC: potentially unsafe, not in sync with
1470     // shutdown
1471     parent_team = master_th->th.th_team;
1472     master_tid = master_th->th.th_info.ds.ds_tid;
1473     master_this_cons = master_th->th.th_local.this_construct;
1474     root = master_th->th.th_root;
1475     master_active = root->r.r_active;
1476     master_set_numthreads = master_th->th.th_set_nproc;
1477 
1478 #if OMPT_SUPPORT
1479     ompt_data_t ompt_parallel_data = ompt_data_none;
1480     ompt_data_t *parent_task_data;
1481     omp_frame_t *ompt_frame;
1482     ompt_data_t *implicit_task_data;
1483     void *return_address = NULL;
1484 
1485     if (ompt_enabled.enabled) {
1486       __ompt_get_task_info_internal(0, NULL, &parent_task_data, &ompt_frame,
1487                                     NULL, NULL);
1488       return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
1489     }
1490 #endif
1491 
1492     // Nested level will be an index in the nested nthreads array
1493     level = parent_team->t.t_level;
1494     // used to launch non-serialized teams even when nesting is not allowed
1495     active_level = parent_team->t.t_active_level;
1496 #if OMP_40_ENABLED
1497     // needed to check nesting inside the teams
1498     teams_level = master_th->th.th_teams_level;
1499 #endif
1500 #if KMP_NESTED_HOT_TEAMS
1501     p_hot_teams = &master_th->th.th_hot_teams;
1502     if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
1503       *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
1504           sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level);
1505       (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
1506       // it is either the actual hot team or not needed (when active_level > 0)
1507       (*p_hot_teams)[0].hot_team_nth = 1;
1508     }
1509 #endif
1510 
1511 #if OMPT_SUPPORT
1512     if (ompt_enabled.enabled) {
1513       if (ompt_enabled.ompt_callback_parallel_begin) {
1514         int team_size = master_set_numthreads
1515                             ? master_set_numthreads
1516                             : get__nproc_2(parent_team, master_tid);
1517         ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1518             parent_task_data, ompt_frame, &ompt_parallel_data, team_size,
1519             OMPT_INVOKER(call_context), return_address);
1520       }
1521       master_th->th.ompt_thread_info.state = omp_state_overhead;
1522     }
1523 #endif
1524 
1525     master_th->th.th_ident = loc;
1526 
1527 #if OMP_40_ENABLED
1528     if (master_th->th.th_teams_microtask && ap &&
1529         microtask != (microtask_t)__kmp_teams_master && level == teams_level) {
1530       // AC: Start of a parallel nested inside a teams construct. The team is
1531       // already constructed (hot) and all workers wait at the fork barrier, so no
1532       // lock is needed to do a little team initialization and then release them.
1533       parent_team->t.t_ident = loc;
1534       __kmp_alloc_argv_entries(argc, parent_team, TRUE);
1535       parent_team->t.t_argc = argc;
1536       argv = (void **)parent_team->t.t_argv;
1537       for (i = argc - 1; i >= 0; --i)
1538 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1539 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1540         *argv++ = va_arg(*ap, void *);
1541 #else
1542         *argv++ = va_arg(ap, void *);
1543 #endif
1544       // Increment our nesting levels, but do not increase the serialization count
1545       if (parent_team == master_th->th.th_serial_team) {
1546         // AC: we are in serialized parallel
1547         __kmpc_serialized_parallel(loc, gtid);
1548         KMP_DEBUG_ASSERT(parent_team->t.t_serialized > 1);
1549         // AC: needed so that inquiry functions work correctly;
1550         // will be restored at join time
1551         parent_team->t.t_serialized--;
1552 #if OMPT_SUPPORT
1553         void *dummy;
1554         void **exit_runtime_p;
1555 
1556         ompt_lw_taskteam_t lw_taskteam;
1557 
1558         if (ompt_enabled.enabled) {
1559           __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1560                                   &ompt_parallel_data, return_address);
1561           exit_runtime_p = &(lw_taskteam.ompt_task_info.frame.exit_frame);
1562 
1563           __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1564           // don't use lw_taskteam after linking; its contents were swapped
1565 
1566           /* OMPT implicit task begin */
1567           implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1568           if (ompt_enabled.ompt_callback_implicit_task) {
1569             ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1570                 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1571                 implicit_task_data, 1, __kmp_tid_from_gtid(gtid));
1572             OMPT_CUR_TASK_INFO(master_th)
1573                 ->thread_num = __kmp_tid_from_gtid(gtid);
1574           }
1575 
1576           /* OMPT state */
1577           master_th->th.ompt_thread_info.state = omp_state_work_parallel;
1578         } else {
1579           exit_runtime_p = &dummy;
1580         }
1581 #endif
1582 
1583         {
1584           KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1585           KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1586           __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv
1587 #if OMPT_SUPPORT
1588                                  ,
1589                                  exit_runtime_p
1590 #endif
1591                                  );
1592         }
1593 
1594 #if OMPT_SUPPORT
1595         *exit_runtime_p = NULL;
1596         if (ompt_enabled.enabled) {
1597           OMPT_CUR_TASK_INFO(master_th)->frame.exit_frame = NULL;
1598           if (ompt_enabled.ompt_callback_implicit_task) {
1599             ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1600                 ompt_scope_end, NULL, implicit_task_data, 1,
1601                 OMPT_CUR_TASK_INFO(master_th)->thread_num);
1602           }
1603           __ompt_lw_taskteam_unlink(master_th);
1604 
1605           if (ompt_enabled.ompt_callback_parallel_end) {
1606             ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1607                 OMPT_CUR_TEAM_DATA(master_th), OMPT_CUR_TASK_DATA(master_th),
1608                 OMPT_INVOKER(call_context), return_address);
1609           }
1610           master_th->th.ompt_thread_info.state = omp_state_overhead;
1611         }
1612 #endif
1613         return TRUE;
1614       }
1615 
1616       parent_team->t.t_pkfn = microtask;
1617       parent_team->t.t_invoke = invoker;
1618       KMP_ATOMIC_INC(&root->r.r_in_parallel);
1619       parent_team->t.t_active_level++;
1620       parent_team->t.t_level++;
1621 #if OMP_50_ENABLED
1622       parent_team->t.t_def_allocator = master_th->th.th_def_allocator; // save
1623 #endif
1624 
1625       /* Change number of threads in the team if requested */
1626       if (master_set_numthreads) { // The parallel has num_threads clause
1627         if (master_set_numthreads < master_th->th.th_teams_size.nth) {
1628           // AC: the number of threads can only be reduced dynamically, not increased
1629           kmp_info_t **other_threads = parent_team->t.t_threads;
1630           parent_team->t.t_nproc = master_set_numthreads;
1631           for (i = 0; i < master_set_numthreads; ++i) {
1632             other_threads[i]->th.th_team_nproc = master_set_numthreads;
1633           }
1634           // Keep extra threads hot in the team for possible subsequent parallel regions
1635         }
1636         master_th->th.th_set_nproc = 0;
1637       }
1638 
1639 #if USE_DEBUGGER
1640       if (__kmp_debugging) { // Let debugger override number of threads.
1641         int nth = __kmp_omp_num_threads(loc);
1642         if (nth > 0) { // 0 means debugger doesn't want to change num threads
1643           master_set_numthreads = nth;
1644         }
1645       }
1646 #endif
1647 
1648       KF_TRACE(10, ("__kmp_fork_call: before internal fork: root=%p, team=%p, "
1649                     "master_th=%p, gtid=%d\n",
1650                     root, parent_team, master_th, gtid));
1651       __kmp_internal_fork(loc, gtid, parent_team);
1652       KF_TRACE(10, ("__kmp_fork_call: after internal fork: root=%p, team=%p, "
1653                     "master_th=%p, gtid=%d\n",
1654                     root, parent_team, master_th, gtid));
1655 
1656       /* Invoke microtask for MASTER thread */
1657       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
1658                     parent_team->t.t_id, parent_team->t.t_pkfn));
1659 
1660       if (!parent_team->t.t_invoke(gtid)) {
1661         KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
1662       }
1663       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
1664                     parent_team->t.t_id, parent_team->t.t_pkfn));
1665       KMP_MB(); /* Flush all pending memory write invalidates.  */
1666 
1667       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
1668 
1669       return TRUE;
1670     } // Parallel closely nested in teams construct
1671 #endif /* OMP_40_ENABLED */
1672 
1673 #if KMP_DEBUG
1674     if (__kmp_tasking_mode != tskm_immediate_exec) {
1675       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
1676                        parent_team->t.t_task_team[master_th->th.th_task_state]);
1677     }
1678 #endif
1679 
1680     if (parent_team->t.t_active_level >=
1681         master_th->th.th_current_task->td_icvs.max_active_levels) {
1682       nthreads = 1;
1683     } else {
1684 #if OMP_40_ENABLED
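      // enter_teams: this fork either starts the outer league of a teams
      // construct (no varargs at the top level) or is a parallel nested
      // directly inside teams; threads must then be reserved even if nesting
      // is disabled (see the note passed to __kmp_reserve_threads below).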
1685       int enter_teams = ((ap == NULL && active_level == 0) ||
1686                          (ap && teams_level > 0 && teams_level == level));
1687 #endif
1688       nthreads =
1689           master_set_numthreads
1690               ? master_set_numthreads
1691               : get__nproc_2(
1692                     parent_team,
1693                     master_tid); // TODO: get nproc directly from current task
1694 
1695       // Do we need to take the fork/join lock? (Not needed for a serialized
1696       // parallel outside a teams construct.) This code was moved here from
1697       // __kmp_reserve_threads() to speed up nested serialized parallels.
1698       if (nthreads > 1) {
1699         if ((!get__nested(master_th) && (root->r.r_in_parallel
1700 #if OMP_40_ENABLED
1701                                          && !enter_teams
1702 #endif /* OMP_40_ENABLED */
1703                                          )) ||
1704             (__kmp_library == library_serial)) {
1705           KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
1706                         " threads\n",
1707                         gtid, nthreads));
1708           nthreads = 1;
1709         }
1710       }
1711       if (nthreads > 1) {
1712         /* determine how many new threads we can use */
1713         __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1714         nthreads = __kmp_reserve_threads(
1715             root, parent_team, master_tid, nthreads
1716 #if OMP_40_ENABLED
1717             /* AC: If we execute teams from parallel region (on host), then
1718                teams should be created but each can only have 1 thread if
1719                nesting is disabled. If teams called from serial region, then
1720                teams and their threads should be created regardless of the
1721                nesting setting. */
1722             ,
1723             enter_teams
1724 #endif /* OMP_40_ENABLED */
1725             );
1726         if (nthreads == 1) {
1727           // Release the lock here for single-threaded execution; for multi-threaded
1728           // execution it will be released later, after the team of threads has been
1729           // created and initialized
1730           __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1731         }
1732       }
1733     }
1734     KMP_DEBUG_ASSERT(nthreads > 0);
1735 
1736     // If we temporarily changed the set number of threads then restore it now
1737     master_th->th.th_set_nproc = 0;
1738 
1739     /* create a serialized parallel region? */
1740     if (nthreads == 1) {
1741 /* josh todo: hypothetical question: what do we do for OS X*? */
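/* Stack-allocate the argument array for the outlined function: a VLA on the
   Linux targets listed below, otherwise via KMP_ALLOCA. */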
1742 #if KMP_OS_LINUX &&                                                            \
1743     (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
1744       void *args[argc];
1745 #else
1746       void **args = (void **)KMP_ALLOCA(argc * sizeof(void *));
1747 #endif /* KMP_OS_LINUX && ( KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || \
1748           KMP_ARCH_AARCH64) */
1749 
1750       KA_TRACE(20,
1751                ("__kmp_fork_call: T#%d serializing parallel region\n", gtid));
1752 
1753       __kmpc_serialized_parallel(loc, gtid);
1754 
1755       if (call_context == fork_context_intel) {
1756         /* TODO this sucks, use the compiler itself to pass args! :) */
1757         master_th->th.th_serial_team->t.t_ident = loc;
1758 #if OMP_40_ENABLED
1759         if (!ap) {
1760           // revert change made in __kmpc_serialized_parallel()
1761           master_th->th.th_serial_team->t.t_level--;
1762 // Get args from parent team for teams construct
1763 
1764 #if OMPT_SUPPORT
1765           void *dummy;
1766           void **exit_runtime_p;
1767           ompt_task_info_t *task_info;
1768 
1769           ompt_lw_taskteam_t lw_taskteam;
1770 
1771           if (ompt_enabled.enabled) {
1772             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1773                                     &ompt_parallel_data, return_address);
1774 
1775             __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1776             // don't use lw_taskteam after linking; its contents were swapped
1777 
1778             task_info = OMPT_CUR_TASK_INFO(master_th);
1779             exit_runtime_p = &(task_info->frame.exit_frame);
1780             if (ompt_enabled.ompt_callback_implicit_task) {
1781               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1782                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1783                   &(task_info->task_data), 1, __kmp_tid_from_gtid(gtid));
1784               OMPT_CUR_TASK_INFO(master_th)
1785                   ->thread_num = __kmp_tid_from_gtid(gtid);
1786             }
1787 
1788             /* OMPT state */
1789             master_th->th.ompt_thread_info.state = omp_state_work_parallel;
1790           } else {
1791             exit_runtime_p = &dummy;
1792           }
1793 #endif
1794 
1795           {
1796             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1797             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1798             __kmp_invoke_microtask(microtask, gtid, 0, argc,
1799                                    parent_team->t.t_argv
1800 #if OMPT_SUPPORT
1801                                    ,
1802                                    exit_runtime_p
1803 #endif
1804                                    );
1805           }
1806 
1807 #if OMPT_SUPPORT
1808           if (ompt_enabled.enabled) {
1809             exit_runtime_p = NULL;
1810             if (ompt_enabled.ompt_callback_implicit_task) {
1811               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1812                   ompt_scope_end, NULL, &(task_info->task_data), 1,
1813                   OMPT_CUR_TASK_INFO(master_th)->thread_num);
1814             }
1815 
1816             __ompt_lw_taskteam_unlink(master_th);
1817             if (ompt_enabled.ompt_callback_parallel_end) {
1818               ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1819                   OMPT_CUR_TEAM_DATA(master_th), parent_task_data,
1820                   OMPT_INVOKER(call_context), return_address);
1821             }
1822             master_th->th.ompt_thread_info.state = omp_state_overhead;
1823           }
1824 #endif
1825         } else if (microtask == (microtask_t)__kmp_teams_master) {
1826           KMP_DEBUG_ASSERT(master_th->th.th_team ==
1827                            master_th->th.th_serial_team);
1828           team = master_th->th.th_team;
1829           // team->t.t_pkfn = microtask;
1830           team->t.t_invoke = invoker;
1831           __kmp_alloc_argv_entries(argc, team, TRUE);
1832           team->t.t_argc = argc;
1833           argv = (void **)team->t.t_argv;
1834           if (ap) {
1835             for (i = argc - 1; i >= 0; --i)
1836 // TODO: revert workaround for Intel(R) 64 tracker #96
1837 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1838               *argv++ = va_arg(*ap, void *);
1839 #else
1840               *argv++ = va_arg(ap, void *);
1841 #endif
1842           } else {
1843             for (i = 0; i < argc; ++i)
1844               // Get args from parent team for teams construct
1845               argv[i] = parent_team->t.t_argv[i];
1846           }
1847           // AC: revert change made in __kmpc_serialized_parallel()
1848           //     because initial code in teams should have level=0
1849           team->t.t_level--;
1850           // AC: call special invoker for outer "parallel" of teams construct
1851           invoker(gtid);
1852         } else {
1853 #endif /* OMP_40_ENABLED */
1854           argv = args;
1855           for (i = argc - 1; i >= 0; --i)
1856 // TODO: revert workaround for Intel(R) 64 tracker #96
1857 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1858             *argv++ = va_arg(*ap, void *);
1859 #else
1860           *argv++ = va_arg(ap, void *);
1861 #endif
1862           KMP_MB();
1863 
1864 #if OMPT_SUPPORT
1865           void *dummy;
1866           void **exit_runtime_p;
1867           ompt_task_info_t *task_info;
1868 
1869           ompt_lw_taskteam_t lw_taskteam;
1870 
1871           if (ompt_enabled.enabled) {
1872             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1873                                     &ompt_parallel_data, return_address);
1874             __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1875             // don't use lw_taskteam after linking; its contents were swapped
1876             task_info = OMPT_CUR_TASK_INFO(master_th);
1877             exit_runtime_p = &(task_info->frame.exit_frame);
1878 
1879             /* OMPT implicit task begin */
1880             implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1881             if (ompt_enabled.ompt_callback_implicit_task) {
1882               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1883                   ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1884                   implicit_task_data, 1, __kmp_tid_from_gtid(gtid));
1885               OMPT_CUR_TASK_INFO(master_th)
1886                   ->thread_num = __kmp_tid_from_gtid(gtid);
1887             }
1888 
1889             /* OMPT state */
1890             master_th->th.ompt_thread_info.state = omp_state_work_parallel;
1891           } else {
1892             exit_runtime_p = &dummy;
1893           }
1894 #endif
1895 
1896           {
1897             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1898             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1899             __kmp_invoke_microtask(microtask, gtid, 0, argc, args
1900 #if OMPT_SUPPORT
1901                                    ,
1902                                    exit_runtime_p
1903 #endif
1904                                    );
1905           }
1906 
1907 #if OMPT_SUPPORT
1908           if (ompt_enabled.enabled) {
1909             *exit_runtime_p = NULL;
1910             if (ompt_enabled.ompt_callback_implicit_task) {
1911               ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1912                   ompt_scope_end, NULL, &(task_info->task_data), 1,
1913                   OMPT_CUR_TASK_INFO(master_th)->thread_num);
1914             }
1915 
1916             ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1917             __ompt_lw_taskteam_unlink(master_th);
1918             if (ompt_enabled.ompt_callback_parallel_end) {
1919               ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1920                   &ompt_parallel_data, parent_task_data,
1921                   OMPT_INVOKER(call_context), return_address);
1922             }
1923             master_th->th.ompt_thread_info.state = omp_state_overhead;
1924           }
1925 #endif
1926 #if OMP_40_ENABLED
1927         }
1928 #endif /* OMP_40_ENABLED */
1929       } else if (call_context == fork_context_gnu) {
1930 #if OMPT_SUPPORT
1931         ompt_lw_taskteam_t lwt;
1932         __ompt_lw_taskteam_init(&lwt, master_th, gtid, &ompt_parallel_data,
1933                                 return_address);
1934 
1935         lwt.ompt_task_info.frame.exit_frame = NULL;
1936         __ompt_lw_taskteam_link(&lwt, master_th, 1);
1937 // don't use lw_taskteam after linking; its contents were swapped
1938 #endif
1939 
1940         // we were called from GNU native code
1941         KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1942         return FALSE;
1943       } else {
1944         KMP_ASSERT2(call_context < fork_context_last,
1945                     "__kmp_fork_call: unknown fork_context parameter");
1946       }
1947 
1948       KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1949       KMP_MB();
1950       return FALSE;
1951     }
1952 
1953     // GEH: only modify the executing flag when not serialized;
1954     //      the serialized case is handled in __kmpc_serialized_parallel
1955     KF_TRACE(10, ("__kmp_fork_call: parent_team_aclevel=%d, master_th=%p, "
1956                   "curtask=%p, curtask_max_aclevel=%d\n",
1957                   parent_team->t.t_active_level, master_th,
1958                   master_th->th.th_current_task,
1959                   master_th->th.th_current_task->td_icvs.max_active_levels));
1960     // TODO: GEH - cannot do this assertion because root thread not set up as
1961     // executing
1962     // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 1 );
1963     master_th->th.th_current_task->td_flags.executing = 0;
1964 
1965 #if OMP_40_ENABLED
1966     if (!master_th->th.th_teams_microtask || level > teams_level)
1967 #endif /* OMP_40_ENABLED */
1968     {
1969       /* Increment our nested depth level */
1970       KMP_ATOMIC_INC(&root->r.r_in_parallel);
1971     }
1972 
1973     // See if we need to make a copy of the ICVs.
1974     int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc;
1975     if ((level + 1 < __kmp_nested_nth.used) &&
1976         (__kmp_nested_nth.nth[level + 1] != nthreads_icv)) {
1977       nthreads_icv = __kmp_nested_nth.nth[level + 1];
1978     } else {
1979       nthreads_icv = 0; // don't update
1980     }
1981 
1982 #if OMP_40_ENABLED
1983     // Figure out the proc_bind_policy for the new team.
1984     kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
1985     kmp_proc_bind_t proc_bind_icv =
1986         proc_bind_default; // proc_bind_default means don't update
1987     if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1988       proc_bind = proc_bind_false;
1989     } else {
1990       if (proc_bind == proc_bind_default) {
1991         // No proc_bind clause specified; use current proc-bind-var for this
1992         // parallel region
1993         proc_bind = master_th->th.th_current_task->td_icvs.proc_bind;
1994       }
1995       /* else: The proc_bind policy was specified explicitly on parallel clause.
1996          This overrides proc-bind-var for this parallel region, but does not
1997          change proc-bind-var. */
1998       // Figure the value of proc-bind-var for the child threads.
1999       if ((level + 1 < __kmp_nested_proc_bind.used) &&
2000           (__kmp_nested_proc_bind.bind_types[level + 1] !=
2001            master_th->th.th_current_task->td_icvs.proc_bind)) {
2002         proc_bind_icv = __kmp_nested_proc_bind.bind_types[level + 1];
2003       }
2004     }
2005 
2006     // Reset for next parallel region
2007     master_th->th.th_set_proc_bind = proc_bind_default;
2008 #endif /* OMP_40_ENABLED */
2009 
2010     if ((nthreads_icv > 0)
2011 #if OMP_40_ENABLED
2012         || (proc_bind_icv != proc_bind_default)
2013 #endif /* OMP_40_ENABLED */
2014             ) {
2015       kmp_internal_control_t new_icvs;
2016       copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
2017       new_icvs.next = NULL;
2018       if (nthreads_icv > 0) {
2019         new_icvs.nproc = nthreads_icv;
2020       }
2021 
2022 #if OMP_40_ENABLED
2023       if (proc_bind_icv != proc_bind_default) {
2024         new_icvs.proc_bind = proc_bind_icv;
2025       }
2026 #endif /* OMP_40_ENABLED */
2027 
2028       /* allocate a new parallel team */
2029       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2030       team = __kmp_allocate_team(root, nthreads, nthreads,
2031 #if OMPT_SUPPORT
2032                                  ompt_parallel_data,
2033 #endif
2034 #if OMP_40_ENABLED
2035                                  proc_bind,
2036 #endif
2037                                  &new_icvs, argc USE_NESTED_HOT_ARG(master_th));
2038     } else {
2039       /* allocate a new parallel team */
2040       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2041       team = __kmp_allocate_team(root, nthreads, nthreads,
2042 #if OMPT_SUPPORT
2043                                  ompt_parallel_data,
2044 #endif
2045 #if OMP_40_ENABLED
2046                                  proc_bind,
2047 #endif
2048                                  &master_th->th.th_current_task->td_icvs,
2049                                  argc USE_NESTED_HOT_ARG(master_th));
2050     }
2051     KF_TRACE(
2052         10, ("__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));
2053 
2054     /* setup the new team */
2055     KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
2056     KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
2057     KMP_CHECK_UPDATE(team->t.t_ident, loc);
2058     KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
2059     KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
2060 #if OMPT_SUPPORT
2061     KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
2062                           return_address);
2063 #endif
2064     KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
2065 // TODO: parent_team->t.t_level == INT_MAX ???
2066 #if OMP_40_ENABLED
2067     if (!master_th->th.th_teams_microtask || level > teams_level) {
2068 #endif /* OMP_40_ENABLED */
2069       int new_level = parent_team->t.t_level + 1;
2070       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2071       new_level = parent_team->t.t_active_level + 1;
2072       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2073 #if OMP_40_ENABLED
2074     } else {
2075       // AC: Do not increase parallel level at start of the teams construct
2076       int new_level = parent_team->t.t_level;
2077       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2078       new_level = parent_team->t.t_active_level;
2079       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2080     }
2081 #endif /* OMP_40_ENABLED */
2082     kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
2083     // set master's schedule as new run-time schedule
2084     KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
2085 
2086 #if OMP_40_ENABLED
2087     KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
2088 #endif
2089 #if OMP_50_ENABLED
2090     KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
2091 #endif
2092 
2093     // Update the floating point rounding in the team if required.
2094     propagateFPControl(team);
2095 
2096     if (__kmp_tasking_mode != tskm_immediate_exec) {
2097       // Set the master's task team to the team's task team. Unless this is a hot
2098       // team, it should be NULL.
2099       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2100                        parent_team->t.t_task_team[master_th->th.th_task_state]);
2101       KA_TRACE(20, ("__kmp_fork_call: Master T#%d pushing task_team %p / team "
2102                     "%p, new task_team %p / team %p\n",
2103                     __kmp_gtid_from_thread(master_th),
2104                     master_th->th.th_task_team, parent_team,
2105                     team->t.t_task_team[master_th->th.th_task_state], team));
2106 
2107       if (active_level || master_th->th.th_task_team) {
2108         // Take a memo of master's task_state
2109         KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
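        // If the memo stack is full, double its size, copying the existing
        // entries and zero-filling the new tail, before pushing the current
        // task_state.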
2110         if (master_th->th.th_task_state_top >=
2111             master_th->th.th_task_state_stack_sz) { // increase size
2112           kmp_uint32 new_size = 2 * master_th->th.th_task_state_stack_sz;
2113           kmp_uint8 *old_stack, *new_stack;
2114           kmp_uint32 i;
2115           new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
2116           for (i = 0; i < master_th->th.th_task_state_stack_sz; ++i) {
2117             new_stack[i] = master_th->th.th_task_state_memo_stack[i];
2118           }
2119           for (i = master_th->th.th_task_state_stack_sz; i < new_size;
2120                ++i) { // zero-init rest of stack
2121             new_stack[i] = 0;
2122           }
2123           old_stack = master_th->th.th_task_state_memo_stack;
2124           master_th->th.th_task_state_memo_stack = new_stack;
2125           master_th->th.th_task_state_stack_sz = new_size;
2126           __kmp_free(old_stack);
2127         }
2128         // Store master's task_state on stack
2129         master_th->th
2130             .th_task_state_memo_stack[master_th->th.th_task_state_top] =
2131             master_th->th.th_task_state;
2132         master_th->th.th_task_state_top++;
2133 #if KMP_NESTED_HOT_TEAMS
2134         if (master_th->th.th_hot_teams &&
2135             team == master_th->th.th_hot_teams[active_level].hot_team) {
2136           // Restore master's nested state if nested hot team
2137           master_th->th.th_task_state =
2138               master_th->th
2139                   .th_task_state_memo_stack[master_th->th.th_task_state_top];
2140         } else {
2141 #endif
2142           master_th->th.th_task_state = 0;
2143 #if KMP_NESTED_HOT_TEAMS
2144         }
2145 #endif
2146       }
2147 #if !KMP_NESTED_HOT_TEAMS
2148       KMP_DEBUG_ASSERT((master_th->th.th_task_team == NULL) ||
2149                        (team == root->r.r_hot_team));
2150 #endif
2151     }
2152 
2153     KA_TRACE(
2154         20,
2155         ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
2156          gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
2157          team->t.t_nproc));
2158     KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2159                      (team->t.t_master_tid == 0 &&
2160                       (team->t.t_parent == root->r.r_root_team ||
2161                        team->t.t_parent->t.t_serialized)));
2162     KMP_MB();
2163 
2164     /* now, setup the arguments */
2165     argv = (void **)team->t.t_argv;
2166 #if OMP_40_ENABLED
2167     if (ap) {
2168 #endif /* OMP_40_ENABLED */
2169       for (i = argc - 1; i >= 0; --i) {
2170 // TODO: revert workaround for Intel(R) 64 tracker #96
2171 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
2172         void *new_argv = va_arg(*ap, void *);
2173 #else
2174       void *new_argv = va_arg(ap, void *);
2175 #endif
2176         KMP_CHECK_UPDATE(*argv, new_argv);
2177         argv++;
2178       }
2179 #if OMP_40_ENABLED
2180     } else {
2181       for (i = 0; i < argc; ++i) {
2182         // Get args from parent team for teams construct
2183         KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
2184       }
2185     }
2186 #endif /* OMP_40_ENABLED */
2187 
2188     /* now actually fork the threads */
2189     KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
2190     if (!root->r.r_active) // Only do assignment if it prevents cache ping-pong
2191       root->r.r_active = TRUE;
2192 
2193     __kmp_fork_team_threads(root, team, master_th, gtid);
2194     __kmp_setup_icv_copy(team, nthreads,
2195                          &master_th->th.th_current_task->td_icvs, loc);
2196 
2197 #if OMPT_SUPPORT
2198     master_th->th.ompt_thread_info.state = omp_state_work_parallel;
2199 #endif
2200 
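    // The team is set up and the workers have been forked, so the fork/join
    // lock taken in the reservation step above can now be released.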
2201     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2202 
2203 #if USE_ITT_BUILD
2204     if (team->t.t_active_level == 1 // only report frames at level 1
2205 #if OMP_40_ENABLED
2206         && !master_th->th.th_teams_microtask // not in teams construct
2207 #endif /* OMP_40_ENABLED */
2208         ) {
2209 #if USE_ITT_NOTIFY
2210       if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2211           (__kmp_forkjoin_frames_mode == 3 ||
2212            __kmp_forkjoin_frames_mode == 1)) {
2213         kmp_uint64 tmp_time = 0;
2214         if (__itt_get_timestamp_ptr)
2215           tmp_time = __itt_get_timestamp();
2216         // Internal fork - report frame begin
2217         master_th->th.th_frame_time = tmp_time;
2218         if (__kmp_forkjoin_frames_mode == 3)
2219           team->t.t_region_time = tmp_time;
2220       } else
2221 // only one notification scheme (either "submit" or "forking/joined", not both)
2222 #endif /* USE_ITT_NOTIFY */
2223           if ((__itt_frame_begin_v3_ptr || KMP_ITT_DEBUG) &&
2224               __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode) {
2225         // Mark start of "parallel" region for Intel(R) VTune(TM) analyzer.
2226         __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
2227       }
2228     }
2229 #endif /* USE_ITT_BUILD */
2230 
2231     /* now go on and do the work */
2232     KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
2233     KMP_MB();
2234     KF_TRACE(10,
2235              ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2236               root, team, master_th, gtid));
2237 
2238 #if USE_ITT_BUILD
2239     if (__itt_stack_caller_create_ptr) {
2240       team->t.t_stack_id =
2241           __kmp_itt_stack_caller_create(); // create new stack stitching id
2242       // before entering fork barrier
2243     }
2244 #endif /* USE_ITT_BUILD */
2245 
2246 #if OMP_40_ENABLED
2247     // AC: skip __kmp_internal_fork for the teams construct; only master
2248     // threads execute
2249     if (ap)
2250 #endif /* OMP_40_ENABLED */
2251     {
2252       __kmp_internal_fork(loc, gtid, team);
2253       KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
2254                     "master_th=%p, gtid=%d\n",
2255                     root, team, master_th, gtid));
2256     }
2257 
2258     if (call_context == fork_context_gnu) {
2259       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2260       return TRUE;
2261     }
2262 
2263     /* Invoke microtask for MASTER thread */
2264     KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
2265                   team->t.t_id, team->t.t_pkfn));
2266   } // END of timer KMP_fork_call block
2267 
2268   if (!team->t.t_invoke(gtid)) {
2269     KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
2270   }
2271   KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
2272                 team->t.t_id, team->t.t_pkfn));
2273   KMP_MB(); /* Flush all pending memory write invalidates.  */
2274 
2275   KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2276 
2277 #if OMPT_SUPPORT
2278   if (ompt_enabled.enabled) {
2279     master_th->th.ompt_thread_info.state = omp_state_overhead;
2280   }
2281 #endif
2282 
2283   return TRUE;
2284 }
2285 
2286 #if OMPT_SUPPORT
2287 static inline void __kmp_join_restore_state(kmp_info_t *thread,
2288                                             kmp_team_t *team) {
2289   // restore state outside the region
2290   thread->th.ompt_thread_info.state =
2291       ((team->t.t_serialized) ? omp_state_work_serial
2292                               : omp_state_work_parallel);
2293 }
2294 
2295 static inline void __kmp_join_ompt(int gtid, kmp_info_t *thread,
2296                                    kmp_team_t *team, ompt_data_t *parallel_data,
2297                                    fork_context_e fork_context, void *codeptr) {
2298   ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2299   if (ompt_enabled.ompt_callback_parallel_end) {
2300     ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
2301         parallel_data, &(task_info->task_data), OMPT_INVOKER(fork_context),
2302         codeptr);
2303   }
2304 
2305   task_info->frame.enter_frame = NULL;
2306   __kmp_join_restore_state(thread, team);
2307 }
2308 #endif
2309 
2310 void __kmp_join_call(ident_t *loc, int gtid
2311 #if OMPT_SUPPORT
2312                      ,
2313                      enum fork_context_e fork_context
2314 #endif
2315 #if OMP_40_ENABLED
2316                      ,
2317                      int exit_teams
2318 #endif /* OMP_40_ENABLED */
2319                      ) {
2320   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call);
2321   kmp_team_t *team;
2322   kmp_team_t *parent_team;
2323   kmp_info_t *master_th;
2324   kmp_root_t *root;
2325   int master_active;
2326   int i;
2327 
2328   KA_TRACE(20, ("__kmp_join_call: enter T#%d\n", gtid));
2329 
2330   /* setup current data */
2331   master_th = __kmp_threads[gtid];
2332   root = master_th->th.th_root;
2333   team = master_th->th.th_team;
2334   parent_team = team->t.t_parent;
2335 
2336   master_th->th.th_ident = loc;
2337 
2338 #if OMPT_SUPPORT
2339   if (ompt_enabled.enabled) {
2340     master_th->th.ompt_thread_info.state = omp_state_overhead;
2341   }
2342 #endif
2343 
2344 #if KMP_DEBUG
2345   if (__kmp_tasking_mode != tskm_immediate_exec && !exit_teams) {
2346     KA_TRACE(20, ("__kmp_join_call: T#%d, old team = %p old task_team = %p, "
2347                   "th_task_team = %p\n",
2348                   __kmp_gtid_from_thread(master_th), team,
2349                   team->t.t_task_team[master_th->th.th_task_state],
2350                   master_th->th.th_task_team));
2351     KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2352                      team->t.t_task_team[master_th->th.th_task_state]);
2353   }
2354 #endif
2355 
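  // Serialized region: just unwind the serialized nesting (with special
  // handling for the teams construct below) and return without a join barrier.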
2356   if (team->t.t_serialized) {
2357 #if OMP_40_ENABLED
2358     if (master_th->th.th_teams_microtask) {
2359       // We are in teams construct
2360       int level = team->t.t_level;
2361       int tlevel = master_th->th.th_teams_level;
2362       if (level == tlevel) {
2363         // AC: we haven't incremented it earlier at start of teams construct,
2364         //     so do it here - at the end of teams construct
2365         team->t.t_level++;
2366       } else if (level == tlevel + 1) {
2367         // AC: we are exiting parallel inside teams, need to increment
2368         // serialization in order to restore it in the next call to
2369         // __kmpc_end_serialized_parallel
2370         team->t.t_serialized++;
2371       }
2372     }
2373 #endif /* OMP_40_ENABLED */
2374     __kmpc_end_serialized_parallel(loc, gtid);
2375 
2376 #if OMPT_SUPPORT
2377     if (ompt_enabled.enabled) {
2378       __kmp_join_restore_state(master_th, parent_team);
2379     }
2380 #endif
2381 
2382     return;
2383   }
2384 
2385   master_active = team->t.t_master_active;
2386 
2387 #if OMP_40_ENABLED
2388   if (!exit_teams)
2389 #endif /* OMP_40_ENABLED */
2390   {
2391     // AC: No barrier for internal teams at exit from teams construct.
2392     //     But there is barrier for external team (league).
2393     __kmp_internal_join(loc, gtid, team);
2394   }
2395 #if OMP_40_ENABLED
2396   else {
2397     master_th->th.th_task_state =
2398         0; // AC: no tasking in teams (out of any parallel)
2399   }
2400 #endif /* OMP_40_ENABLED */
2401 
2402   KMP_MB();
2403 
2404 #if OMPT_SUPPORT
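  // Grab the OMPT parallel data pointer and the recorded master return address
  // for the end-of-parallel callbacks issued later in this join.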
2405   ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
2406   void *codeptr = team->t.ompt_team_info.master_return_address;
2407 #endif
2408 
2409 #if USE_ITT_BUILD
2410   if (__itt_stack_caller_create_ptr) {
2411     __kmp_itt_stack_caller_destroy(
2412         (__itt_caller)team->t
2413             .t_stack_id); // destroy the stack stitching id after join barrier
2414   }
2415 
2416   // Mark end of "parallel" region for Intel(R) VTune(TM) analyzer.
2417   if (team->t.t_active_level == 1
2418 #if OMP_40_ENABLED
2419       && !master_th->th.th_teams_microtask /* not in teams construct */
2420 #endif /* OMP_40_ENABLED */
2421       ) {
2422     master_th->th.th_ident = loc;
2423     // only one notification scheme (either "submit" or "forking/joined", not
2424     // both)
2425     if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2426         __kmp_forkjoin_frames_mode == 3)
2427       __kmp_itt_frame_submit(gtid, team->t.t_region_time,
2428                              master_th->th.th_frame_time, 0, loc,
2429                              master_th->th.th_team_nproc, 1);
2430     else if ((__itt_frame_end_v3_ptr || KMP_ITT_DEBUG) &&
2431              !__kmp_forkjoin_frames_mode && __kmp_forkjoin_frames)
2432       __kmp_itt_region_joined(gtid);
2433   } // active_level == 1
2434 #endif /* USE_ITT_BUILD */
2435 
2436 #if OMP_40_ENABLED
2437   if (master_th->th.th_teams_microtask && !exit_teams &&
2438       team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
2439       team->t.t_level == master_th->th.th_teams_level + 1) {
2440     // AC: We need to leave the team structure intact at the end of a parallel
2441     // inside the teams construct, so that the same (hot) team works for the next
2442     // parallel; only the nesting levels are adjusted
2443 
2444     /* Decrement our nested depth level */
2445     team->t.t_level--;
2446     team->t.t_active_level--;
2447     KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2448 
2449     /* Restore number of threads in the team if needed */
2450     if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) {
2451       int old_num = master_th->th.th_team_nproc;
2452       int new_num = master_th->th.th_teams_size.nth;
2453       kmp_info_t **other_threads = team->t.t_threads;
2454       team->t.t_nproc = new_num;
2455       for (i = 0; i < old_num; ++i) {
2456         other_threads[i]->th.th_team_nproc = new_num;
2457       }
2458       // Adjust the state of the team's currently unused threads
2459       for (i = old_num; i < new_num; ++i) {
2460         // Re-initialize thread's barrier data.
2461         int b;
2462         kmp_balign_t *balign = other_threads[i]->th.th_bar;
2463         for (b = 0; b < bs_last_barrier; ++b) {
2464           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
2465           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
2466 #if USE_DEBUGGER
2467           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
2468 #endif
2469         }
2470         if (__kmp_tasking_mode != tskm_immediate_exec) {
2471           // Synchronize thread's task state
2472           other_threads[i]->th.th_task_state = master_th->th.th_task_state;
2473         }
2474       }
2475     }
2476 
2477 #if OMPT_SUPPORT
2478     if (ompt_enabled.enabled) {
2479       __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, fork_context,
2480                       codeptr);
2481     }
2482 #endif
2483 
2484     return;
2485   }
2486 #endif /* OMP_40_ENABLED */
2487 
2488   /* do cleanup and restore the parent team */
2489   master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
2490   master_th->th.th_local.this_construct = team->t.t_master_this_cons;
2491 
2492   master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2493 
2494   /* jc: The following lock has instructions with REL and ACQ semantics,
2495      separating the parallel user code called in this parallel region
2496      from the serial user code called after this function returns. */
2497   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2498 
2499 #if OMP_40_ENABLED
2500   if (!master_th->th.th_teams_microtask ||
2501       team->t.t_level > master_th->th.th_teams_level)
2502 #endif /* OMP_40_ENABLED */
2503   {
2504     /* Decrement our nested depth level */
2505     KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2506   }
2507   KMP_DEBUG_ASSERT(root->r.r_in_parallel >= 0);
2508 
2509 #if OMPT_SUPPORT
2510   if (ompt_enabled.enabled) {
2511     ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2512     if (ompt_enabled.ompt_callback_implicit_task) {
2513       int ompt_team_size = team->t.t_nproc;
2514       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
2515           ompt_scope_end, NULL, &(task_info->task_data), ompt_team_size,
2516           OMPT_CUR_TASK_INFO(master_th)->thread_num);
2517     }
2518 
2519     task_info->frame.exit_frame = NULL;
2520     task_info->task_data = ompt_data_none;
2521   }
2522 #endif
2523 
2524   KF_TRACE(10, ("__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2525                 master_th, team));
2526   __kmp_pop_current_task_from_thread(master_th);
2527 
2528 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
2529   // Restore master thread's partition.
2530   master_th->th.th_first_place = team->t.t_first_place;
2531   master_th->th.th_last_place = team->t.t_last_place;
2532 #endif /* OMP_40_ENABLED */
2533 #if OMP_50_ENABLED
2534   master_th->th.th_def_allocator = team->t.t_def_allocator;
2535 #endif
2536 
2537   updateHWFPControl(team);
2538 
2539   if (root->r.r_active != master_active)
2540     root->r.r_active = master_active;
2541 
2542   __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2543                             master_th)); // this will free worker threads
2544 
2545   /* This race was fun to find. Make sure the following stays in the critical
2546      region, otherwise assertions may occasionally fail since the old team may be
2547      reallocated and the hierarchy appears inconsistent. It is actually safe to
2548      run and won't cause any bugs, but it will trigger those assertion failures.
2549      It is only one dereference and assignment, so keep it in the critical region. */
2550   master_th->th.th_team = parent_team;
2551   master_th->th.th_team_nproc = parent_team->t.t_nproc;
2552   master_th->th.th_team_master = parent_team->t.t_threads[0];
2553   master_th->th.th_team_serialized = parent_team->t.t_serialized;
2554 
2555   /* restore serialized team, if need be */
2556   if (parent_team->t.t_serialized &&
2557       parent_team != master_th->th.th_serial_team &&
2558       parent_team != root->r.r_root_team) {
2559     __kmp_free_team(root,
2560                     master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
2561     master_th->th.th_serial_team = parent_team;
2562   }
2563 
2564   if (__kmp_tasking_mode != tskm_immediate_exec) {
2565     if (master_th->th.th_task_state_top >
2566         0) { // Restore task state from memo stack
2567       KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2568       // Remember master's state if we re-use this nested hot team
2569       master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] =
2570           master_th->th.th_task_state;
2571       --master_th->th.th_task_state_top; // pop
2572       // Now restore state at this level
2573       master_th->th.th_task_state =
2574           master_th->th
2575               .th_task_state_memo_stack[master_th->th.th_task_state_top];
2576     }
2577     // Copy the task team from the parent team to the master thread
2578     master_th->th.th_task_team =
2579         parent_team->t.t_task_team[master_th->th.th_task_state];
2580     KA_TRACE(20,
2581              ("__kmp_join_call: Master T#%d restoring task_team %p / team %p\n",
2582               __kmp_gtid_from_thread(master_th), master_th->th.th_task_team,
2583               parent_team));
2584   }
2585 
2586   // TODO: GEH - cannot do this assertion because root thread not set up as
2587   // executing
2588   // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 0 );
2589   master_th->th.th_current_task->td_flags.executing = 1;
2590 
2591   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2592 
2593 #if OMPT_SUPPORT
2594   if (ompt_enabled.enabled) {
2595     __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, fork_context,
2596                     codeptr);
2597   }
2598 #endif
2599 
2600   KMP_MB();
2601   KA_TRACE(20, ("__kmp_join_call: exit T#%d\n", gtid));
2602 }
2603 
2604 /* Check whether we should push an internal control record onto the
2605    serial team stack.  If so, do it.  */
2606 void __kmp_save_internal_controls(kmp_info_t *thread) {
2607 
2608   if (thread->th.th_team != thread->th.th_serial_team) {
2609     return;
2610   }
2611   if (thread->th.th_team->t.t_serialized > 1) {
2612     int push = 0;
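    // Push a new record only if none exists yet for the current serialization
    // depth, so there is at most one control record per nesting level.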
2613 
2614     if (thread->th.th_team->t.t_control_stack_top == NULL) {
2615       push = 1;
2616     } else {
2617       if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
2618           thread->th.th_team->t.t_serialized) {
2619         push = 1;
2620       }
2621     }
2622     if (push) { /* push a record on the serial team's stack */
2623       kmp_internal_control_t *control =
2624           (kmp_internal_control_t *)__kmp_allocate(
2625               sizeof(kmp_internal_control_t));
2626 
2627       copy_icvs(control, &thread->th.th_current_task->td_icvs);
2628 
2629       control->serial_nesting_level = thread->th.th_team->t.t_serialized;
2630 
2631       control->next = thread->th.th_team->t.t_control_stack_top;
2632       thread->th.th_team->t.t_control_stack_top = control;
2633     }
2634   }
2635 }
2636 
2637 /* Changes set_nproc */
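/* Presumably reached from the omp_set_num_threads() entry points (see
   kmp_ftn_entry.h); clamps new_nth to [1, __kmp_max_nth]. */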
2638 void __kmp_set_num_threads(int new_nth, int gtid) {
2639   kmp_info_t *thread;
2640   kmp_root_t *root;
2641 
2642   KF_TRACE(10, ("__kmp_set_num_threads: new __kmp_nth = %d\n", new_nth));
2643   KMP_DEBUG_ASSERT(__kmp_init_serial);
2644 
2645   if (new_nth < 1)
2646     new_nth = 1;
2647   else if (new_nth > __kmp_max_nth)
2648     new_nth = __kmp_max_nth;
2649 
2650   KMP_COUNT_VALUE(OMP_set_numthreads, new_nth);
2651   thread = __kmp_threads[gtid];
2652   if (thread->th.th_current_task->td_icvs.nproc == new_nth)
2653     return; // nothing to do
2654 
2655   __kmp_save_internal_controls(thread);
2656 
2657   set__nproc(thread, new_nth);
2658 
2659   // If this omp_set_num_threads() call will cause the hot team size to be
2660   // reduced (in the absence of a num_threads clause), then reduce it now,
2661   // rather than waiting for the next parallel region.
2662   root = thread->th.th_root;
2663   if (__kmp_init_parallel && (!root->r.r_active) &&
2664       (root->r.r_hot_team->t.t_nproc > new_nth)
2665 #if KMP_NESTED_HOT_TEAMS
2666       && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode
2667 #endif
2668       ) {
2669     kmp_team_t *hot_team = root->r.r_hot_team;
2670     int f;
2671 
2672     __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2673 
2674     // Release the extra threads we don't need any more.
2675     for (f = new_nth; f < hot_team->t.t_nproc; f++) {
2676       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2677       if (__kmp_tasking_mode != tskm_immediate_exec) {
2678         // When decreasing team size, threads no longer in the team should unref
2679         // task team.
2680         hot_team->t.t_threads[f]->th.th_task_team = NULL;
2681       }
2682       __kmp_free_thread(hot_team->t.t_threads[f]);
2683       hot_team->t.t_threads[f] = NULL;
2684     }
2685     hot_team->t.t_nproc = new_nth;
2686 #if KMP_NESTED_HOT_TEAMS
2687     if (thread->th.th_hot_teams) {
2688       KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team);
2689       thread->th.th_hot_teams[0].hot_team_nth = new_nth;
2690     }
2691 #endif
2692 
2693     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2694 
2695     // Update the t_nproc field in the threads that are still active.
2696     for (f = 0; f < new_nth; f++) {
2697       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2698       hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
2699     }
2700     // Special flag marking a size change caused by omp_set_num_threads()
2701     hot_team->t.t_size_changed = -1;
2702   }
2703 }
2704 
2705 /* Changes max_active_levels */
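/* Presumably backs omp_set_max_active_levels(); negative values are ignored
   with a warning and too-large values are clamped, as validated below. */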
2706 void __kmp_set_max_active_levels(int gtid, int max_active_levels) {
2707   kmp_info_t *thread;
2708 
2709   KF_TRACE(10, ("__kmp_set_max_active_levels: new max_active_levels for thread "
2710                 "%d = (%d)\n",
2711                 gtid, max_active_levels));
2712   KMP_DEBUG_ASSERT(__kmp_init_serial);
2713 
2714   // validate max_active_levels
2715   if (max_active_levels < 0) {
2716     KMP_WARNING(ActiveLevelsNegative, max_active_levels);
2717     // We ignore this call if the user has specified a negative value.
2718     // The current setting won't be changed. The last valid setting will be
2719     // used. A warning will be issued (if warnings are allowed as controlled by
2720     // the KMP_WARNINGS env var).
2721     KF_TRACE(10, ("__kmp_set_max_active_levels: the call is ignored: new "
2722                   "max_active_levels for thread %d = (%d)\n",
2723                   gtid, max_active_levels));
2724     return;
2725   }
2726   if (max_active_levels <= KMP_MAX_ACTIVE_LEVELS_LIMIT) {
2727     // OK: max_active_levels is within the valid range
2728     // [0, KMP_MAX_ACTIVE_LEVELS_LIMIT].
2729     // A zero value is allowed (implementation-defined behavior).
2730   } else {
2731     KMP_WARNING(ActiveLevelsExceedLimit, max_active_levels,
2732                 KMP_MAX_ACTIVE_LEVELS_LIMIT);
2733     max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT;
2734     // The current upper limit is MAX_INT (implementation-defined behavior).
2735     // If the input exceeds the upper limit, we clamp it to the upper limit
2736     // (implementation-defined behavior).
2737     // In practice, the flow should never get here while the limit is MAX_INT.
2738   }
2739   KF_TRACE(10, ("__kmp_set_max_active_levels: after validation: new "
2740                 "max_active_levels for thread %d = (%d)\n",
2741                 gtid, max_active_levels));
2742 
2743   thread = __kmp_threads[gtid];
2744 
2745   __kmp_save_internal_controls(thread);
2746 
2747   set__max_active_levels(thread, max_active_levels);
2748 }
2749 
2750 /* Gets max_active_levels */
2751 int __kmp_get_max_active_levels(int gtid) {
2752   kmp_info_t *thread;
2753 
2754   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d\n", gtid));
2755   KMP_DEBUG_ASSERT(__kmp_init_serial);
2756 
2757   thread = __kmp_threads[gtid];
2758   KMP_DEBUG_ASSERT(thread->th.th_current_task);
2759   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d, curtask=%p, "
2760                 "curtask_maxaclevel=%d\n",
2761                 gtid, thread->th.th_current_task,
2762                 thread->th.th_current_task->td_icvs.max_active_levels));
2763   return thread->th.th_current_task->td_icvs.max_active_levels;
2764 }
2765 
2766 /* Changes def_sched_var ICV values (run-time schedule kind and chunk) */
2767 void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) {
2768   kmp_info_t *thread;
2769   //    kmp_team_t *team;
2770 
2771   KF_TRACE(10, ("__kmp_set_schedule: new schedule for thread %d = (%d, %d)\n",
2772                 gtid, (int)kind, chunk));
2773   KMP_DEBUG_ASSERT(__kmp_init_serial);
2774 
2775   // Check if the kind parameter is valid, correct if needed.
2776   // Valid parameters should fit in one of two intervals - standard or extended:
2777   //       <lower>, <valid>, <upper_std>, <lower_ext>, <valid>, <upper>
2778   // 2008-01-25: 0,  1 - 4,       5,         100,     101 - 102, 103
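  // For example, with the numbering above, a standard kind such as 2 lies
  // strictly inside (0, 5) and is accepted, whereas a value such as 50 falls
  // between the standard and extended intervals and is replaced below by
  // kmp_sched_default with chunk 0.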
2779   if (kind <= kmp_sched_lower || kind >= kmp_sched_upper ||
2780       (kind <= kmp_sched_lower_ext && kind >= kmp_sched_upper_std)) {
2781     // TODO: Hint needs attention in case we change the default schedule.
2782     __kmp_msg(kmp_ms_warning, KMP_MSG(ScheduleKindOutOfRange, kind),
2783               KMP_HNT(DefaultScheduleKindUsed, "static, no chunk"),
2784               __kmp_msg_null);
2785     kind = kmp_sched_default;
2786     chunk = 0; // ignore chunk value in case of bad kind
2787   }
2788 
2789   thread = __kmp_threads[gtid];
2790 
2791   __kmp_save_internal_controls(thread);
2792 
2793   if (kind < kmp_sched_upper_std) {
2794     if (kind == kmp_sched_static && chunk < KMP_DEFAULT_CHUNK) {
      // distinguish static chunked vs. unchunked: chunk should be invalid to
      // indicate an unchunked schedule (which is the default)
2797       thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static;
2798     } else {
2799       thread->th.th_current_task->td_icvs.sched.r_sched_type =
2800           __kmp_sch_map[kind - kmp_sched_lower - 1];
2801     }
2802   } else {
2803     //    __kmp_sch_map[ kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2804     //    kmp_sched_lower - 2 ];
2805     thread->th.th_current_task->td_icvs.sched.r_sched_type =
2806         __kmp_sch_map[kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2807                       kmp_sched_lower - 2];
2808   }
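  // Worked example of the mapping arithmetic above (same numbering): a
  // standard kind 2 selects __kmp_sch_map[2 - 0 - 1] = __kmp_sch_map[1], while
  // an extended kind 101 selects
  // __kmp_sch_map[101 - 100 + 5 - 0 - 2] = __kmp_sch_map[4].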
2809   if (kind == kmp_sched_auto || chunk < 1) {
2810     // ignore parameter chunk for schedule auto
2811     thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK;
2812   } else {
2813     thread->th.th_current_task->td_icvs.sched.chunk = chunk;
2814   }
2815 }
2816 
2817 /* Gets def_sched_var ICV values */
2818 void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) {
2819   kmp_info_t *thread;
2820   enum sched_type th_type;
2821 
2822   KF_TRACE(10, ("__kmp_get_schedule: thread %d\n", gtid));
2823   KMP_DEBUG_ASSERT(__kmp_init_serial);
2824 
2825   thread = __kmp_threads[gtid];
2826 
2827   th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type;
2828 
2829   switch (th_type) {
2830   case kmp_sch_static:
2831   case kmp_sch_static_greedy:
2832   case kmp_sch_static_balanced:
2833     *kind = kmp_sched_static;
    *chunk = 0; // chunk was not set; indicate this fact via a zero value
2835     return;
2836   case kmp_sch_static_chunked:
2837     *kind = kmp_sched_static;
2838     break;
2839   case kmp_sch_dynamic_chunked:
2840     *kind = kmp_sched_dynamic;
2841     break;
2842   case kmp_sch_guided_chunked:
2843   case kmp_sch_guided_iterative_chunked:
2844   case kmp_sch_guided_analytical_chunked:
2845     *kind = kmp_sched_guided;
2846     break;
2847   case kmp_sch_auto:
2848     *kind = kmp_sched_auto;
2849     break;
2850   case kmp_sch_trapezoidal:
2851     *kind = kmp_sched_trapezoidal;
2852     break;
2853 #if KMP_STATIC_STEAL_ENABLED
2854   case kmp_sch_static_steal:
2855     *kind = kmp_sched_static_steal;
2856     break;
2857 #endif
2858   default:
2859     KMP_FATAL(UnknownSchedulingType, th_type);
2860   }
2861 
2862   *chunk = thread->th.th_current_task->td_icvs.sched.chunk;
2863 }
2864 
2865 int __kmp_get_ancestor_thread_num(int gtid, int level) {
2866 
2867   int ii, dd;
2868   kmp_team_t *team;
2869   kmp_info_t *thr;
2870 
2871   KF_TRACE(10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level));
2872   KMP_DEBUG_ASSERT(__kmp_init_serial);
2873 
2874   // validate level
2875   if (level == 0)
2876     return 0;
2877   if (level < 0)
2878     return -1;
2879   thr = __kmp_threads[gtid];
2880   team = thr->th.th_team;
2881   ii = team->t.t_level;
2882   if (level > ii)
2883     return -1;
2884 
2885 #if OMP_40_ENABLED
2886   if (thr->th.th_teams_microtask) {
    // AC: we are in a teams region where multiple nested teams have the same
    // level
    int tlevel = thr->th.th_teams_level; // the level of the teams construct
    if (level <= tlevel) {
      // otherwise the usual algorithm works (it will not touch the teams)
      KMP_DEBUG_ASSERT(ii >= tlevel);
      // AC: Since we need to pass through the teams league, we need to
      // artificially increase ii
      if (ii == tlevel) {
        ii += 2; // three teams have the same level
      } else {
        ii++; // two teams have the same level
      }
2899     }
2900   }
2901 #endif
2902 
2903   if (ii == level)
2904     return __kmp_tid_from_gtid(gtid);
2905 
2906   dd = team->t.t_serialized;
2907   level++;
2908   while (ii > level) {
2909     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2910     }
2911     if ((team->t.t_serialized) && (!dd)) {
2912       team = team->t.t_parent;
2913       continue;
2914     }
2915     if (ii > level) {
2916       team = team->t.t_parent;
2917       dd = team->t.t_serialized;
2918       ii--;
2919     }
2920   }
2921 
2922   return (dd > 1) ? (0) : (team->t.t_master_tid);
2923 }
2924 
2925 int __kmp_get_team_size(int gtid, int level) {
2926 
2927   int ii, dd;
2928   kmp_team_t *team;
2929   kmp_info_t *thr;
2930 
2931   KF_TRACE(10, ("__kmp_get_team_size: thread %d %d\n", gtid, level));
2932   KMP_DEBUG_ASSERT(__kmp_init_serial);
2933 
2934   // validate level
2935   if (level == 0)
2936     return 1;
2937   if (level < 0)
2938     return -1;
2939   thr = __kmp_threads[gtid];
2940   team = thr->th.th_team;
2941   ii = team->t.t_level;
2942   if (level > ii)
2943     return -1;
2944 
2945 #if OMP_40_ENABLED
2946   if (thr->th.th_teams_microtask) {
    // AC: we are in a teams region where multiple nested teams have the same
    // level
    int tlevel = thr->th.th_teams_level; // the level of the teams construct
    if (level <= tlevel) {
      // otherwise the usual algorithm works (it will not touch the teams)
      KMP_DEBUG_ASSERT(ii >= tlevel);
      // AC: Since we need to pass through the teams league, we need to
      // artificially increase ii
      if (ii == tlevel) {
        ii += 2; // three teams have the same level
      } else {
        ii++; // two teams have the same level
      }
2959     }
2960   }
2961 #endif
2962 
2963   while (ii > level) {
2964     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2965     }
2966     if (team->t.t_serialized && (!dd)) {
2967       team = team->t.t_parent;
2968       continue;
2969     }
2970     if (ii > level) {
2971       team = team->t.t_parent;
2972       ii--;
2973     }
2974   }
2975 
2976   return team->t.t_nproc;
2977 }
2978 
2979 kmp_r_sched_t __kmp_get_schedule_global() {
2980   // This routine created because pairs (__kmp_sched, __kmp_chunk) and
2981   // (__kmp_static, __kmp_guided) may be changed by kmp_set_defaults
2982   // independently. So one can get the updated schedule here.
2983 
2984   kmp_r_sched_t r_sched;
2985 
2986   // create schedule from 4 globals: __kmp_sched, __kmp_chunk, __kmp_static,
2987   // __kmp_guided. __kmp_sched should keep original value, so that user can set
2988   // KMP_SCHEDULE multiple times, and thus have different run-time schedules in
2989   // different roots (even in OMP 2.5)
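  // For example, a plain STATIC selected via KMP_SCHEDULE is returned as
  // whichever detailed static schedule __kmp_static currently holds, while
  // DYNAMIC_CHUNKED is passed through unchanged.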
2990   if (__kmp_sched == kmp_sch_static) {
2991     // replace STATIC with more detailed schedule (balanced or greedy)
2992     r_sched.r_sched_type = __kmp_static;
2993   } else if (__kmp_sched == kmp_sch_guided_chunked) {
2994     // replace GUIDED with more detailed schedule (iterative or analytical)
2995     r_sched.r_sched_type = __kmp_guided;
2996   } else { // (STATIC_CHUNKED), or (DYNAMIC_CHUNKED), or other
2997     r_sched.r_sched_type = __kmp_sched;
2998   }
2999 
3000   if (__kmp_chunk < KMP_DEFAULT_CHUNK) {
3001     // __kmp_chunk may be wrong here (if it was not ever set)
3002     r_sched.chunk = KMP_DEFAULT_CHUNK;
3003   } else {
3004     r_sched.chunk = __kmp_chunk;
3005   }
3006 
3007   return r_sched;
3008 }
3009 
/* Allocate (realloc == FALSE) or reallocate (realloc == TRUE)
   at least argc *t_argv entries for the requested team. */
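/* Summary of the sizing policy below: when argc fits in
   KMP_INLINE_ARGV_ENTRIES, the inline space inside the team structure is
   reused; otherwise heap space is allocated -- KMP_MIN_MALLOC_ARGV_ENTRIES
   entries for small argc, or 2 * argc entries for larger argc -- after first
   freeing any previously heap-allocated t_argv (when reallocating). */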
3012 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {
3013 
3014   KMP_DEBUG_ASSERT(team);
3015   if (!realloc || argc > team->t.t_max_argc) {
3016 
3017     KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
3018                    "current entries=%d\n",
3019                    team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
3020     /* if previously allocated heap space for args, free them */
3021     if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
3022       __kmp_free((void *)team->t.t_argv);
3023 
3024     if (argc <= KMP_INLINE_ARGV_ENTRIES) {
3025       /* use unused space in the cache line for arguments */
3026       team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
3027       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
3028                      "argv entries\n",
3029                      team->t.t_id, team->t.t_max_argc));
3030       team->t.t_argv = &team->t.t_inline_argv[0];
3031       if (__kmp_storage_map) {
3032         __kmp_print_storage_map_gtid(
3033             -1, &team->t.t_inline_argv[0],
3034             &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
3035             (sizeof(void *) * KMP_INLINE_ARGV_ENTRIES), "team_%d.t_inline_argv",
3036             team->t.t_id);
3037       }
3038     } else {
3039       /* allocate space for arguments in the heap */
3040       team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
3041                                ? KMP_MIN_MALLOC_ARGV_ENTRIES
3042                                : 2 * argc;
3043       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
3044                      "argv entries\n",
3045                      team->t.t_id, team->t.t_max_argc));
3046       team->t.t_argv =
3047           (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
3048       if (__kmp_storage_map) {
3049         __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
3050                                      &team->t.t_argv[team->t.t_max_argc],
3051                                      sizeof(void *) * team->t.t_max_argc,
3052                                      "team_%d.t_argv", team->t.t_id);
3053       }
3054     }
3055   }
3056 }
3057 
3058 static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
3059   int i;
3060   int num_disp_buff = max_nth > 1 ? __kmp_dispatch_num_buffers : 2;
3061   team->t.t_threads =
3062       (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth);
3063   team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
3064       sizeof(dispatch_shared_info_t) * num_disp_buff);
3065   team->t.t_dispatch =
3066       (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth);
3067   team->t.t_implicit_task_taskdata =
3068       (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth);
3069   team->t.t_max_nproc = max_nth;
3070 
3071   /* setup dispatch buffers */
3072   for (i = 0; i < num_disp_buff; ++i) {
3073     team->t.t_disp_buffer[i].buffer_index = i;
3074 #if OMP_45_ENABLED
3075     team->t.t_disp_buffer[i].doacross_buf_idx = i;
3076 #endif
3077   }
3078 }
3079 
3080 static void __kmp_free_team_arrays(kmp_team_t *team) {
3081   /* Note: this does not free the threads in t_threads (__kmp_free_threads) */
3082   int i;
3083   for (i = 0; i < team->t.t_max_nproc; ++i) {
3084     if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
3085       __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
3086       team->t.t_dispatch[i].th_disp_buffer = NULL;
3087     }
3088   }
3089 #if KMP_USE_HIER_SCHED
3090   __kmp_dispatch_free_hierarchies(team);
3091 #endif
3092   __kmp_free(team->t.t_threads);
3093   __kmp_free(team->t.t_disp_buffer);
3094   __kmp_free(team->t.t_dispatch);
3095   __kmp_free(team->t.t_implicit_task_taskdata);
3096   team->t.t_threads = NULL;
3097   team->t.t_disp_buffer = NULL;
3098   team->t.t_dispatch = NULL;
3099   team->t.t_implicit_task_taskdata = 0;
3100 }
3101 
3102 static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
3103   kmp_info_t **oldThreads = team->t.t_threads;
3104 
3105   __kmp_free(team->t.t_disp_buffer);
3106   __kmp_free(team->t.t_dispatch);
3107   __kmp_free(team->t.t_implicit_task_taskdata);
3108   __kmp_allocate_team_arrays(team, max_nth);
3109 
3110   KMP_MEMCPY(team->t.t_threads, oldThreads,
3111              team->t.t_nproc * sizeof(kmp_info_t *));
3112 
3113   __kmp_free(oldThreads);
3114 }
3115 
3116 static kmp_internal_control_t __kmp_get_global_icvs(void) {
3117 
3118   kmp_r_sched_t r_sched =
3119       __kmp_get_schedule_global(); // get current state of scheduling globals
3120 
3121 #if OMP_40_ENABLED
3122   KMP_DEBUG_ASSERT(__kmp_nested_proc_bind.used > 0);
3123 #endif /* OMP_40_ENABLED */
3124 
3125   kmp_internal_control_t g_icvs = {
3126     0, // int serial_nesting_level; //corresponds to value of th_team_serialized
3127     (kmp_int8)__kmp_dflt_nested, // int nested; //internal control
3128     // for nested parallelism (per thread)
3129     (kmp_int8)__kmp_global.g.g_dynamic, // internal control for dynamic
3130     // adjustment of threads (per thread)
3131     (kmp_int8)__kmp_env_blocktime, // int bt_set; //internal control for
3132     // whether blocktime is explicitly set
3133     __kmp_dflt_blocktime, // int blocktime; //internal control for blocktime
3134 #if KMP_USE_MONITOR
3135     __kmp_bt_intervals, // int bt_intervals; //internal control for blocktime
3136 // intervals
3137 #endif
3138     __kmp_dflt_team_nth, // int nproc; //internal control for # of threads for
3139     // next parallel region (per thread)
3140     // (use a max ub on value if __kmp_parallel_initialize not called yet)
3141     __kmp_dflt_max_active_levels, // int max_active_levels; //internal control
3142     // for max_active_levels
3143     r_sched, // kmp_r_sched_t sched; //internal control for runtime schedule
3144 // {sched,chunk} pair
3145 #if OMP_40_ENABLED
3146     __kmp_nested_proc_bind.bind_types[0],
3147     __kmp_default_device,
3148 #endif /* OMP_40_ENABLED */
3149     NULL // struct kmp_internal_control *next;
3150   };
3151 
3152   return g_icvs;
3153 }
3154 
3155 static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {
3156 
3157   kmp_internal_control_t gx_icvs;
3158   gx_icvs.serial_nesting_level =
      0; // probably = team->t.t_serialized, as in __kmp_save_internal_controls
3160   copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
3161   gx_icvs.next = NULL;
3162 
3163   return gx_icvs;
3164 }
3165 
3166 static void __kmp_initialize_root(kmp_root_t *root) {
3167   int f;
3168   kmp_team_t *root_team;
3169   kmp_team_t *hot_team;
3170   int hot_team_max_nth;
3171   kmp_r_sched_t r_sched =
3172       __kmp_get_schedule_global(); // get current state of scheduling globals
3173   kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3174   KMP_DEBUG_ASSERT(root);
3175   KMP_ASSERT(!root->r.r_begin);
3176 
3177   /* setup the root state structure */
3178   __kmp_init_lock(&root->r.r_begin_lock);
3179   root->r.r_begin = FALSE;
3180   root->r.r_active = FALSE;
3181   root->r.r_in_parallel = 0;
3182   root->r.r_blocktime = __kmp_dflt_blocktime;
3183   root->r.r_nested = __kmp_dflt_nested;
3184   root->r.r_cg_nthreads = 1;
3185 
3186   /* setup the root team for this task */
3187   /* allocate the root team structure */
3188   KF_TRACE(10, ("__kmp_initialize_root: before root_team\n"));
3189 
3190   root_team =
3191       __kmp_allocate_team(root,
3192                           1, // new_nproc
3193                           1, // max_nproc
3194 #if OMPT_SUPPORT
3195                           ompt_data_none, // root parallel id
3196 #endif
3197 #if OMP_40_ENABLED
3198                           __kmp_nested_proc_bind.bind_types[0],
3199 #endif
3200                           &r_icvs,
3201                           0 // argc
3202                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3203                           );
3204 #if USE_DEBUGGER
3205   // Non-NULL value should be assigned to make the debugger display the root
3206   // team.
3207   TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)(~0));
3208 #endif
3209 
3210   KF_TRACE(10, ("__kmp_initialize_root: after root_team = %p\n", root_team));
3211 
3212   root->r.r_root_team = root_team;
3213   root_team->t.t_control_stack_top = NULL;
3214 
3215   /* initialize root team */
3216   root_team->t.t_threads[0] = NULL;
3217   root_team->t.t_nproc = 1;
3218   root_team->t.t_serialized = 1;
3219   // TODO???: root_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3220   root_team->t.t_sched.sched = r_sched.sched;
3221   KA_TRACE(
3222       20,
3223       ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3224        root_team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
3225 
  /* setup the hot team for this task */
3227   /* allocate the hot team structure */
3228   KF_TRACE(10, ("__kmp_initialize_root: before hot_team\n"));
3229 
3230   hot_team =
3231       __kmp_allocate_team(root,
3232                           1, // new_nproc
3233                           __kmp_dflt_team_nth_ub * 2, // max_nproc
3234 #if OMPT_SUPPORT
3235                           ompt_data_none, // root parallel id
3236 #endif
3237 #if OMP_40_ENABLED
3238                           __kmp_nested_proc_bind.bind_types[0],
3239 #endif
3240                           &r_icvs,
3241                           0 // argc
3242                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3243                           );
3244   KF_TRACE(10, ("__kmp_initialize_root: after hot_team = %p\n", hot_team));
3245 
3246   root->r.r_hot_team = hot_team;
3247   root_team->t.t_control_stack_top = NULL;
3248 
3249   /* first-time initialization */
3250   hot_team->t.t_parent = root_team;
3251 
3252   /* initialize hot team */
3253   hot_team_max_nth = hot_team->t.t_max_nproc;
3254   for (f = 0; f < hot_team_max_nth; ++f) {
3255     hot_team->t.t_threads[f] = NULL;
3256   }
3257   hot_team->t.t_nproc = 1;
3258   // TODO???: hot_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3259   hot_team->t.t_sched.sched = r_sched.sched;
3260   hot_team->t.t_size_changed = 0;
3261 }
3262 
3263 #ifdef KMP_DEBUG
3264 
3265 typedef struct kmp_team_list_item {
3266   kmp_team_p const *entry;
3267   struct kmp_team_list_item *next;
3268 } kmp_team_list_item_t;
3269 typedef kmp_team_list_item_t *kmp_team_list_t;
3270 
3271 static void __kmp_print_structure_team_accum( // Add team to list of teams.
3272     kmp_team_list_t list, // List of teams.
3273     kmp_team_p const *team // Team to add.
3274     ) {
3275 
3276   // List must terminate with item where both entry and next are NULL.
3277   // Team is added to the list only once.
3278   // List is sorted in ascending order by team id.
3279   // Team id is *not* a key.
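  // For example, after accumulating teams with ids 2 and 5, the list is
  //   { entry = team 2 } -> { entry = team 5 } -> { entry = NULL, next = NULL }
  // i.e. the NULL/NULL terminator item always remains at the tail.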
3280 
3281   kmp_team_list_t l;
3282 
3283   KMP_DEBUG_ASSERT(list != NULL);
3284   if (team == NULL) {
3285     return;
3286   }
3287 
3288   __kmp_print_structure_team_accum(list, team->t.t_parent);
3289   __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3290 
3291   // Search list for the team.
3292   l = list;
3293   while (l->next != NULL && l->entry != team) {
3294     l = l->next;
3295   }
3296   if (l->next != NULL) {
3297     return; // Team has been added before, exit.
3298   }
3299 
3300   // Team is not found. Search list again for insertion point.
3301   l = list;
3302   while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3303     l = l->next;
3304   }
3305 
3306   // Insert team.
3307   {
3308     kmp_team_list_item_t *item = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
3309         sizeof(kmp_team_list_item_t));
3310     *item = *l;
3311     l->entry = team;
3312     l->next = item;
3313   }
3314 }
3315 
static void __kmp_print_structure_team(char const *title,
                                       kmp_team_p const *team) {
3319   __kmp_printf("%s", title);
3320   if (team != NULL) {
3321     __kmp_printf("%2x %p\n", team->t.t_id, team);
3322   } else {
3323     __kmp_printf(" - (nil)\n");
3324   }
3325 }
3326 
3327 static void __kmp_print_structure_thread(char const *title,
3328                                          kmp_info_p const *thread) {
3329   __kmp_printf("%s", title);
3330   if (thread != NULL) {
3331     __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread);
3332   } else {
3333     __kmp_printf(" - (nil)\n");
3334   }
3335 }
3336 
3337 void __kmp_print_structure(void) {
3338 
3339   kmp_team_list_t list;
3340 
3341   // Initialize list of teams.
3342   list =
3343       (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(sizeof(kmp_team_list_item_t));
3344   list->entry = NULL;
3345   list->next = NULL;
3346 
3347   __kmp_printf("\n------------------------------\nGlobal Thread "
3348                "Table\n------------------------------\n");
3349   {
3350     int gtid;
3351     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3352       __kmp_printf("%2d", gtid);
3353       if (__kmp_threads != NULL) {
3354         __kmp_printf(" %p", __kmp_threads[gtid]);
3355       }
3356       if (__kmp_root != NULL) {
3357         __kmp_printf(" %p", __kmp_root[gtid]);
3358       }
3359       __kmp_printf("\n");
3360     }
3361   }
3362 
3363   // Print out __kmp_threads array.
3364   __kmp_printf("\n------------------------------\nThreads\n--------------------"
3365                "----------\n");
3366   if (__kmp_threads != NULL) {
3367     int gtid;
3368     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3369       kmp_info_t const *thread = __kmp_threads[gtid];
3370       if (thread != NULL) {
3371         __kmp_printf("GTID %2d %p:\n", gtid, thread);
3372         __kmp_printf("    Our Root:        %p\n", thread->th.th_root);
3373         __kmp_print_structure_team("    Our Team:     ", thread->th.th_team);
3374         __kmp_print_structure_team("    Serial Team:  ",
3375                                    thread->th.th_serial_team);
3376         __kmp_printf("    Threads:      %2d\n", thread->th.th_team_nproc);
3377         __kmp_print_structure_thread("    Master:       ",
3378                                      thread->th.th_team_master);
3379         __kmp_printf("    Serialized?:  %2d\n", thread->th.th_team_serialized);
3380         __kmp_printf("    Set NProc:    %2d\n", thread->th.th_set_nproc);
3381 #if OMP_40_ENABLED
3382         __kmp_printf("    Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
3383 #endif
3384         __kmp_print_structure_thread("    Next in pool: ",
3385                                      thread->th.th_next_pool);
3386         __kmp_printf("\n");
3387         __kmp_print_structure_team_accum(list, thread->th.th_team);
3388         __kmp_print_structure_team_accum(list, thread->th.th_serial_team);
3389       }
3390     }
3391   } else {
3392     __kmp_printf("Threads array is not allocated.\n");
3393   }
3394 
3395   // Print out __kmp_root array.
3396   __kmp_printf("\n------------------------------\nUbers\n----------------------"
3397                "--------\n");
3398   if (__kmp_root != NULL) {
3399     int gtid;
3400     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3401       kmp_root_t const *root = __kmp_root[gtid];
3402       if (root != NULL) {
3403         __kmp_printf("GTID %2d %p:\n", gtid, root);
3404         __kmp_print_structure_team("    Root Team:    ", root->r.r_root_team);
3405         __kmp_print_structure_team("    Hot Team:     ", root->r.r_hot_team);
3406         __kmp_print_structure_thread("    Uber Thread:  ",
3407                                      root->r.r_uber_thread);
3408         __kmp_printf("    Active?:      %2d\n", root->r.r_active);
3409         __kmp_printf("    Nested?:      %2d\n", root->r.r_nested);
3410         __kmp_printf("    In Parallel:  %2d\n",
3411                      KMP_ATOMIC_LD_RLX(&root->r.r_in_parallel));
3412         __kmp_printf("\n");
3413         __kmp_print_structure_team_accum(list, root->r.r_root_team);
3414         __kmp_print_structure_team_accum(list, root->r.r_hot_team);
3415       }
3416     }
3417   } else {
3418     __kmp_printf("Ubers array is not allocated.\n");
3419   }
3420 
3421   __kmp_printf("\n------------------------------\nTeams\n----------------------"
3422                "--------\n");
3423   while (list->next != NULL) {
3424     kmp_team_p const *team = list->entry;
3425     int i;
3426     __kmp_printf("Team %2x %p:\n", team->t.t_id, team);
3427     __kmp_print_structure_team("    Parent Team:      ", team->t.t_parent);
3428     __kmp_printf("    Master TID:       %2d\n", team->t.t_master_tid);
3429     __kmp_printf("    Max threads:      %2d\n", team->t.t_max_nproc);
3430     __kmp_printf("    Levels of serial: %2d\n", team->t.t_serialized);
3431     __kmp_printf("    Number threads:   %2d\n", team->t.t_nproc);
3432     for (i = 0; i < team->t.t_nproc; ++i) {
3433       __kmp_printf("    Thread %2d:      ", i);
3434       __kmp_print_structure_thread("", team->t.t_threads[i]);
3435     }
3436     __kmp_print_structure_team("    Next in pool:     ", team->t.t_next_pool);
3437     __kmp_printf("\n");
3438     list = list->next;
3439   }
3440 
3441   // Print out __kmp_thread_pool and __kmp_team_pool.
3442   __kmp_printf("\n------------------------------\nPools\n----------------------"
3443                "--------\n");
3444   __kmp_print_structure_thread("Thread pool:          ",
3445                                CCAST(kmp_info_t *, __kmp_thread_pool));
3446   __kmp_print_structure_team("Team pool:            ",
3447                              CCAST(kmp_team_t *, __kmp_team_pool));
3448   __kmp_printf("\n");
3449 
3450   // Free team list.
3451   while (list != NULL) {
3452     kmp_team_list_item_t *item = list;
3453     list = list->next;
3454     KMP_INTERNAL_FREE(item);
3455   }
3456 }
3457 
3458 #endif
3459 
3460 //---------------------------------------------------------------------------
3461 //  Stuff for per-thread fast random number generator
3462 //  Table of primes
3463 static const unsigned __kmp_primes[] = {
3464     0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, 0xba5703f5, 0xb495a877,
3465     0xe1626741, 0x79695e6b, 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
3466     0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, 0xbe4d6fe9, 0x5f15e201,
3467     0x99afc3fd, 0xf3f16801, 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
3468     0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, 0x085a3d61, 0x46eb5ea7,
3469     0x3d9910ed, 0x2e687b5b, 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
3470     0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, 0x54581edb, 0xf2480f45,
3471     0x0bb9288f, 0xef1affc7, 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
3472     0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, 0xfc411073, 0xc3749363,
3473     0xb892d829, 0x3549366b, 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
3474     0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f};
3475 
3476 //---------------------------------------------------------------------------
3477 //  __kmp_get_random: Get a random number using a linear congruential method.
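//  The recurrence used is x_next = a * x + 1 (mod 2^32), where 'a' is a
//  per-thread multiplier drawn from __kmp_primes; the upper 16 bits of the
//  current x are returned as the random value.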
3478 unsigned short __kmp_get_random(kmp_info_t *thread) {
3479   unsigned x = thread->th.th_x;
3480   unsigned short r = x >> 16;
3481 
3482   thread->th.th_x = x * thread->th.th_a + 1;
3483 
3484   KA_TRACE(30, ("__kmp_get_random: THREAD: %d, RETURN: %u\n",
3485                 thread->th.th_info.ds.ds_tid, r));
3486 
3487   return r;
3488 }
3489 //--------------------------------------------------------
3490 // __kmp_init_random: Initialize a random number generator
3491 void __kmp_init_random(kmp_info_t *thread) {
3492   unsigned seed = thread->th.th_info.ds.ds_tid;
3493 
3494   thread->th.th_a =
3495       __kmp_primes[seed % (sizeof(__kmp_primes) / sizeof(__kmp_primes[0]))];
3496   thread->th.th_x = (seed + 1) * thread->th.th_a + 1;
3497   KA_TRACE(30,
3498            ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a));
3499 }
3500 
3501 #if KMP_OS_WINDOWS
3502 /* reclaim array entries for root threads that are already dead, returns number
3503  * reclaimed */
3504 static int __kmp_reclaim_dead_roots(void) {
3505   int i, r = 0;
3506 
3507   for (i = 0; i < __kmp_threads_capacity; ++i) {
3508     if (KMP_UBER_GTID(i) &&
3509         !__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])) &&
3510         !__kmp_root[i]
3511              ->r.r_active) { // AC: reclaim only roots died in non-active state
3512       r += __kmp_unregister_root_other_thread(i);
3513     }
3514   }
3515   return r;
3516 }
3517 #endif
3518 
3519 /* This function attempts to create free entries in __kmp_threads and
3520    __kmp_root, and returns the number of free entries generated.
3521 
3522    For Windows* OS static library, the first mechanism used is to reclaim array
3523    entries for root threads that are already dead.
3524 
   On all platforms, expansion is attempted on the arrays __kmp_threads and
3526    __kmp_root, with appropriate update to __kmp_threads_capacity. Array
3527    capacity is increased by doubling with clipping to __kmp_tp_capacity, if
3528    threadprivate cache array has been created. Synchronization with
3529    __kmpc_threadprivate_cached is done using __kmp_tp_cached_lock.
3530 
3531    After any dead root reclamation, if the clipping value allows array expansion
3532    to result in the generation of a total of nNeed free slots, the function does
3533    that expansion. If not, nothing is done beyond the possible initial root
3534    thread reclamation.
3535 
3536    If any argument is negative, the behavior is undefined. */
3537 static int __kmp_expand_threads(int nNeed) {
3538   int added = 0;
3539   int minimumRequiredCapacity;
3540   int newCapacity;
3541   kmp_info_t **newThreads;
3542   kmp_root_t **newRoot;
3543 
3544 // All calls to __kmp_expand_threads should be under __kmp_forkjoin_lock, so
3545 // resizing __kmp_threads does not need additional protection if foreign
3546 // threads are present
3547 
3548 #if KMP_OS_WINDOWS && !KMP_DYNAMIC_LIB
3549   /* only for Windows static library */
3550   /* reclaim array entries for root threads that are already dead */
3551   added = __kmp_reclaim_dead_roots();
3552 
3553   if (nNeed) {
3554     nNeed -= added;
3555     if (nNeed < 0)
3556       nNeed = 0;
3557   }
3558 #endif
3559   if (nNeed <= 0)
3560     return added;
3561 
3562   // Note that __kmp_threads_capacity is not bounded by __kmp_max_nth. If
3563   // __kmp_max_nth is set to some value less than __kmp_sys_max_nth by the
3564   // user via KMP_DEVICE_THREAD_LIMIT, then __kmp_threads_capacity may become
3565   // > __kmp_max_nth in one of two ways:
3566   //
3567   // 1) The initialization thread (gtid = 0) exits.  __kmp_threads[0]
  //    may not be reused by another thread, so we may need to increase
3569   //    __kmp_threads_capacity to __kmp_max_nth + 1.
3570   //
3571   // 2) New foreign root(s) are encountered.  We always register new foreign
3572   //    roots. This may cause a smaller # of threads to be allocated at
3573   //    subsequent parallel regions, but the worker threads hang around (and
3574   //    eventually go to sleep) and need slots in the __kmp_threads[] array.
3575   //
3576   // Anyway, that is the reason for moving the check to see if
3577   // __kmp_max_nth was exceeded into __kmp_reserve_threads()
3578   // instead of having it performed here. -BB
3579 
3580   KMP_DEBUG_ASSERT(__kmp_sys_max_nth >= __kmp_threads_capacity);
3581 
3582   /* compute expansion headroom to check if we can expand */
3583   if (__kmp_sys_max_nth - __kmp_threads_capacity < nNeed) {
3584     /* possible expansion too small -- give up */
3585     return added;
3586   }
3587   minimumRequiredCapacity = __kmp_threads_capacity + nNeed;
3588 
3589   newCapacity = __kmp_threads_capacity;
3590   do {
3591     newCapacity = newCapacity <= (__kmp_sys_max_nth >> 1) ? (newCapacity << 1)
3592                                                           : __kmp_sys_max_nth;
3593   } while (newCapacity < minimumRequiredCapacity);
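  // For example, with __kmp_threads_capacity == 32 and nNeed == 40, the
  // required capacity is 72, so newCapacity doubles 32 -> 64 -> 128 (and would
  // be clipped to __kmp_sys_max_nth if that were smaller).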
3594   newThreads = (kmp_info_t **)__kmp_allocate(
3595       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * newCapacity + CACHE_LINE);
3596   newRoot =
3597       (kmp_root_t **)((char *)newThreads + sizeof(kmp_info_t *) * newCapacity);
3598   KMP_MEMCPY(newThreads, __kmp_threads,
3599              __kmp_threads_capacity * sizeof(kmp_info_t *));
3600   KMP_MEMCPY(newRoot, __kmp_root,
3601              __kmp_threads_capacity * sizeof(kmp_root_t *));
3602 
3603   kmp_info_t **temp_threads = __kmp_threads;
3604   *(kmp_info_t * *volatile *)&__kmp_threads = newThreads;
3605   *(kmp_root_t * *volatile *)&__kmp_root = newRoot;
3606   __kmp_free(temp_threads);
3607   added += newCapacity - __kmp_threads_capacity;
3608   *(volatile int *)&__kmp_threads_capacity = newCapacity;
3609 
3610   if (newCapacity > __kmp_tp_capacity) {
3611     __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
3612     if (__kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
3613       __kmp_threadprivate_resize_cache(newCapacity);
3614     } else { // increase __kmp_tp_capacity to correspond with kmp_threads size
3615       *(volatile int *)&__kmp_tp_capacity = newCapacity;
3616     }
3617     __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
3618   }
3619 
3620   return added;
3621 }
3622 
3623 /* Register the current thread as a root thread and obtain our gtid. We must
3624    have the __kmp_initz_lock held at this point. Argument TRUE only if are the
3625    thread that calls from __kmp_do_serial_initialize() */
3626 int __kmp_register_root(int initial_thread) {
3627   kmp_info_t *root_thread;
3628   kmp_root_t *root;
3629   int gtid;
3630   int capacity;
3631   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3632   KA_TRACE(20, ("__kmp_register_root: entered\n"));
3633   KMP_MB();
3634 
  /* 2007-03-02:
     If the initial thread has not invoked the OpenMP RTL yet, and this thread
     is not an initial one, the "__kmp_all_nth >= __kmp_threads_capacity"
     condition does not work as expected -- it may return false (meaning there
     is at least one empty slot in the __kmp_threads array), but it is possible
     that the only free slot is #0, which is reserved for the initial thread
     and so cannot be used for this one. The following code works around this
     bug.

     However, the right solution seems to be not to reserve slot #0 for the
     initial thread, because:
     (1) there is no magic in slot #0, and
     (2) we cannot detect the initial thread reliably (the first thread that
         performs serial initialization may not be the real initial thread).
  */
3649   capacity = __kmp_threads_capacity;
3650   if (!initial_thread && TCR_PTR(__kmp_threads[0]) == NULL) {
3651     --capacity;
3652   }
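  // For example, if the only free slot is #0 (reserved for the initial
  // thread), the decrement above makes the capacity check below treat the
  // array as full, so __kmp_expand_threads(1) is attempted and the slot search
  // further down (which starts at gtid 1 for non-initial threads) can succeed.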
3653 
3654   /* see if there are too many threads */
3655   if (__kmp_all_nth >= capacity && !__kmp_expand_threads(1)) {
3656     if (__kmp_tp_cached) {
3657       __kmp_fatal(KMP_MSG(CantRegisterNewThread),
3658                   KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
3659                   KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
3660     } else {
3661       __kmp_fatal(KMP_MSG(CantRegisterNewThread), KMP_HNT(SystemLimitOnThreads),
3662                   __kmp_msg_null);
3663     }
3664   }
3665 
3666   /* find an available thread slot */
  /* Don't reassign the zero slot since we need that to only be used by the
     initial thread */
3669   for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
3670        gtid++)
3671     ;
3672   KA_TRACE(1,
3673            ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
3674   KMP_ASSERT(gtid < __kmp_threads_capacity);
3675 
3676   /* update global accounting */
3677   __kmp_all_nth++;
3678   TCW_4(__kmp_nth, __kmp_nth + 1);
3679 
3680   // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for low
3681   // numbers of procs, and method #2 (keyed API call) for higher numbers.
3682   if (__kmp_adjust_gtid_mode) {
3683     if (__kmp_all_nth >= __kmp_tls_gtid_min) {
3684       if (TCR_4(__kmp_gtid_mode) != 2) {
3685         TCW_4(__kmp_gtid_mode, 2);
3686       }
3687     } else {
3688       if (TCR_4(__kmp_gtid_mode) != 1) {
3689         TCW_4(__kmp_gtid_mode, 1);
3690       }
3691     }
3692   }
3693 
3694 #ifdef KMP_ADJUST_BLOCKTIME
3695   /* Adjust blocktime to zero if necessary            */
3696   /* Middle initialization might not have occurred yet */
3697   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
3698     if (__kmp_nth > __kmp_avail_proc) {
3699       __kmp_zero_bt = TRUE;
3700     }
3701   }
3702 #endif /* KMP_ADJUST_BLOCKTIME */
3703 
3704   /* setup this new hierarchy */
3705   if (!(root = __kmp_root[gtid])) {
3706     root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(sizeof(kmp_root_t));
3707     KMP_DEBUG_ASSERT(!root->r.r_root_team);
3708   }
3709 
3710 #if KMP_STATS_ENABLED
3711   // Initialize stats as soon as possible (right after gtid assignment).
3712   __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid);
3713   __kmp_stats_thread_ptr->startLife();
3714   KMP_SET_THREAD_STATE(SERIAL_REGION);
3715   KMP_INIT_PARTITIONED_TIMERS(OMP_serial);
3716 #endif
3717   __kmp_initialize_root(root);
3718 
3719   /* setup new root thread structure */
3720   if (root->r.r_uber_thread) {
3721     root_thread = root->r.r_uber_thread;
3722   } else {
3723     root_thread = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
3724     if (__kmp_storage_map) {
3725       __kmp_print_thread_storage_map(root_thread, gtid);
3726     }
3727     root_thread->th.th_info.ds.ds_gtid = gtid;
3728 #if OMPT_SUPPORT
3729     root_thread->th.ompt_thread_info.thread_data = ompt_data_none;
3730 #endif
3731     root_thread->th.th_root = root;
3732     if (__kmp_env_consistency_check) {
3733       root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid);
3734     }
3735 #if USE_FAST_MEMORY
3736     __kmp_initialize_fast_memory(root_thread);
3737 #endif /* USE_FAST_MEMORY */
3738 
3739 #if KMP_USE_BGET
3740     KMP_DEBUG_ASSERT(root_thread->th.th_local.bget_data == NULL);
3741     __kmp_initialize_bget(root_thread);
3742 #endif
3743     __kmp_init_random(root_thread); // Initialize random number generator
3744   }
3745 
3746   /* setup the serial team held in reserve by the root thread */
3747   if (!root_thread->th.th_serial_team) {
3748     kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3749     KF_TRACE(10, ("__kmp_register_root: before serial_team\n"));
3750     root_thread->th.th_serial_team =
3751         __kmp_allocate_team(root, 1, 1,
3752 #if OMPT_SUPPORT
3753                             ompt_data_none, // root parallel id
3754 #endif
3755 #if OMP_40_ENABLED
3756                             proc_bind_default,
3757 #endif
3758                             &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
3759   }
3760   KMP_ASSERT(root_thread->th.th_serial_team);
3761   KF_TRACE(10, ("__kmp_register_root: after serial_team = %p\n",
3762                 root_thread->th.th_serial_team));
3763 
3764   /* drop root_thread into place */
3765   TCW_SYNC_PTR(__kmp_threads[gtid], root_thread);
3766 
3767   root->r.r_root_team->t.t_threads[0] = root_thread;
3768   root->r.r_hot_team->t.t_threads[0] = root_thread;
3769   root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
  // AC: the team is created in reserve, not for execution (it is unused now).
3771   root_thread->th.th_serial_team->t.t_serialized = 0;
3772   root->r.r_uber_thread = root_thread;
3773 
3774   /* initialize the thread, get it ready to go */
3775   __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid);
3776   TCW_4(__kmp_init_gtid, TRUE);
3777 
3778   /* prepare the master thread for get_gtid() */
3779   __kmp_gtid_set_specific(gtid);
3780 
3781 #if USE_ITT_BUILD
3782   __kmp_itt_thread_name(gtid);
3783 #endif /* USE_ITT_BUILD */
3784 
3785 #ifdef KMP_TDATA_GTID
3786   __kmp_gtid = gtid;
3787 #endif
3788   __kmp_create_worker(gtid, root_thread, __kmp_stksize);
3789   KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == gtid);
3790 
3791   KA_TRACE(20, ("__kmp_register_root: T#%d init T#%d(%d:%d) arrived: join=%u, "
3792                 "plain=%u\n",
3793                 gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team),
3794                 root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE,
3795                 KMP_INIT_BARRIER_STATE));
3796   { // Initialize barrier data.
3797     int b;
3798     for (b = 0; b < bs_last_barrier; ++b) {
3799       root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE;
3800 #if USE_DEBUGGER
3801       root_thread->th.th_bar[b].bb.b_worker_arrived = 0;
3802 #endif
3803     }
3804   }
3805   KMP_DEBUG_ASSERT(root->r.r_hot_team->t.t_bar[bs_forkjoin_barrier].b_arrived ==
3806                    KMP_INIT_BARRIER_STATE);
3807 
3808 #if KMP_AFFINITY_SUPPORTED
3809 #if OMP_40_ENABLED
3810   root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
3811   root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
3812   root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
3813   root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
3814 #endif
3815   if (TCR_4(__kmp_init_middle)) {
3816     __kmp_affinity_set_init_mask(gtid, TRUE);
3817   }
3818 #endif /* KMP_AFFINITY_SUPPORTED */
3819 #if OMP_50_ENABLED
3820   root_thread->th.th_def_allocator = __kmp_def_allocator;
3821 #endif
3822 
3823   __kmp_root_counter++;
3824 
3825 #if OMPT_SUPPORT
3826   if (!initial_thread && ompt_enabled.enabled) {
3827 
3828     kmp_info_t *root_thread = ompt_get_thread();
3829 
3830     ompt_set_thread_state(root_thread, omp_state_overhead);
3831 
3832     if (ompt_enabled.ompt_callback_thread_begin) {
3833       ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
3834           ompt_thread_initial, __ompt_get_thread_data_internal());
3835     }
3836     ompt_data_t *task_data;
3837     __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
3838     if (ompt_enabled.ompt_callback_task_create) {
3839       ompt_callbacks.ompt_callback(ompt_callback_task_create)(
3840           NULL, NULL, task_data, ompt_task_initial, 0, NULL);
3841       // initial task has nothing to return to
3842     }
3843 
3844     ompt_set_thread_state(root_thread, omp_state_work_serial);
3845   }
3846 #endif
3847 
3848   KMP_MB();
3849   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3850 
3851   return gtid;
3852 }
3853 
3854 #if KMP_NESTED_HOT_TEAMS
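// Frees the hot team kept at the given nesting level for thread 'thr' and,
// recursively, the nested hot teams of its worker threads. The return value
// counts the threads of those teams excluding each team's master, which is
// counted as a member of its parent team.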
3855 static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
3856                                 const int max_level) {
3857   int i, n, nth;
3858   kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
3859   if (!hot_teams || !hot_teams[level].hot_team) {
3860     return 0;
3861   }
3862   KMP_DEBUG_ASSERT(level < max_level);
3863   kmp_team_t *team = hot_teams[level].hot_team;
3864   nth = hot_teams[level].hot_team_nth;
3865   n = nth - 1; // master is not freed
3866   if (level < max_level - 1) {
3867     for (i = 0; i < nth; ++i) {
3868       kmp_info_t *th = team->t.t_threads[i];
3869       n += __kmp_free_hot_teams(root, th, level + 1, max_level);
3870       if (i > 0 && th->th.th_hot_teams) {
3871         __kmp_free(th->th.th_hot_teams);
3872         th->th.th_hot_teams = NULL;
3873       }
3874     }
3875   }
3876   __kmp_free_team(root, team, NULL);
3877   return n;
3878 }
3879 #endif
3880 
// Resets a root thread and clears its root and hot teams.
3882 // Returns the number of __kmp_threads entries directly and indirectly freed.
3883 static int __kmp_reset_root(int gtid, kmp_root_t *root) {
3884   kmp_team_t *root_team = root->r.r_root_team;
3885   kmp_team_t *hot_team = root->r.r_hot_team;
3886   int n = hot_team->t.t_nproc;
3887   int i;
3888 
3889   KMP_DEBUG_ASSERT(!root->r.r_active);
3890 
3891   root->r.r_root_team = NULL;
3892   root->r.r_hot_team = NULL;
3893   // __kmp_free_team() does not free hot teams, so we have to clear r_hot_team
// before the call to __kmp_free_team().
3895   __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
3896 #if KMP_NESTED_HOT_TEAMS
3897   if (__kmp_hot_teams_max_level >
3898       0) { // need to free nested hot teams and their threads if any
3899     for (i = 0; i < hot_team->t.t_nproc; ++i) {
3900       kmp_info_t *th = hot_team->t.t_threads[i];
3901       if (__kmp_hot_teams_max_level > 1) {
3902         n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level);
3903       }
3904       if (th->th.th_hot_teams) {
3905         __kmp_free(th->th.th_hot_teams);
3906         th->th.th_hot_teams = NULL;
3907       }
3908     }
3909   }
3910 #endif
3911   __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
3912 
3913   // Before we can reap the thread, we need to make certain that all other
3914   // threads in the teams that had this root as ancestor have stopped trying to
3915   // steal tasks.
3916   if (__kmp_tasking_mode != tskm_immediate_exec) {
3917     __kmp_wait_to_unref_task_teams();
3918   }
3919 
3920 #if KMP_OS_WINDOWS
3921   /* Close Handle of root duplicated in __kmp_create_worker (tr #62919) */
3922   KA_TRACE(
3923       10, ("__kmp_reset_root: free handle, th = %p, handle = %" KMP_UINTPTR_SPEC
3924            "\n",
3925            (LPVOID) & (root->r.r_uber_thread->th),
3926            root->r.r_uber_thread->th.th_info.ds.ds_thread));
3927   __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread);
3928 #endif /* KMP_OS_WINDOWS */
3929 
3930 #if OMPT_SUPPORT
3931   if (ompt_enabled.ompt_callback_thread_end) {
3932     ompt_callbacks.ompt_callback(ompt_callback_thread_end)(
3933         &(root->r.r_uber_thread->th.ompt_thread_info.thread_data));
3934   }
3935 #endif
3936 
3937   TCW_4(__kmp_nth,
3938         __kmp_nth - 1); // __kmp_reap_thread will decrement __kmp_all_nth.
3939   root->r.r_cg_nthreads--;
3940 
3941   __kmp_reap_thread(root->r.r_uber_thread, 1);
3942 
  // We cannot put the root thread into __kmp_thread_pool, so we have to reap
  // it instead of freeing it.
3945   root->r.r_uber_thread = NULL;
3946   /* mark root as no longer in use */
3947   root->r.r_begin = FALSE;
3948 
3949   return n;
3950 }
3951 
3952 void __kmp_unregister_root_current_thread(int gtid) {
3953   KA_TRACE(1, ("__kmp_unregister_root_current_thread: enter T#%d\n", gtid));
  /* This lock should be OK, since unregister_root_current_thread is never
     called during an abort, only during a normal close. Furthermore, if you
     have the forkjoin lock, you should never try to get the initz lock. */
3957   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3958   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
3959     KC_TRACE(10, ("__kmp_unregister_root_current_thread: already finished, "
3960                   "exiting T#%d\n",
3961                   gtid));
3962     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3963     return;
3964   }
3965   kmp_root_t *root = __kmp_root[gtid];
3966 
3967   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
3968   KMP_ASSERT(KMP_UBER_GTID(gtid));
3969   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
3970   KMP_ASSERT(root->r.r_active == FALSE);
3971 
3972   KMP_MB();
3973 
3974 #if OMP_45_ENABLED
3975   kmp_info_t *thread = __kmp_threads[gtid];
3976   kmp_team_t *team = thread->th.th_team;
3977   kmp_task_team_t *task_team = thread->th.th_task_team;
3978 
3979   // we need to wait for the proxy tasks before finishing the thread
3980   if (task_team != NULL && task_team->tt.tt_found_proxy_tasks) {
3981 #if OMPT_SUPPORT
3982     // the runtime is shutting down so we won't report any events
3983     thread->th.ompt_thread_info.state = omp_state_undefined;
3984 #endif
3985     __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
3986   }
3987 #endif
3988 
3989   __kmp_reset_root(gtid, root);
3990 
3991   /* free up this thread slot */
3992   __kmp_gtid_set_specific(KMP_GTID_DNE);
3993 #ifdef KMP_TDATA_GTID
3994   __kmp_gtid = KMP_GTID_DNE;
3995 #endif
3996 
3997   KMP_MB();
3998   KC_TRACE(10,
3999            ("__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid));
4000 
4001   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
4002 }
4003 
4004 #if KMP_OS_WINDOWS
4005 /* __kmp_forkjoin_lock must be already held
4006    Unregisters a root thread that is not the current thread.  Returns the number
4007    of __kmp_threads entries freed as a result. */
4008 static int __kmp_unregister_root_other_thread(int gtid) {
4009   kmp_root_t *root = __kmp_root[gtid];
4010   int r;
4011 
4012   KA_TRACE(1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid));
4013   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
4014   KMP_ASSERT(KMP_UBER_GTID(gtid));
4015   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
4016   KMP_ASSERT(root->r.r_active == FALSE);
4017 
4018   r = __kmp_reset_root(gtid, root);
4019   KC_TRACE(10,
4020            ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid));
4021   return r;
4022 }
4023 #endif
4024 
4025 #if KMP_DEBUG
4026 void __kmp_task_info() {
4027 
4028   kmp_int32 gtid = __kmp_entry_gtid();
4029   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
4030   kmp_info_t *this_thr = __kmp_threads[gtid];
4031   kmp_team_t *steam = this_thr->th.th_serial_team;
4032   kmp_team_t *team = this_thr->th.th_team;
4033 
4034   __kmp_printf(
4035       "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p "
4036       "ptask=%p\n",
4037       gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
4038       team->t.t_implicit_task_taskdata[tid].td_parent);
4039 }
4040 #endif // KMP_DEBUG
4041 
4042 /* TODO optimize with one big memclr, take out what isn't needed, split
4043    responsibility to workers as much as possible, and delay initialization of
4044    features as much as possible  */
4045 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
4046                                   int tid, int gtid) {
4047   /* this_thr->th.th_info.ds.ds_gtid is setup in
4048      kmp_allocate_thread/create_worker.
4049      this_thr->th.th_serial_team is setup in __kmp_allocate_thread */
4050   kmp_info_t *master = team->t.t_threads[0];
4051   KMP_DEBUG_ASSERT(this_thr != NULL);
4052   KMP_DEBUG_ASSERT(this_thr->th.th_serial_team);
4053   KMP_DEBUG_ASSERT(team);
4054   KMP_DEBUG_ASSERT(team->t.t_threads);
4055   KMP_DEBUG_ASSERT(team->t.t_dispatch);
4056   KMP_DEBUG_ASSERT(master);
4057   KMP_DEBUG_ASSERT(master->th.th_root);
4058 
4059   KMP_MB();
4060 
4061   TCW_SYNC_PTR(this_thr->th.th_team, team);
4062 
4063   this_thr->th.th_info.ds.ds_tid = tid;
4064   this_thr->th.th_set_nproc = 0;
4065   if (__kmp_tasking_mode != tskm_immediate_exec)
4066     // When tasking is possible, threads are not safe to reap until they are
4067     // done tasking; this will be set when tasking code is exited in wait
4068     this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
4069   else // no tasking --> always safe to reap
4070     this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
4071 #if OMP_40_ENABLED
4072   this_thr->th.th_set_proc_bind = proc_bind_default;
4073 #if KMP_AFFINITY_SUPPORTED
4074   this_thr->th.th_new_place = this_thr->th.th_current_place;
4075 #endif
4076 #endif
4077   this_thr->th.th_root = master->th.th_root;
4078 
4079   /* setup the thread's cache of the team structure */
4080   this_thr->th.th_team_nproc = team->t.t_nproc;
4081   this_thr->th.th_team_master = master;
4082   this_thr->th.th_team_serialized = team->t.t_serialized;
4083   TCW_PTR(this_thr->th.th_sleep_loc, NULL);
4084 
4085   KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);
4086 
4087   KF_TRACE(10, ("__kmp_initialize_info1: T#%d:%d this_thread=%p curtask=%p\n",
4088                 tid, gtid, this_thr, this_thr->th.th_current_task));
4089 
4090   __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr,
4091                            team, tid, TRUE);
4092 
4093   KF_TRACE(10, ("__kmp_initialize_info2: T#%d:%d this_thread=%p curtask=%p\n",
4094                 tid, gtid, this_thr, this_thr->th.th_current_task));
4095   // TODO: Initialize ICVs from parent; GEH - isn't that already done in
4096   // __kmp_initialize_team()?
4097 
4098   /* TODO no worksharing in speculative threads */
4099   this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
4100 
4101   this_thr->th.th_local.this_construct = 0;
4102 
4103   if (!this_thr->th.th_pri_common) {
4104     this_thr->th.th_pri_common =
4105         (struct common_table *)__kmp_allocate(sizeof(struct common_table));
4106     if (__kmp_storage_map) {
4107       __kmp_print_storage_map_gtid(
4108           gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
4109           sizeof(struct common_table), "th_%d.th_pri_common\n", gtid);
4110     }
4111     this_thr->th.th_pri_head = NULL;
4112   }
4113 
4114   /* Initialize dynamic dispatch */
4115   {
4116     volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
4117     // Use team max_nproc since this will never change for the team.
4118     size_t disp_size =
4119         sizeof(dispatch_private_info_t) *
4120         (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
4121     KD_TRACE(10, ("__kmp_initialize_info: T#%d max_nproc: %d\n", gtid,
4122                   team->t.t_max_nproc));
4123     KMP_ASSERT(dispatch);
4124     KMP_DEBUG_ASSERT(team->t.t_dispatch);
4125     KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
4126 
4127     dispatch->th_disp_index = 0;
4128 #if OMP_45_ENABLED
4129     dispatch->th_doacross_buf_idx = 0;
4130 #endif
4131     if (!dispatch->th_disp_buffer) {
4132       dispatch->th_disp_buffer =
4133           (dispatch_private_info_t *)__kmp_allocate(disp_size);
4134 
4135       if (__kmp_storage_map) {
4136         __kmp_print_storage_map_gtid(
4137             gtid, &dispatch->th_disp_buffer[0],
4138             &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
4139                                           ? 1
4140                                           : __kmp_dispatch_num_buffers],
4141             disp_size, "th_%d.th_dispatch.th_disp_buffer "
4142                        "(team_%d.t_dispatch[%d].th_disp_buffer)",
4143             gtid, team->t.t_id, gtid);
4144       }
4145     } else {
4146       memset(&dispatch->th_disp_buffer[0], '\0', disp_size);
4147     }
4148 
4149     dispatch->th_dispatch_pr_current = 0;
4150     dispatch->th_dispatch_sh_current = 0;
4151 
4152     dispatch->th_deo_fcn = 0; /* ORDERED     */
4153     dispatch->th_dxo_fcn = 0; /* END ORDERED */
4154   }
4155 
4156   this_thr->th.th_next_pool = NULL;
4157 
4158   if (!this_thr->th.th_task_state_memo_stack) {
4159     size_t i;
4160     this_thr->th.th_task_state_memo_stack =
4161         (kmp_uint8 *)__kmp_allocate(4 * sizeof(kmp_uint8));
4162     this_thr->th.th_task_state_top = 0;
4163     this_thr->th.th_task_state_stack_sz = 4;
4164     for (i = 0; i < this_thr->th.th_task_state_stack_sz;
4165          ++i) // zero init the stack
4166       this_thr->th.th_task_state_memo_stack[i] = 0;
4167   }
4168 
4169   KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
4170   KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
4171 
4172   KMP_MB();
4173 }
4174 
/* Allocate a new thread for the requesting team. This is only called from
   within a forkjoin critical section. We first try to get an available thread
   from the thread pool; if none is available, we fork a new one. The fork
   should always succeed, since the caller is expected to have checked the
   capacity first. */
4180 kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
4181                                   int new_tid) {
4182   kmp_team_t *serial_team;
4183   kmp_info_t *new_thr;
4184   int new_gtid;
4185 
4186   KA_TRACE(20, ("__kmp_allocate_thread: T#%d\n", __kmp_get_gtid()));
4187   KMP_DEBUG_ASSERT(root && team);
4188 #if !KMP_NESTED_HOT_TEAMS
4189   KMP_DEBUG_ASSERT(KMP_MASTER_GTID(__kmp_get_gtid()));
4190 #endif
4191   KMP_MB();
4192 
4193   /* first, try to get one from the thread pool */
4194   if (__kmp_thread_pool) {
4195 
4196     new_thr = CCAST(kmp_info_t *, __kmp_thread_pool);
4197     __kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool;
4198     if (new_thr == __kmp_thread_pool_insert_pt) {
4199       __kmp_thread_pool_insert_pt = NULL;
4200     }
4201     TCW_4(new_thr->th.th_in_pool, FALSE);
4202     // Don't touch th_active_in_pool or th_active.
4203     // The worker thread adjusts those flags as it sleeps/awakens.
4204     __kmp_thread_pool_nth--;
4205 
4206     KA_TRACE(20, ("__kmp_allocate_thread: T#%d using thread T#%d\n",
4207                   __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid));
4208     KMP_ASSERT(!new_thr->th.th_team);
4209     KMP_DEBUG_ASSERT(__kmp_nth < __kmp_threads_capacity);
4210     KMP_DEBUG_ASSERT(__kmp_thread_pool_nth >= 0);
4211 
4212     /* setup the thread structure */
4213     __kmp_initialize_info(new_thr, team, new_tid,
4214                           new_thr->th.th_info.ds.ds_gtid);
4215     KMP_DEBUG_ASSERT(new_thr->th.th_serial_team);
4216 
4217     TCW_4(__kmp_nth, __kmp_nth + 1);
4218     root->r.r_cg_nthreads++;
4219 
4220     new_thr->th.th_task_state = 0;
4221     new_thr->th.th_task_state_top = 0;
4222     new_thr->th.th_task_state_stack_sz = 4;
4223 
4224 #ifdef KMP_ADJUST_BLOCKTIME
4225     /* Adjust blocktime back to zero if necessary */
4226     /* Middle initialization might not have occurred yet */
4227     if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4228       if (__kmp_nth > __kmp_avail_proc) {
4229         __kmp_zero_bt = TRUE;
4230       }
4231     }
4232 #endif /* KMP_ADJUST_BLOCKTIME */
4233 
4234 #if KMP_DEBUG
4235     // If thread entered pool via __kmp_free_thread, wait_flag should !=
4236     // KMP_BARRIER_PARENT_FLAG.
4237     int b;
4238     kmp_balign_t *balign = new_thr->th.th_bar;
4239     for (b = 0; b < bs_last_barrier; ++b)
4240       KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
4241 #endif
4242 
4243     KF_TRACE(10, ("__kmp_allocate_thread: T#%d using thread %p T#%d\n",
4244                   __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid));
4245 
4246     KMP_MB();
4247     return new_thr;
4248   }
4249 
  /* no, so we'll fork a new one */
4251   KMP_ASSERT(__kmp_nth == __kmp_all_nth);
4252   KMP_ASSERT(__kmp_all_nth < __kmp_threads_capacity);
4253 
4254 #if KMP_USE_MONITOR
4255   // If this is the first worker thread the RTL is creating, then also
4256   // launch the monitor thread.  We try to do this as early as possible.
4257   if (!TCR_4(__kmp_init_monitor)) {
4258     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
4259     if (!TCR_4(__kmp_init_monitor)) {
4260       KF_TRACE(10, ("before __kmp_create_monitor\n"));
4261       TCW_4(__kmp_init_monitor, 1);
4262       __kmp_create_monitor(&__kmp_monitor);
4263       KF_TRACE(10, ("after __kmp_create_monitor\n"));
4264 #if KMP_OS_WINDOWS
      // AC: wait until the monitor has started. This is a fix for CQ232808.
      // If the library is loaded/unloaded in a loop with small (parallel) work
      // in between, there is a high probability that the monitor thread starts
      // only after the library has already shut down. At shutdown it is too
      // late to cope with the problem: once the master is in DllMain (process
      // detach) the monitor has no chance to start (it is blocked), and the
      // master has no means to inform the monitor that the library is gone,
      // because all the memory the monitor could access is about to be
      // released/reset.
4274       while (TCR_4(__kmp_init_monitor) < 2) {
4275         KMP_YIELD(TRUE);
4276       }
4277       KF_TRACE(10, ("after monitor thread has started\n"));
4278 #endif
4279     }
4280     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
4281   }
4282 #endif
4283 
4284   KMP_MB();
4285   for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
4286     KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
4287   }
4288 
4289   /* allocate space for it. */
4290   new_thr = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
4291 
4292   TCW_SYNC_PTR(__kmp_threads[new_gtid], new_thr);
4293 
4294   if (__kmp_storage_map) {
4295     __kmp_print_thread_storage_map(new_thr, new_gtid);
4296   }
4297 
4298   // add the reserve serialized team, initialized from the team's master thread
4299   {
4300     kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
4301     KF_TRACE(10, ("__kmp_allocate_thread: before th_serial/serial_team\n"));
4302     new_thr->th.th_serial_team = serial_team =
4303         (kmp_team_t *)__kmp_allocate_team(root, 1, 1,
4304 #if OMPT_SUPPORT
4305                                           ompt_data_none, // root parallel id
4306 #endif
4307 #if OMP_40_ENABLED
4308                                           proc_bind_default,
4309 #endif
4310                                           &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
4311   }
4312   KMP_ASSERT(serial_team);
  serial_team->t.t_serialized = 0; // AC: the team is created in reserve, not
  // for execution (it is unused for now).
4315   serial_team->t.t_threads[0] = new_thr;
4316   KF_TRACE(10,
4317            ("__kmp_allocate_thread: after th_serial/serial_team : new_thr=%p\n",
4318             new_thr));
4319 
4320   /* setup the thread structures */
4321   __kmp_initialize_info(new_thr, team, new_tid, new_gtid);
4322 
4323 #if USE_FAST_MEMORY
4324   __kmp_initialize_fast_memory(new_thr);
4325 #endif /* USE_FAST_MEMORY */
4326 
4327 #if KMP_USE_BGET
4328   KMP_DEBUG_ASSERT(new_thr->th.th_local.bget_data == NULL);
4329   __kmp_initialize_bget(new_thr);
4330 #endif
4331 
4332   __kmp_init_random(new_thr); // Initialize random number generator
4333 
4334   /* Initialize these only once when thread is grabbed for a team allocation */
4335   KA_TRACE(20,
4336            ("__kmp_allocate_thread: T#%d init go fork=%u, plain=%u\n",
4337             __kmp_get_gtid(), KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
4338 
4339   int b;
4340   kmp_balign_t *balign = new_thr->th.th_bar;
4341   for (b = 0; b < bs_last_barrier; ++b) {
4342     balign[b].bb.b_go = KMP_INIT_BARRIER_STATE;
4343     balign[b].bb.team = NULL;
4344     balign[b].bb.wait_flag = KMP_BARRIER_NOT_WAITING;
4345     balign[b].bb.use_oncore_barrier = 0;
4346   }
4347 
4348   new_thr->th.th_spin_here = FALSE;
4349   new_thr->th.th_next_waiting = 0;
4350 #if KMP_OS_UNIX
4351   new_thr->th.th_blocking = false;
4352 #endif
4353 
4354 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
4355   new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
4356   new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
4357   new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
4358   new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
4359 #endif
4360 #if OMP_50_ENABLED
4361   new_thr->th.th_def_allocator = __kmp_def_allocator;
4362 #endif
4363 
4364   TCW_4(new_thr->th.th_in_pool, FALSE);
4365   new_thr->th.th_active_in_pool = FALSE;
4366   TCW_4(new_thr->th.th_active, TRUE);
4367 
4368   /* adjust the global counters */
4369   __kmp_all_nth++;
4370   __kmp_nth++;
4371 
4372   root->r.r_cg_nthreads++;
4373 
4374   // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for low
4375   // numbers of procs, and method #2 (keyed API call) for higher numbers.
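  // e.g., once __kmp_all_nth reaches __kmp_tls_gtid_min the runtime switches
  // to method #2 (keyed API call); if the thread count later drops back below
  // the threshold, it switches back to method #1 (sp search).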
4376   if (__kmp_adjust_gtid_mode) {
4377     if (__kmp_all_nth >= __kmp_tls_gtid_min) {
4378       if (TCR_4(__kmp_gtid_mode) != 2) {
4379         TCW_4(__kmp_gtid_mode, 2);
4380       }
4381     } else {
4382       if (TCR_4(__kmp_gtid_mode) != 1) {
4383         TCW_4(__kmp_gtid_mode, 1);
4384       }
4385     }
4386   }
4387 
4388 #ifdef KMP_ADJUST_BLOCKTIME
4389   /* Adjust blocktime back to zero if necessary       */
4390   /* Middle initialization might not have occurred yet */
4391   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4392     if (__kmp_nth > __kmp_avail_proc) {
4393       __kmp_zero_bt = TRUE;
4394     }
4395   }
4396 #endif /* KMP_ADJUST_BLOCKTIME */
4397 
4398   /* actually fork it and create the new worker thread */
4399   KF_TRACE(
4400       10, ("__kmp_allocate_thread: before __kmp_create_worker: %p\n", new_thr));
4401   __kmp_create_worker(new_gtid, new_thr, __kmp_stksize);
4402   KF_TRACE(10,
4403            ("__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr));
4404 
4405   KA_TRACE(20, ("__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(),
4406                 new_gtid));
4407   KMP_MB();
4408   return new_thr;
4409 }
4410 
4411 /* Reinitialize team for reuse.
   The hot team code calls this routine at every fork barrier, so EPCC barrier
   tests are extremely sensitive to changes in it, esp. writes to the team
4414    struct, which cause a cache invalidation in all threads.
4415    IF YOU TOUCH THIS ROUTINE, RUN EPCC C SYNCBENCH ON A BIG-IRON MACHINE!!! */
4416 static void __kmp_reinitialize_team(kmp_team_t *team,
4417                                     kmp_internal_control_t *new_icvs,
4418                                     ident_t *loc) {
4419   KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
4420                 team->t.t_threads[0], team));
4421   KMP_DEBUG_ASSERT(team && new_icvs);
4422   KMP_DEBUG_ASSERT((!TCR_4(__kmp_init_parallel)) || new_icvs->nproc);
4423   KMP_CHECK_UPDATE(team->t.t_ident, loc);
4424 
4425   KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
4426   // Copy ICVs to the master thread's implicit taskdata
4427   __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
4428   copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);
4429 
4430   KF_TRACE(10, ("__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
4431                 team->t.t_threads[0], team));
4432 }
4433 
4434 /* Initialize the team data structure.
4435    This assumes the t_threads and t_max_nproc are already set.
4436    Also, we don't touch the arguments */
4437 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
4438                                   kmp_internal_control_t *new_icvs,
4439                                   ident_t *loc) {
4440   KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));
4441 
4442   /* verify */
4443   KMP_DEBUG_ASSERT(team);
4444   KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
4445   KMP_DEBUG_ASSERT(team->t.t_threads);
4446   KMP_MB();
4447 
4448   team->t.t_master_tid = 0; /* not needed */
4449   /* team->t.t_master_bar;        not needed */
4450   team->t.t_serialized = new_nproc > 1 ? 0 : 1;
4451   team->t.t_nproc = new_nproc;
4452 
4453   /* team->t.t_parent     = NULL; TODO not needed & would mess up hot team */
4454   team->t.t_next_pool = NULL;
4455   /* memset( team->t.t_threads, 0, sizeof(kmp_info_t*)*new_nproc ); would mess
4456    * up hot team */
4457 
4458   TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
4459   team->t.t_invoke = NULL; /* not needed */
4460 
4461   // TODO???: team->t.t_max_active_levels       = new_max_active_levels;
4462   team->t.t_sched.sched = new_icvs->sched.sched;
4463 
4464 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4465   team->t.t_fp_control_saved = FALSE; /* not needed */
4466   team->t.t_x87_fpu_control_word = 0; /* not needed */
4467   team->t.t_mxcsr = 0; /* not needed */
4468 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4469 
4470   team->t.t_construct = 0;
4471 
4472   team->t.t_ordered.dt.t_value = 0;
4473   team->t.t_master_active = FALSE;
4474 
4475   memset(&team->t.t_taskq, '\0', sizeof(kmp_taskq_t));
4476 
4477 #ifdef KMP_DEBUG
4478   team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */
4479 #endif
4480 #if KMP_OS_WINDOWS
4481   team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */
4482 #endif
4483 
4484   team->t.t_control_stack_top = NULL;
4485 
4486   __kmp_reinitialize_team(team, new_icvs, loc);
4487 
4488   KMP_MB();
4489   KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
4490 }
4491 
4492 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
4493 /* Sets full mask for thread and returns old mask, no changes to structures. */
4494 static void
4495 __kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
4496   if (KMP_AFFINITY_CAPABLE()) {
4497     int status;
4498     if (old_mask != NULL) {
4499       status = __kmp_get_system_affinity(old_mask, TRUE);
4500       int error = errno;
4501       if (status != 0) {
4502         __kmp_fatal(KMP_MSG(ChangeThreadAffMaskError), KMP_ERR(error),
4503                     __kmp_msg_null);
4504       }
4505     }
4506     __kmp_set_system_affinity(__kmp_affin_fullMask, TRUE);
4507   }
4508 }
4509 #endif
4510 
4511 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
4512 
4513 // __kmp_partition_places() is the heart of the OpenMP 4.0 affinity mechanism.
// It calculates the worker and master threads' partition based upon the parent
// thread's partition, and binds each worker to a place in its partition. The
// master thread's partition should already include its current binding.
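// For illustration (assuming no wrap-around of places): if the master's
// partition is places [2,5], the master is bound to place 2 and the team has
// 4 threads, then proc_bind_master leaves all workers on place 2, while
// proc_bind_close assigns workers 1..3 to places 3, 4 and 5 respectively,
// each inheriting the [2,5] partition.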
4517 static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
  // Copy the master thread's place partition to the team struct
4519   kmp_info_t *master_th = team->t.t_threads[0];
4520   KMP_DEBUG_ASSERT(master_th != NULL);
4521   kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
4522   int first_place = master_th->th.th_first_place;
4523   int last_place = master_th->th.th_last_place;
4524   int masters_place = master_th->th.th_current_place;
4525   team->t.t_first_place = first_place;
4526   team->t.t_last_place = last_place;
4527 
4528   KA_TRACE(20, ("__kmp_partition_places: enter: proc_bind = %d T#%d(%d:0) "
4529                 "bound to place %d partition = [%d,%d]\n",
4530                 proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
4531                 team->t.t_id, masters_place, first_place, last_place));
4532 
4533   switch (proc_bind) {
4534 
4535   case proc_bind_default:
    // serial teams might have the proc_bind policy set to proc_bind_default.
    // It doesn't matter, as we don't rebind the master thread for any
    // proc_bind policy.
4538     KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
4539     break;
4540 
4541   case proc_bind_master: {
4542     int f;
4543     int n_th = team->t.t_nproc;
4544     for (f = 1; f < n_th; f++) {
4545       kmp_info_t *th = team->t.t_threads[f];
4546       KMP_DEBUG_ASSERT(th != NULL);
4547       th->th.th_first_place = first_place;
4548       th->th.th_last_place = last_place;
4549       th->th.th_new_place = masters_place;
4550 
4551       KA_TRACE(100, ("__kmp_partition_places: master: T#%d(%d:%d) place %d "
4552                      "partition = [%d,%d]\n",
4553                      __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4554                      f, masters_place, first_place, last_place));
4555     }
4556   } break;
4557 
4558   case proc_bind_close: {
4559     int f;
4560     int n_th = team->t.t_nproc;
4561     int n_places;
4562     if (first_place <= last_place) {
4563       n_places = last_place - first_place + 1;
4564     } else {
4565       n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4566     }
4567     if (n_th <= n_places) {
4568       int place = masters_place;
4569       for (f = 1; f < n_th; f++) {
4570         kmp_info_t *th = team->t.t_threads[f];
4571         KMP_DEBUG_ASSERT(th != NULL);
4572 
4573         if (place == last_place) {
4574           place = first_place;
4575         } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4576           place = 0;
4577         } else {
4578           place++;
4579         }
4580         th->th.th_first_place = first_place;
4581         th->th.th_last_place = last_place;
4582         th->th.th_new_place = place;
4583 
4584         KA_TRACE(100, ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
4585                        "partition = [%d,%d]\n",
4586                        __kmp_gtid_from_thread(team->t.t_threads[f]),
4587                        team->t.t_id, f, place, first_place, last_place));
4588       }
4589     } else {
4590       int S, rem, gap, s_count;
4591       S = n_th / n_places;
4592       s_count = 0;
4593       rem = n_th - (S * n_places);
4594       gap = rem > 0 ? n_places / rem : n_places;
4595       int place = masters_place;
4596       int gap_ct = gap;
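      // For illustration: with n_th = 5 threads and n_places = 2 (partition
      // [0,1], master on place 0) we get S = 2, rem = 1, gap = 2, so the loop
      // below puts threads 0-2 on place 0 (one place absorbs the remainder)
      // and threads 3-4 on place 1, finishing with place back at the master's
      // place.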
4597       for (f = 0; f < n_th; f++) {
4598         kmp_info_t *th = team->t.t_threads[f];
4599         KMP_DEBUG_ASSERT(th != NULL);
4600 
4601         th->th.th_first_place = first_place;
4602         th->th.th_last_place = last_place;
4603         th->th.th_new_place = place;
4604         s_count++;
4605 
4606         if ((s_count == S) && rem && (gap_ct == gap)) {
4607           // do nothing, add an extra thread to place on next iteration
4608         } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4609           // we added an extra thread to this place; move to next place
4610           if (place == last_place) {
4611             place = first_place;
4612           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4613             place = 0;
4614           } else {
4615             place++;
4616           }
4617           s_count = 0;
4618           gap_ct = 1;
4619           rem--;
4620         } else if (s_count == S) { // place full; don't add extra
4621           if (place == last_place) {
4622             place = first_place;
4623           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4624             place = 0;
4625           } else {
4626             place++;
4627           }
4628           gap_ct++;
4629           s_count = 0;
4630         }
4631 
4632         KA_TRACE(100,
4633                  ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
4634                   "partition = [%d,%d]\n",
4635                   __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
4636                   th->th.th_new_place, first_place, last_place));
4637       }
4638       KMP_DEBUG_ASSERT(place == masters_place);
4639     }
4640   } break;
4641 
4642   case proc_bind_spread: {
4643     int f;
4644     int n_th = team->t.t_nproc;
4645     int n_places;
4646     int thidx;
4647     if (first_place <= last_place) {
4648       n_places = last_place - first_place + 1;
4649     } else {
4650       n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4651     }
4652     if (n_th <= n_places) {
4653       int place = -1;
4654 
4655       if (n_places != static_cast<int>(__kmp_affinity_num_masks)) {
4656         int S = n_places / n_th;
4657         int s_count, rem, gap, gap_ct;
4658 
4659         place = masters_place;
4660         rem = n_places - n_th * S;
4661         gap = rem ? n_th / rem : 1;
4662         gap_ct = gap;
4663         thidx = n_th;
4664         if (update_master_only == 1)
4665           thidx = 1;
4666         for (f = 0; f < thidx; f++) {
4667           kmp_info_t *th = team->t.t_threads[f];
4668           KMP_DEBUG_ASSERT(th != NULL);
4669 
4670           th->th.th_first_place = place;
4671           th->th.th_new_place = place;
4672           s_count = 1;
4673           while (s_count < S) {
4674             if (place == last_place) {
4675               place = first_place;
4676             } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4677               place = 0;
4678             } else {
4679               place++;
4680             }
4681             s_count++;
4682           }
4683           if (rem && (gap_ct == gap)) {
4684             if (place == last_place) {
4685               place = first_place;
4686             } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4687               place = 0;
4688             } else {
4689               place++;
4690             }
4691             rem--;
4692             gap_ct = 0;
4693           }
4694           th->th.th_last_place = place;
4695           gap_ct++;
4696 
4697           if (place == last_place) {
4698             place = first_place;
4699           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4700             place = 0;
4701           } else {
4702             place++;
4703           }
4704 
4705           KA_TRACE(100,
4706                    ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4707                     "partition = [%d,%d], __kmp_affinity_num_masks: %u\n",
4708                     __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4709                     f, th->th.th_new_place, th->th.th_first_place,
4710                     th->th.th_last_place, __kmp_affinity_num_masks));
4711         }
4712       } else {
        /* With a uniform space of available computation places we can create
           T partitions of roughly P/T places each and put each thread into
           the first place of its partition. */
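        /* For illustration: with n_places = 8, n_th = 3 and the master on
           place 0, spacing = (8 + 1) / 3 = 3.0, so the loop below produces
           partitions [0,2], [3,5] and [6,7], binding each thread to the first
           place of its partition. */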
4716         double current = static_cast<double>(masters_place);
4717         double spacing =
4718             (static_cast<double>(n_places + 1) / static_cast<double>(n_th));
4719         int first, last;
4720         kmp_info_t *th;
4721 
4722         thidx = n_th + 1;
4723         if (update_master_only == 1)
4724           thidx = 1;
4725         for (f = 0; f < thidx; f++) {
4726           first = static_cast<int>(current);
4727           last = static_cast<int>(current + spacing) - 1;
4728           KMP_DEBUG_ASSERT(last >= first);
4729           if (first >= n_places) {
4730             if (masters_place) {
4731               first -= n_places;
4732               last -= n_places;
4733               if (first == (masters_place + 1)) {
4734                 KMP_DEBUG_ASSERT(f == n_th);
4735                 first--;
4736               }
4737               if (last == masters_place) {
4738                 KMP_DEBUG_ASSERT(f == (n_th - 1));
4739                 last--;
4740               }
4741             } else {
4742               KMP_DEBUG_ASSERT(f == n_th);
4743               first = 0;
4744               last = 0;
4745             }
4746           }
4747           if (last >= n_places) {
4748             last = (n_places - 1);
4749           }
4750           place = first;
4751           current += spacing;
4752           if (f < n_th) {
4753             KMP_DEBUG_ASSERT(0 <= first);
4754             KMP_DEBUG_ASSERT(n_places > first);
4755             KMP_DEBUG_ASSERT(0 <= last);
4756             KMP_DEBUG_ASSERT(n_places > last);
4757             KMP_DEBUG_ASSERT(last_place >= first_place);
4758             th = team->t.t_threads[f];
4759             KMP_DEBUG_ASSERT(th);
4760             th->th.th_first_place = first;
4761             th->th.th_new_place = place;
4762             th->th.th_last_place = last;
4763 
4764             KA_TRACE(100,
4765                      ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4766                       "partition = [%d,%d], spacing = %.4f\n",
4767                       __kmp_gtid_from_thread(team->t.t_threads[f]),
4768                       team->t.t_id, f, th->th.th_new_place,
4769                       th->th.th_first_place, th->th.th_last_place, spacing));
4770           }
4771         }
4772       }
4773       KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4774     } else {
4775       int S, rem, gap, s_count;
4776       S = n_th / n_places;
4777       s_count = 0;
4778       rem = n_th - (S * n_places);
4779       gap = rem > 0 ? n_places / rem : n_places;
4780       int place = masters_place;
4781       int gap_ct = gap;
4782       thidx = n_th;
4783       if (update_master_only == 1)
4784         thidx = 1;
4785       for (f = 0; f < thidx; f++) {
4786         kmp_info_t *th = team->t.t_threads[f];
4787         KMP_DEBUG_ASSERT(th != NULL);
4788 
4789         th->th.th_first_place = place;
4790         th->th.th_last_place = place;
4791         th->th.th_new_place = place;
4792         s_count++;
4793 
4794         if ((s_count == S) && rem && (gap_ct == gap)) {
4795           // do nothing, add an extra thread to place on next iteration
4796         } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4797           // we added an extra thread to this place; move on to next place
4798           if (place == last_place) {
4799             place = first_place;
4800           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4801             place = 0;
4802           } else {
4803             place++;
4804           }
4805           s_count = 0;
4806           gap_ct = 1;
4807           rem--;
4808         } else if (s_count == S) { // place is full; don't add extra thread
4809           if (place == last_place) {
4810             place = first_place;
4811           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4812             place = 0;
4813           } else {
4814             place++;
4815           }
4816           gap_ct++;
4817           s_count = 0;
4818         }
4819 
4820         KA_TRACE(100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4821                        "partition = [%d,%d]\n",
4822                        __kmp_gtid_from_thread(team->t.t_threads[f]),
4823                        team->t.t_id, f, th->th.th_new_place,
4824                        th->th.th_first_place, th->th.th_last_place));
4825       }
4826       KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4827     }
4828   } break;
4829 
4830   default:
4831     break;
4832   }
4833 
4834   KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
4835 }
4836 
4837 #endif /* OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED */
4838 
4839 /* allocate a new team data structure to use.  take one off of the free pool if
4840    available */
4841 kmp_team_t *
4842 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
4843 #if OMPT_SUPPORT
4844                     ompt_data_t ompt_parallel_data,
4845 #endif
4846 #if OMP_40_ENABLED
4847                     kmp_proc_bind_t new_proc_bind,
4848 #endif
4849                     kmp_internal_control_t *new_icvs,
4850                     int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
4851   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
4852   int f;
4853   kmp_team_t *team;
4854   int use_hot_team = !root->r.r_active;
4855   int level = 0;
4856 
4857   KA_TRACE(20, ("__kmp_allocate_team: called\n"));
4858   KMP_DEBUG_ASSERT(new_nproc >= 1 && argc >= 0);
4859   KMP_DEBUG_ASSERT(max_nproc >= new_nproc);
4860   KMP_MB();
4861 
4862 #if KMP_NESTED_HOT_TEAMS
4863   kmp_hot_team_ptr_t *hot_teams;
4864   if (master) {
4865     team = master->th.th_team;
4866     level = team->t.t_active_level;
4867     if (master->th.th_teams_microtask) { // in teams construct?
4868       if (master->th.th_teams_size.nteams > 1 &&
4869           ( // #teams > 1
4870               team->t.t_pkfn ==
4871                   (microtask_t)__kmp_teams_master || // inner fork of the teams
4872               master->th.th_teams_level <
4873                   team->t.t_level)) { // or nested parallel inside the teams
        ++level; // do not increment if #teams==1 or for the outer fork of the
        // teams; increment otherwise
4876       }
4877     }
4878     hot_teams = master->th.th_hot_teams;
4879     if (level < __kmp_hot_teams_max_level && hot_teams &&
4880         hot_teams[level]
4881             .hot_team) { // hot team has already been allocated for given level
4882       use_hot_team = 1;
4883     } else {
4884       use_hot_team = 0;
4885     }
4886   }
4887 #endif
4888   // Optimization to use a "hot" team
4889   if (use_hot_team && new_nproc > 1) {
4890     KMP_DEBUG_ASSERT(new_nproc == max_nproc);
4891 #if KMP_NESTED_HOT_TEAMS
4892     team = hot_teams[level].hot_team;
4893 #else
4894     team = root->r.r_hot_team;
4895 #endif
4896 #if KMP_DEBUG
4897     if (__kmp_tasking_mode != tskm_immediate_exec) {
4898       KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
4899                     "task_team[1] = %p before reinit\n",
4900                     team->t.t_task_team[0], team->t.t_task_team[1]));
4901     }
4902 #endif
4903 
4904     // Has the number of threads changed?
4905     /* Let's assume the most common case is that the number of threads is
4906        unchanged, and put that case first. */
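    // Three cases follow: the team size is unchanged (most common, fast
    // path), the hot team shrinks, or the hot team grows.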
4907     if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
4908       KA_TRACE(20, ("__kmp_allocate_team: reusing hot team\n"));
4909       // This case can mean that omp_set_num_threads() was called and the hot
4910       // team size was already reduced, so we check the special flag
4911       if (team->t.t_size_changed == -1) {
4912         team->t.t_size_changed = 1;
4913       } else {
4914         KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
4915       }
4916 
4917       // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4918       kmp_r_sched_t new_sched = new_icvs->sched;
4919       // set master's schedule as new run-time schedule
4920       KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
4921 
4922       __kmp_reinitialize_team(team, new_icvs,
4923                               root->r.r_uber_thread->th.th_ident);
4924 
4925       KF_TRACE(10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
4926                     team->t.t_threads[0], team));
4927       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
4928 
4929 #if OMP_40_ENABLED
4930 #if KMP_AFFINITY_SUPPORTED
4931       if ((team->t.t_size_changed == 0) &&
4932           (team->t.t_proc_bind == new_proc_bind)) {
4933         if (new_proc_bind == proc_bind_spread) {
4934           __kmp_partition_places(
4935               team, 1); // add flag to update only master for spread
4936         }
4937         KA_TRACE(200, ("__kmp_allocate_team: reusing hot team #%d bindings: "
4938                        "proc_bind = %d, partition = [%d,%d]\n",
4939                        team->t.t_id, new_proc_bind, team->t.t_first_place,
4940                        team->t.t_last_place));
4941       } else {
4942         KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4943         __kmp_partition_places(team);
4944       }
4945 #else
4946       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4947 #endif /* KMP_AFFINITY_SUPPORTED */
4948 #endif /* OMP_40_ENABLED */
4949     } else if (team->t.t_nproc > new_nproc) {
4950       KA_TRACE(20,
4951                ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
4952                 new_nproc));
4953 
4954       team->t.t_size_changed = 1;
4955 #if KMP_NESTED_HOT_TEAMS
4956       if (__kmp_hot_teams_mode == 0) {
        // AC: the saved number of threads should correspond to the team's
        // value in this mode; it can be bigger in mode 1, when the hot team
        // has threads in reserve.
4959         KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
4960         hot_teams[level].hot_team_nth = new_nproc;
4961 #endif // KMP_NESTED_HOT_TEAMS
4962         /* release the extra threads we don't need any more */
4963         for (f = new_nproc; f < team->t.t_nproc; f++) {
4964           KMP_DEBUG_ASSERT(team->t.t_threads[f]);
4965           if (__kmp_tasking_mode != tskm_immediate_exec) {
4966             // When decreasing team size, threads no longer in the team should
4967             // unref task team.
4968             team->t.t_threads[f]->th.th_task_team = NULL;
4969           }
4970           __kmp_free_thread(team->t.t_threads[f]);
4971           team->t.t_threads[f] = NULL;
4972         }
4973 #if KMP_NESTED_HOT_TEAMS
4974       } // (__kmp_hot_teams_mode == 0)
4975       else {
4976         // When keeping extra threads in team, switch threads to wait on own
4977         // b_go flag
4978         for (f = new_nproc; f < team->t.t_nproc; ++f) {
4979           KMP_DEBUG_ASSERT(team->t.t_threads[f]);
4980           kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
4981           for (int b = 0; b < bs_last_barrier; ++b) {
4982             if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG) {
4983               balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
4984             }
4985             KMP_CHECK_UPDATE(balign[b].bb.leaf_kids, 0);
4986           }
4987         }
4988       }
4989 #endif // KMP_NESTED_HOT_TEAMS
4990       team->t.t_nproc = new_nproc;
4991       // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4992       KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
4993       __kmp_reinitialize_team(team, new_icvs,
4994                               root->r.r_uber_thread->th.th_ident);
4995 
4996       /* update the remaining threads */
4997       for (f = 0; f < new_nproc; ++f) {
4998         team->t.t_threads[f]->th.th_team_nproc = new_nproc;
4999       }
5000       // restore the current task state of the master thread: should be the
5001       // implicit task
5002       KF_TRACE(10, ("__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
5003                     team->t.t_threads[0], team));
5004 
5005       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5006 
5007 #ifdef KMP_DEBUG
5008       for (f = 0; f < team->t.t_nproc; f++) {
5009         KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5010                          team->t.t_threads[f]->th.th_team_nproc ==
5011                              team->t.t_nproc);
5012       }
5013 #endif
5014 
5015 #if OMP_40_ENABLED
5016       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5017 #if KMP_AFFINITY_SUPPORTED
5018       __kmp_partition_places(team);
5019 #endif
5020 #endif
5021     } else { // team->t.t_nproc < new_nproc
5022 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5023       kmp_affin_mask_t *old_mask;
5024       if (KMP_AFFINITY_CAPABLE()) {
5025         KMP_CPU_ALLOC(old_mask);
5026       }
5027 #endif
5028 
5029       KA_TRACE(20,
5030                ("__kmp_allocate_team: increasing hot team thread count to %d\n",
5031                 new_nproc));
5032 
5033       team->t.t_size_changed = 1;
5034 
5035 #if KMP_NESTED_HOT_TEAMS
5036       int avail_threads = hot_teams[level].hot_team_nth;
5037       if (new_nproc < avail_threads)
5038         avail_threads = new_nproc;
5039       kmp_info_t **other_threads = team->t.t_threads;
5040       for (f = team->t.t_nproc; f < avail_threads; ++f) {
5041         // Adjust barrier data of reserved threads (if any) of the team
5042         // Other data will be set in __kmp_initialize_info() below.
5043         int b;
5044         kmp_balign_t *balign = other_threads[f]->th.th_bar;
5045         for (b = 0; b < bs_last_barrier; ++b) {
5046           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5047           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5048 #if USE_DEBUGGER
5049           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5050 #endif
5051         }
5052       }
5053       if (hot_teams[level].hot_team_nth >= new_nproc) {
        // We have all needed threads in reserve, so no need to allocate any.
        // This is only possible in mode 1; there cannot be reserved threads
        // in mode 0.
5056         KMP_DEBUG_ASSERT(__kmp_hot_teams_mode == 1);
5057         team->t.t_nproc = new_nproc; // just get reserved threads involved
5058       } else {
5059         // we may have some threads in reserve, but not enough
5060         team->t.t_nproc =
5061             hot_teams[level]
5062                 .hot_team_nth; // get reserved threads involved if any
5063         hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
5064 #endif // KMP_NESTED_HOT_TEAMS
5065         if (team->t.t_max_nproc < new_nproc) {
5066           /* reallocate larger arrays */
5067           __kmp_reallocate_team_arrays(team, new_nproc);
5068           __kmp_reinitialize_team(team, new_icvs, NULL);
5069         }
5070 
5071 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5072         /* Temporarily set full mask for master thread before creation of
5073            workers. The reason is that workers inherit the affinity from master,
           so if a lot of workers are created on a single core quickly, they
5075            don't get a chance to set their own affinity for a long time. */
5076         __kmp_set_thread_affinity_mask_full_tmp(old_mask);
5077 #endif
5078 
5079         /* allocate new threads for the hot team */
5080         for (f = team->t.t_nproc; f < new_nproc; f++) {
5081           kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5082           KMP_DEBUG_ASSERT(new_worker);
5083           team->t.t_threads[f] = new_worker;
5084 
5085           KA_TRACE(20,
5086                    ("__kmp_allocate_team: team %d init T#%d arrived: "
5087                     "join=%llu, plain=%llu\n",
5088                     team->t.t_id, __kmp_gtid_from_tid(f, team), team->t.t_id, f,
5089                     team->t.t_bar[bs_forkjoin_barrier].b_arrived,
5090                     team->t.t_bar[bs_plain_barrier].b_arrived));
5091 
5092           { // Initialize barrier data for new threads.
5093             int b;
5094             kmp_balign_t *balign = new_worker->th.th_bar;
5095             for (b = 0; b < bs_last_barrier; ++b) {
5096               balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5097               KMP_DEBUG_ASSERT(balign[b].bb.wait_flag !=
5098                                KMP_BARRIER_PARENT_FLAG);
5099 #if USE_DEBUGGER
5100               balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5101 #endif
5102             }
5103           }
5104         }
5105 
5106 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5107         if (KMP_AFFINITY_CAPABLE()) {
5108           /* Restore initial master thread's affinity mask */
5109           __kmp_set_system_affinity(old_mask, TRUE);
5110           KMP_CPU_FREE(old_mask);
5111         }
5112 #endif
5113 #if KMP_NESTED_HOT_TEAMS
5114       } // end of check of t_nproc vs. new_nproc vs. hot_team_nth
5115 #endif // KMP_NESTED_HOT_TEAMS
      /* make sure everyone is synchronized */
5117       int old_nproc = team->t.t_nproc; // save old value and use to update only
5118       // new threads below
5119       __kmp_initialize_team(team, new_nproc, new_icvs,
5120                             root->r.r_uber_thread->th.th_ident);
5121 
5122       /* reinitialize the threads */
5123       KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
5124       for (f = 0; f < team->t.t_nproc; ++f)
5125         __kmp_initialize_info(team->t.t_threads[f], team, f,
5126                               __kmp_gtid_from_tid(f, team));
5127       if (level) { // set th_task_state for new threads in nested hot team
5128         // __kmp_initialize_info() no longer zeroes th_task_state, so we should
5129         // only need to set the th_task_state for the new threads. th_task_state
5130         // for master thread will not be accurate until after this in
5131         // __kmp_fork_call(), so we look to the master's memo_stack to get the
5132         // correct value.
5133         for (f = old_nproc; f < team->t.t_nproc; ++f)
5134           team->t.t_threads[f]->th.th_task_state =
5135               team->t.t_threads[0]->th.th_task_state_memo_stack[level];
5136       } else { // set th_task_state for new threads in non-nested hot team
5137         int old_state =
5138             team->t.t_threads[0]->th.th_task_state; // copy master's state
5139         for (f = old_nproc; f < team->t.t_nproc; ++f)
5140           team->t.t_threads[f]->th.th_task_state = old_state;
5141       }
5142 
5143 #ifdef KMP_DEBUG
5144       for (f = 0; f < team->t.t_nproc; ++f) {
5145         KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5146                          team->t.t_threads[f]->th.th_team_nproc ==
5147                              team->t.t_nproc);
5148       }
5149 #endif
5150 
5151 #if OMP_40_ENABLED
5152       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5153 #if KMP_AFFINITY_SUPPORTED
5154       __kmp_partition_places(team);
5155 #endif
5156 #endif
5157     } // Check changes in number of threads
5158 
5159 #if OMP_40_ENABLED
5160     kmp_info_t *master = team->t.t_threads[0];
5161     if (master->th.th_teams_microtask) {
5162       for (f = 1; f < new_nproc; ++f) {
5163         // propagate teams construct specific info to workers
5164         kmp_info_t *thr = team->t.t_threads[f];
5165         thr->th.th_teams_microtask = master->th.th_teams_microtask;
5166         thr->th.th_teams_level = master->th.th_teams_level;
5167         thr->th.th_teams_size = master->th.th_teams_size;
5168       }
5169     }
5170 #endif /* OMP_40_ENABLED */
5171 #if KMP_NESTED_HOT_TEAMS
5172     if (level) {
5173       // Sync barrier state for nested hot teams, not needed for outermost hot
5174       // team.
5175       for (f = 1; f < new_nproc; ++f) {
5176         kmp_info_t *thr = team->t.t_threads[f];
5177         int b;
5178         kmp_balign_t *balign = thr->th.th_bar;
5179         for (b = 0; b < bs_last_barrier; ++b) {
5180           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5181           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5182 #if USE_DEBUGGER
5183           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5184 #endif
5185         }
5186       }
5187     }
5188 #endif // KMP_NESTED_HOT_TEAMS
5189 
5190     /* reallocate space for arguments if necessary */
5191     __kmp_alloc_argv_entries(argc, team, TRUE);
5192     KMP_CHECK_UPDATE(team->t.t_argc, argc);
5193     // The hot team re-uses the previous task team,
5194     // if untouched during the previous release->gather phase.
5195 
5196     KF_TRACE(10, (" hot_team = %p\n", team));
5197 
5198 #if KMP_DEBUG
5199     if (__kmp_tasking_mode != tskm_immediate_exec) {
5200       KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
5201                     "task_team[1] = %p after reinit\n",
5202                     team->t.t_task_team[0], team->t.t_task_team[1]));
5203     }
5204 #endif
5205 
5206 #if OMPT_SUPPORT
5207     __ompt_team_assign_id(team, ompt_parallel_data);
5208 #endif
5209 
5210     KMP_MB();
5211 
5212     return team;
5213   }
5214 
5215   /* next, let's try to take one from the team pool */
5216   KMP_MB();
5217   for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
5218     /* TODO: consider resizing undersized teams instead of reaping them, now
5219        that we have a resizing mechanism */
5220     if (team->t.t_max_nproc >= max_nproc) {
5221       /* take this team from the team pool */
5222       __kmp_team_pool = team->t.t_next_pool;
5223 
5224       /* setup the team for fresh use */
5225       __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5226 
5227       KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and "
5228                     "task_team[1] %p to NULL\n",
5229                     &team->t.t_task_team[0], &team->t.t_task_team[1]));
5230       team->t.t_task_team[0] = NULL;
5231       team->t.t_task_team[1] = NULL;
5232 
5233       /* reallocate space for arguments if necessary */
5234       __kmp_alloc_argv_entries(argc, team, TRUE);
5235       KMP_CHECK_UPDATE(team->t.t_argc, argc);
5236 
5237       KA_TRACE(
5238           20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5239                team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5240       { // Initialize barrier data.
5241         int b;
5242         for (b = 0; b < bs_last_barrier; ++b) {
5243           team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5244 #if USE_DEBUGGER
5245           team->t.t_bar[b].b_master_arrived = 0;
5246           team->t.t_bar[b].b_team_arrived = 0;
5247 #endif
5248         }
5249       }
5250 
5251 #if OMP_40_ENABLED
5252       team->t.t_proc_bind = new_proc_bind;
5253 #endif
5254 
5255       KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
5256                     team->t.t_id));
5257 
5258 #if OMPT_SUPPORT
5259       __ompt_team_assign_id(team, ompt_parallel_data);
5260 #endif
5261 
5262       KMP_MB();
5263 
5264       return team;
5265     }
5266 
5267     /* reap team if it is too small, then loop back and check the next one */
    // not sure if this is wise, but it will be redone during the hot-teams
5269     // rewrite.
5270     /* TODO: Use technique to find the right size hot-team, don't reap them */
5271     team = __kmp_reap_team(team);
5272     __kmp_team_pool = team;
5273   }
5274 
5275   /* nothing available in the pool, no matter, make a new team! */
5276   KMP_MB();
5277   team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));
5278 
5279   /* and set it up */
5280   team->t.t_max_nproc = max_nproc;
  /* NOTE well: for some reason allocating one big buffer and dividing it up
     seems to really hurt performance on the P4, so let's not use this. */
5283   __kmp_allocate_team_arrays(team, max_nproc);
5284 
5285   KA_TRACE(20, ("__kmp_allocate_team: making a new team\n"));
5286   __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5287 
5288   KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and task_team[1] "
5289                 "%p to NULL\n",
5290                 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5291   team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes
5292   // memory, no need to duplicate
5293   team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes
5294   // memory, no need to duplicate
5295 
5296   if (__kmp_storage_map) {
5297     __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
5298   }
5299 
5300   /* allocate space for arguments */
5301   __kmp_alloc_argv_entries(argc, team, FALSE);
5302   team->t.t_argc = argc;
5303 
5304   KA_TRACE(20,
5305            ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5306             team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5307   { // Initialize barrier data.
5308     int b;
5309     for (b = 0; b < bs_last_barrier; ++b) {
5310       team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5311 #if USE_DEBUGGER
5312       team->t.t_bar[b].b_master_arrived = 0;
5313       team->t.t_bar[b].b_team_arrived = 0;
5314 #endif
5315     }
5316   }
5317 
5318 #if OMP_40_ENABLED
5319   team->t.t_proc_bind = new_proc_bind;
5320 #endif
5321 
5322 #if OMPT_SUPPORT
5323   __ompt_team_assign_id(team, ompt_parallel_data);
5324   team->t.ompt_serialized_team_info = NULL;
5325 #endif
5326 
5327   KMP_MB();
5328 
5329   KA_TRACE(20, ("__kmp_allocate_team: done creating a new team %d.\n",
5330                 team->t.t_id));
5331 
5332   return team;
5333 }
5334 
5335 /* TODO implement hot-teams at all levels */
5336 /* TODO implement lazy thread release on demand (disband request) */
5337 
5338 /* free the team.  return it to the team pool.  release all the threads
5339  * associated with it */
5340 void __kmp_free_team(kmp_root_t *root,
5341                      kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
5342   int f;
5343   KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
5344                 team->t.t_id));
5345 
5346   /* verify state */
5347   KMP_DEBUG_ASSERT(root);
5348   KMP_DEBUG_ASSERT(team);
5349   KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
5350   KMP_DEBUG_ASSERT(team->t.t_threads);
5351 
5352   int use_hot_team = team == root->r.r_hot_team;
5353 #if KMP_NESTED_HOT_TEAMS
5354   int level;
5355   kmp_hot_team_ptr_t *hot_teams;
5356   if (master) {
5357     level = team->t.t_active_level - 1;
5358     if (master->th.th_teams_microtask) { // in teams construct?
5359       if (master->th.th_teams_size.nteams > 1) {
5360         ++level; // level was not increased in teams construct for
5361         // team_of_masters
5362       }
5363       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
5364           master->th.th_teams_level == team->t.t_level) {
5365         ++level; // level was not increased in teams construct for
5366         // team_of_workers before the parallel
5367       } // team->t.t_level will be increased inside parallel
5368     }
5369     hot_teams = master->th.th_hot_teams;
5370     if (level < __kmp_hot_teams_max_level) {
5371       KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
5372       use_hot_team = 1;
5373     }
5374   }
5375 #endif // KMP_NESTED_HOT_TEAMS
5376 
5377   /* team is done working */
5378   TCW_SYNC_PTR(team->t.t_pkfn,
5379                NULL); // Important for Debugging Support Library.
5380 #if KMP_OS_WINDOWS
5381   team->t.t_copyin_counter = 0; // init counter for possible reuse
5382 #endif
5383   // Do not reset pointer to parent team to NULL for hot teams.
5384 
5385   /* if we are non-hot team, release our threads */
5386   if (!use_hot_team) {
5387     if (__kmp_tasking_mode != tskm_immediate_exec) {
5388       // Wait for threads to reach reapable state
5389       for (f = 1; f < team->t.t_nproc; ++f) {
5390         KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5391         kmp_info_t *th = team->t.t_threads[f];
5392         volatile kmp_uint32 *state = &th->th.th_reap_state;
5393         while (*state != KMP_SAFE_TO_REAP) {
5394 #if KMP_OS_WINDOWS
5395           // On Windows a thread can be killed at any time, check this
5396           DWORD ecode;
5397           if (!__kmp_is_thread_alive(th, &ecode)) {
5398             *state = KMP_SAFE_TO_REAP; // reset the flag for dead thread
5399             break;
5400           }
5401 #endif
5402           // first check if thread is sleeping
5403           kmp_flag_64 fl(&th->th.th_bar[bs_forkjoin_barrier].bb.b_go, th);
5404           if (fl.is_sleeping())
5405             fl.resume(__kmp_gtid_from_thread(th));
5406           KMP_CPU_PAUSE();
5407         }
5408       }
5409 
5410       // Delete task teams
5411       int tt_idx;
5412       for (tt_idx = 0; tt_idx < 2; ++tt_idx) {
5413         kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
5414         if (task_team != NULL) {
5415           for (f = 0; f < team->t.t_nproc;
5416                ++f) { // Have all threads unref task teams
5417             team->t.t_threads[f]->th.th_task_team = NULL;
5418           }
5419           KA_TRACE(
5420               20,
5421               ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
5422                __kmp_get_gtid(), task_team, team->t.t_id));
5423 #if KMP_NESTED_HOT_TEAMS
5424           __kmp_free_task_team(master, task_team);
5425 #endif
5426           team->t.t_task_team[tt_idx] = NULL;
5427         }
5428       }
5429     }
5430 
5431     // Reset pointer to parent team only for non-hot teams.
5432     team->t.t_parent = NULL;
5433     team->t.t_level = 0;
5434     team->t.t_active_level = 0;
5435 
5436     /* free the worker threads */
5437     for (f = 1; f < team->t.t_nproc; ++f) {
5438       KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5439       __kmp_free_thread(team->t.t_threads[f]);
5440       team->t.t_threads[f] = NULL;
5441     }
5442 
5443     /* put the team back in the team pool */
5444     /* TODO limit size of team pool, call reap_team if pool too large */
5445     team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
5446     __kmp_team_pool = (volatile kmp_team_t *)team;
5447   }
5448 
5449   KMP_MB();
5450 }
5451 
5452 /* reap the team.  destroy it, reclaim all its resources and free its memory */
5453 kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
5454   kmp_team_t *next_pool = team->t.t_next_pool;
5455 
5456   KMP_DEBUG_ASSERT(team);
5457   KMP_DEBUG_ASSERT(team->t.t_dispatch);
5458   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
5459   KMP_DEBUG_ASSERT(team->t.t_threads);
5460   KMP_DEBUG_ASSERT(team->t.t_argv);
5461 
5462   /* TODO clean the threads that are a part of this? */
5463 
5464   /* free stuff */
5465   __kmp_free_team_arrays(team);
5466   if (team->t.t_argv != &team->t.t_inline_argv[0])
5467     __kmp_free((void *)team->t.t_argv);
5468   __kmp_free(team);
5469 
5470   KMP_MB();
5471   return next_pool;
5472 }
5473 
5474 // Free the thread.  Don't reap it, just place it on the pool of available
5475 // threads.
5476 //
5477 // Changes for Quad issue 527845: We need a predictable OMP tid <-> gtid
5478 // binding for the affinity mechanism to be useful.
5479 //
5480 // Now, we always keep the free list (__kmp_thread_pool) sorted by gtid.
5481 // However, we want to avoid a potential performance problem by always
5482 // scanning through the list to find the correct point at which to insert
5483 // the thread (potential N**2 behavior).  To do this we keep track of the
5484 // last place a thread struct was inserted (__kmp_thread_pool_insert_pt).
5485 // With single-level parallelism, threads will always be added to the tail
5486 // of the list, kept track of by __kmp_thread_pool_insert_pt.  With nested
5487 // parallelism, all bets are off and we may need to scan through the entire
5488 // free list.
5489 //
5490 // This change also has a potentially large performance benefit, for some
5491 // applications.  Previously, as threads were freed from the hot team, they
5492 // would be placed back on the free list in inverse order.  If the hot team
// grew back to its original size, then the freed threads would be placed
5494 // back on the hot team in reverse order.  This could cause bad cache
5495 // locality problems on programs where the size of the hot team regularly
// grew and shrank.
5497 //
// Now, for single-level parallelism, the OMP tid is always == gtid.
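//
// For illustration: if the pool currently holds gtids {3, 5, 9} and T#7 is
// freed, the scan starts at __kmp_thread_pool_insert_pt (or at the head of
// the pool if the insert point's gtid is already past 7) and links T#7 in
// between T#5 and T#9, leaving __kmp_thread_pool_insert_pt pointing at T#7.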
5499 void __kmp_free_thread(kmp_info_t *this_th) {
5500   int gtid;
5501   kmp_info_t **scan;
5502   kmp_root_t *root = this_th->th.th_root;
5503 
5504   KA_TRACE(20, ("__kmp_free_thread: T#%d putting T#%d back on free pool.\n",
5505                 __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid));
5506 
5507   KMP_DEBUG_ASSERT(this_th);
5508 
  // When moving a thread to the pool, switch it to waiting on its own b_go
  // flag, and clear its team pointer (NULL team).
5511   int b;
5512   kmp_balign_t *balign = this_th->th.th_bar;
5513   for (b = 0; b < bs_last_barrier; ++b) {
5514     if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG)
5515       balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5516     balign[b].bb.team = NULL;
5517     balign[b].bb.leaf_kids = 0;
5518   }
5519   this_th->th.th_task_state = 0;
5520   this_th->th.th_reap_state = KMP_SAFE_TO_REAP;
5521 
5522   /* put thread back on the free pool */
5523   TCW_PTR(this_th->th.th_team, NULL);
5524   TCW_PTR(this_th->th.th_root, NULL);
5525   TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */
5526 
  /* If the implicit task assigned to this thread can be used by other threads,
   * then multiple threads may share the data and try to free the task in
   * __kmp_reap_thread at exit. This duplicate use of the task data is more
   * likely when the hot team is disabled, but it can occur even when the hot
   * team is enabled. */
5532   __kmp_free_implicit_task(this_th);
5533   this_th->th.th_current_task = NULL;
5534 
5535   // If the __kmp_thread_pool_insert_pt is already past the new insert
5536   // point, then we need to re-scan the entire list.
5537   gtid = this_th->th.th_info.ds.ds_gtid;
5538   if (__kmp_thread_pool_insert_pt != NULL) {
5539     KMP_DEBUG_ASSERT(__kmp_thread_pool != NULL);
5540     if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) {
5541       __kmp_thread_pool_insert_pt = NULL;
5542     }
5543   }
5544 
5545   // Scan down the list to find the place to insert the thread.
5546   // scan is the address of a link in the list, possibly the address of
5547   // __kmp_thread_pool itself.
5548   //
  // In the absence of nested parallelism, the for loop will have 0 iterations.
5550   if (__kmp_thread_pool_insert_pt != NULL) {
5551     scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
5552   } else {
5553     scan = CCAST(kmp_info_t **, &__kmp_thread_pool);
5554   }
5555   for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid);
5556        scan = &((*scan)->th.th_next_pool))
5557     ;
5558 
5559   // Insert the new element on the list, and set __kmp_thread_pool_insert_pt
5560   // to its address.
5561   TCW_PTR(this_th->th.th_next_pool, *scan);
5562   __kmp_thread_pool_insert_pt = *scan = this_th;
5563   KMP_DEBUG_ASSERT((this_th->th.th_next_pool == NULL) ||
5564                    (this_th->th.th_info.ds.ds_gtid <
5565                     this_th->th.th_next_pool->th.th_info.ds.ds_gtid));
5566   TCW_4(this_th->th.th_in_pool, TRUE);
5567   __kmp_thread_pool_nth++;
5568 
5569   TCW_4(__kmp_nth, __kmp_nth - 1);
5570   root->r.r_cg_nthreads--;
5571 
5572 #ifdef KMP_ADJUST_BLOCKTIME
5573   /* Adjust blocktime back to user setting or default if necessary */
5574   /* Middle initialization might never have occurred                */
5575   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5576     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5577     if (__kmp_nth <= __kmp_avail_proc) {
5578       __kmp_zero_bt = FALSE;
5579     }
5580   }
5581 #endif /* KMP_ADJUST_BLOCKTIME */
5582 
5583   KMP_MB();
5584 }
5585 
5586 /* ------------------------------------------------------------------------ */
5587 
5588 void *__kmp_launch_thread(kmp_info_t *this_thr) {
5589   int gtid = this_thr->th.th_info.ds.ds_gtid;
5590   /*    void                 *stack_data;*/
5591   kmp_team_t *(*volatile pteam);
5592 
5593   KMP_MB();
5594   KA_TRACE(10, ("__kmp_launch_thread: T#%d start\n", gtid));
5595 
5596   if (__kmp_env_consistency_check) {
5597     this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak?
5598   }
5599 
5600 #if OMPT_SUPPORT
5601   ompt_data_t *thread_data;
5602   if (ompt_enabled.enabled) {
5603     thread_data = &(this_thr->th.ompt_thread_info.thread_data);
5604     *thread_data = ompt_data_none;
5605 
5606     this_thr->th.ompt_thread_info.state = omp_state_overhead;
5607     this_thr->th.ompt_thread_info.wait_id = 0;
5608     this_thr->th.ompt_thread_info.idle_frame = OMPT_GET_FRAME_ADDRESS(0);
5609     if (ompt_enabled.ompt_callback_thread_begin) {
5610       ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
5611           ompt_thread_worker, thread_data);
5612     }
5613   }
5614 #endif
5615 
5616 #if OMPT_SUPPORT
5617   if (ompt_enabled.enabled) {
5618     this_thr->th.ompt_thread_info.state = omp_state_idle;
5619   }
5620 #endif
5621   /* This is the place where threads wait for work */
5622   while (!TCR_4(__kmp_global.g.g_done)) {
5623     KMP_DEBUG_ASSERT(this_thr == __kmp_threads[gtid]);
5624     KMP_MB();
5625 
5626     /* wait for work to do */
5627     KA_TRACE(20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid));
5628 
5629     /* No tid yet since not part of a team */
5630     __kmp_fork_barrier(gtid, KMP_GTID_DNE);
5631 
5632 #if OMPT_SUPPORT
5633     if (ompt_enabled.enabled) {
5634       this_thr->th.ompt_thread_info.state = omp_state_overhead;
5635     }
5636 #endif
5637 
5638     pteam = (kmp_team_t * (*))(&this_thr->th.th_team);
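    // Note: pteam points at th_team itself rather than caching its value,
    // presumably so that each TCR_SYNC_PTR(*pteam) below re-reads the field
    // and observes the team pointer the master publishes when assigning work.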
5639 
5640     /* have we been allocated? */
5641     if (TCR_SYNC_PTR(*pteam) && !TCR_4(__kmp_global.g.g_done)) {
5642       /* we were just woken up, so run our new task */
5643       if (TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL) {
5644         int rc;
5645         KA_TRACE(20,
5646                  ("__kmp_launch_thread: T#%d(%d:%d) invoke microtask = %p\n",
5647                   gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5648                   (*pteam)->t.t_pkfn));
5649 
5650         updateHWFPControl(*pteam);
5651 
5652 #if OMPT_SUPPORT
5653         if (ompt_enabled.enabled) {
5654           this_thr->th.ompt_thread_info.state = omp_state_work_parallel;
5655         }
5656 #endif
5657 
5658         rc = (*pteam)->t.t_invoke(gtid);
5659         KMP_ASSERT(rc);
5660 
5661         KMP_MB();
5662         KA_TRACE(20, ("__kmp_launch_thread: T#%d(%d:%d) done microtask = %p\n",
5663                       gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5664                       (*pteam)->t.t_pkfn));
5665       }
5666 #if OMPT_SUPPORT
5667       if (ompt_enabled.enabled) {
5668         /* no frame set while outside task */
5669         __ompt_get_task_info_object(0)->frame.exit_frame = NULL;
5670 
5671         this_thr->th.ompt_thread_info.state = omp_state_overhead;
5672       }
5673 #endif
5674       /* join barrier after parallel region */
5675       __kmp_join_barrier(gtid);
5676     }
5677   }
5678   TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);
5679 
5680 #if OMPT_SUPPORT
5681   if (ompt_enabled.ompt_callback_thread_end) {
5682     ompt_callbacks.ompt_callback(ompt_callback_thread_end)(thread_data);
5683   }
5684 #endif
5685 
5686   this_thr->th.th_task_team = NULL;
5687   /* run the destructors for the threadprivate data for this thread */
5688   __kmp_common_destroy_gtid(gtid);
5689 
5690   KA_TRACE(10, ("__kmp_launch_thread: T#%d done\n", gtid));
5691   KMP_MB();
5692   return this_thr;
5693 }
5694 
5695 /* ------------------------------------------------------------------------ */
5696 
5697 void __kmp_internal_end_dest(void *specific_gtid) {
5698 #if KMP_COMPILER_ICC
5699 #pragma warning(push)
5700 #pragma warning(disable : 810) // conversion from "void *" to "int" may lose
5701 // significant bits
5702 #endif
5703   // Make sure no significant bits are lost
5704   int gtid = (kmp_intptr_t)specific_gtid - 1;
5705 #if KMP_COMPILER_ICC
5706 #pragma warning(pop)
5707 #endif
5708 
5709   KA_TRACE(30, ("__kmp_internal_end_dest: T#%d\n", gtid));
  /* NOTE: the gtid is stored as gtid+1 in thread-local storage;
   * this is because 0 is reserved for the nothing-stored case */
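  /* For example: a stored value of 1 decodes to gtid 0 above, while a stored
     value of 0 means "nothing stored". */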
5712 
5713   /* josh: One reason for setting the gtid specific data even when it is being
5714      destroyed by pthread is to allow gtid lookup through thread specific data
5715      (__kmp_gtid_get_specific).  Some of the code, especially stat code,
5716      that gets executed in the call to __kmp_internal_end_thread, actually
5717      gets the gtid through the thread specific data.  Setting it here seems
5718      rather inelegant and perhaps wrong, but allows __kmp_internal_end_thread
5719      to run smoothly.
5720      todo: get rid of this after we remove the dependence on
5721      __kmp_gtid_get_specific  */
5722   if (gtid >= 0 && KMP_UBER_GTID(gtid))
5723     __kmp_gtid_set_specific(gtid);
5724 #ifdef KMP_TDATA_GTID
5725   __kmp_gtid = gtid;
5726 #endif
5727   __kmp_internal_end_thread(gtid);
5728 }
5729 
5730 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
5731 
// 2009-09-08 (lev): It looks like the destructor does not work. In simple test
// cases destructors work perfectly, but in real libomp.so I have no evidence
// it is ever called. However, the -fini linker option in makefile.mk works
// fine.
5735 
5736 __attribute__((destructor)) void __kmp_internal_end_dtor(void) {
5737   __kmp_internal_end_atexit();
5738 }
5739 
5740 void __kmp_internal_end_fini(void) { __kmp_internal_end_atexit(); }
5741 
5742 #endif
5743 
5744 /* [Windows] josh: when the atexit handler is called, there may still be more
5745    than one thread alive */
5746 void __kmp_internal_end_atexit(void) {
5747   KA_TRACE(30, ("__kmp_internal_end_atexit\n"));
5748   /* [Windows]
5749      josh: ideally, we want to completely shutdown the library in this atexit
5750      handler, but stat code that depends on thread specific data for gtid fails
5751      because that data becomes unavailable at some point during the shutdown, so
5752      we call __kmp_internal_end_thread instead. We should eventually remove the
5753      dependency on __kmp_get_specific_gtid in the stat code and use
5754      __kmp_internal_end_library to cleanly shutdown the library.
5755 
5756      // TODO: Can some of this comment about GVS be removed?
5757      I suspect that the offending stat code is executed when the calling thread
5758      tries to clean up a dead root thread's data structures, resulting in GVS
5759      code trying to close the GVS structures for that thread, but since the stat
5760      code uses __kmp_get_specific_gtid to get the gtid with the assumption that
     the calling thread is cleaning up itself instead of another thread, it gets
     confused. This happens because allowing a thread to unregister and clean up
5763      another thread is a recent modification for addressing an issue.
5764      Based on the current design (20050722), a thread may end up
5765      trying to unregister another thread only if thread death does not trigger
5766      the calling of __kmp_internal_end_thread.  For Linux* OS, there is the
5767      thread specific data destructor function to detect thread death. For
5768      Windows dynamic, there is DllMain(THREAD_DETACH). For Windows static, there
5769      is nothing.  Thus, the workaround is applicable only for Windows static
5770      stat library. */
5771   __kmp_internal_end_library(-1);
5772 #if KMP_OS_WINDOWS
5773   __kmp_close_console();
5774 #endif
5775 }
5776 
5777 static void __kmp_reap_thread(kmp_info_t *thread, int is_root) {
5778   // It is assumed __kmp_forkjoin_lock is acquired.
5779 
5780   int gtid;
5781 
5782   KMP_DEBUG_ASSERT(thread != NULL);
5783 
5784   gtid = thread->th.th_info.ds.ds_gtid;
5785 
5786   if (!is_root) {
5787 
5788     if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
5789       /* Assume the threads are at the fork barrier here */
5790       KA_TRACE(
5791           20, ("__kmp_reap_thread: releasing T#%d from fork barrier for reap\n",
5792                gtid));
5793       /* Need release fence here to prevent seg faults for tree forkjoin barrier
5794        * (GEH) */
5795       ANNOTATE_HAPPENS_BEFORE(thread);
5796       kmp_flag_64 flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
5797       __kmp_release_64(&flag);
5798     }
5799 
5800     // Terminate OS thread.
5801     __kmp_reap_worker(thread);
5802 
5803     // The thread was killed asynchronously.  If it was actively
5804     // spinning in the thread pool, decrement the global count.
5805     //
5806     // There is a small timing hole here - if the worker thread was just waking
    // up after sleeping in the pool, had reset its th_active_in_pool flag but
5808     // not decremented the global counter __kmp_thread_pool_active_nth yet, then
5809     // the global counter might not get updated.
5810     //
5811     // Currently, this can only happen as the library is unloaded,
5812     // so there are no harmful side effects.
5813     if (thread->th.th_active_in_pool) {
5814       thread->th.th_active_in_pool = FALSE;
5815       KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
5816       KMP_DEBUG_ASSERT(__kmp_thread_pool_active_nth >= 0);
5817     }
5818 
5819     // Decrement # of [worker] threads in the pool.
5820     KMP_DEBUG_ASSERT(__kmp_thread_pool_nth > 0);
5821     --__kmp_thread_pool_nth;
5822   }
5823 
5824   __kmp_free_implicit_task(thread);
5825 
5826 // Free the fast memory for tasking
5827 #if USE_FAST_MEMORY
5828   __kmp_free_fast_memory(thread);
5829 #endif /* USE_FAST_MEMORY */
5830 
5831   __kmp_suspend_uninitialize_thread(thread);
5832 
5833   KMP_DEBUG_ASSERT(__kmp_threads[gtid] == thread);
5834   TCW_SYNC_PTR(__kmp_threads[gtid], NULL);
5835 
5836   --__kmp_all_nth;
5837 // __kmp_nth was decremented when thread is added to the pool.
5838 
5839 #ifdef KMP_ADJUST_BLOCKTIME
5840   /* Adjust blocktime back to user setting or default if necessary */
5841   /* Middle initialization might never have occurred                */
5842   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5843     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5844     if (__kmp_nth <= __kmp_avail_proc) {
5845       __kmp_zero_bt = FALSE;
5846     }
5847   }
5848 #endif /* KMP_ADJUST_BLOCKTIME */
5849 
5850   /* free the memory being used */
5851   if (__kmp_env_consistency_check) {
5852     if (thread->th.th_cons) {
5853       __kmp_free_cons_stack(thread->th.th_cons);
5854       thread->th.th_cons = NULL;
5855     }
5856   }
5857 
5858   if (thread->th.th_pri_common != NULL) {
5859     __kmp_free(thread->th.th_pri_common);
5860     thread->th.th_pri_common = NULL;
5861   }
5862 
5863   if (thread->th.th_task_state_memo_stack != NULL) {
5864     __kmp_free(thread->th.th_task_state_memo_stack);
5865     thread->th.th_task_state_memo_stack = NULL;
5866   }
5867 
5868 #if KMP_USE_BGET
5869   if (thread->th.th_local.bget_data != NULL) {
5870     __kmp_finalize_bget(thread);
5871   }
5872 #endif
5873 
5874 #if KMP_AFFINITY_SUPPORTED
5875   if (thread->th.th_affin_mask != NULL) {
5876     KMP_CPU_FREE(thread->th.th_affin_mask);
5877     thread->th.th_affin_mask = NULL;
5878   }
5879 #endif /* KMP_AFFINITY_SUPPORTED */
5880 
5881 #if KMP_USE_HIER_SCHED
5882   if (thread->th.th_hier_bar_data != NULL) {
5883     __kmp_free(thread->th.th_hier_bar_data);
5884     thread->th.th_hier_bar_data = NULL;
5885   }
5886 #endif
5887 
5888   __kmp_reap_team(thread->th.th_serial_team);
5889   thread->th.th_serial_team = NULL;
5890   __kmp_free(thread);
5891 
5892   KMP_MB();
5893 
5894 } // __kmp_reap_thread
5895 
5896 static void __kmp_internal_end(void) {
5897   int i;
5898 
5899   /* First, unregister the library */
5900   __kmp_unregister_library();
5901 
5902 #if KMP_OS_WINDOWS
5903   /* In Win static library, we can't tell when a root actually dies, so we
5904      reclaim the data structures for any root threads that have died but not
5905      unregistered themselves, in order to shut down cleanly.
5906      In Win dynamic library we also can't tell when a thread dies.  */
5907   __kmp_reclaim_dead_roots(); // AC: moved here to always clean resources of
5908 // dead roots
5909 #endif
5910 
5911   for (i = 0; i < __kmp_threads_capacity; i++)
5912     if (__kmp_root[i])
5913       if (__kmp_root[i]->r.r_active)
5914         break;
5915   KMP_MB(); /* Flush all pending memory write invalidates.  */
5916   TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
5917 
5918   if (i < __kmp_threads_capacity) {
5919 #if KMP_USE_MONITOR
5920     // 2009-09-08 (lev): Other alive roots found. Why do we kill the monitor??
5921     KMP_MB(); /* Flush all pending memory write invalidates.  */
5922 
5923     // Need to check that monitor was initialized before reaping it. If we are
    // called from __kmp_atfork_child (which sets __kmp_init_parallel = 0), then
5925     // __kmp_monitor will appear to contain valid data, but it is only valid in
5926     // the parent process, not the child.
5927     // New behavior (201008): instead of keying off of the flag
5928     // __kmp_init_parallel, the monitor thread creation is keyed off
5929     // of the new flag __kmp_init_monitor.
5930     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
5931     if (TCR_4(__kmp_init_monitor)) {
5932       __kmp_reap_monitor(&__kmp_monitor);
5933       TCW_4(__kmp_init_monitor, 0);
5934     }
5935     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
5936     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
5937 #endif // KMP_USE_MONITOR
5938   } else {
5939 /* TODO move this to cleanup code */
5940 #ifdef KMP_DEBUG
5941     /* make sure that everything has properly ended */
5942     for (i = 0; i < __kmp_threads_capacity; i++) {
5943       if (__kmp_root[i]) {
5944         //                    KMP_ASSERT( ! KMP_UBER_GTID( i ) );         // AC:
5945         //                    there can be uber threads alive here
5946         KMP_ASSERT(!__kmp_root[i]->r.r_active); // TODO: can they be active?
5947       }
5948     }
5949 #endif
5950 
5951     KMP_MB();
5952 
5953     // Reap the worker threads.
5954     // This is valid for now, but be careful if threads are reaped sooner.
    while (__kmp_thread_pool != NULL) { // Loop over all threads in the pool.
5956       // Get the next thread from the pool.
5957       kmp_info_t *thread = CCAST(kmp_info_t *, __kmp_thread_pool);
5958       __kmp_thread_pool = thread->th.th_next_pool;
5959       // Reap it.
5960       KMP_DEBUG_ASSERT(thread->th.th_reap_state == KMP_SAFE_TO_REAP);
5961       thread->th.th_next_pool = NULL;
5962       thread->th.th_in_pool = FALSE;
5963       __kmp_reap_thread(thread, 0);
5964     }
5965     __kmp_thread_pool_insert_pt = NULL;
5966 
5967     // Reap teams.
    while (__kmp_team_pool != NULL) { // Loop over all teams in the pool.
5969       // Get the next team from the pool.
5970       kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
5971       __kmp_team_pool = team->t.t_next_pool;
5972       // Reap it.
5973       team->t.t_next_pool = NULL;
5974       __kmp_reap_team(team);
5975     }
5976 
5977     __kmp_reap_task_teams();
5978 
5979 #if KMP_OS_UNIX
5980     // Threads that are not reaped should not access any resources since they
5981     // are going to be deallocated soon, so the shutdown sequence should wait
5982     // until all threads either exit the final spin-waiting loop or begin
5983     // sleeping after the given blocktime.
5984     for (i = 0; i < __kmp_threads_capacity; i++) {
5985       kmp_info_t *thr = __kmp_threads[i];
5986       while (thr && KMP_ATOMIC_LD_ACQ(&thr->th.th_blocking))
5987         KMP_CPU_PAUSE();
5988     }
5989 #endif
5990 
5991     for (i = 0; i < __kmp_threads_capacity; ++i) {
5992       // TBD: Add some checking...
5993       // Something like KMP_DEBUG_ASSERT( __kmp_thread[ i ] == NULL );
5994     }
5995 
5996     /* Make sure all threadprivate destructors get run by joining with all
5997        worker threads before resetting this flag */
5998     TCW_SYNC_4(__kmp_init_common, FALSE);
5999 
6000     KA_TRACE(10, ("__kmp_internal_end: all workers reaped\n"));
6001     KMP_MB();
6002 
6003 #if KMP_USE_MONITOR
6004     // See note above: One of the possible fixes for CQ138434 / CQ140126
6005     //
6006     // FIXME: push both code fragments down and CSE them?
6007     // push them into __kmp_cleanup() ?
6008     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
6009     if (TCR_4(__kmp_init_monitor)) {
6010       __kmp_reap_monitor(&__kmp_monitor);
6011       TCW_4(__kmp_init_monitor, 0);
6012     }
6013     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
6014     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
6015 #endif
6016   } /* else !__kmp_global.t_active */
6017   TCW_4(__kmp_init_gtid, FALSE);
6018   KMP_MB(); /* Flush all pending memory write invalidates.  */
6019 
6020   __kmp_cleanup();
6021 #if OMPT_SUPPORT
6022   ompt_fini();
6023 #endif
6024 }
6025 
6026 void __kmp_internal_end_library(int gtid_req) {
6027   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
6028   /* this shouldn't be a race condition because __kmp_internal_end() is the
6029      only place to clear __kmp_serial_init */
6030   /* we'll check this later too, after we get the lock */
6031   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
  // redundant, because the next check will work in any case.
6033   if (__kmp_global.g.g_abort) {
6034     KA_TRACE(11, ("__kmp_internal_end_library: abort, exiting\n"));
6035     /* TODO abort? */
6036     return;
6037   }
6038   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6039     KA_TRACE(10, ("__kmp_internal_end_library: already finished\n"));
6040     return;
6041   }
6042 
6043   KMP_MB(); /* Flush all pending memory write invalidates.  */
6044 
6045   /* find out who we are and what we should do */
6046   {
6047     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6048     KA_TRACE(
6049         10, ("__kmp_internal_end_library: enter T#%d  (%d)\n", gtid, gtid_req));
6050     if (gtid == KMP_GTID_SHUTDOWN) {
6051       KA_TRACE(10, ("__kmp_internal_end_library: !__kmp_init_runtime, system "
6052                     "already shutdown\n"));
6053       return;
6054     } else if (gtid == KMP_GTID_MONITOR) {
6055       KA_TRACE(10, ("__kmp_internal_end_library: monitor thread, gtid not "
6056                     "registered, or system shutdown\n"));
6057       return;
6058     } else if (gtid == KMP_GTID_DNE) {
6059       KA_TRACE(10, ("__kmp_internal_end_library: gtid not registered or system "
6060                     "shutdown\n"));
6061       /* we don't know who we are, but we may still shutdown the library */
6062     } else if (KMP_UBER_GTID(gtid)) {
6063       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6064       if (__kmp_root[gtid]->r.r_active) {
6065         __kmp_global.g.g_abort = -1;
6066         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6067         KA_TRACE(10,
6068                  ("__kmp_internal_end_library: root still active, abort T#%d\n",
6069                   gtid));
6070         return;
6071       } else {
6072         KA_TRACE(
6073             10,
6074             ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid));
6075         __kmp_unregister_root_current_thread(gtid);
6076       }
6077     } else {
6078 /* worker threads may call this function through the atexit handler, if they
6079  * call exit() */
6080 /* For now, skip the usual subsequent processing and just dump the debug buffer.
6081    TODO: do a thorough shutdown instead */
6082 #ifdef DUMP_DEBUG_ON_EXIT
6083       if (__kmp_debug_buf)
6084         __kmp_dump_debug_buffer();
6085 #endif
6086       return;
6087     }
6088   }
6089   /* synchronize the termination process */
6090   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6091 
6092   /* have we already finished */
6093   if (__kmp_global.g.g_abort) {
6094     KA_TRACE(10, ("__kmp_internal_end_library: abort, exiting\n"));
6095     /* TODO abort? */
6096     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6097     return;
6098   }
6099   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6100     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6101     return;
6102   }
6103 
  /* We need this lock to enforce mutual exclusion between this reading of
6105      __kmp_threads_capacity and the writing by __kmp_register_root.
6106      Alternatively, we can use a counter of roots that is atomically updated by
6107      __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
6108      __kmp_internal_end_*.  */
6109   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6110 
6111   /* now we can safely conduct the actual termination */
6112   __kmp_internal_end();
6113 
6114   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6115   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6116 
6117   KA_TRACE(10, ("__kmp_internal_end_library: exit\n"));
6118 
6119 #ifdef DUMP_DEBUG_ON_EXIT
6120   if (__kmp_debug_buf)
6121     __kmp_dump_debug_buffer();
6122 #endif
6123 
6124 #if KMP_OS_WINDOWS
6125   __kmp_close_console();
6126 #endif
6127 
6128   __kmp_fini_allocator();
6129 
6130 } // __kmp_internal_end_library
6131 
6132 void __kmp_internal_end_thread(int gtid_req) {
6133   int i;
6134 
6135   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
6136   /* this shouldn't be a race condition because __kmp_internal_end() is the
6137    * only place to clear __kmp_serial_init */
6138   /* we'll check this later too, after we get the lock */
6139   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
6140   // redundant, because the next check will work in any case.
6141   if (__kmp_global.g.g_abort) {
6142     KA_TRACE(11, ("__kmp_internal_end_thread: abort, exiting\n"));
6143     /* TODO abort? */
6144     return;
6145   }
6146   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6147     KA_TRACE(10, ("__kmp_internal_end_thread: already finished\n"));
6148     return;
6149   }
6150 
6151   KMP_MB(); /* Flush all pending memory write invalidates.  */
6152 
6153   /* find out who we are and what we should do */
6154   {
6155     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6156     KA_TRACE(10,
6157              ("__kmp_internal_end_thread: enter T#%d  (%d)\n", gtid, gtid_req));
6158     if (gtid == KMP_GTID_SHUTDOWN) {
6159       KA_TRACE(10, ("__kmp_internal_end_thread: !__kmp_init_runtime, system "
6160                     "already shutdown\n"));
6161       return;
6162     } else if (gtid == KMP_GTID_MONITOR) {
6163       KA_TRACE(10, ("__kmp_internal_end_thread: monitor thread, gtid not "
6164                     "registered, or system shutdown\n"));
6165       return;
6166     } else if (gtid == KMP_GTID_DNE) {
6167       KA_TRACE(10, ("__kmp_internal_end_thread: gtid not registered or system "
6168                     "shutdown\n"));
6169       return;
6170       /* we don't know who we are */
6171     } else if (KMP_UBER_GTID(gtid)) {
6172       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6173       if (__kmp_root[gtid]->r.r_active) {
6174         __kmp_global.g.g_abort = -1;
6175         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6176         KA_TRACE(10,
6177                  ("__kmp_internal_end_thread: root still active, abort T#%d\n",
6178                   gtid));
6179         return;
6180       } else {
6181         KA_TRACE(10, ("__kmp_internal_end_thread: unregistering sibling T#%d\n",
6182                       gtid));
6183         __kmp_unregister_root_current_thread(gtid);
6184       }
6185     } else {
6186       /* just a worker thread, let's leave */
6187       KA_TRACE(10, ("__kmp_internal_end_thread: worker thread T#%d\n", gtid));
6188 
6189       if (gtid >= 0) {
6190         __kmp_threads[gtid]->th.th_task_team = NULL;
6191       }
6192 
6193       KA_TRACE(10,
6194                ("__kmp_internal_end_thread: worker thread done, exiting T#%d\n",
6195                 gtid));
6196       return;
6197     }
6198   }
6199 #if KMP_DYNAMIC_LIB
  // AC: let's not shut down the Linux* OS dynamic library at the exit of the
  // uber thread, because it is better to shut down later, in the library
  // destructor. The reason for this change is a performance problem seen when
  // a non-OpenMP thread in a loop forks and joins many OpenMP threads. We can
  // save a lot of time by keeping worker threads alive until program shutdown.
  // OM: Removed Linux* OS restriction to fix the crash on OS X* (DPD200239966)
  // and Windows (DPD200287443) that occurs when using critical sections from
  // foreign threads.
6208   KA_TRACE(10, ("__kmp_internal_end_thread: exiting T#%d\n", gtid_req));
6209   return;
6210 #endif
6211   /* synchronize the termination process */
6212   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6213 
6214   /* have we already finished */
6215   if (__kmp_global.g.g_abort) {
6216     KA_TRACE(10, ("__kmp_internal_end_thread: abort, exiting\n"));
6217     /* TODO abort? */
6218     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6219     return;
6220   }
6221   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6222     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6223     return;
6224   }
6225 
  /* We need this lock to enforce mutual exclusion between this reading of
6227      __kmp_threads_capacity and the writing by __kmp_register_root.
6228      Alternatively, we can use a counter of roots that is atomically updated by
6229      __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
6230      __kmp_internal_end_*.  */
6231 
6232   /* should we finish the run-time?  are all siblings done? */
6233   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6234 
6235   for (i = 0; i < __kmp_threads_capacity; ++i) {
6236     if (KMP_UBER_GTID(i)) {
6237       KA_TRACE(
6238           10,
6239           ("__kmp_internal_end_thread: remaining sibling task: gtid==%d\n", i));
6240       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6241       __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6242       return;
6243     }
6244   }
6245 
6246   /* now we can safely conduct the actual termination */
6247 
6248   __kmp_internal_end();
6249 
6250   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6251   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6252 
6253   KA_TRACE(10, ("__kmp_internal_end_thread: exit T#%d\n", gtid_req));
6254 
6255 #ifdef DUMP_DEBUG_ON_EXIT
6256   if (__kmp_debug_buf)
6257     __kmp_dump_debug_buffer();
6258 #endif
6259 } // __kmp_internal_end_thread
6260 
6261 // -----------------------------------------------------------------------------
6262 // Library registration stuff.
6263 
6264 static long __kmp_registration_flag = 0;
6265 // Random value used to indicate library initialization.
6266 static char *__kmp_registration_str = NULL;
6267 // Value to be saved in env var __KMP_REGISTERED_LIB_<pid>.
6268 
6269 static inline char *__kmp_reg_status_name() {
6270   /* On RHEL 3u5 if linked statically, getpid() returns different values in
6271      each thread. If registration and unregistration go in different threads
6272      (omp_misc_other_root_exit.cpp test case), the name of registered_lib_env
     env var cannot be found, because the name will contain a different pid. */
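  // Illustrative example only: for a process with pid 12345 this returns the
  // string "__KMP_REGISTERED_LIB_12345".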
6274   return __kmp_str_format("__KMP_REGISTERED_LIB_%d", (int)getpid());
} // __kmp_reg_status_name
6276 
6277 void __kmp_register_library_startup(void) {
6278 
6279   char *name = __kmp_reg_status_name(); // Name of the environment variable.
6280   int done = 0;
6281   union {
6282     double dtime;
6283     long ltime;
6284   } time;
6285 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6286   __kmp_initialize_system_tick();
6287 #endif
6288   __kmp_read_system_time(&time.dtime);
6289   __kmp_registration_flag = 0xCAFE0000L | (time.ltime & 0x0000FFFFL);
6290   __kmp_registration_str =
6291       __kmp_str_format("%p-%lx-%s", &__kmp_registration_flag,
6292                        __kmp_registration_flag, KMP_LIBRARY_FILE);
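  // Illustrative value only: the resulting string looks something like
  // "0x7f63a2c01230-cafe1a2b-libomp.so" (flag address, flag value, and
  // KMP_LIBRARY_FILE).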
6293 
6294   KA_TRACE(50, ("__kmp_register_library_startup: %s=\"%s\"\n", name,
6295                 __kmp_registration_str));
6296 
6297   while (!done) {
6298 
6299     char *value = NULL; // Actual value of the environment variable.
6300 
    // Set the environment variable, but do not overwrite an existing value.
6302     __kmp_env_set(name, __kmp_registration_str, 0);
    // Check that the variable was written.
6304     value = __kmp_env_get(name);
6305     if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6306 
6307       done = 1; // Ok, environment variable set successfully, exit the loop.
6308 
6309     } else {
6310 
6311       // Oops. Write failed. Another copy of OpenMP RTL is in memory.
      // Check whether it is alive or dead.
6313       int neighbor = 0; // 0 -- unknown status, 1 -- alive, 2 -- dead.
6314       char *tail = value;
6315       char *flag_addr_str = NULL;
6316       char *flag_val_str = NULL;
6317       char const *file_name = NULL;
6318       __kmp_str_split(tail, '-', &flag_addr_str, &tail);
6319       __kmp_str_split(tail, '-', &flag_val_str, &tail);
6320       file_name = tail;
6321       if (tail != NULL) {
6322         long *flag_addr = 0;
6323         long flag_val = 0;
6324         KMP_SSCANF(flag_addr_str, "%p", RCAST(void**, &flag_addr));
6325         KMP_SSCANF(flag_val_str, "%lx", &flag_val);
6326         if (flag_addr != 0 && flag_val != 0 && strcmp(file_name, "") != 0) {
6327           // First, check whether environment-encoded address is mapped into
6328           // addr space.
6329           // If so, dereference it to see if it still has the right value.
6330           if (__kmp_is_address_mapped(flag_addr) && *flag_addr == flag_val) {
6331             neighbor = 1;
6332           } else {
6333             // If not, then we know the other copy of the library is no longer
6334             // running.
6335             neighbor = 2;
6336           }
6337         }
6338       }
6339       switch (neighbor) {
6340       case 0: // Cannot parse environment variable -- neighbor status unknown.
        // Assume it is the incompatible format of a future version of the
        // library, and assume the other library is alive.
6343         // WARN( ... ); // TODO: Issue a warning.
6344         file_name = "unknown library";
      // Attention! Falling through to the next case. That's intentional.
6346       case 1: { // Neighbor is alive.
        // Check whether this is allowed.
6348         char *duplicate_ok = __kmp_env_get("KMP_DUPLICATE_LIB_OK");
6349         if (!__kmp_str_match_true(duplicate_ok)) {
6350           // That's not allowed. Issue fatal error.
6351           __kmp_fatal(KMP_MSG(DuplicateLibrary, KMP_LIBRARY_FILE, file_name),
6352                       KMP_HNT(DuplicateLibrary), __kmp_msg_null);
6353         }
6354         KMP_INTERNAL_FREE(duplicate_ok);
6355         __kmp_duplicate_library_ok = 1;
6356         done = 1; // Exit the loop.
6357       } break;
6358       case 2: { // Neighbor is dead.
6359         // Clear the variable and try to register library again.
6360         __kmp_env_unset(name);
6361       } break;
6362       default: { KMP_DEBUG_ASSERT(0); } break;
6363       }
6364     }
6365     KMP_INTERNAL_FREE((void *)value);
6366   }
6367   KMP_INTERNAL_FREE((void *)name);
6368 
6369 } // func __kmp_register_library_startup
6370 
6371 void __kmp_unregister_library(void) {
6372 
6373   char *name = __kmp_reg_status_name();
6374   char *value = __kmp_env_get(name);
6375 
6376   KMP_DEBUG_ASSERT(__kmp_registration_flag != 0);
6377   KMP_DEBUG_ASSERT(__kmp_registration_str != NULL);
6378   if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6379     // Ok, this is our variable. Delete it.
6380     __kmp_env_unset(name);
6381   }
6382 
6383   KMP_INTERNAL_FREE(__kmp_registration_str);
6384   KMP_INTERNAL_FREE(value);
6385   KMP_INTERNAL_FREE(name);
6386 
6387   __kmp_registration_flag = 0;
6388   __kmp_registration_str = NULL;
6389 
6390 } // __kmp_unregister_library
6391 
6392 // End of Library registration stuff.
6393 // -----------------------------------------------------------------------------
6394 
6395 #if KMP_MIC_SUPPORTED
6396 
6397 static void __kmp_check_mic_type() {
6398   kmp_cpuid_t cpuid_state = {0};
6399   kmp_cpuid_t *cs_p = &cpuid_state;
6400   __kmp_x86_cpuid(1, 0, cs_p);
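  // CPUID leaf 1 returns family/model/stepping in EAX; the masks below appear
  // to select the family/model bits (0xB10 for KNC, 0x50670 for KNL).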
6401   // We don't support mic1 at the moment
6402   if ((cs_p->eax & 0xff0) == 0xB10) {
6403     __kmp_mic_type = mic2;
6404   } else if ((cs_p->eax & 0xf0ff0) == 0x50670) {
6405     __kmp_mic_type = mic3;
6406   } else {
6407     __kmp_mic_type = non_mic;
6408   }
6409 }
6410 
6411 #endif /* KMP_MIC_SUPPORTED */
6412 
6413 static void __kmp_do_serial_initialize(void) {
6414   int i, gtid;
6415   int size;
6416 
6417   KA_TRACE(10, ("__kmp_do_serial_initialize: enter\n"));
6418 
6419   KMP_DEBUG_ASSERT(sizeof(kmp_int32) == 4);
6420   KMP_DEBUG_ASSERT(sizeof(kmp_uint32) == 4);
6421   KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);
6422   KMP_DEBUG_ASSERT(sizeof(kmp_uint64) == 8);
6423   KMP_DEBUG_ASSERT(sizeof(kmp_intptr_t) == sizeof(void *));
6424 
6425 #if OMPT_SUPPORT
6426   ompt_pre_init();
6427 #endif
6428 
6429   __kmp_validate_locks();
6430 
6431   /* Initialize internal memory allocator */
6432   __kmp_init_allocator();
6433 
6434   /* Register the library startup via an environment variable and check to see
6435      whether another copy of the library is already registered. */
6436 
6437   __kmp_register_library_startup();
6438 
6439   /* TODO reinitialization of library */
6440   if (TCR_4(__kmp_global.g.g_done)) {
6441     KA_TRACE(10, ("__kmp_do_serial_initialize: reinitialization of library\n"));
6442   }
6443 
6444   __kmp_global.g.g_abort = 0;
6445   TCW_SYNC_4(__kmp_global.g.g_done, FALSE);
6446 
6447 /* initialize the locks */
6448 #if KMP_USE_ADAPTIVE_LOCKS
6449 #if KMP_DEBUG_ADAPTIVE_LOCKS
6450   __kmp_init_speculative_stats();
6451 #endif
6452 #endif
6453 #if KMP_STATS_ENABLED
6454   __kmp_stats_init();
6455 #endif
6456   __kmp_init_lock(&__kmp_global_lock);
6457   __kmp_init_queuing_lock(&__kmp_dispatch_lock);
6458   __kmp_init_lock(&__kmp_debug_lock);
6459   __kmp_init_atomic_lock(&__kmp_atomic_lock);
6460   __kmp_init_atomic_lock(&__kmp_atomic_lock_1i);
6461   __kmp_init_atomic_lock(&__kmp_atomic_lock_2i);
6462   __kmp_init_atomic_lock(&__kmp_atomic_lock_4i);
6463   __kmp_init_atomic_lock(&__kmp_atomic_lock_4r);
6464   __kmp_init_atomic_lock(&__kmp_atomic_lock_8i);
6465   __kmp_init_atomic_lock(&__kmp_atomic_lock_8r);
6466   __kmp_init_atomic_lock(&__kmp_atomic_lock_8c);
6467   __kmp_init_atomic_lock(&__kmp_atomic_lock_10r);
6468   __kmp_init_atomic_lock(&__kmp_atomic_lock_16r);
6469   __kmp_init_atomic_lock(&__kmp_atomic_lock_16c);
6470   __kmp_init_atomic_lock(&__kmp_atomic_lock_20c);
6471   __kmp_init_atomic_lock(&__kmp_atomic_lock_32c);
6472   __kmp_init_bootstrap_lock(&__kmp_forkjoin_lock);
6473   __kmp_init_bootstrap_lock(&__kmp_exit_lock);
6474 #if KMP_USE_MONITOR
6475   __kmp_init_bootstrap_lock(&__kmp_monitor_lock);
6476 #endif
6477   __kmp_init_bootstrap_lock(&__kmp_tp_cached_lock);
6478 
6479   /* conduct initialization and initial setup of configuration */
6480 
6481   __kmp_runtime_initialize();
6482 
6483 #if KMP_MIC_SUPPORTED
6484   __kmp_check_mic_type();
6485 #endif
6486 
6487 // Some global variable initialization moved here from kmp_env_initialize()
6488 #ifdef KMP_DEBUG
6489   kmp_diag = 0;
6490 #endif
6491   __kmp_abort_delay = 0;
6492 
6493   // From __kmp_init_dflt_team_nth()
6494   /* assume the entire machine will be used */
6495   __kmp_dflt_team_nth_ub = __kmp_xproc;
6496   if (__kmp_dflt_team_nth_ub < KMP_MIN_NTH) {
6497     __kmp_dflt_team_nth_ub = KMP_MIN_NTH;
6498   }
6499   if (__kmp_dflt_team_nth_ub > __kmp_sys_max_nth) {
6500     __kmp_dflt_team_nth_ub = __kmp_sys_max_nth;
6501   }
6502   __kmp_max_nth = __kmp_sys_max_nth;
6503   __kmp_cg_max_nth = __kmp_sys_max_nth;
6504   __kmp_teams_max_nth = __kmp_xproc; // set a "reasonable" default
6505   if (__kmp_teams_max_nth > __kmp_sys_max_nth) {
6506     __kmp_teams_max_nth = __kmp_sys_max_nth;
6507   }
6508 
6509   // Three vars below moved here from __kmp_env_initialize() "KMP_BLOCKTIME"
6510   // part
6511   __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
6512 #if KMP_USE_MONITOR
6513   __kmp_monitor_wakeups =
6514       KMP_WAKEUPS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6515   __kmp_bt_intervals =
6516       KMP_INTERVALS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6517 #endif
6518   // From "KMP_LIBRARY" part of __kmp_env_initialize()
6519   __kmp_library = library_throughput;
6520   // From KMP_SCHEDULE initialization
6521   __kmp_static = kmp_sch_static_balanced;
// AC: do not use analytical here, because it is non-monotonic
6523 //__kmp_guided = kmp_sch_guided_iterative_chunked;
6524 //__kmp_auto = kmp_sch_guided_analytical_chunked; // AC: it is the default, no
6525 // need to repeat assignment
6526 // Barrier initialization. Moved here from __kmp_env_initialize() Barrier branch
6527 // bit control and barrier method control parts
6528 #if KMP_FAST_REDUCTION_BARRIER
6529 #define kmp_reduction_barrier_gather_bb ((int)1)
6530 #define kmp_reduction_barrier_release_bb ((int)1)
6531 #define kmp_reduction_barrier_gather_pat bp_hyper_bar
6532 #define kmp_reduction_barrier_release_pat bp_hyper_bar
6533 #endif // KMP_FAST_REDUCTION_BARRIER
6534   for (i = bs_plain_barrier; i < bs_last_barrier; i++) {
6535     __kmp_barrier_gather_branch_bits[i] = __kmp_barrier_gather_bb_dflt;
6536     __kmp_barrier_release_branch_bits[i] = __kmp_barrier_release_bb_dflt;
6537     __kmp_barrier_gather_pattern[i] = __kmp_barrier_gather_pat_dflt;
6538     __kmp_barrier_release_pattern[i] = __kmp_barrier_release_pat_dflt;
6539 #if KMP_FAST_REDUCTION_BARRIER
6540     if (i == bs_reduction_barrier) { // tested and confirmed on ALTIX only (
6541       // lin_64 ): hyper,1
6542       __kmp_barrier_gather_branch_bits[i] = kmp_reduction_barrier_gather_bb;
6543       __kmp_barrier_release_branch_bits[i] = kmp_reduction_barrier_release_bb;
6544       __kmp_barrier_gather_pattern[i] = kmp_reduction_barrier_gather_pat;
6545       __kmp_barrier_release_pattern[i] = kmp_reduction_barrier_release_pat;
6546     }
6547 #endif // KMP_FAST_REDUCTION_BARRIER
6548   }
6549 #if KMP_FAST_REDUCTION_BARRIER
6550 #undef kmp_reduction_barrier_release_pat
6551 #undef kmp_reduction_barrier_gather_pat
6552 #undef kmp_reduction_barrier_release_bb
6553 #undef kmp_reduction_barrier_gather_bb
6554 #endif // KMP_FAST_REDUCTION_BARRIER
6555 #if KMP_MIC_SUPPORTED
6556   if (__kmp_mic_type == mic2) { // KNC
6557     // AC: plane=3,2, forkjoin=2,1 are optimal for 240 threads on KNC
6558     __kmp_barrier_gather_branch_bits[bs_plain_barrier] = 3; // plain gather
6559     __kmp_barrier_release_branch_bits[bs_forkjoin_barrier] =
6560         1; // forkjoin release
6561     __kmp_barrier_gather_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6562     __kmp_barrier_release_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6563   }
6564 #if KMP_FAST_REDUCTION_BARRIER
6565   if (__kmp_mic_type == mic2) { // KNC
6566     __kmp_barrier_gather_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6567     __kmp_barrier_release_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6568   }
6569 #endif // KMP_FAST_REDUCTION_BARRIER
6570 #endif // KMP_MIC_SUPPORTED
6571 
6572 // From KMP_CHECKS initialization
6573 #ifdef KMP_DEBUG
6574   __kmp_env_checks = TRUE; /* development versions have the extra checks */
6575 #else
6576   __kmp_env_checks = FALSE; /* port versions do not have the extra checks */
6577 #endif
6578 
6579   // From "KMP_FOREIGN_THREADS_THREADPRIVATE" initialization
6580   __kmp_foreign_tp = TRUE;
6581 
6582   __kmp_global.g.g_dynamic = FALSE;
6583   __kmp_global.g.g_dynamic_mode = dynamic_default;
6584 
6585   __kmp_env_initialize(NULL);
6586 
6587 // Print all messages in message catalog for testing purposes.
6588 #ifdef KMP_DEBUG
6589   char const *val = __kmp_env_get("KMP_DUMP_CATALOG");
6590   if (__kmp_str_match_true(val)) {
6591     kmp_str_buf_t buffer;
6592     __kmp_str_buf_init(&buffer);
6593     __kmp_i18n_dump_catalog(&buffer);
6594     __kmp_printf("%s", buffer.str);
6595     __kmp_str_buf_free(&buffer);
6596   }
6597   __kmp_env_free(&val);
6598 #endif
6599 
6600   __kmp_threads_capacity =
6601       __kmp_initial_threads_capacity(__kmp_dflt_team_nth_ub);
6602   // Moved here from __kmp_env_initialize() "KMP_ALL_THREADPRIVATE" part
6603   __kmp_tp_capacity = __kmp_default_tp_capacity(
6604       __kmp_dflt_team_nth_ub, __kmp_max_nth, __kmp_allThreadsSpecified);
6605 
6606   // If the library is shut down properly, both pools must be NULL. Just in
6607   // case, set them to NULL -- some memory may leak, but subsequent code will
6608   // work even if pools are not freed.
6609   KMP_DEBUG_ASSERT(__kmp_thread_pool == NULL);
6610   KMP_DEBUG_ASSERT(__kmp_thread_pool_insert_pt == NULL);
6611   KMP_DEBUG_ASSERT(__kmp_team_pool == NULL);
6612   __kmp_thread_pool = NULL;
6613   __kmp_thread_pool_insert_pt = NULL;
6614   __kmp_team_pool = NULL;
6615 
6616   /* Allocate all of the variable sized records */
6617   /* NOTE: __kmp_threads_capacity entries are allocated, but the arrays are
6618    * expandable */
6619   /* Since allocation is cache-aligned, just add extra padding at the end */
6620   size =
6621       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * __kmp_threads_capacity +
6622       CACHE_LINE;
6623   __kmp_threads = (kmp_info_t **)__kmp_allocate(size);
6624   __kmp_root = (kmp_root_t **)((char *)__kmp_threads +
6625                                sizeof(kmp_info_t *) * __kmp_threads_capacity);
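  // The single cache-aligned allocation is laid out as
  //   [ kmp_info_t * x capacity | kmp_root_t * x capacity | padding ],
  // with __kmp_root aliasing the second part of the block.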
6626 
6627   /* init thread counts */
6628   KMP_DEBUG_ASSERT(__kmp_all_nth ==
6629                    0); // Asserts fail if the library is reinitializing and
6630   KMP_DEBUG_ASSERT(__kmp_nth == 0); // something was wrong in termination.
6631   __kmp_all_nth = 0;
6632   __kmp_nth = 0;
6633 
6634   /* setup the uber master thread and hierarchy */
6635   gtid = __kmp_register_root(TRUE);
6636   KA_TRACE(10, ("__kmp_do_serial_initialize  T#%d\n", gtid));
6637   KMP_ASSERT(KMP_UBER_GTID(gtid));
6638   KMP_ASSERT(KMP_INITIAL_GTID(gtid));
6639 
6640   KMP_MB(); /* Flush all pending memory write invalidates.  */
6641 
6642   __kmp_common_initialize();
6643 
6644 #if KMP_OS_UNIX
6645   /* invoke the child fork handler */
6646   __kmp_register_atfork();
6647 #endif
6648 
6649 #if !KMP_DYNAMIC_LIB
6650   {
6651     /* Invoke the exit handler when the program finishes, only for static
6652        library. For dynamic library, we already have _fini and DllMain. */
6653     int rc = atexit(__kmp_internal_end_atexit);
6654     if (rc != 0) {
6655       __kmp_fatal(KMP_MSG(FunctionError, "atexit()"), KMP_ERR(rc),
6656                   __kmp_msg_null);
6657     }
6658   }
6659 #endif
6660 
6661 #if KMP_HANDLE_SIGNALS
6662 #if KMP_OS_UNIX
6663   /* NOTE: make sure that this is called before the user installs their own
6664      signal handlers so that the user handlers are called first. this way they
6665      can return false, not call our handler, avoid terminating the library, and
6666      continue execution where they left off. */
6667   __kmp_install_signals(FALSE);
6668 #endif /* KMP_OS_UNIX */
6669 #if KMP_OS_WINDOWS
6670   __kmp_install_signals(TRUE);
6671 #endif /* KMP_OS_WINDOWS */
6672 #endif
6673 
6674   /* we have finished the serial initialization */
6675   __kmp_init_counter++;
6676 
6677   __kmp_init_serial = TRUE;
6678 
6679   if (__kmp_settings) {
6680     __kmp_env_print();
6681   }
6682 
6683 #if OMP_40_ENABLED
6684   if (__kmp_display_env || __kmp_display_env_verbose) {
6685     __kmp_env_print_2();
6686   }
6687 #endif // OMP_40_ENABLED
6688 
6689 #if OMPT_SUPPORT
6690   ompt_post_init();
6691 #endif
6692 
6693   KMP_MB();
6694 
6695   KA_TRACE(10, ("__kmp_do_serial_initialize: exit\n"));
6696 }
6697 
6698 void __kmp_serial_initialize(void) {
6699   if (__kmp_init_serial) {
6700     return;
6701   }
6702   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6703   if (__kmp_init_serial) {
6704     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6705     return;
6706   }
6707   __kmp_do_serial_initialize();
6708   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6709 }
6710 
6711 static void __kmp_do_middle_initialize(void) {
6712   int i, j;
6713   int prev_dflt_team_nth;
6714 
6715   if (!__kmp_init_serial) {
6716     __kmp_do_serial_initialize();
6717   }
6718 
6719   KA_TRACE(10, ("__kmp_middle_initialize: enter\n"));
6720 
6721   // Save the previous value for the __kmp_dflt_team_nth so that
6722   // we can avoid some reinitialization if it hasn't changed.
6723   prev_dflt_team_nth = __kmp_dflt_team_nth;
6724 
6725 #if KMP_AFFINITY_SUPPORTED
6726   // __kmp_affinity_initialize() will try to set __kmp_ncores to the
6727   // number of cores on the machine.
6728   __kmp_affinity_initialize();
6729 
6730   // Run through the __kmp_threads array and set the affinity mask
6731   // for each root thread that is currently registered with the RTL.
6732   for (i = 0; i < __kmp_threads_capacity; i++) {
6733     if (TCR_PTR(__kmp_threads[i]) != NULL) {
6734       __kmp_affinity_set_init_mask(i, TRUE);
6735     }
6736   }
6737 #endif /* KMP_AFFINITY_SUPPORTED */
6738 
6739   KMP_ASSERT(__kmp_xproc > 0);
6740   if (__kmp_avail_proc == 0) {
6741     __kmp_avail_proc = __kmp_xproc;
6742   }
6743 
6744   // If there were empty places in num_threads list (OMP_NUM_THREADS=,,2,3),
6745   // correct them now
6746   j = 0;
6747   while ((j < __kmp_nested_nth.used) && !__kmp_nested_nth.nth[j]) {
6748     __kmp_nested_nth.nth[j] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub =
6749         __kmp_avail_proc;
6750     j++;
6751   }
6752 
6753   if (__kmp_dflt_team_nth == 0) {
6754 #ifdef KMP_DFLT_NTH_CORES
6755     // Default #threads = #cores
6756     __kmp_dflt_team_nth = __kmp_ncores;
6757     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6758                   "__kmp_ncores (%d)\n",
6759                   __kmp_dflt_team_nth));
6760 #else
6761     // Default #threads = #available OS procs
6762     __kmp_dflt_team_nth = __kmp_avail_proc;
6763     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6764                   "__kmp_avail_proc(%d)\n",
6765                   __kmp_dflt_team_nth));
6766 #endif /* KMP_DFLT_NTH_CORES */
6767   }
6768 
6769   if (__kmp_dflt_team_nth < KMP_MIN_NTH) {
6770     __kmp_dflt_team_nth = KMP_MIN_NTH;
6771   }
6772   if (__kmp_dflt_team_nth > __kmp_sys_max_nth) {
6773     __kmp_dflt_team_nth = __kmp_sys_max_nth;
6774   }
6775 
6776   // There's no harm in continuing if the following check fails,
6777   // but it indicates an error in the previous logic.
6778   KMP_DEBUG_ASSERT(__kmp_dflt_team_nth <= __kmp_dflt_team_nth_ub);
6779 
6780   if (__kmp_dflt_team_nth != prev_dflt_team_nth) {
6781     // Run through the __kmp_threads array and set the num threads icv for each
6782     // root thread that is currently registered with the RTL (which has not
6783     // already explicitly set its nthreads-var with a call to
6784     // omp_set_num_threads()).
6785     for (i = 0; i < __kmp_threads_capacity; i++) {
6786       kmp_info_t *thread = __kmp_threads[i];
6787       if (thread == NULL)
6788         continue;
6789       if (thread->th.th_current_task->td_icvs.nproc != 0)
6790         continue;
6791 
6792       set__nproc(__kmp_threads[i], __kmp_dflt_team_nth);
6793     }
6794   }
6795   KA_TRACE(
6796       20,
6797       ("__kmp_middle_initialize: final value for __kmp_dflt_team_nth = %d\n",
6798        __kmp_dflt_team_nth));
6799 
6800 #ifdef KMP_ADJUST_BLOCKTIME
6801   /* Adjust blocktime to zero if necessary  now that __kmp_avail_proc is set */
6802   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
6803     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
6804     if (__kmp_nth > __kmp_avail_proc) {
6805       __kmp_zero_bt = TRUE;
6806     }
6807   }
6808 #endif /* KMP_ADJUST_BLOCKTIME */
6809 
6810   /* we have finished middle initialization */
6811   TCW_SYNC_4(__kmp_init_middle, TRUE);
6812 
6813   KA_TRACE(10, ("__kmp_do_middle_initialize: exit\n"));
6814 }
6815 
6816 void __kmp_middle_initialize(void) {
6817   if (__kmp_init_middle) {
6818     return;
6819   }
6820   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6821   if (__kmp_init_middle) {
6822     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6823     return;
6824   }
6825   __kmp_do_middle_initialize();
6826   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6827 }
6828 
6829 void __kmp_parallel_initialize(void) {
6830   int gtid = __kmp_entry_gtid(); // this might be a new root
6831 
6832   /* synchronize parallel initialization (for sibling) */
6833   if (TCR_4(__kmp_init_parallel))
6834     return;
6835   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6836   if (TCR_4(__kmp_init_parallel)) {
6837     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6838     return;
6839   }
6840 
6841   /* TODO reinitialization after we have already shut down */
6842   if (TCR_4(__kmp_global.g.g_done)) {
6843     KA_TRACE(
6844         10,
6845         ("__kmp_parallel_initialize: attempt to init while shutting down\n"));
6846     __kmp_infinite_loop();
6847   }
6848 
6849   /* jc: The lock __kmp_initz_lock is already held, so calling
6850      __kmp_serial_initialize would cause a deadlock.  So we call
6851      __kmp_do_serial_initialize directly. */
6852   if (!__kmp_init_middle) {
6853     __kmp_do_middle_initialize();
6854   }
6855 
6856   /* begin initialization */
6857   KA_TRACE(10, ("__kmp_parallel_initialize: enter\n"));
6858   KMP_ASSERT(KMP_UBER_GTID(gtid));
6859 
6860 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6861   // Save the FP control regs.
6862   // Worker threads will set theirs to these values at thread startup.
6863   __kmp_store_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
6864   __kmp_store_mxcsr(&__kmp_init_mxcsr);
6865   __kmp_init_mxcsr &= KMP_X86_MXCSR_MASK;
6866 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
6867 
6868 #if KMP_OS_UNIX
6869 #if KMP_HANDLE_SIGNALS
6870   /*  must be after __kmp_serial_initialize  */
6871   __kmp_install_signals(TRUE);
6872 #endif
6873 #endif
6874 
6875   __kmp_suspend_initialize();
6876 
6877 #if defined(USE_LOAD_BALANCE)
6878   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6879     __kmp_global.g.g_dynamic_mode = dynamic_load_balance;
6880   }
6881 #else
6882   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6883     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
6884   }
6885 #endif
6886 
6887   if (__kmp_version) {
6888     __kmp_print_version_2();
6889   }
6890 
6891   /* we have finished parallel initialization */
6892   TCW_SYNC_4(__kmp_init_parallel, TRUE);
6893 
6894   KMP_MB();
6895   KA_TRACE(10, ("__kmp_parallel_initialize: exit\n"));
6896 
6897   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6898 }
6899 
6900 /* ------------------------------------------------------------------------ */
6901 
6902 void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
6903                                    kmp_team_t *team) {
6904   kmp_disp_t *dispatch;
6905 
6906   KMP_MB();
6907 
6908   /* none of the threads have encountered any constructs, yet. */
6909   this_thr->th.th_local.this_construct = 0;
6910 #if KMP_CACHE_MANAGE
6911   KMP_CACHE_PREFETCH(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_arrived);
6912 #endif /* KMP_CACHE_MANAGE */
6913   dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch);
6914   KMP_DEBUG_ASSERT(dispatch);
6915   KMP_DEBUG_ASSERT(team->t.t_dispatch);
6916   // KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[
6917   // this_thr->th.th_info.ds.ds_tid ] );
6918 
6919   dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */
6920 #if OMP_45_ENABLED
6921   dispatch->th_doacross_buf_idx =
6922       0; /* reset the doacross dispatch buffer counter */
6923 #endif
6924   if (__kmp_env_consistency_check)
6925     __kmp_push_parallel(gtid, team->t.t_ident);
6926 
6927   KMP_MB(); /* Flush all pending memory write invalidates.  */
6928 }
6929 
6930 void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
6931                                   kmp_team_t *team) {
6932   if (__kmp_env_consistency_check)
6933     __kmp_pop_parallel(gtid, team->t.t_ident);
6934 
6935   __kmp_finish_implicit_task(this_thr);
6936 }
6937 
6938 int __kmp_invoke_task_func(int gtid) {
6939   int rc;
6940   int tid = __kmp_tid_from_gtid(gtid);
6941   kmp_info_t *this_thr = __kmp_threads[gtid];
6942   kmp_team_t *team = this_thr->th.th_team;
6943 
6944   __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
6945 #if USE_ITT_BUILD
6946   if (__itt_stack_caller_create_ptr) {
6947     __kmp_itt_stack_callee_enter(
6948         (__itt_caller)
6949             team->t.t_stack_id); // inform ittnotify about entering user's code
6950   }
6951 #endif /* USE_ITT_BUILD */
6952 #if INCLUDE_SSC_MARKS
6953   SSC_MARK_INVOKING();
6954 #endif
6955 
6956 #if OMPT_SUPPORT
6957   void *dummy;
6958   void **exit_runtime_p;
6959   ompt_data_t *my_task_data;
6960   ompt_data_t *my_parallel_data;
6961   int ompt_team_size;
6962 
6963   if (ompt_enabled.enabled) {
6964     exit_runtime_p = &(
6965         team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame.exit_frame);
6966   } else {
6967     exit_runtime_p = &dummy;
6968   }
6969 
6970   my_task_data =
6971       &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
6972   my_parallel_data = &(team->t.ompt_team_info.parallel_data);
6973   if (ompt_enabled.ompt_callback_implicit_task) {
6974     ompt_team_size = team->t.t_nproc;
6975     ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
6976         ompt_scope_begin, my_parallel_data, my_task_data, ompt_team_size,
6977         __kmp_tid_from_gtid(gtid));
6978     OMPT_CUR_TASK_INFO(this_thr)->thread_num = __kmp_tid_from_gtid(gtid);
6979   }
6980 #endif
6981 
6982   {
6983     KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
6984     KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
6985     rc =
6986         __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
6987                                tid, (int)team->t.t_argc, (void **)team->t.t_argv
6988 #if OMPT_SUPPORT
6989                                ,
6990                                exit_runtime_p
6991 #endif
6992                                );
6993 #if OMPT_SUPPORT
6994     *exit_runtime_p = NULL;
6995 #endif
6996   }
6997 
6998 #if USE_ITT_BUILD
6999   if (__itt_stack_caller_create_ptr) {
7000     __kmp_itt_stack_callee_leave(
7001         (__itt_caller)
7002             team->t.t_stack_id); // inform ittnotify about leaving user's code
7003   }
7004 #endif /* USE_ITT_BUILD */
7005   __kmp_run_after_invoked_task(gtid, tid, this_thr, team);
7006 
7007   return rc;
7008 }
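
// The microtask invoked above is the compiler-outlined body of the parallel
// region. Below is a minimal, illustrative sketch of such an outlined
// function ("example_outlined_body" and its captured argument are
// hypothetical, not part of this runtime); it follows the microtask_t calling
// convention: the first two parameters receive the global and bound thread
// ids, the remaining ones are the region's captured variables.
#if 0
static void example_outlined_body(int *global_tid, int *bound_tid,
                                  int *captured_value) {
  (void)bound_tid;
  // Body of "#pragma omp parallel" as outlined by the compiler; every thread
  // of the team executes this with its own gtid.
  KA_TRACE(10, ("example_outlined_body: T#%d sees %d\n", *global_tid,
                *captured_value));
}
#endif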
7009 
7010 #if OMP_40_ENABLED
7011 void __kmp_teams_master(int gtid) {
7012   // This routine is called by the master thread of each team in a teams construct
7013   kmp_info_t *thr = __kmp_threads[gtid];
7014   kmp_team_t *team = thr->th.th_team;
7015   ident_t *loc = team->t.t_ident;
7016   thr->th.th_set_nproc = thr->th.th_teams_size.nth;
7017   KMP_DEBUG_ASSERT(thr->th.th_teams_microtask);
7018   KMP_DEBUG_ASSERT(thr->th.th_set_nproc);
7019   KA_TRACE(20, ("__kmp_teams_master: T#%d, Tid %d, microtask %p\n", gtid,
7020                 __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask));
7021 // Launch the league of teams now, but do not let the workers execute yet
7022 // (they hang on the fork barrier until the next parallel region)
7023 #if INCLUDE_SSC_MARKS
7024   SSC_MARK_FORKING();
7025 #endif
7026   __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
7027                   (microtask_t)thr->th.th_teams_microtask, // "wrapped" task
7028                   VOLATILE_CAST(launch_t) __kmp_invoke_task_func, NULL);
7029 #if INCLUDE_SSC_MARKS
7030   SSC_MARK_JOINING();
7031 #endif
7032 
7033   // AC: last parameter "1" eliminates the join barrier, which won't work because
7034   // the worker threads are in a fork barrier waiting for more parallel regions
7035   __kmp_join_call(loc, gtid
7036 #if OMPT_SUPPORT
7037                   ,
7038                   fork_context_intel
7039 #endif
7040                   ,
7041                   1);
7042 }
7043 
7044 int __kmp_invoke_teams_master(int gtid) {
7045   kmp_info_t *this_thr = __kmp_threads[gtid];
7046   kmp_team_t *team = this_thr->th.th_team;
7047 #if KMP_DEBUG
7048   if (!__kmp_threads[gtid]->th.th_team->t.t_serialized)
7049     KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn ==
7050                      (void *)__kmp_teams_master);
7051 #endif
7052   __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
7053   __kmp_teams_master(gtid);
7054   __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
7055   return 1;
7056 }
7057 #endif /* OMP_40_ENABLED */
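
// User-level view of the teams path above. This is an illustrative sketch
// only; it assumes a compiler/runtime combination that supports a host teams
// region (standardized in OpenMP 5.0) and is not part of this file.
#if 0
#include <omp.h>
#include <stdio.h>
int main(void) {
  // Each team's master reaches __kmp_teams_master(); inner parallel regions
  // are sized from the thread_limit clause (see __kmp_push_num_teams below).
  #pragma omp teams num_teams(2) thread_limit(4)
  printf("team %d of %d\n", omp_get_team_num(), omp_get_num_teams());
  return 0;
}
#endif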
7058 
7059 /* This sets the requested number of threads for the next parallel region
7060    encountered by this team. Since this should be enclosed in the fork/join
7061    critical section, it should avoid race conditions with asymmetrical nested
7062    parallelism. */
7063 
7064 void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) {
7065   kmp_info_t *thr = __kmp_threads[gtid];
7066 
7067   if (num_threads > 0)
7068     thr->th.th_set_nproc = num_threads;
7069 }
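
// Typical use: the compiler lowers the num_threads clause to a call of the
// __kmpc_push_num_threads() entry point (which forwards here) immediately
// before __kmpc_fork_call(). A hedged sketch of that lowering is below;
// "example_outlined_fn" and "example_loc" are hypothetical stand-ins for the
// compiler-generated microtask and location record.
#if 0
extern "C" void example_outlined_fn(kmp_int32 *gtid, kmp_int32 *bound_tid);
static ident_t example_loc = {0, KMP_IDENT_KMPC, 0, 0, ";file;func;0;0;;"};
void example_parallel_num_threads(void) {
  // Roughly what a compiler emits for "#pragma omp parallel num_threads(4)".
  kmp_int32 gtid = __kmpc_global_thread_num(&example_loc);
  __kmpc_push_num_threads(&example_loc, gtid, 4); // request 4 threads
  __kmpc_fork_call(&example_loc, 0, (kmpc_micro)example_outlined_fn);
}
#endif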
7070 
7071 #if OMP_40_ENABLED
7072 
7073 /* This sets the requested number of teams for the teams region and/or
7074    the number of threads for the next parallel region encountered. */
7075 void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
7076                           int num_threads) {
7077   kmp_info_t *thr = __kmp_threads[gtid];
7078   KMP_DEBUG_ASSERT(num_teams >= 0);
7079   KMP_DEBUG_ASSERT(num_threads >= 0);
7080 
7081   if (num_teams == 0)
7082     num_teams = 1; // default number of teams is 1.
7083   if (num_teams > __kmp_teams_max_nth) { // too many teams requested?
7084     if (!__kmp_reserve_warn) {
7085       __kmp_reserve_warn = 1;
7086       __kmp_msg(kmp_ms_warning,
7087                 KMP_MSG(CantFormThrTeam, num_teams, __kmp_teams_max_nth),
7088                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7089     }
7090     num_teams = __kmp_teams_max_nth;
7091   }
7092   // Set number of teams (number of threads in the outer "parallel" of the
7093   // teams)
7094   thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;
7095 
7096   // Remember the number of threads for inner parallel regions
7097   if (num_threads == 0) {
7098     if (!TCR_4(__kmp_init_middle))
7099       __kmp_middle_initialize(); // get __kmp_avail_proc calculated
7100     num_threads = __kmp_avail_proc / num_teams;
7101     if (num_teams * num_threads > __kmp_teams_max_nth) {
7102       // adjust num_threads without a warning since it is not a user setting
7103       num_threads = __kmp_teams_max_nth / num_teams;
7104     }
7105   } else {
7106     if (num_teams * num_threads > __kmp_teams_max_nth) {
7107       int new_threads = __kmp_teams_max_nth / num_teams;
7108       if (!__kmp_reserve_warn) { // user asked for too many threads
7109         __kmp_reserve_warn = 1; // that conflicts with KMP_TEAMS_THREAD_LIMIT
7110         __kmp_msg(kmp_ms_warning,
7111                   KMP_MSG(CantFormThrTeam, num_threads, new_threads),
7112                   KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7113       }
7114       num_threads = new_threads;
7115     }
7116   }
7117   thr->th.th_teams_size.nth = num_threads;
7118 }
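
// Worked example of the sizing logic above (numbers are illustrative):
//   with __kmp_avail_proc = 16 and __kmp_teams_max_nth = 32,
//   num_teams(8) and no thread_limit  -> num_threads = 16 / 8 = 2 (8*2 <= 32);
//   num_teams(8) and thread_limit(8)  -> 8*8 = 64 > 32, so num_threads is
//   reduced to 32 / 8 = 4 and a CantFormThrTeam warning is issued once.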
7119 
7120 // Set the proc_bind var to use in the following parallel region.
7121 void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) {
7122   kmp_info_t *thr = __kmp_threads[gtid];
7123   thr->th.th_set_proc_bind = proc_bind;
7124 }
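
// Typical use: "#pragma omp parallel proc_bind(close)" is lowered by the
// compiler to a call of the __kmpc_push_proc_bind() entry point (passing
// proc_bind_close), which forwards here just before the fork call.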
7125 
7126 #endif /* OMP_40_ENABLED */
7127 
7128 /* Launch the worker threads into the microtask. */
7129 
7130 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
7131   kmp_info_t *this_thr = __kmp_threads[gtid];
7132 
7133 #ifdef KMP_DEBUG
7134   int f;
7135 #endif /* KMP_DEBUG */
7136 
7137   KMP_DEBUG_ASSERT(team);
7138   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7139   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7140   KMP_MB(); /* Flush all pending memory write invalidates.  */
7141 
7142   team->t.t_construct = 0; /* no single directives seen yet */
7143   team->t.t_ordered.dt.t_value =
7144       0; /* thread 0 enters the ordered section first */
7145 
7146   /* Reset the identifiers on the dispatch buffer */
7147   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
7148   if (team->t.t_max_nproc > 1) {
7149     int i;
7150     for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
7151       team->t.t_disp_buffer[i].buffer_index = i;
7152 #if OMP_45_ENABLED
7153       team->t.t_disp_buffer[i].doacross_buf_idx = i;
7154 #endif
7155     }
7156   } else {
7157     team->t.t_disp_buffer[0].buffer_index = 0;
7158 #if OMP_45_ENABLED
7159     team->t.t_disp_buffer[0].doacross_buf_idx = 0;
7160 #endif
7161   }
7162 
7163   KMP_MB(); /* Flush all pending memory write invalidates.  */
7164   KMP_ASSERT(this_thr->th.th_team == team);
7165 
7166 #ifdef KMP_DEBUG
7167   for (f = 0; f < team->t.t_nproc; f++) {
7168     KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
7169                      team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
7170   }
7171 #endif /* KMP_DEBUG */
7172 
7173   /* release the worker threads so they may begin working */
7174   __kmp_fork_barrier(gtid, 0);
7175 }
7176 
7177 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
7178   kmp_info_t *this_thr = __kmp_threads[gtid];
7179 
7180   KMP_DEBUG_ASSERT(team);
7181   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7182   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7183   KMP_MB(); /* Flush all pending memory write invalidates.  */
7184 
7185 /* Join barrier after fork */
7186 
7187 #ifdef KMP_DEBUG
7188   if (__kmp_threads[gtid] &&
7189       __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
7190     __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n", gtid, gtid,
7191                  __kmp_threads[gtid]);
7192     __kmp_printf("__kmp_threads[%d]->th.th_team_nproc=%d, TEAM: %p, "
7193                  "team->t.t_nproc=%d\n",
7194                  gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
7195                  team->t.t_nproc);
7196     __kmp_print_structure();
7197   }
7198   KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
7199                    __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
7200 #endif /* KMP_DEBUG */
7201 
7202   __kmp_join_barrier(gtid); /* wait for everyone */
7203 #if OMPT_SUPPORT
7204   if (ompt_enabled.enabled &&
7205       this_thr->th.ompt_thread_info.state == omp_state_wait_barrier_implicit) {
7206     int ds_tid = this_thr->th.th_info.ds.ds_tid;
7207     ompt_data_t *task_data = OMPT_CUR_TASK_DATA(this_thr);
7208     this_thr->th.ompt_thread_info.state = omp_state_overhead;
7209 #if OMPT_OPTIONAL
7210     void *codeptr = NULL;
7211     if (KMP_MASTER_TID(ds_tid) &&
7212         (ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait) ||
7213          ompt_callbacks.ompt_callback(ompt_callback_sync_region)))
7214       codeptr = OMPT_CUR_TEAM_INFO(this_thr)->master_return_address;
7215 
7216     if (ompt_enabled.ompt_callback_sync_region_wait) {
7217       ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
7218           ompt_sync_region_barrier, ompt_scope_end, NULL, task_data, codeptr);
7219     }
7220     if (ompt_enabled.ompt_callback_sync_region) {
7221       ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
7222           ompt_sync_region_barrier, ompt_scope_end, NULL, task_data, codeptr);
7223     }
7224 #endif
7225     if (!KMP_MASTER_TID(ds_tid) && ompt_enabled.ompt_callback_implicit_task) {
7226       ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7227           ompt_scope_end, NULL, task_data, 0, ds_tid);
7228     }
7229   }
7230 #endif
7231 
7232   KMP_MB(); /* Flush all pending memory write invalidates.  */
7233   KMP_ASSERT(this_thr->th.th_team == team);
7234 }
7235 
7236 /* ------------------------------------------------------------------------ */
7237 
7238 #ifdef USE_LOAD_BALANCE
7239 
7240 // Return the number of worker threads actively spinning in the hot team, if
7241 // we are at the outermost level of parallelism.  Otherwise, return 0.
7242 static int __kmp_active_hot_team_nproc(kmp_root_t *root) {
7243   int i;
7244   int retval;
7245   kmp_team_t *hot_team;
7246 
7247   if (root->r.r_active) {
7248     return 0;
7249   }
7250   hot_team = root->r.r_hot_team;
7251   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
7252     return hot_team->t.t_nproc - 1; // Don't count master thread
7253   }
7254 
7255   // Skip the master thread - it is accounted for elsewhere.
7256   retval = 0;
7257   for (i = 1; i < hot_team->t.t_nproc; i++) {
7258     if (hot_team->t.t_threads[i]->th.th_active) {
7259       retval++;
7260     }
7261   }
7262   return retval;
7263 }
7264 
7265 // Perform an automatic adjustment to the number of
7266 // threads used by the next parallel region.
7267 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) {
7268   int retval;
7269   int pool_active;
7270   int hot_team_active;
7271   int team_curr_active;
7272   int system_active;
7273 
7274   KB_TRACE(20, ("__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", root,
7275                 set_nproc));
7276   KMP_DEBUG_ASSERT(root);
7277   KMP_DEBUG_ASSERT(root->r.r_root_team->t.t_threads[0]
7278                        ->th.th_current_task->td_icvs.dynamic == TRUE);
7279   KMP_DEBUG_ASSERT(set_nproc > 1);
7280 
7281   if (set_nproc == 1) {
7282     KB_TRACE(20, ("__kmp_load_balance_nproc: serial execution.\n"));
7283     return 1;
7284   }
7285 
7286   // Threads that are active in the thread pool, active in the hot team for
7287   // this particular root (if we are at the outermost parallel level), and the
7288   // currently executing thread (which will become the master) are available
7289   // to add to the new team, but are currently contributing to the system
7290   // load, and must be accounted for.
7291   pool_active = __kmp_thread_pool_active_nth;
7292   hot_team_active = __kmp_active_hot_team_nproc(root);
7293   team_curr_active = pool_active + hot_team_active + 1;
7294 
7295   // Check the system load.
7296   system_active = __kmp_get_load_balance(__kmp_avail_proc + team_curr_active);
7297   KB_TRACE(30, ("__kmp_load_balance_nproc: system active = %d pool active = %d "
7298                 "hot team active = %d\n",
7299                 system_active, pool_active, hot_team_active));
7300 
7301   if (system_active < 0) {
7302     // There was an error reading the necessary info from /proc, so use the
7303     // thread limit algorithm instead. Once we set __kmp_global.g.g_dynamic_mode
7304     // = dynamic_thread_limit, we shouldn't wind up getting back here.
7305     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
7306     KMP_WARNING(CantLoadBalUsing, "KMP_DYNAMIC_MODE=thread limit");
7307 
7308     // Make this call behave like the thread limit algorithm.
7309     retval = __kmp_avail_proc - __kmp_nth +
7310              (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
7311     if (retval > set_nproc) {
7312       retval = set_nproc;
7313     }
7314     if (retval < KMP_MIN_NTH) {
7315       retval = KMP_MIN_NTH;
7316     }
7317 
7318     KB_TRACE(20, ("__kmp_load_balance_nproc: thread limit exit. retval:%d\n",
7319                   retval));
7320     return retval;
7321   }
7322 
7323   // There is a slight delay in the load balance algorithm in detecting new
7324   // running procs. The real system load at this instant should be at least as
7325   // large as the number of active OMP threads that can be added to the team.
7326   if (system_active < team_curr_active) {
7327     system_active = team_curr_active;
7328   }
7329   retval = __kmp_avail_proc - system_active + team_curr_active;
7330   if (retval > set_nproc) {
7331     retval = set_nproc;
7332   }
7333   if (retval < KMP_MIN_NTH) {
7334     retval = KMP_MIN_NTH;
7335   }
7336 
7337   KB_TRACE(20, ("__kmp_load_balance_nproc: exit. retval:%d\n", retval));
7338   return retval;
7339 } // __kmp_load_balance_nproc()
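
// Worked example of the formula above (illustrative numbers):
//   __kmp_avail_proc = 8, pool_active = 2, hot_team_active = 3
//     => team_curr_active = 2 + 3 + 1 = 6
//   __kmp_get_load_balance() reports system_active = 7
//     => retval = 8 - 7 + 6 = 7, which is then clamped into
//        [KMP_MIN_NTH, set_nproc].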
7340 
7341 #endif /* USE_LOAD_BALANCE */
7342 
7343 /* ------------------------------------------------------------------------ */
7344 
7345 /* NOTE: this is called with the __kmp_init_lock held */
7346 void __kmp_cleanup(void) {
7347   int f;
7348 
7349   KA_TRACE(10, ("__kmp_cleanup: enter\n"));
7350 
7351   if (TCR_4(__kmp_init_parallel)) {
7352 #if KMP_HANDLE_SIGNALS
7353     __kmp_remove_signals();
7354 #endif
7355     TCW_4(__kmp_init_parallel, FALSE);
7356   }
7357 
7358   if (TCR_4(__kmp_init_middle)) {
7359 #if KMP_AFFINITY_SUPPORTED
7360     __kmp_affinity_uninitialize();
7361 #endif /* KMP_AFFINITY_SUPPORTED */
7362     __kmp_cleanup_hierarchy();
7363     TCW_4(__kmp_init_middle, FALSE);
7364   }
7365 
7366   KA_TRACE(10, ("__kmp_cleanup: go serial cleanup\n"));
7367 
7368   if (__kmp_init_serial) {
7369     __kmp_runtime_destroy();
7370     __kmp_init_serial = FALSE;
7371   }
7372 
7373   __kmp_cleanup_threadprivate_caches();
7374 
7375   for (f = 0; f < __kmp_threads_capacity; f++) {
7376     if (__kmp_root[f] != NULL) {
7377       __kmp_free(__kmp_root[f]);
7378       __kmp_root[f] = NULL;
7379     }
7380   }
7381   __kmp_free(__kmp_threads);
7382   // __kmp_threads and __kmp_root were allocated at once, as a single block, so
7383   // there is no need to free __kmp_root separately.
7384   __kmp_threads = NULL;
7385   __kmp_root = NULL;
7386   __kmp_threads_capacity = 0;
7387 
7388 #if KMP_USE_DYNAMIC_LOCK
7389   __kmp_cleanup_indirect_user_locks();
7390 #else
7391   __kmp_cleanup_user_locks();
7392 #endif
7393 
7394 #if KMP_AFFINITY_SUPPORTED
7395   KMP_INTERNAL_FREE(CCAST(char *, __kmp_cpuinfo_file));
7396   __kmp_cpuinfo_file = NULL;
7397 #endif /* KMP_AFFINITY_SUPPORTED */
7398 
7399 #if KMP_USE_ADAPTIVE_LOCKS
7400 #if KMP_DEBUG_ADAPTIVE_LOCKS
7401   __kmp_print_speculative_stats();
7402 #endif
7403 #endif
7404   KMP_INTERNAL_FREE(__kmp_nested_nth.nth);
7405   __kmp_nested_nth.nth = NULL;
7406   __kmp_nested_nth.size = 0;
7407   __kmp_nested_nth.used = 0;
7408   KMP_INTERNAL_FREE(__kmp_nested_proc_bind.bind_types);
7409   __kmp_nested_proc_bind.bind_types = NULL;
7410   __kmp_nested_proc_bind.size = 0;
7411   __kmp_nested_proc_bind.used = 0;
7412 
7413   __kmp_i18n_catclose();
7414 
7415 #if KMP_USE_HIER_SCHED
7416   __kmp_hier_scheds.deallocate();
7417 #endif
7418 
7419 #if KMP_STATS_ENABLED
7420   __kmp_stats_fini();
7421 #endif
7422 
7423   KA_TRACE(10, ("__kmp_cleanup: exit\n"));
7424 }
7425 
7426 /* ------------------------------------------------------------------------ */
7427 
7428 int __kmp_ignore_mppbeg(void) {
7429   char *env;
7430 
7431   if ((env = getenv("KMP_IGNORE_MPPBEG")) != NULL) {
7432     if (__kmp_str_match_false(env))
7433       return FALSE;
7434   }
7435   // By default __kmpc_begin() is no-op.
7436   return TRUE;
7437 }
7438 
7439 int __kmp_ignore_mppend(void) {
7440   char *env;
7441 
7442   if ((env = getenv("KMP_IGNORE_MPPEND")) != NULL) {
7443     if (__kmp_str_match_false(env))
7444       return FALSE;
7445   }
7446   // By default __kmpc_end() is no-op.
7447   return TRUE;
7448 }
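
// Example: by default both checks above return TRUE, so __kmpc_begin() and
// __kmpc_end() are no-ops; running with KMP_IGNORE_MPPBEG=false (and/or
// KMP_IGNORE_MPPEND=false) in the environment makes the corresponding entry
// point do its work.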
7449 
7450 void __kmp_internal_begin(void) {
7451   int gtid;
7452   kmp_root_t *root;
7453 
7454   /* This is a very important step: it registers new sibling threads and
7455      assigns these new uber threads a new gtid. */
7456   gtid = __kmp_entry_gtid();
7457   root = __kmp_threads[gtid]->th.th_root;
7458   KMP_ASSERT(KMP_UBER_GTID(gtid));
7459 
7460   if (root->r.r_begin)
7461     return;
7462   __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
7463   if (root->r.r_begin) {
7464     __kmp_release_lock(&root->r.r_begin_lock, gtid);
7465     return;
7466   }
7467 
7468   root->r.r_begin = TRUE;
7469 
7470   __kmp_release_lock(&root->r.r_begin_lock, gtid);
7471 }
7472 
7473 /* ------------------------------------------------------------------------ */
7474 
7475 void __kmp_user_set_library(enum library_type arg) {
7476   int gtid;
7477   kmp_root_t *root;
7478   kmp_info_t *thread;
7479 
7480   /* first, make sure we are initialized so we can get our gtid */
7481 
7482   gtid = __kmp_entry_gtid();
7483   thread = __kmp_threads[gtid];
7484 
7485   root = thread->th.th_root;
7486 
7487   KA_TRACE(20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg,
7488                 library_serial));
7489   if (root->r.r_in_parallel) { /* Must be called in serial section of top-level
7490                                   thread */
7491     KMP_WARNING(SetLibraryIncorrectCall);
7492     return;
7493   }
7494 
7495   switch (arg) {
7496   case library_serial:
7497     thread->th.th_set_nproc = 0;
7498     set__nproc(thread, 1);
7499     break;
7500   case library_turnaround:
7501     thread->th.th_set_nproc = 0;
7502     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7503                                            : __kmp_dflt_team_nth_ub);
7504     break;
7505   case library_throughput:
7506     thread->th.th_set_nproc = 0;
7507     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7508                                            : __kmp_dflt_team_nth_ub);
7509     break;
7510   default:
7511     KMP_FATAL(UnknownLibraryType, arg);
7512   }
7513 
7514   __kmp_aux_set_library(arg);
7515 }
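
// Usage sketch: the kmp_set_library_*() extension entry points exported by
// this runtime eventually reach __kmp_user_set_library(). Illustrative only;
// it assumes those extensions are declared for the application (e.g. via the
// legacy "kmp.h" user header).
#if 0
extern "C" void kmp_set_library_throughput(void);
int main(void) {
  kmp_set_library_throughput(); // selects library_throughput behaviour
  return 0;
}
#endif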
7516 
7517 void __kmp_aux_set_stacksize(size_t arg) {
7518   if (!__kmp_init_serial)
7519     __kmp_serial_initialize();
7520 
7521 #if KMP_OS_DARWIN
7522   if (arg & (0x1000 - 1)) {
7523     arg &= ~(0x1000 - 1);
7524     if (arg + 0x1000) /* check for overflow if we round up */
7525       arg += 0x1000;
7526   }
7527 #endif
7528   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
7529 
7530   /* only change the default stacksize before the first parallel region */
7531   if (!TCR_4(__kmp_init_parallel)) {
7532     size_t value = arg; /* argument is in bytes */
7533 
7534     if (value < __kmp_sys_min_stksize)
7535       value = __kmp_sys_min_stksize;
7536     else if (value > KMP_MAX_STKSIZE)
7537       value = KMP_MAX_STKSIZE;
7538 
7539     __kmp_stksize = value;
7540 
7541     __kmp_env_stksize = TRUE; /* behave as if KMP_STACKSIZE had been specified */
7542   }
7543 
7544   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7545 }
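
// Worked example of the adjustment above: on macOS a request of 0x12345 bytes
// is first rounded up to a 4 KiB multiple (0x13000), then clamped into
// [__kmp_sys_min_stksize, KMP_MAX_STKSIZE]; the new default only takes effect
// if no parallel region has been started yet.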
7546 
7547 /* set the behaviour of the runtime library */
7548 /* TODO this can cause some odd behaviour with sibling parallelism... */
7549 void __kmp_aux_set_library(enum library_type arg) {
7550   __kmp_library = arg;
7551 
7552   switch (__kmp_library) {
7553   case library_serial: {
7554     KMP_INFORM(LibraryIsSerial);
7555     (void)__kmp_change_library(TRUE);
7556   } break;
7557   case library_turnaround:
7558     (void)__kmp_change_library(TRUE);
7559     break;
7560   case library_throughput:
7561     (void)__kmp_change_library(FALSE);
7562     break;
7563   default:
7564     KMP_FATAL(UnknownLibraryType, arg);
7565   }
7566 }
7567 
7568 /* ------------------------------------------------------------------------ */
7569 
7570 void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid) {
7571   int blocktime = arg; /* argument is in milliseconds */
7572 #if KMP_USE_MONITOR
7573   int bt_intervals;
7574 #endif
7575   int bt_set;
7576 
7577   __kmp_save_internal_controls(thread);
7578 
7579   /* Normalize and set blocktime for the teams */
7580   if (blocktime < KMP_MIN_BLOCKTIME)
7581     blocktime = KMP_MIN_BLOCKTIME;
7582   else if (blocktime > KMP_MAX_BLOCKTIME)
7583     blocktime = KMP_MAX_BLOCKTIME;
7584 
7585   set__blocktime_team(thread->th.th_team, tid, blocktime);
7586   set__blocktime_team(thread->th.th_serial_team, 0, blocktime);
7587 
7588 #if KMP_USE_MONITOR
7589   /* Calculate and set blocktime intervals for the teams */
7590   bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups);
7591 
7592   set__bt_intervals_team(thread->th.th_team, tid, bt_intervals);
7593   set__bt_intervals_team(thread->th.th_serial_team, 0, bt_intervals);
7594 #endif
7595 
7596   /* Record that the blocktime has been explicitly set */
7597   bt_set = TRUE;
7598 
7599   set__bt_set_team(thread->th.th_team, tid, bt_set);
7600   set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
7601 #if KMP_USE_MONITOR
7602   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, "
7603                 "bt_intervals=%d, monitor_updates=%d\n",
7604                 __kmp_gtid_from_tid(tid, thread->th.th_team),
7605                 thread->th.th_team->t.t_id, tid, blocktime, bt_intervals,
7606                 __kmp_monitor_wakeups));
7607 #else
7608   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d\n",
7609                 __kmp_gtid_from_tid(tid, thread->th.th_team),
7610                 thread->th.th_team->t.t_id, tid, blocktime));
7611 #endif
7612 }
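
// Usage sketch: kmp_set_blocktime() is the user-level entry that ends up here;
// the value is in milliseconds and is clamped to
// [KMP_MIN_BLOCKTIME, KMP_MAX_BLOCKTIME]. Illustrative only (it assumes the
// extension is declared for the application, e.g. via the legacy "kmp.h"
// user header).
#if 0
extern "C" void kmp_set_blocktime(int msec);
int main(void) {
  kmp_set_blocktime(0); // idle workers stop spinning right after each region
  return 0;
}
#endif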
7613 
7614 void __kmp_aux_set_defaults(char const *str, int len) {
7615   if (!__kmp_init_serial) {
7616     __kmp_serial_initialize();
7617   }
7618   __kmp_env_initialize(str);
7619 
7620   if (__kmp_settings
7621 #if OMP_40_ENABLED
7622       || __kmp_display_env || __kmp_display_env_verbose
7623 #endif // OMP_40_ENABLED
7624       ) {
7625     __kmp_env_print();
7626   }
7627 } // __kmp_aux_set_defaults
7628 
7629 /* ------------------------------------------------------------------------ */
7630 /* internal fast reduction routines */
7631 
7632 PACKED_REDUCTION_METHOD_T
7633 __kmp_determine_reduction_method(
7634     ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
7635     void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
7636     kmp_critical_name *lck) {
7637 
7638   // Default reduction method: critical construct (lck != NULL, as in the
7639   // current PAROPT).
7640   // If (reduce_data != NULL && reduce_func != NULL): the tree-reduction
7641   // method can be selected by the RTL.
7642   // If loc->flags contains KMP_IDENT_ATOMIC_REDUCE, the atomic reduce method
7643   // can be selected by the RTL.
7644   // Finally, it is up to the OpenMP RTL to decide which of the methods
7645   // generated by PAROPT to select.
7646 
7647   PACKED_REDUCTION_METHOD_T retval;
7648 
7649   int team_size;
7650 
7651   KMP_DEBUG_ASSERT(loc); // it would be nice to test ( loc != 0 )
7652   KMP_DEBUG_ASSERT(lck); // it would be nice to test ( lck != 0 )
7653 
7654 #define FAST_REDUCTION_ATOMIC_METHOD_GENERATED                                 \
7655   ((loc->flags & (KMP_IDENT_ATOMIC_REDUCE)) == (KMP_IDENT_ATOMIC_REDUCE))
7656 #define FAST_REDUCTION_TREE_METHOD_GENERATED ((reduce_data) && (reduce_func))
7657 
7658   retval = critical_reduce_block;
7659 
7660   // another way of getting the team size (with one dynamic dereference) is slower
7661   team_size = __kmp_get_team_num_threads(global_tid);
7662   if (team_size == 1) {
7663 
7664     retval = empty_reduce_block;
7665 
7666   } else {
7667 
7668     int atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
7669 
7670 #if KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS64
7671 
7672 #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_WINDOWS ||       \
7673     KMP_OS_DARWIN || KMP_OS_HURD
7674 
7675     int teamsize_cutoff = 4;
7676 
7677 #if KMP_MIC_SUPPORTED
7678     if (__kmp_mic_type != non_mic) {
7679       teamsize_cutoff = 8;
7680     }
7681 #endif
7682     int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
7683     if (tree_available) {
7684       if (team_size <= teamsize_cutoff) {
7685         if (atomic_available) {
7686           retval = atomic_reduce_block;
7687         }
7688       } else {
7689         retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
7690       }
7691     } else if (atomic_available) {
7692       retval = atomic_reduce_block;
7693     }
7694 #else
7695 #error "Unknown or unsupported OS"
7696 #endif // KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_WINDOWS ||
7697 // KMP_OS_DARWIN
7698 
7699 #elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS
7700 
7701 #if KMP_OS_LINUX || KMP_OS_WINDOWS || KMP_OS_HURD
7702 
7703     // basic tuning
7704 
7705     if (atomic_available) {
7706       if (num_vars <= 2) { // && ( team_size <= 8 ) due to false-sharing ???
7707         retval = atomic_reduce_block;
7708       }
7709     } // otherwise: use critical section
7710 
7711 #elif KMP_OS_DARWIN
7712 
7713     int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
7714     if (atomic_available && (num_vars <= 3)) {
7715       retval = atomic_reduce_block;
7716     } else if (tree_available) {
7717       if ((reduce_size > (9 * sizeof(kmp_real64))) &&
7718           (reduce_size < (2000 * sizeof(kmp_real64)))) {
7719         retval = TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER;
7720       }
7721     } // otherwise: use critical section
7722 
7723 #else
7724 #error "Unknown or unsupported OS"
7725 #endif
7726 
7727 #else
7728 #error "Unknown or unsupported architecture"
7729 #endif
7730   }
7731 
7732   // KMP_FORCE_REDUCTION
7733 
7734   // If the team is serialized (team_size == 1), ignore the forced reduction
7735   // method and stay with the unsynchronized method (empty_reduce_block)
7736   if (__kmp_force_reduction_method != reduction_method_not_defined &&
7737       team_size != 1) {
7738 
7739     PACKED_REDUCTION_METHOD_T forced_retval = critical_reduce_block;
7740 
7741     int atomic_available, tree_available;
7742 
7743     switch ((forced_retval = __kmp_force_reduction_method)) {
7744     case critical_reduce_block:
7745       KMP_ASSERT(lck); // lck should be != 0
7746       break;
7747 
7748     case atomic_reduce_block:
7749       atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
7750       if (!atomic_available) {
7751         KMP_WARNING(RedMethodNotSupported, "atomic");
7752         forced_retval = critical_reduce_block;
7753       }
7754       break;
7755 
7756     case tree_reduce_block:
7757       tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
7758       if (!tree_available) {
7759         KMP_WARNING(RedMethodNotSupported, "tree");
7760         forced_retval = critical_reduce_block;
7761       } else {
7762 #if KMP_FAST_REDUCTION_BARRIER
7763         forced_retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
7764 #endif
7765       }
7766       break;
7767 
7768     default:
7769       KMP_ASSERT(0); // "unsupported method specified"
7770     }
7771 
7772     retval = forced_retval;
7773   }
7774 
7775   KA_TRACE(10, ("reduction method selected=%08x\n", retval));
7776 
7777 #undef FAST_REDUCTION_TREE_METHOD_GENERATED
7778 #undef FAST_REDUCTION_ATOMIC_METHOD_GENERATED
7779 
7780   return (retval);
7781 }
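
// Example outcomes of the selection above on x86_64 Linux (illustrative; no
// KMP_FORCE_REDUCTION, non-MIC, so the team-size cutoff is 4):
//   team_size == 1                                  -> empty_reduce_block
//   tree codegen available and team_size > 4        -> tree reduction with the
//                                                      reduction barrier
//   atomic codegen available and team_size <= 4,
//     or atomic available and tree unavailable      -> atomic_reduce_block
//   everything else                                 -> critical_reduce_block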
7782 
7783 // This function is for testing the set/get/determine reduction method machinery
7784 kmp_int32 __kmp_get_reduce_method(void) {
7785   return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
7786 }
7787