1 /*
2  * kmp_runtime.cpp -- KPTS runtime support library
3  */
4 
5 
6 //===----------------------------------------------------------------------===//
7 //
8 //                     The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 
16 #include "kmp.h"
17 #include "kmp_affinity.h"
18 #include "kmp_atomic.h"
19 #include "kmp_environment.h"
20 #include "kmp_error.h"
21 #include "kmp_i18n.h"
22 #include "kmp_io.h"
23 #include "kmp_itt.h"
24 #include "kmp_settings.h"
25 #include "kmp_stats.h"
26 #include "kmp_str.h"
27 #include "kmp_wait_release.h"
28 #include "kmp_wrapper_getpid.h"
29 
30 #if OMPT_SUPPORT
31 #include "ompt-specific.h"
32 #endif
33 
34 /* these are temporary issues to be dealt with */
35 #define KMP_USE_PRCTL 0
36 
37 #if KMP_OS_WINDOWS
38 #include <process.h>
39 #endif
40 
41 #include "tsan_annotations.h"
42 
43 #if defined(KMP_GOMP_COMPAT)
44 char const __kmp_version_alt_comp[] =
45     KMP_VERSION_PREFIX "alternative compiler support: yes";
46 #endif /* defined(KMP_GOMP_COMPAT) */
47 
48 char const __kmp_version_omp_api[] = KMP_VERSION_PREFIX "API version: "
49 #if OMP_50_ENABLED
50                                                         "5.0 (201611)";
51 #elif OMP_45_ENABLED
52                                                         "4.5 (201511)";
53 #elif OMP_40_ENABLED
54                                                         "4.0 (201307)";
55 #else
56                                                         "3.1 (201107)";
57 #endif
58 
59 #ifdef KMP_DEBUG
60 char const __kmp_version_lock[] =
61     KMP_VERSION_PREFIX "lock type: run time selectable";
62 #endif /* KMP_DEBUG */
63 
64 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
65 
66 /* ------------------------------------------------------------------------ */
67 
68 kmp_info_t __kmp_monitor;
69 
70 /* Forward declarations */
71 
72 void __kmp_cleanup(void);
73 
74 static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *, int tid,
75                                   int gtid);
76 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
77                                   kmp_internal_control_t *new_icvs,
78                                   ident_t *loc);
79 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
80 static void __kmp_partition_places(kmp_team_t *team,
81                                    int update_master_only = 0);
82 #endif
83 static void __kmp_do_serial_initialize(void);
84 void __kmp_fork_barrier(int gtid, int tid);
85 void __kmp_join_barrier(int gtid);
86 void __kmp_setup_icv_copy(kmp_team_t *team, int new_nproc,
87                           kmp_internal_control_t *new_icvs, ident_t *loc);
88 
89 #ifdef USE_LOAD_BALANCE
90 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc);
91 #endif
92 
93 static int __kmp_expand_threads(int nWish, int nNeed);
94 #if KMP_OS_WINDOWS
95 static int __kmp_unregister_root_other_thread(int gtid);
96 #endif
97 static void __kmp_unregister_library(void); // called by __kmp_internal_end()
98 static void __kmp_reap_thread(kmp_info_t *thread, int is_root);
99 static kmp_info_t *__kmp_thread_pool_insert_pt = NULL;
100 
/* Calculate the identifier of the current thread */
/* Fast (and somewhat portable) way to get a unique identifier for the
   executing thread. Returns KMP_GTID_DNE if we haven't been assigned a gtid. */
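/* Resolution order used below:
   1. __kmp_gtid_mode >= 3: read the gtid from thread-local data (__kmp_gtid).
   2. __kmp_gtid_mode >= 2: read the gtid from OS-keyed TLS.
   3. Otherwise: scan __kmp_threads[] and match the current stack address
      against each registered thread's stack window. */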
104 int __kmp_get_global_thread_id() {
105   int i;
106   kmp_info_t **other_threads;
107   size_t stack_data;
108   char *stack_addr;
109   size_t stack_size;
110   char *stack_base;
111 
112   KA_TRACE(
113       1000,
114       ("*** __kmp_get_global_thread_id: entering, nproc=%d  all_nproc=%d\n",
115        __kmp_nth, __kmp_all_nth));
116 
  /* JPH - to handle the case where __kmpc_end(0) is called immediately prior
     to a parallel region, this was made to return KMP_GTID_DNE to force
     serial_initialize by the caller. KMP_GTID_DNE then had to be handled at
     all call sites, or else __kmp_init_gtid guaranteed, for this to work. */
121 
122   if (!TCR_4(__kmp_init_gtid))
123     return KMP_GTID_DNE;
124 
125 #ifdef KMP_TDATA_GTID
126   if (TCR_4(__kmp_gtid_mode) >= 3) {
127     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using TDATA\n"));
128     return __kmp_gtid;
129   }
130 #endif
131   if (TCR_4(__kmp_gtid_mode) >= 2) {
132     KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using keyed TLS\n"));
133     return __kmp_gtid_get_specific();
134   }
135   KA_TRACE(1000, ("*** __kmp_get_global_thread_id: using internal alg.\n"));
136 
137   stack_addr = (char *)&stack_data;
138   other_threads = __kmp_threads;
139 
140   /* ATT: The code below is a source of potential bugs due to unsynchronized
141      access to __kmp_threads array. For example:
142      1. Current thread loads other_threads[i] to thr and checks it, it is
143         non-NULL.
144      2. Current thread is suspended by OS.
145      3. Another thread unregisters and finishes (debug versions of free()
146         may fill memory with something like 0xEF).
147      4. Current thread is resumed.
148      5. Current thread reads junk from *thr.
149      TODO: Fix it.  --ln  */
150 
151   for (i = 0; i < __kmp_threads_capacity; i++) {
152 
153     kmp_info_t *thr = (kmp_info_t *)TCR_SYNC_PTR(other_threads[i]);
154     if (!thr)
155       continue;
156 
157     stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
158     stack_base = (char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);
159 
160     /* stack grows down -- search through all of the active threads */
161 
162     if (stack_addr <= stack_base) {
163       size_t stack_diff = stack_base - stack_addr;
164 
165       if (stack_diff <= stack_size) {
166         /* The only way we can be closer than the allocated */
167         /* stack size is if we are running on this thread. */
168         KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == i);
169         return i;
170       }
171     }
172   }
173 
174   /* get specific to try and determine our gtid */
175   KA_TRACE(1000,
176            ("*** __kmp_get_global_thread_id: internal alg. failed to find "
177             "thread, using TLS\n"));
178   i = __kmp_gtid_get_specific();
179 
180   /*fprintf( stderr, "=== %d\n", i );  */ /* GROO */
181 
  /* if we haven't been assigned a gtid, then return that code */
183   if (i < 0)
184     return i;
185 
186   /* dynamically updated stack window for uber threads to avoid get_specific
187      call */
188   if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) {
189     KMP_FATAL(StackOverflow, i);
190   }
191 
192   stack_base = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
193   if (stack_addr > stack_base) {
194     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
195     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
196             other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr -
197                 stack_base);
198   } else {
199     TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
200             stack_base - stack_addr);
201   }
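  /* After this refinement, the recorded stack window of the uber thread covers
     the current stack address, so later lookups can succeed through the stack
     scan above without calling __kmp_gtid_get_specific(). */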
202 
203   /* Reprint stack bounds for ubermaster since they have been refined */
204   if (__kmp_storage_map) {
205     char *stack_end = (char *)other_threads[i]->th.th_info.ds.ds_stackbase;
206     char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
207     __kmp_print_storage_map_gtid(i, stack_beg, stack_end,
208                                  other_threads[i]->th.th_info.ds.ds_stacksize,
209                                  "th_%d stack (refinement)", i);
210   }
211   return i;
212 }
213 
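/* Note: unlike __kmp_get_global_thread_id() above, this registering variant
   never returns KMP_GTID_DNE: a thread that does not yet have a gtid is
   registered here as a new root (after serial initialization, if needed). */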
214 int __kmp_get_global_thread_id_reg() {
215   int gtid;
216 
217   if (!__kmp_init_serial) {
218     gtid = KMP_GTID_DNE;
219   } else
220 #ifdef KMP_TDATA_GTID
221       if (TCR_4(__kmp_gtid_mode) >= 3) {
222     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using TDATA\n"));
223     gtid = __kmp_gtid;
224   } else
225 #endif
226       if (TCR_4(__kmp_gtid_mode) >= 2) {
227     KA_TRACE(1000, ("*** __kmp_get_global_thread_id_reg: using keyed TLS\n"));
228     gtid = __kmp_gtid_get_specific();
229   } else {
230     KA_TRACE(1000,
231              ("*** __kmp_get_global_thread_id_reg: using internal alg.\n"));
232     gtid = __kmp_get_global_thread_id();
233   }
234 
235   /* we must be a new uber master sibling thread */
236   if (gtid == KMP_GTID_DNE) {
237     KA_TRACE(10,
238              ("__kmp_get_global_thread_id_reg: Encountered new root thread. "
239               "Registering a new gtid.\n"));
240     __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
241     if (!__kmp_init_serial) {
242       __kmp_do_serial_initialize();
243       gtid = __kmp_gtid_get_specific();
244     } else {
245       gtid = __kmp_register_root(FALSE);
246     }
247     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
248     /*__kmp_printf( "+++ %d\n", gtid ); */ /* GROO */
249   }
250 
251   KMP_DEBUG_ASSERT(gtid >= 0);
252 
253   return gtid;
254 }
255 
256 /* caller must hold forkjoin_lock */
257 void __kmp_check_stack_overlap(kmp_info_t *th) {
258   int f;
259   char *stack_beg = NULL;
260   char *stack_end = NULL;
261   int gtid;
262 
263   KA_TRACE(10, ("__kmp_check_stack_overlap: called\n"));
264   if (__kmp_storage_map) {
265     stack_end = (char *)th->th.th_info.ds.ds_stackbase;
266     stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
267 
268     gtid = __kmp_gtid_from_thread(th);
269 
270     if (gtid == KMP_GTID_MONITOR) {
271       __kmp_print_storage_map_gtid(
272           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
273           "th_%s stack (%s)", "mon",
274           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
275     } else {
276       __kmp_print_storage_map_gtid(
277           gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
278           "th_%d stack (%s)", gtid,
279           (th->th.th_info.ds.ds_stackgrow) ? "initial" : "actual");
280     }
281   }
282 
283   /* No point in checking ubermaster threads since they use refinement and
284    * cannot overlap */
285   gtid = __kmp_gtid_from_thread(th);
286   if (__kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) {
287     KA_TRACE(10,
288              ("__kmp_check_stack_overlap: performing extensive checking\n"));
289     if (stack_beg == NULL) {
290       stack_end = (char *)th->th.th_info.ds.ds_stackbase;
291       stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
292     }
293 
294     for (f = 0; f < __kmp_threads_capacity; f++) {
295       kmp_info_t *f_th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[f]);
296 
297       if (f_th && f_th != th) {
298         char *other_stack_end =
299             (char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase);
300         char *other_stack_beg =
301             other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize);
302         if ((stack_beg > other_stack_beg && stack_beg < other_stack_end) ||
303             (stack_end > other_stack_beg && stack_end < other_stack_end)) {
304 
305           /* Print the other stack values before the abort */
306           if (__kmp_storage_map)
307             __kmp_print_storage_map_gtid(
308                 -1, other_stack_beg, other_stack_end,
309                 (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize),
310                 "th_%d stack (overlapped)", __kmp_gtid_from_thread(f_th));
311 
312           __kmp_msg(kmp_ms_fatal, KMP_MSG(StackOverlap),
313                     KMP_HNT(ChangeStackLimit), __kmp_msg_null);
314         }
315       }
316     }
317   }
318   KA_TRACE(10, ("__kmp_check_stack_overlap: returning\n"));
319 }
320 
321 /* ------------------------------------------------------------------------ */
322 
323 void __kmp_infinite_loop(void) {
324   static int done = FALSE;
325 
326   while (!done) {
327     KMP_YIELD(1);
328   }
329 }
330 
331 #define MAX_MESSAGE 512
332 
333 void __kmp_print_storage_map_gtid(int gtid, void *p1, void *p2, size_t size,
334                                   char const *format, ...) {
335   char buffer[MAX_MESSAGE];
336   va_list ap;
337 
338   va_start(ap, format);
339   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP storage map: %p %p%8lu %s\n", p1,
340                p2, (unsigned long)size, format);
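  /* Note: the caller-supplied format string is embedded into "buffer" via the
     trailing %s above; buffer then becomes the format passed to __kmp_vprintf
     together with the caller's variadic arguments. */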
341   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
342   __kmp_vprintf(kmp_err, buffer, ap);
343 #if KMP_PRINT_DATA_PLACEMENT
344   int node;
345   if (gtid >= 0) {
346     if (p1 <= p2 && (char *)p2 - (char *)p1 == size) {
347       if (__kmp_storage_map_verbose) {
348         node = __kmp_get_host_node(p1);
349         if (node < 0) /* doesn't work, so don't try this next time */
350           __kmp_storage_map_verbose = FALSE;
351         else {
352           char *last;
353           int lastNode;
354           int localProc = __kmp_get_cpu_from_gtid(gtid);
355 
356           const int page_size = KMP_GET_PAGE_SIZE();
357 
358           p1 = (void *)((size_t)p1 & ~((size_t)page_size - 1));
359           p2 = (void *)(((size_t)p2 - 1) & ~((size_t)page_size - 1));
360           if (localProc >= 0)
361             __kmp_printf_no_lock("  GTID %d localNode %d\n", gtid,
362                                  localProc >> 1);
363           else
364             __kmp_printf_no_lock("  GTID %d\n", gtid);
365 #if KMP_USE_PRCTL
366           /* The more elaborate format is disabled for now because of the prctl
367            * hanging bug. */
368           do {
369             last = p1;
370             lastNode = node;
371             /* This loop collates adjacent pages with the same host node. */
372             do {
              p1 = (char *)p1 + page_size;
374             } while (p1 <= p2 && (node = __kmp_get_host_node(p1)) == lastNode);
375             __kmp_printf_no_lock("    %p-%p memNode %d\n", last, (char *)p1 - 1,
376                                  lastNode);
377           } while (p1 <= p2);
378 #else
379           __kmp_printf_no_lock("    %p-%p memNode %d\n", p1,
380                                (char *)p1 + (page_size - 1),
381                                __kmp_get_host_node(p1));
382           if (p1 < p2) {
383             __kmp_printf_no_lock("    %p-%p memNode %d\n", p2,
384                                  (char *)p2 + (page_size - 1),
385                                  __kmp_get_host_node(p2));
386           }
387 #endif
388         }
389       }
390     } else
391       __kmp_printf_no_lock("  %s\n", KMP_I18N_STR(StorageMapWarning));
392   }
393 #endif /* KMP_PRINT_DATA_PLACEMENT */
  __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
  va_end(ap);
}
396 
397 void __kmp_warn(char const *format, ...) {
398   char buffer[MAX_MESSAGE];
399   va_list ap;
400 
401   if (__kmp_generate_warnings == kmp_warnings_off) {
402     return;
403   }
404 
405   va_start(ap, format);
406 
407   KMP_SNPRINTF(buffer, sizeof(buffer), "OMP warning: %s\n", format);
408   __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
409   __kmp_vprintf(kmp_err, buffer, ap);
410   __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
411 
412   va_end(ap);
413 }
414 
415 void __kmp_abort_process() {
416   // Later threads may stall here, but that's ok because abort() will kill them.
417   __kmp_acquire_bootstrap_lock(&__kmp_exit_lock);
418 
419   if (__kmp_debug_buf) {
420     __kmp_dump_debug_buffer();
421   }; // if
422 
423   if (KMP_OS_WINDOWS) {
424     // Let other threads know of abnormal termination and prevent deadlock
425     // if abort happened during library initialization or shutdown
426     __kmp_global.g.g_abort = SIGABRT;
427 
    /* On Windows* OS, by default abort() causes a pop-up error box, which
       stalls nightly testing. Unfortunately, we cannot reliably suppress the
       pop-up error boxes. _set_abort_behavior() works well, but this function
       is not available in VS7 (this is not a problem for the DLL, but it is a
       problem for the static OpenMP RTL). SetErrorMode (and so, the timelimit
       utility) does not help, at least in some versions of MS C RTL.

       The following sequence seems to be the only way to simulate abort() and
       avoid the pop-up error box. */
437     raise(SIGABRT);
438     _exit(3); // Just in case, if signal ignored, exit anyway.
439   } else {
440     abort();
441   }; // if
442 
443   __kmp_infinite_loop();
444   __kmp_release_bootstrap_lock(&__kmp_exit_lock);
445 
446 } // __kmp_abort_process
447 
448 void __kmp_abort_thread(void) {
449   // TODO: Eliminate g_abort global variable and this function.
450   // In case of abort just call abort(), it will kill all the threads.
451   __kmp_infinite_loop();
452 } // __kmp_abort_thread
453 
454 /* Print out the storage map for the major kmp_info_t thread data structures
455    that are allocated together. */
456 
457 static void __kmp_print_thread_storage_map(kmp_info_t *thr, int gtid) {
458   __kmp_print_storage_map_gtid(gtid, thr, thr + 1, sizeof(kmp_info_t), "th_%d",
459                                gtid);
460 
461   __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team,
462                                sizeof(kmp_desc_t), "th_%d.th_info", gtid);
463 
464   __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head,
465                                sizeof(kmp_local_t), "th_%d.th_local", gtid);
466 
467   __kmp_print_storage_map_gtid(
468       gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier],
469       sizeof(kmp_balign_t) * bs_last_barrier, "th_%d.th_bar", gtid);
470 
471   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier],
472                                &thr->th.th_bar[bs_plain_barrier + 1],
473                                sizeof(kmp_balign_t), "th_%d.th_bar[plain]",
474                                gtid);
475 
476   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier],
477                                &thr->th.th_bar[bs_forkjoin_barrier + 1],
478                                sizeof(kmp_balign_t), "th_%d.th_bar[forkjoin]",
479                                gtid);
480 
481 #if KMP_FAST_REDUCTION_BARRIER
482   __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier],
483                                &thr->th.th_bar[bs_reduction_barrier + 1],
484                                sizeof(kmp_balign_t), "th_%d.th_bar[reduction]",
485                                gtid);
486 #endif // KMP_FAST_REDUCTION_BARRIER
487 }
488 
489 /* Print out the storage map for the major kmp_team_t team data structures
490    that are allocated together. */
491 
492 static void __kmp_print_team_storage_map(const char *header, kmp_team_t *team,
493                                          int team_id, int num_thr) {
494   int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
495   __kmp_print_storage_map_gtid(-1, team, team + 1, sizeof(kmp_team_t), "%s_%d",
496                                header, team_id);
497 
498   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
499                                &team->t.t_bar[bs_last_barrier],
500                                sizeof(kmp_balign_team_t) * bs_last_barrier,
501                                "%s_%d.t_bar", header, team_id);
502 
503   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
504                                &team->t.t_bar[bs_plain_barrier + 1],
505                                sizeof(kmp_balign_team_t), "%s_%d.t_bar[plain]",
506                                header, team_id);
507 
508   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
509                                &team->t.t_bar[bs_forkjoin_barrier + 1],
510                                sizeof(kmp_balign_team_t),
511                                "%s_%d.t_bar[forkjoin]", header, team_id);
512 
513 #if KMP_FAST_REDUCTION_BARRIER
514   __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
515                                &team->t.t_bar[bs_reduction_barrier + 1],
516                                sizeof(kmp_balign_team_t),
517                                "%s_%d.t_bar[reduction]", header, team_id);
518 #endif // KMP_FAST_REDUCTION_BARRIER
519 
520   __kmp_print_storage_map_gtid(
521       -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
522       sizeof(kmp_disp_t) * num_thr, "%s_%d.t_dispatch", header, team_id);
523 
524   __kmp_print_storage_map_gtid(
525       -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
526       sizeof(kmp_info_t *) * num_thr, "%s_%d.t_threads", header, team_id);
527 
528   __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
529                                &team->t.t_disp_buffer[num_disp_buff],
530                                sizeof(dispatch_shared_info_t) * num_disp_buff,
531                                "%s_%d.t_disp_buffer", header, team_id);
532 
533   __kmp_print_storage_map_gtid(-1, &team->t.t_taskq, &team->t.t_copypriv_data,
534                                sizeof(kmp_taskq_t), "%s_%d.t_taskq", header,
535                                team_id);
536 }
537 
538 static void __kmp_init_allocator() {}
539 static void __kmp_fini_allocator() {}
540 
541 /* ------------------------------------------------------------------------ */
542 
543 #ifdef KMP_DYNAMIC_LIB
544 #if KMP_OS_WINDOWS
545 
546 static void __kmp_reset_lock(kmp_bootstrap_lock_t *lck) {
547   // TODO: Change to __kmp_break_bootstrap_lock().
548   __kmp_init_bootstrap_lock(lck); // make the lock released
549 }
550 
551 static void __kmp_reset_locks_on_process_detach(int gtid_req) {
552   int i;
553   int thread_count;
554 
  // PROCESS_DETACH is expected to be called by a thread that executes
  // ProcessExit() or FreeLibrary(). The OS terminates the other threads
  // (except the one calling ProcessExit or FreeLibrary), so it might seem safe
  // to access __kmp_threads[] without taking the forkjoin_lock. However, some
  // threads can still be alive here, although they are about to be terminated.
  // The threads in the array with ds_thread==0 are the most suspicious, so it
  // may actually not be safe to access __kmp_threads[].
562 
563   // TODO: does it make sense to check __kmp_roots[] ?
564 
565   // Let's check that there are no other alive threads registered with the OMP
566   // lib.
567   while (1) {
568     thread_count = 0;
569     for (i = 0; i < __kmp_threads_capacity; ++i) {
570       if (!__kmp_threads)
571         continue;
572       kmp_info_t *th = __kmp_threads[i];
573       if (th == NULL)
574         continue;
575       int gtid = th->th.th_info.ds.ds_gtid;
576       if (gtid == gtid_req)
577         continue;
578       if (gtid < 0)
579         continue;
580       DWORD exit_val;
581       int alive = __kmp_is_thread_alive(th, &exit_val);
582       if (alive) {
583         ++thread_count;
584       }
585     }
586     if (thread_count == 0)
587       break; // success
588   }
589 
590   // Assume that I'm alone. Now it might be safe to check and reset locks.
591   // __kmp_forkjoin_lock and __kmp_stdio_lock are expected to be reset.
592   __kmp_reset_lock(&__kmp_forkjoin_lock);
593 #ifdef KMP_DEBUG
594   __kmp_reset_lock(&__kmp_stdio_lock);
595 #endif // KMP_DEBUG
596 }
597 
598 BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved) {
599   //__kmp_acquire_bootstrap_lock( &__kmp_initz_lock );
600 
601   switch (fdwReason) {
602 
603   case DLL_PROCESS_ATTACH:
604     KA_TRACE(10, ("DllMain: PROCESS_ATTACH\n"));
605 
606     return TRUE;
607 
608   case DLL_PROCESS_DETACH:
609     KA_TRACE(10, ("DllMain: PROCESS_DETACH T#%d\n", __kmp_gtid_get_specific()));
610 
611     if (lpReserved != NULL) {
612       // lpReserved is used for telling the difference:
613       //   lpReserved == NULL when FreeLibrary() was called,
614       //   lpReserved != NULL when the process terminates.
615       // When FreeLibrary() is called, worker threads remain alive. So they will
616       // release the forkjoin lock by themselves. When the process terminates,
617       // worker threads disappear triggering the problem of unreleased forkjoin
618       // lock as described below.
619 
      // A worker thread can take the forkjoin lock. The problem comes up if
      // that worker thread dies before it releases the forkjoin lock. The
      // forkjoin lock then remains taken, while the thread executing
      // DllMain()->PROCESS_DETACH->__kmp_internal_end_library() below tries to
      // take the forkjoin lock and always fails, so the application never
      // finishes [normally]. This scenario is possible if __kmpc_end() has not
      // been executed. It is not just a corner case; common causes are:
      // - the main function was compiled by an alternative compiler;
      // - the main function was compiled by icl but without /Qopenmp
      //   (application with plugins);
      // - the application terminates by calling C exit(), Fortran CALL EXIT()
      //   or Fortran STOP;
      // - an alive foreign thread prevented __kmpc_end from doing cleanup.
634       //
635       // This is a hack to work around the problem.
636       // TODO: !!! figure out something better.
637       __kmp_reset_locks_on_process_detach(__kmp_gtid_get_specific());
638     }
639 
640     __kmp_internal_end_library(__kmp_gtid_get_specific());
641 
642     return TRUE;
643 
644   case DLL_THREAD_ATTACH:
645     KA_TRACE(10, ("DllMain: THREAD_ATTACH\n"));
646 
647     /* if we want to register new siblings all the time here call
648      * __kmp_get_gtid(); */
649     return TRUE;
650 
651   case DLL_THREAD_DETACH:
652     KA_TRACE(10, ("DllMain: THREAD_DETACH T#%d\n", __kmp_gtid_get_specific()));
653 
654     __kmp_internal_end_thread(__kmp_gtid_get_specific());
655     return TRUE;
656   }
657 
658   return TRUE;
659 }
660 
661 #endif /* KMP_OS_WINDOWS */
662 #endif /* KMP_DYNAMIC_LIB */
663 
664 /* Change the library type to "status" and return the old type */
665 /* called from within initialization routines where __kmp_initz_lock is held */
666 int __kmp_change_library(int status) {
667   int old_status;
668 
669   old_status = __kmp_yield_init &
670                1; // check whether KMP_LIBRARY=throughput (even init count)
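  /* The library mode is encoded in the low bit of __kmp_yield_init: an even
     initial count means KMP_LIBRARY=throughput, an odd one means turnaround.
     Only that bit is flipped below; the rest of the counter is preserved. */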
671 
672   if (status) {
673     __kmp_yield_init |= 1; // throughput => turnaround (odd init count)
674   } else {
675     __kmp_yield_init &= ~1; // turnaround => throughput (even init count)
676   }
677 
678   return old_status; // return previous setting of whether
679   // KMP_LIBRARY=throughput
680 }
681 
682 /* __kmp_parallel_deo -- Wait until it's our turn. */
683 void __kmp_parallel_deo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
684   int gtid = *gtid_ref;
685 #ifdef BUILD_PARALLEL_ORDERED
686   kmp_team_t *team = __kmp_team_from_gtid(gtid);
687 #endif /* BUILD_PARALLEL_ORDERED */
688 
689   if (__kmp_env_consistency_check) {
690     if (__kmp_threads[gtid]->th.th_root->r.r_active)
691 #if KMP_USE_DYNAMIC_LOCK
692       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL, 0);
693 #else
694       __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL);
695 #endif
696   }
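  /* In the non-serialized case below, this thread spins until the team's
     t_ordered.dt.t_value equals its own tid; __kmp_parallel_dxo() releases the
     successor by advancing that value to (tid + 1) % t_nproc. */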
697 #ifdef BUILD_PARALLEL_ORDERED
698   if (!team->t.t_serialized) {
699     KMP_MB();
700     KMP_WAIT_YIELD(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid),
701                    KMP_EQ, NULL);
702     KMP_MB();
703   }
704 #endif /* BUILD_PARALLEL_ORDERED */
705 }
706 
707 /* __kmp_parallel_dxo -- Signal the next task. */
708 void __kmp_parallel_dxo(int *gtid_ref, int *cid_ref, ident_t *loc_ref) {
709   int gtid = *gtid_ref;
710 #ifdef BUILD_PARALLEL_ORDERED
711   int tid = __kmp_tid_from_gtid(gtid);
712   kmp_team_t *team = __kmp_team_from_gtid(gtid);
713 #endif /* BUILD_PARALLEL_ORDERED */
714 
715   if (__kmp_env_consistency_check) {
716     if (__kmp_threads[gtid]->th.th_root->r.r_active)
717       __kmp_pop_sync(gtid, ct_ordered_in_parallel, loc_ref);
718   }
719 #ifdef BUILD_PARALLEL_ORDERED
720   if (!team->t.t_serialized) {
721     KMP_MB(); /* Flush all pending memory write invalidates.  */
722 
723     /* use the tid of the next thread in this team */
724     /* TODO replace with general release procedure */
725     team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
726 
727 #if OMPT_SUPPORT && OMPT_BLAME
728     if (ompt_enabled &&
729         ompt_callbacks.ompt_callback(ompt_event_release_ordered)) {
730       /* accept blame for "ordered" waiting */
731       kmp_info_t *this_thread = __kmp_threads[gtid];
732       ompt_callbacks.ompt_callback(ompt_event_release_ordered)(
733           this_thread->th.ompt_thread_info.wait_id);
734     }
735 #endif
736 
737     KMP_MB(); /* Flush all pending memory write invalidates.  */
738   }
739 #endif /* BUILD_PARALLEL_ORDERED */
740 }
741 
742 /* ------------------------------------------------------------------------ */
743 /* The BARRIER for a SINGLE process section is always explicit   */
744 
745 int __kmp_enter_single(int gtid, ident_t *id_ref, int push_ws) {
746   int status;
747   kmp_info_t *th;
748   kmp_team_t *team;
749 
750   if (!TCR_4(__kmp_init_parallel))
751     __kmp_parallel_initialize();
752 
753   th = __kmp_threads[gtid];
754   team = th->th.th_team;
755   status = 0;
756 
757   th->th.th_ident = id_ref;
758 
759   if (team->t.t_serialized) {
760     status = 1;
761   } else {
762     kmp_int32 old_this = th->th.th_local.this_construct;
763 
764     ++th->th.th_local.this_construct;
765     /* try to set team count to thread count--success means thread got the
766        single block */
767     /* TODO: Should this be acquire or release? */
768     if (team->t.t_construct == old_this) {
769       status = KMP_COMPARE_AND_STORE_ACQ32(&team->t.t_construct, old_this,
770                                            th->th.th_local.this_construct);
771     }
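    /* A non-zero status here means this thread won the compare-and-store and
       therefore owns the single block; losers fall through with status == 0. */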
772 #if USE_ITT_BUILD
773     if (__itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
774         KMP_MASTER_GTID(gtid) &&
775 #if OMP_40_ENABLED
776         th->th.th_teams_microtask == NULL &&
777 #endif
778         team->t.t_active_level ==
779             1) { // Only report metadata by master of active team at level 1
780       __kmp_itt_metadata_single(id_ref);
781     }
782 #endif /* USE_ITT_BUILD */
783   }
784 
785   if (__kmp_env_consistency_check) {
786     if (status && push_ws) {
787       __kmp_push_workshare(gtid, ct_psingle, id_ref);
788     } else {
789       __kmp_check_workshare(gtid, ct_psingle, id_ref);
790     }
791   }
792 #if USE_ITT_BUILD
793   if (status) {
794     __kmp_itt_single_start(gtid);
795   }
796 #endif /* USE_ITT_BUILD */
797   return status;
798 }
799 
800 void __kmp_exit_single(int gtid) {
801 #if USE_ITT_BUILD
802   __kmp_itt_single_end(gtid);
803 #endif /* USE_ITT_BUILD */
804   if (__kmp_env_consistency_check)
805     __kmp_pop_workshare(gtid, ct_psingle, NULL);
806 }
807 
/* Determine if we can go parallel or must use a serialized parallel region,
 * and how many threads we can use.
 * set_nthreads is the number of threads requested for the team.
 * Returns 1 if we should serialize or only use one thread,
 * otherwise the number of threads to use.
 * The forkjoin lock is held by the caller. */
814 static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
815                                  int master_tid, int set_nthreads
816 #if OMP_40_ENABLED
817                                  ,
818                                  int enter_teams
819 #endif /* OMP_40_ENABLED */
820                                  ) {
821   int capacity;
822   int new_nthreads;
823   KMP_DEBUG_ASSERT(__kmp_init_serial);
824   KMP_DEBUG_ASSERT(root && parent_team);
825 
826   // If dyn-var is set, dynamically adjust the number of desired threads,
827   // according to the method specified by dynamic_mode.
828   new_nthreads = set_nthreads;
829   if (!get__dynamic_2(parent_team, master_tid)) {
830     ;
831   }
832 #ifdef USE_LOAD_BALANCE
833   else if (__kmp_global.g.g_dynamic_mode == dynamic_load_balance) {
834     new_nthreads = __kmp_load_balance_nproc(root, set_nthreads);
835     if (new_nthreads == 1) {
836       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
837                     "reservation to 1 thread\n",
838                     master_tid));
839       return 1;
840     }
841     if (new_nthreads < set_nthreads) {
842       KC_TRACE(10, ("__kmp_reserve_threads: T#%d load balance reduced "
843                     "reservation to %d threads\n",
844                     master_tid, new_nthreads));
845     }
846   }
847 #endif /* USE_LOAD_BALANCE */
848   else if (__kmp_global.g.g_dynamic_mode == dynamic_thread_limit) {
849     new_nthreads = __kmp_avail_proc - __kmp_nth +
850                    (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
851     if (new_nthreads <= 1) {
852       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
853                     "reservation to 1 thread\n",
854                     master_tid));
855       return 1;
856     }
857     if (new_nthreads < set_nthreads) {
858       KC_TRACE(10, ("__kmp_reserve_threads: T#%d thread limit reduced "
859                     "reservation to %d threads\n",
860                     master_tid, new_nthreads));
861     } else {
862       new_nthreads = set_nthreads;
863     }
864   } else if (__kmp_global.g.g_dynamic_mode == dynamic_random) {
865     if (set_nthreads > 2) {
866       new_nthreads = __kmp_get_random(parent_team->t.t_threads[master_tid]);
867       new_nthreads = (new_nthreads % set_nthreads) + 1;
868       if (new_nthreads == 1) {
869         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
870                       "reservation to 1 thread\n",
871                       master_tid));
872         return 1;
873       }
874       if (new_nthreads < set_nthreads) {
875         KC_TRACE(10, ("__kmp_reserve_threads: T#%d dynamic random reduced "
876                       "reservation to %d threads\n",
877                       master_tid, new_nthreads));
878       }
879     }
880   } else {
881     KMP_ASSERT(0);
882   }
883 
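  // In the limit checks below, the term (root->r.r_active ? 1 :
  // root->r.r_hot_team->t.t_nproc) counts threads that already exist and will
  // be reused by the new team (just the master if the root is already active,
  // otherwise the whole hot team), so each guarded expression is the projected
  // total thread count after this fork.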
884   // Respect KMP_ALL_THREADS/KMP_DEVICE_THREAD_LIMIT.
885   if (__kmp_nth + new_nthreads -
886           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
887       __kmp_max_nth) {
888     int tl_nthreads = __kmp_max_nth - __kmp_nth +
889                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
890     if (tl_nthreads <= 0) {
891       tl_nthreads = 1;
892     }
893 
894     // If dyn-var is false, emit a 1-time warning.
895     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
896       __kmp_reserve_warn = 1;
897       __kmp_msg(kmp_ms_warning,
898                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
899                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
900     }
901     if (tl_nthreads == 1) {
902       KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT "
903                     "reduced reservation to 1 thread\n",
904                     master_tid));
905       return 1;
906     }
907     KC_TRACE(10, ("__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT reduced "
908                   "reservation to %d threads\n",
909                   master_tid, tl_nthreads));
910     new_nthreads = tl_nthreads;
911   }
912 
913   // Respect OMP_THREAD_LIMIT
914   if (root->r.r_cg_nthreads + new_nthreads -
915           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
916       __kmp_cg_max_nth) {
917     int tl_nthreads = __kmp_cg_max_nth - root->r.r_cg_nthreads +
918                       (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
919     if (tl_nthreads <= 0) {
920       tl_nthreads = 1;
921     }
922 
923     // If dyn-var is false, emit a 1-time warning.
924     if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
925       __kmp_reserve_warn = 1;
926       __kmp_msg(kmp_ms_warning,
927                 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
928                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
929     }
930     if (tl_nthreads == 1) {
931       KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT "
932                     "reduced reservation to 1 thread\n",
933                     master_tid));
934       return 1;
935     }
936     KC_TRACE(10, ("__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT reduced "
937                   "reservation to %d threads\n",
938                   master_tid, tl_nthreads));
939     new_nthreads = tl_nthreads;
940   }
941 
942   // Check if the threads array is large enough, or needs expanding.
943   // See comment in __kmp_register_root() about the adjustment if
944   // __kmp_threads[0] == NULL.
945   capacity = __kmp_threads_capacity;
946   if (TCR_PTR(__kmp_threads[0]) == NULL) {
947     --capacity;
948   }
949   if (__kmp_nth + new_nthreads -
950           (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
951       capacity) {
952     // Expand the threads array.
953     int slotsRequired = __kmp_nth + new_nthreads -
954                         (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
955                         capacity;
956     int slotsAdded = __kmp_expand_threads(slotsRequired, slotsRequired);
957     if (slotsAdded < slotsRequired) {
958       // The threads array was not expanded enough.
959       new_nthreads -= (slotsRequired - slotsAdded);
960       KMP_ASSERT(new_nthreads >= 1);
961 
962       // If dyn-var is false, emit a 1-time warning.
963       if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
964         __kmp_reserve_warn = 1;
965         if (__kmp_tp_cached) {
966           __kmp_msg(kmp_ms_warning,
967                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
968                     KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
969                     KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
970         } else {
971           __kmp_msg(kmp_ms_warning,
972                     KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
973                     KMP_HNT(SystemLimitOnThreads), __kmp_msg_null);
974         }
975       }
976     }
977   }
978 
979 #ifdef KMP_DEBUG
980   if (new_nthreads == 1) {
981     KC_TRACE(10,
982              ("__kmp_reserve_threads: T#%d serializing team after reclaiming "
983               "dead roots and rechecking; requested %d threads\n",
984               __kmp_get_gtid(), set_nthreads));
985   } else {
986     KC_TRACE(10, ("__kmp_reserve_threads: T#%d allocating %d threads; requested"
987                   " %d threads\n",
988                   __kmp_get_gtid(), new_nthreads, set_nthreads));
989   }
990 #endif // KMP_DEBUG
991   return new_nthreads;
992 }
993 
/* Allocate threads from the thread pool and assign them to the new team. We are
   assured that there are enough threads available, because we checked that
   earlier within the forkjoin critical section. */
997 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
998                                     kmp_info_t *master_th, int master_gtid) {
999   int i;
1000   int use_hot_team;
1001 
1002   KA_TRACE(10, ("__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
1003   KMP_DEBUG_ASSERT(master_gtid == __kmp_get_gtid());
1004   KMP_MB();
1005 
1006   /* first, let's setup the master thread */
1007   master_th->th.th_info.ds.ds_tid = 0;
1008   master_th->th.th_team = team;
1009   master_th->th.th_team_nproc = team->t.t_nproc;
1010   master_th->th.th_team_master = master_th;
1011   master_th->th.th_team_serialized = FALSE;
1012   master_th->th.th_dispatch = &team->t.t_dispatch[0];
1013 
1014 /* make sure we are not the optimized hot team */
1015 #if KMP_NESTED_HOT_TEAMS
1016   use_hot_team = 0;
1017   kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
1018   if (hot_teams) { // hot teams array is not allocated if
1019     // KMP_HOT_TEAMS_MAX_LEVEL=0
1020     int level = team->t.t_active_level - 1; // index in array of hot teams
1021     if (master_th->th.th_teams_microtask) { // are we inside the teams?
1022       if (master_th->th.th_teams_size.nteams > 1) {
1023         ++level; // level was not increased in teams construct for
1024         // team_of_masters
1025       }
1026       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
1027           master_th->th.th_teams_level == team->t.t_level) {
1028         ++level; // level was not increased in teams construct for
1029         // team_of_workers before the parallel
1030       } // team->t.t_level will be increased inside parallel
1031     }
1032     if (level < __kmp_hot_teams_max_level) {
1033       if (hot_teams[level].hot_team) {
1034         // hot team has already been allocated for given level
1035         KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
1036         use_hot_team = 1; // the team is ready to use
1037       } else {
1038         use_hot_team = 0; // AC: threads are not allocated yet
1039         hot_teams[level].hot_team = team; // remember new hot team
1040         hot_teams[level].hot_team_nth = team->t.t_nproc;
1041       }
1042     } else {
1043       use_hot_team = 0;
1044     }
1045   }
1046 #else
1047   use_hot_team = team == root->r.r_hot_team;
1048 #endif
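  // use_hot_team != 0 means the threads from the previous parallel region are
  // still assigned to this team, so the allocation and re-initialization of
  // workers below can be skipped entirely.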
1049   if (!use_hot_team) {
1050 
1051     /* install the master thread */
1052     team->t.t_threads[0] = master_th;
1053     __kmp_initialize_info(master_th, team, 0, master_gtid);
1054 
1055     /* now, install the worker threads */
1056     for (i = 1; i < team->t.t_nproc; i++) {
1057 
1058       /* fork or reallocate a new thread and install it in team */
1059       kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1060       team->t.t_threads[i] = thr;
1061       KMP_DEBUG_ASSERT(thr);
1062       KMP_DEBUG_ASSERT(thr->th.th_team == team);
1063       /* align team and thread arrived states */
1064       KA_TRACE(20, ("__kmp_fork_team_threads: T#%d(%d:%d) init arrived "
1065                     "T#%d(%d:%d) join =%llu, plain=%llu\n",
1066                     __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
1067                     __kmp_gtid_from_tid(i, team), team->t.t_id, i,
1068                     team->t.t_bar[bs_forkjoin_barrier].b_arrived,
1069                     team->t.t_bar[bs_plain_barrier].b_arrived));
1070 #if OMP_40_ENABLED
1071       thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
1072       thr->th.th_teams_level = master_th->th.th_teams_level;
1073       thr->th.th_teams_size = master_th->th.th_teams_size;
1074 #endif
1075       { // Initialize threads' barrier data.
1076         int b;
1077         kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
1078         for (b = 0; b < bs_last_barrier; ++b) {
1079           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
1080           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
1081 #if USE_DEBUGGER
1082           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
1083 #endif
1084         }; // for b
1085       }
1086     }
1087 
1088 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
1089     __kmp_partition_places(team);
1090 #endif
1091   }
1092 
1093   KMP_MB();
1094 }
1095 
1096 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
// Propagate any changes to the floating point control registers out to the
// team. We try to avoid unnecessary writes to the relevant cache line in the
// team structure, so we don't make changes unless they are needed.
1100 inline static void propagateFPControl(kmp_team_t *team) {
1101   if (__kmp_inherit_fp_control) {
1102     kmp_int16 x87_fpu_control_word;
1103     kmp_uint32 mxcsr;
1104 
1105     // Get master values of FPU control flags (both X87 and vector)
1106     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1107     __kmp_store_mxcsr(&mxcsr);
1108     mxcsr &= KMP_X86_MXCSR_MASK;
1109 
1110 // There is no point looking at t_fp_control_saved here.
1111 // If it is TRUE, we still have to update the values if they are different from
1112 // those we now have.
1113 // If it is FALSE we didn't save anything yet, but our objective is the same. We
1114 // have to ensure that the values in the team are the same as those we have.
1115 // So, this code achieves what we need whether or not t_fp_control_saved is
1116 // true. By checking whether the value needs updating we avoid unnecessary
1117 // writes that would put the cache-line into a written state, causing all
1118 // threads in the team to have to read it again.
1119     KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
1120     KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
1121     // Although we don't use this value, other code in the runtime wants to know
1122     // whether it should restore them. So we must ensure it is correct.
1123     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
1124   } else {
1125     // Similarly here. Don't write to this cache-line in the team structure
1126     // unless we have to.
1127     KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
1128   }
1129 }
1130 
1131 // Do the opposite, setting the hardware registers to the updated values from
1132 // the team.
1133 inline static void updateHWFPControl(kmp_team_t *team) {
1134   if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
    // Only reset the fp control regs if they have been changed in the team
    // by the parallel region that we are exiting.
1137     kmp_int16 x87_fpu_control_word;
1138     kmp_uint32 mxcsr;
1139     __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1140     __kmp_store_mxcsr(&mxcsr);
1141     mxcsr &= KMP_X86_MXCSR_MASK;
1142 
1143     if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
1144       __kmp_clear_x87_fpu_status_word();
1145       __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
1146     }
1147 
1148     if (team->t.t_mxcsr != mxcsr) {
1149       __kmp_load_mxcsr(&team->t.t_mxcsr);
1150     }
1151   }
1152 }
1153 #else
1154 #define propagateFPControl(x) ((void)0)
1155 #define updateHWFPControl(x) ((void)0)
1156 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1157 
1158 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team,
1159                                      int realloc); // forward declaration
1160 
/* Run a parallel region that has been serialized, so it runs only in a team of
   the single master thread. */
1163 void __kmp_serialized_parallel(ident_t *loc, kmp_int32 global_tid) {
1164   kmp_info_t *this_thr;
1165   kmp_team_t *serial_team;
1166 
1167   KC_TRACE(10, ("__kmpc_serialized_parallel: called by T#%d\n", global_tid));
1168 
1169   /* Skip all this code for autopar serialized loops since it results in
1170      unacceptable overhead */
1171   if (loc != NULL && (loc->flags & KMP_IDENT_AUTOPAR))
1172     return;
1173 
1174   if (!TCR_4(__kmp_init_parallel))
1175     __kmp_parallel_initialize();
1176 
1177   this_thr = __kmp_threads[global_tid];
1178   serial_team = this_thr->th.th_serial_team;
1179 
1180   /* utilize the serialized team held by this thread */
1181   KMP_DEBUG_ASSERT(serial_team);
1182   KMP_MB();
1183 
1184   if (__kmp_tasking_mode != tskm_immediate_exec) {
1185     KMP_DEBUG_ASSERT(
1186         this_thr->th.th_task_team ==
1187         this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]);
1188     KMP_DEBUG_ASSERT(serial_team->t.t_task_team[this_thr->th.th_task_state] ==
1189                      NULL);
1190     KA_TRACE(20, ("__kmpc_serialized_parallel: T#%d pushing task_team %p / "
1191                   "team %p, new task_team = NULL\n",
1192                   global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
1193     this_thr->th.th_task_team = NULL;
1194   }
1195 
1196 #if OMP_40_ENABLED
1197   kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
1198   if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1199     proc_bind = proc_bind_false;
1200   } else if (proc_bind == proc_bind_default) {
1201     // No proc_bind clause was specified, so use the current value
1202     // of proc-bind-var for this parallel region.
1203     proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
1204   }
1205   // Reset for next parallel region
1206   this_thr->th.th_set_proc_bind = proc_bind_default;
1207 #endif /* OMP_40_ENABLED */
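  // Two cases below: if this thread is not already running inside its serial
  // team, (re)use or allocate a serial team and make it current; otherwise we
  // are nesting inside an already-serialized region, so just bump
  // t_serialized and push a fresh dispatch buffer.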
1208 
1209   if (this_thr->th.th_team != serial_team) {
1210     // Nested level will be an index in the nested nthreads array
1211     int level = this_thr->th.th_team->t.t_level;
1212 
1213     if (serial_team->t.t_serialized) {
1214       /* this serial team was already used
         TODO increase performance by making these locks more specific */
1216       kmp_team_t *new_team;
1217 
1218       __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1219 
1220 #if OMPT_SUPPORT
1221       ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(global_tid);
1222 #endif
1223 
1224       new_team = __kmp_allocate_team(this_thr->th.th_root, 1, 1,
1225 #if OMPT_SUPPORT
1226                                      ompt_parallel_id,
1227 #endif
1228 #if OMP_40_ENABLED
1229                                      proc_bind,
1230 #endif
1231                                      &this_thr->th.th_current_task->td_icvs,
1232                                      0 USE_NESTED_HOT_ARG(NULL));
1233       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1234       KMP_ASSERT(new_team);
1235 
1236       /* setup new serialized team and install it */
1237       new_team->t.t_threads[0] = this_thr;
1238       new_team->t.t_parent = this_thr->th.th_team;
1239       serial_team = new_team;
1240       this_thr->th.th_serial_team = serial_team;
1241 
1242       KF_TRACE(
1243           10,
1244           ("__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
1245            global_tid, serial_team));
1246 
      /* TODO: the above breaks the requirement that, even if we run out of
         resources, we can still guarantee that serialized teams are OK, since
         we may need to allocate a new one */
1250     } else {
1251       KF_TRACE(
1252           10,
1253           ("__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
1254            global_tid, serial_team));
1255     }
1256 
1257     /* we have to initialize this serial team */
1258     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1259     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1260     KMP_DEBUG_ASSERT(this_thr->th.th_team != serial_team);
1261     serial_team->t.t_ident = loc;
1262     serial_team->t.t_serialized = 1;
1263     serial_team->t.t_nproc = 1;
1264     serial_team->t.t_parent = this_thr->th.th_team;
1265     serial_team->t.t_sched = this_thr->th.th_team->t.t_sched;
1266     this_thr->th.th_team = serial_team;
1267     serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
1268 
    KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d curtask=%p\n", global_tid,
1270                   this_thr->th.th_current_task));
1271     KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1);
1272     this_thr->th.th_current_task->td_flags.executing = 0;
1273 
1274     __kmp_push_current_task_to_thread(this_thr, serial_team, 0);
1275 
1276     /* TODO: GEH: do ICVs work for nested serialized teams? Don't we need an
1277        implicit task for each serialized task represented by
1278        team->t.t_serialized? */
1279     copy_icvs(&this_thr->th.th_current_task->td_icvs,
1280               &this_thr->th.th_current_task->td_parent->td_icvs);
1281 
1282     // Thread value exists in the nested nthreads array for the next nested
1283     // level
1284     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1285       this_thr->th.th_current_task->td_icvs.nproc =
1286           __kmp_nested_nth.nth[level + 1];
1287     }
1288 
1289 #if OMP_40_ENABLED
1290     if (__kmp_nested_proc_bind.used &&
1291         (level + 1 < __kmp_nested_proc_bind.used)) {
1292       this_thr->th.th_current_task->td_icvs.proc_bind =
1293           __kmp_nested_proc_bind.bind_types[level + 1];
1294     }
1295 #endif /* OMP_40_ENABLED */
1296 
1297 #if USE_DEBUGGER
1298     serial_team->t.t_pkfn = (microtask_t)(~0); // For the debugger.
1299 #endif
1300     this_thr->th.th_info.ds.ds_tid = 0;
1301 
1302     /* set thread cache values */
1303     this_thr->th.th_team_nproc = 1;
1304     this_thr->th.th_team_master = this_thr;
1305     this_thr->th.th_team_serialized = 1;
1306 
1307     serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
1308     serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
1309 
1310     propagateFPControl(serial_team);
1311 
1312     /* check if we need to allocate dispatch buffers stack */
1313     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1314     if (!serial_team->t.t_dispatch->th_disp_buffer) {
1315       serial_team->t.t_dispatch->th_disp_buffer =
1316           (dispatch_private_info_t *)__kmp_allocate(
1317               sizeof(dispatch_private_info_t));
1318     }
1319     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1320 
1321 #if OMPT_SUPPORT
1322     ompt_parallel_id_t ompt_parallel_id = __ompt_parallel_id_new(global_tid);
1323     __ompt_team_assign_id(serial_team, ompt_parallel_id);
1324 #endif
1325 
1326     KMP_MB();
1327 
1328   } else {
1329     /* this serialized team is already being used,
1330      * that's fine, just add another nested level */
1331     KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team);
1332     KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1333     KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1334     ++serial_team->t.t_serialized;
1335     this_thr->th.th_team_serialized = serial_team->t.t_serialized;
1336 
1337     // Nested level will be an index in the nested nthreads array
1338     int level = this_thr->th.th_team->t.t_level;
1339     // Thread value exists in the nested nthreads array for the next nested
1340     // level
1341     if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1342       this_thr->th.th_current_task->td_icvs.nproc =
1343           __kmp_nested_nth.nth[level + 1];
1344     }
1345     serial_team->t.t_level++;
1346     KF_TRACE(10, ("__kmpc_serialized_parallel: T#%d increasing nesting level "
1347                   "of serial team %p to %d\n",
1348                   global_tid, serial_team, serial_team->t.t_level));
1349 
1350     /* allocate/push dispatch buffers stack */
1351     KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1352     {
1353       dispatch_private_info_t *disp_buffer =
1354           (dispatch_private_info_t *)__kmp_allocate(
1355               sizeof(dispatch_private_info_t));
1356       disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer;
1357       serial_team->t.t_dispatch->th_disp_buffer = disp_buffer;
1358     }
1359     this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1360 
1361     KMP_MB();
1362   }
1363 #if OMP_40_ENABLED
1364   KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);
1365 #endif
1366 
1367   if (__kmp_env_consistency_check)
1368     __kmp_push_parallel(global_tid, NULL);
1369 }
1370 
1371 /* most of the work for a fork */
1372 /* return true if we really went parallel, false if serialized */
1373 int __kmp_fork_call(ident_t *loc, int gtid,
1374                     enum fork_context_e call_context, // Intel, GNU, ...
1375                     kmp_int32 argc,
1376 #if OMPT_SUPPORT
1377                     void *unwrapped_task,
1378 #endif
1379                     microtask_t microtask, launch_t invoker,
1380 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1381 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1382                     va_list *ap
1383 #else
1384                     va_list ap
1385 #endif
1386                     ) {
1387   void **argv;
1388   int i;
1389   int master_tid;
1390   int master_this_cons;
1391   kmp_team_t *team;
1392   kmp_team_t *parent_team;
1393   kmp_info_t *master_th;
1394   kmp_root_t *root;
1395   int nthreads;
1396   int master_active;
1397   int master_set_numthreads;
1398   int level;
1399 #if OMP_40_ENABLED
1400   int active_level;
1401   int teams_level;
1402 #endif
1403 #if KMP_NESTED_HOT_TEAMS
1404   kmp_hot_team_ptr_t **p_hot_teams;
1405 #endif
1406   { // KMP_TIME_BLOCK
1407     KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call);
1408     KMP_COUNT_VALUE(OMP_PARALLEL_args, argc);
1409 
1410     KA_TRACE(20, ("__kmp_fork_call: enter T#%d\n", gtid));
1411     if (__kmp_stkpadding > 0 && __kmp_root[gtid] != NULL) {
1412       /* Some systems prefer the stack for the root thread(s) to start with */
1413       /* some gap from the parent stack to prevent false sharing. */
1414       void *dummy = KMP_ALLOCA(__kmp_stkpadding);
1415       /* These 2 lines below are so this does not get optimized out */
1416       if (__kmp_stkpadding > KMP_MAX_STKPADDING)
1417         __kmp_stkpadding += (short)((kmp_int64)dummy);
1418     }
1419 
1420     /* initialize if needed */
1421     KMP_DEBUG_ASSERT(
1422         __kmp_init_serial); // AC: potentially unsafe, not in sync with shutdown
1423     if (!TCR_4(__kmp_init_parallel))
1424       __kmp_parallel_initialize();
1425 
1426     /* setup current data */
1427     master_th = __kmp_threads[gtid]; // AC: potentially unsafe, not in sync with
1428     // shutdown
1429     parent_team = master_th->th.th_team;
1430     master_tid = master_th->th.th_info.ds.ds_tid;
1431     master_this_cons = master_th->th.th_local.this_construct;
1432     root = master_th->th.th_root;
1433     master_active = root->r.r_active;
1434     master_set_numthreads = master_th->th.th_set_nproc;
1435 
1436 #if OMPT_SUPPORT
1437     ompt_parallel_id_t ompt_parallel_id;
1438     ompt_task_id_t ompt_task_id;
1439     ompt_frame_t *ompt_frame;
1440     ompt_task_id_t my_task_id;
1441     ompt_parallel_id_t my_parallel_id;
1442 
1443     if (ompt_enabled) {
1444       ompt_parallel_id = __ompt_parallel_id_new(gtid);
1445       ompt_task_id = __ompt_get_task_id_internal(0);
1446       ompt_frame = __ompt_get_task_frame_internal(0);
1447     }
1448 #endif
1449 
1450     // Nested level will be an index in the nested nthreads array
1451     level = parent_team->t.t_level;
1452     // used to launch non-serial teams even if nested is not allowed
1453     active_level = parent_team->t.t_active_level;
1454 #if OMP_40_ENABLED
1455     // needed to check nesting inside the teams
1456     teams_level = master_th->th.th_teams_level;
1457 #endif
1458 #if KMP_NESTED_HOT_TEAMS
1459     p_hot_teams = &master_th->th.th_hot_teams;
1460     if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
1461       *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
1462           sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level);
1463       (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
      // either the actual hot team, or not needed (when active_level > 0)
1465       (*p_hot_teams)[0].hot_team_nth = 1;
1466     }
1467 #endif
1468 
1469 #if OMPT_SUPPORT
1470     if (ompt_enabled &&
1471         ompt_callbacks.ompt_callback(ompt_event_parallel_begin)) {
1472       int team_size = master_set_numthreads;
1473 
1474       ompt_callbacks.ompt_callback(ompt_event_parallel_begin)(
1475           ompt_task_id, ompt_frame, ompt_parallel_id, team_size, unwrapped_task,
1476           OMPT_INVOKER(call_context));
1477     }
1478 #endif
1479 
1480     master_th->th.th_ident = loc;
1481 
1482 #if OMP_40_ENABLED
1483     if (master_th->th.th_teams_microtask && ap &&
1484         microtask != (microtask_t)__kmp_teams_master && level == teams_level) {
1485       // AC: This is start of parallel that is nested inside teams construct.
1486       // The team is actual (hot), all workers are ready at the fork barrier.
1487       // No lock needed to initialize the team a bit, then free workers.
1488       parent_team->t.t_ident = loc;
1489       __kmp_alloc_argv_entries(argc, parent_team, TRUE);
1490       parent_team->t.t_argc = argc;
1491       argv = (void **)parent_team->t.t_argv;
1492       for (i = argc - 1; i >= 0; --i)
1493 /* TODO: revert workaround for Intel(R) 64 tracker #96 */
1494 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1495         *argv++ = va_arg(*ap, void *);
1496 #else
1497         *argv++ = va_arg(ap, void *);
1498 #endif
      // Increment our nested depth level, but do not increase serialization
1500       if (parent_team == master_th->th.th_serial_team) {
1501         // AC: we are in serialized parallel
1502         __kmpc_serialized_parallel(loc, gtid);
1503         KMP_DEBUG_ASSERT(parent_team->t.t_serialized > 1);
        // AC: need this so that the enquiry functions work
        // correctly; will restore at join time
1506         parent_team->t.t_serialized--;
1507 #if OMPT_SUPPORT
1508         void *dummy;
1509         void **exit_runtime_p;
1510 
1511         ompt_lw_taskteam_t lw_taskteam;
1512 
1513         if (ompt_enabled) {
1514           __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid, unwrapped_task,
1515                                   ompt_parallel_id);
1516           lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid);
1517           exit_runtime_p =
1518               &(lw_taskteam.ompt_task_info.frame.exit_runtime_frame);
1519 
1520           __ompt_lw_taskteam_link(&lw_taskteam, master_th);
1521 
1522 #if OMPT_TRACE
1523           /* OMPT implicit task begin */
1524           my_task_id = lw_taskteam.ompt_task_info.task_id;
1525           my_parallel_id = parent_team->t.ompt_team_info.parallel_id;
1526           if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
1527             ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
1528                 my_parallel_id, my_task_id);
1529           }
1530 #endif
1531 
1532           /* OMPT state */
1533           master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1534         } else {
1535           exit_runtime_p = &dummy;
1536         }
1537 #endif
1538 
1539         {
1540           KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1541           KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1542           __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv
1543 #if OMPT_SUPPORT
1544                                  ,
1545                                  exit_runtime_p
1546 #endif
1547                                  );
1548         }
1549 
1550 #if OMPT_SUPPORT
1551         *exit_runtime_p = NULL;
1552         if (ompt_enabled) {
1553 #if OMPT_TRACE
1554           lw_taskteam.ompt_task_info.frame.exit_runtime_frame = NULL;
1555 
1556           if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
1557             ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
1558                 ompt_parallel_id, ompt_task_id);
1559           }
1560 
1561           __ompt_lw_taskteam_unlink(master_th);
          // reset (clear) the task id only after unlinking the task
1563           lw_taskteam.ompt_task_info.task_id = ompt_task_id_none;
1564 #endif
1565 
1566           if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
1567             ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
1568                 ompt_parallel_id, ompt_task_id, OMPT_INVOKER(call_context));
1569           }
1570           master_th->th.ompt_thread_info.state = ompt_state_overhead;
1571         }
1572 #endif
1573         return TRUE;
1574       }
1575 
1576       parent_team->t.t_pkfn = microtask;
1577 #if OMPT_SUPPORT
1578       parent_team->t.ompt_team_info.microtask = unwrapped_task;
1579 #endif
1580       parent_team->t.t_invoke = invoker;
1581       KMP_TEST_THEN_INC32((kmp_int32 *)&root->r.r_in_parallel);
1582       parent_team->t.t_active_level++;
1583       parent_team->t.t_level++;
1584 
1585       /* Change number of threads in the team if requested */
1586       if (master_set_numthreads) { // The parallel has num_threads clause
1587         if (master_set_numthreads < master_th->th.th_teams_size.nth) {
          // AC: can only reduce the number of threads dynamically, not increase
1589           kmp_info_t **other_threads = parent_team->t.t_threads;
1590           parent_team->t.t_nproc = master_set_numthreads;
1591           for (i = 0; i < master_set_numthreads; ++i) {
1592             other_threads[i]->th.th_team_nproc = master_set_numthreads;
1593           }
1594           // Keep extra threads hot in the team for possible next parallels
1595         }
1596         master_th->th.th_set_nproc = 0;
1597       }
1598 
1599 #if USE_DEBUGGER
1600       if (__kmp_debugging) { // Let debugger override number of threads.
1601         int nth = __kmp_omp_num_threads(loc);
1602         if (nth > 0) { // 0 means debugger doesn't want to change num threads
1603           master_set_numthreads = nth;
        } // if
      } // if
1606 #endif
1607 
1608       KF_TRACE(10, ("__kmp_fork_call: before internal fork: root=%p, team=%p, "
1609                     "master_th=%p, gtid=%d\n",
1610                     root, parent_team, master_th, gtid));
1611       __kmp_internal_fork(loc, gtid, parent_team);
1612       KF_TRACE(10, ("__kmp_fork_call: after internal fork: root=%p, team=%p, "
1613                     "master_th=%p, gtid=%d\n",
1614                     root, parent_team, master_th, gtid));
1615 
1616       /* Invoke microtask for MASTER thread */
1617       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
1618                     parent_team->t.t_id, parent_team->t.t_pkfn));
1619 
1620       {
1621         KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1622         KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1623         if (!parent_team->t.t_invoke(gtid)) {
1624           KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
1625         }
1626       }
1627       KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
1628                     parent_team->t.t_id, parent_team->t.t_pkfn));
1629       KMP_MB(); /* Flush all pending memory write invalidates.  */
1630 
1631       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
1632 
1633       return TRUE;
1634     } // Parallel closely nested in teams construct
1635 #endif /* OMP_40_ENABLED */
1636 
1637 #if KMP_DEBUG
1638     if (__kmp_tasking_mode != tskm_immediate_exec) {
1639       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
1640                        parent_team->t.t_task_team[master_th->th.th_task_state]);
1641     }
1642 #endif
1643 
1644     if (parent_team->t.t_active_level >=
1645         master_th->th.th_current_task->td_icvs.max_active_levels) {
1646       nthreads = 1;
1647     } else {
1648 #if OMP_40_ENABLED
1649       int enter_teams = ((ap == NULL && active_level == 0) ||
1650                          (ap && teams_level > 0 && teams_level == level));
1651 #endif
1652       nthreads =
1653           master_set_numthreads
1654               ? master_set_numthreads
1655               : get__nproc_2(
1656                     parent_team,
1657                     master_tid); // TODO: get nproc directly from current task
1658 
      // Check if we need to take the forkjoin lock (no need for a serialized
      // parallel outside of a teams construct). This code was moved here from
      // __kmp_reserve_threads() to speed up nested serialized parallels.
1662       if (nthreads > 1) {
1663         if ((!get__nested(master_th) && (root->r.r_in_parallel
1664 #if OMP_40_ENABLED
1665                                          && !enter_teams
1666 #endif /* OMP_40_ENABLED */
1667                                          )) ||
1668             (__kmp_library == library_serial)) {
1669           KC_TRACE(10, ("__kmp_fork_call: T#%d serializing team; requested %d"
1670                         " threads\n",
1671                         gtid, nthreads));
1672           nthreads = 1;
1673         }
1674       }
1675       if (nthreads > 1) {
1676         /* determine how many new threads we can use */
1677         __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1678         nthreads = __kmp_reserve_threads(
1679             root, parent_team, master_tid, nthreads
1680 #if OMP_40_ENABLED
            /* AC: If we execute teams from a parallel region (on the host),
               then the teams should be created, but each can have only 1
               thread if nesting is disabled. If teams is called from a serial
               region, then the teams and their threads should be created
               regardless of the nesting setting. */
1686             ,
1687             enter_teams
1688 #endif /* OMP_40_ENABLED */
1689             );
1690         if (nthreads == 1) {
1691           // Free lock for single thread execution here; for multi-thread
1692           // execution it will be freed later after team of threads created
1693           // and initialized
1694           __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1695         }
1696       }
1697     }
1698     KMP_DEBUG_ASSERT(nthreads > 0);
1699 
1700     // If we temporarily changed the set number of threads then restore it now
1701     master_th->th.th_set_nproc = 0;
1702 
1703     /* create a serialized parallel region? */
1704     if (nthreads == 1) {
1705 /* josh todo: hypothetical question: what do we do for OS X*? */
1706 #if KMP_OS_LINUX &&                                                            \
1707     (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
1708       void *args[argc];
1709 #else
1710       void **args = (void **)KMP_ALLOCA(argc * sizeof(void *));
1711 #endif /* KMP_OS_LINUX && ( KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || \
1712           KMP_ARCH_AARCH64) */
1713 
1714       KA_TRACE(20,
1715                ("__kmp_fork_call: T#%d serializing parallel region\n", gtid));
1716 
1717       __kmpc_serialized_parallel(loc, gtid);
1718 
1719       if (call_context == fork_context_intel) {
1720         /* TODO this sucks, use the compiler itself to pass args! :) */
1721         master_th->th.th_serial_team->t.t_ident = loc;
1722 #if OMP_40_ENABLED
1723         if (!ap) {
1724           // revert change made in __kmpc_serialized_parallel()
1725           master_th->th.th_serial_team->t.t_level--;
1726 // Get args from parent team for teams construct
1727 
1728 #if OMPT_SUPPORT
1729           void *dummy;
1730           void **exit_runtime_p;
1731 
1732           ompt_lw_taskteam_t lw_taskteam;
1733 
1734           if (ompt_enabled) {
1735             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1736                                     unwrapped_task, ompt_parallel_id);
1737             lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid);
1738             exit_runtime_p =
1739                 &(lw_taskteam.ompt_task_info.frame.exit_runtime_frame);
1740 
1741             __ompt_lw_taskteam_link(&lw_taskteam, master_th);
1742 
1743 #if OMPT_TRACE
1744             my_task_id = lw_taskteam.ompt_task_info.task_id;
1745             if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
1746               ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
1747                   ompt_parallel_id, my_task_id);
1748             }
1749 #endif
1750 
1751             /* OMPT state */
1752             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1753           } else {
1754             exit_runtime_p = &dummy;
1755           }
1756 #endif
1757 
1758           {
1759             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1760             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1761             __kmp_invoke_microtask(microtask, gtid, 0, argc,
1762                                    parent_team->t.t_argv
1763 #if OMPT_SUPPORT
1764                                    ,
1765                                    exit_runtime_p
1766 #endif
1767                                    );
1768           }
1769 
1770 #if OMPT_SUPPORT
1771           *exit_runtime_p = NULL;
1772           if (ompt_enabled) {
1773             lw_taskteam.ompt_task_info.frame.exit_runtime_frame = NULL;
1774 
1775 #if OMPT_TRACE
1776             if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
1777               ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
1778                   ompt_parallel_id, ompt_task_id);
1779             }
1780 #endif
1781 
1782             __ompt_lw_taskteam_unlink(master_th);
            // reset (clear) the task id only after unlinking the task
1784             lw_taskteam.ompt_task_info.task_id = ompt_task_id_none;
1785 
1786             if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
1787               ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
1788                   ompt_parallel_id, ompt_task_id, OMPT_INVOKER(call_context));
1789             }
1790             master_th->th.ompt_thread_info.state = ompt_state_overhead;
1791           }
1792 #endif
1793         } else if (microtask == (microtask_t)__kmp_teams_master) {
1794           KMP_DEBUG_ASSERT(master_th->th.th_team ==
1795                            master_th->th.th_serial_team);
1796           team = master_th->th.th_team;
1797           // team->t.t_pkfn = microtask;
1798           team->t.t_invoke = invoker;
1799           __kmp_alloc_argv_entries(argc, team, TRUE);
1800           team->t.t_argc = argc;
1801           argv = (void **)team->t.t_argv;
1802           if (ap) {
1803             for (i = argc - 1; i >= 0; --i)
1804 // TODO: revert workaround for Intel(R) 64 tracker #96
1805 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1806               *argv++ = va_arg(*ap, void *);
1807 #else
1808               *argv++ = va_arg(ap, void *);
1809 #endif
1810           } else {
1811             for (i = 0; i < argc; ++i)
1812               // Get args from parent team for teams construct
1813               argv[i] = parent_team->t.t_argv[i];
1814           }
1815           // AC: revert change made in __kmpc_serialized_parallel()
1816           //     because initial code in teams should have level=0
1817           team->t.t_level--;
1818           // AC: call special invoker for outer "parallel" of teams construct
1819           {
1820             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1821             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1822             invoker(gtid);
1823           }
1824         } else {
1825 #endif /* OMP_40_ENABLED */
1826           argv = args;
1827           for (i = argc - 1; i >= 0; --i)
1828 // TODO: revert workaround for Intel(R) 64 tracker #96
1829 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1830             *argv++ = va_arg(*ap, void *);
1831 #else
1832           *argv++ = va_arg(ap, void *);
1833 #endif
1834           KMP_MB();
1835 
1836 #if OMPT_SUPPORT
1837           void *dummy;
1838           void **exit_runtime_p;
1839 
1840           ompt_lw_taskteam_t lw_taskteam;
1841 
1842           if (ompt_enabled) {
1843             __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1844                                     unwrapped_task, ompt_parallel_id);
1845             lw_taskteam.ompt_task_info.task_id = __ompt_task_id_new(gtid);
1846             exit_runtime_p =
1847                 &(lw_taskteam.ompt_task_info.frame.exit_runtime_frame);
1848 
1849             __ompt_lw_taskteam_link(&lw_taskteam, master_th);
1850 
1851 #if OMPT_TRACE
1852             /* OMPT implicit task begin */
1853             my_task_id = lw_taskteam.ompt_task_info.task_id;
1854             my_parallel_id = ompt_parallel_id;
1855             if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
1856               ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(
1857                   my_parallel_id, my_task_id);
1858             }
1859 #endif
1860 
1861             /* OMPT state */
1862             master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1863           } else {
1864             exit_runtime_p = &dummy;
1865           }
1866 #endif
1867 
1868           {
1869             KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1870             KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1871             __kmp_invoke_microtask(microtask, gtid, 0, argc, args
1872 #if OMPT_SUPPORT
1873                                    ,
1874                                    exit_runtime_p
1875 #endif
1876                                    );
1877           }
1878 
1879 #if OMPT_SUPPORT
1880           *exit_runtime_p = NULL;
1881           if (ompt_enabled) {
1882 #if OMPT_TRACE
1883             lw_taskteam.ompt_task_info.frame.exit_runtime_frame = NULL;
1884 
1885             if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
1886               ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
1887                   my_parallel_id, my_task_id);
1888             }
1889 #endif
1890 
1891             __ompt_lw_taskteam_unlink(master_th);
            // reset (clear) the task id only after unlinking the task
1893             lw_taskteam.ompt_task_info.task_id = ompt_task_id_none;
1894 
1895             if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
1896               ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
1897                   ompt_parallel_id, ompt_task_id, OMPT_INVOKER(call_context));
1898             }
1899             master_th->th.ompt_thread_info.state = ompt_state_overhead;
1900           }
1901 #endif
1902 #if OMP_40_ENABLED
1903         }
1904 #endif /* OMP_40_ENABLED */
1905       } else if (call_context == fork_context_gnu) {
1906 #if OMPT_SUPPORT
1907         ompt_lw_taskteam_t *lwt =
1908             (ompt_lw_taskteam_t *)__kmp_allocate(sizeof(ompt_lw_taskteam_t));
1909         __ompt_lw_taskteam_init(lwt, master_th, gtid, unwrapped_task,
1910                                 ompt_parallel_id);
1911 
1912         lwt->ompt_task_info.task_id = __ompt_task_id_new(gtid);
1913         lwt->ompt_task_info.frame.exit_runtime_frame = NULL;
1914         __ompt_lw_taskteam_link(lwt, master_th);
1915 #endif
1916 
1917         // we were called from GNU native code
1918         KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1919         return FALSE;
1920       } else {
1921         KMP_ASSERT2(call_context < fork_context_last,
1922                     "__kmp_fork_call: unknown fork_context parameter");
1923       }
1924 
1925       KA_TRACE(20, ("__kmp_fork_call: T#%d serial exit\n", gtid));
1926       KMP_MB();
1927       return FALSE;
1928     }
1929 
    // GEH: only modify the executing flag in the case when not serialized;
    //      the serialized case is handled in __kmpc_serialized_parallel
1932     KF_TRACE(10, ("__kmp_fork_call: parent_team_aclevel=%d, master_th=%p, "
1933                   "curtask=%p, curtask_max_aclevel=%d\n",
1934                   parent_team->t.t_active_level, master_th,
1935                   master_th->th.th_current_task,
1936                   master_th->th.th_current_task->td_icvs.max_active_levels));
1937     // TODO: GEH - cannot do this assertion because root thread not set up as
1938     // executing
1939     // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 1 );
1940     master_th->th.th_current_task->td_flags.executing = 0;
1941 
1942 #if OMP_40_ENABLED
1943     if (!master_th->th.th_teams_microtask || level > teams_level)
1944 #endif /* OMP_40_ENABLED */
1945     {
1946       /* Increment our nested depth level */
1947       KMP_TEST_THEN_INC32((kmp_int32 *)&root->r.r_in_parallel);
1948     }
1949 
1950     // See if we need to make a copy of the ICVs.
1951     int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc;
1952     if ((level + 1 < __kmp_nested_nth.used) &&
1953         (__kmp_nested_nth.nth[level + 1] != nthreads_icv)) {
1954       nthreads_icv = __kmp_nested_nth.nth[level + 1];
1955     } else {
1956       nthreads_icv = 0; // don't update
1957     }
1958 
1959 #if OMP_40_ENABLED
1960     // Figure out the proc_bind_policy for the new team.
1961     kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
1962     kmp_proc_bind_t proc_bind_icv =
1963         proc_bind_default; // proc_bind_default means don't update
1964     if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1965       proc_bind = proc_bind_false;
1966     } else {
1967       if (proc_bind == proc_bind_default) {
1968         // No proc_bind clause specified; use current proc-bind-var for this
1969         // parallel region
1970         proc_bind = master_th->th.th_current_task->td_icvs.proc_bind;
1971       }
1972       /* else: The proc_bind policy was specified explicitly on parallel clause.
1973          This overrides proc-bind-var for this parallel region, but does not
1974          change proc-bind-var. */
1975       // Figure the value of proc-bind-var for the child threads.
1976       if ((level + 1 < __kmp_nested_proc_bind.used) &&
1977           (__kmp_nested_proc_bind.bind_types[level + 1] !=
1978            master_th->th.th_current_task->td_icvs.proc_bind)) {
1979         proc_bind_icv = __kmp_nested_proc_bind.bind_types[level + 1];
1980       }
1981     }
1982 
1983     // Reset for next parallel region
1984     master_th->th.th_set_proc_bind = proc_bind_default;
1985 #endif /* OMP_40_ENABLED */
1986 
1987     if ((nthreads_icv > 0)
1988 #if OMP_40_ENABLED
1989         || (proc_bind_icv != proc_bind_default)
1990 #endif /* OMP_40_ENABLED */
1991             ) {
1992       kmp_internal_control_t new_icvs;
1993       copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
1994       new_icvs.next = NULL;
1995       if (nthreads_icv > 0) {
1996         new_icvs.nproc = nthreads_icv;
1997       }
1998 
1999 #if OMP_40_ENABLED
2000       if (proc_bind_icv != proc_bind_default) {
2001         new_icvs.proc_bind = proc_bind_icv;
2002       }
2003 #endif /* OMP_40_ENABLED */
2004 
2005       /* allocate a new parallel team */
2006       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2007       team = __kmp_allocate_team(root, nthreads, nthreads,
2008 #if OMPT_SUPPORT
2009                                  ompt_parallel_id,
2010 #endif
2011 #if OMP_40_ENABLED
2012                                  proc_bind,
2013 #endif
2014                                  &new_icvs, argc USE_NESTED_HOT_ARG(master_th));
2015     } else {
2016       /* allocate a new parallel team */
2017       KF_TRACE(10, ("__kmp_fork_call: before __kmp_allocate_team\n"));
2018       team = __kmp_allocate_team(root, nthreads, nthreads,
2019 #if OMPT_SUPPORT
2020                                  ompt_parallel_id,
2021 #endif
2022 #if OMP_40_ENABLED
2023                                  proc_bind,
2024 #endif
2025                                  &master_th->th.th_current_task->td_icvs,
2026                                  argc USE_NESTED_HOT_ARG(master_th));
2027     }
2028     KF_TRACE(
2029         10, ("__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));
2030 
2031     /* setup the new team */
2032     KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
2033     KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
2034     KMP_CHECK_UPDATE(team->t.t_ident, loc);
2035     KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
2036     KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
2037 #if OMPT_SUPPORT
2038     KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.microtask, unwrapped_task);
2039 #endif
2040     KMP_CHECK_UPDATE(team->t.t_invoke, invoker); // TODO move to root, maybe
2041 // TODO: parent_team->t.t_level == INT_MAX ???
2042 #if OMP_40_ENABLED
2043     if (!master_th->th.th_teams_microtask || level > teams_level) {
2044 #endif /* OMP_40_ENABLED */
2045       int new_level = parent_team->t.t_level + 1;
2046       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2047       new_level = parent_team->t.t_active_level + 1;
2048       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2049 #if OMP_40_ENABLED
2050     } else {
2051       // AC: Do not increase parallel level at start of the teams construct
2052       int new_level = parent_team->t.t_level;
2053       KMP_CHECK_UPDATE(team->t.t_level, new_level);
2054       new_level = parent_team->t.t_active_level;
2055       KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2056     }
2057 #endif /* OMP_40_ENABLED */
2058     kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
2059     if (team->t.t_sched.r_sched_type != new_sched.r_sched_type ||
2060         team->t.t_sched.chunk != new_sched.chunk)
2061       team->t.t_sched =
2062           new_sched; // set master's schedule as new run-time schedule
2063 
2064 #if OMP_40_ENABLED
2065     KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
2066 #endif
2067 
2068     // Update the floating point rounding in the team if required.
2069     propagateFPControl(team);
2070 
2071     if (__kmp_tasking_mode != tskm_immediate_exec) {
      // Set master's task team to team's task team. Unless this is a hot team,
      // it should be NULL.
2074       KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2075                        parent_team->t.t_task_team[master_th->th.th_task_state]);
2076       KA_TRACE(20, ("__kmp_fork_call: Master T#%d pushing task_team %p / team "
2077                     "%p, new task_team %p / team %p\n",
2078                     __kmp_gtid_from_thread(master_th),
2079                     master_th->th.th_task_team, parent_team,
2080                     team->t.t_task_team[master_th->th.th_task_state], team));
2081 
2082       if (active_level || master_th->th.th_task_team) {
2083         // Take a memo of master's task_state
2084         KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2085         if (master_th->th.th_task_state_top >=
2086             master_th->th.th_task_state_stack_sz) { // increase size
2087           kmp_uint32 new_size = 2 * master_th->th.th_task_state_stack_sz;
2088           kmp_uint8 *old_stack, *new_stack;
2089           kmp_uint32 i;
2090           new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
2091           for (i = 0; i < master_th->th.th_task_state_stack_sz; ++i) {
2092             new_stack[i] = master_th->th.th_task_state_memo_stack[i];
2093           }
2094           for (i = master_th->th.th_task_state_stack_sz; i < new_size;
2095                ++i) { // zero-init rest of stack
2096             new_stack[i] = 0;
2097           }
2098           old_stack = master_th->th.th_task_state_memo_stack;
2099           master_th->th.th_task_state_memo_stack = new_stack;
2100           master_th->th.th_task_state_stack_sz = new_size;
2101           __kmp_free(old_stack);
2102         }
2103         // Store master's task_state on stack
2104         master_th->th
2105             .th_task_state_memo_stack[master_th->th.th_task_state_top] =
2106             master_th->th.th_task_state;
2107         master_th->th.th_task_state_top++;
2108 #if KMP_NESTED_HOT_TEAMS
2109         if (team == master_th->th.th_hot_teams[active_level].hot_team) {
2110           // Restore master's nested state if nested hot team
2111           master_th->th.th_task_state =
2112               master_th->th
2113                   .th_task_state_memo_stack[master_th->th.th_task_state_top];
2114         } else {
2115 #endif
2116           master_th->th.th_task_state = 0;
2117 #if KMP_NESTED_HOT_TEAMS
2118         }
2119 #endif
2120       }
2121 #if !KMP_NESTED_HOT_TEAMS
2122       KMP_DEBUG_ASSERT((master_th->th.th_task_team == NULL) ||
2123                        (team == root->r.r_hot_team));
2124 #endif
2125     }
2126 
2127     KA_TRACE(
2128         20,
2129         ("__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
2130          gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
2131          team->t.t_nproc));
2132     KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2133                      (team->t.t_master_tid == 0 &&
2134                       (team->t.t_parent == root->r.r_root_team ||
2135                        team->t.t_parent->t.t_serialized)));
2136     KMP_MB();
2137 
2138     /* now, setup the arguments */
2139     argv = (void **)team->t.t_argv;
2140 #if OMP_40_ENABLED
2141     if (ap) {
2142 #endif /* OMP_40_ENABLED */
2143       for (i = argc - 1; i >= 0; --i) {
2144 // TODO: revert workaround for Intel(R) 64 tracker #96
2145 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
2146         void *new_argv = va_arg(*ap, void *);
2147 #else
2148       void *new_argv = va_arg(ap, void *);
2149 #endif
2150         KMP_CHECK_UPDATE(*argv, new_argv);
2151         argv++;
2152       }
2153 #if OMP_40_ENABLED
2154     } else {
2155       for (i = 0; i < argc; ++i) {
2156         // Get args from parent team for teams construct
2157         KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
2158       }
2159     }
2160 #endif /* OMP_40_ENABLED */
2161 
2162     /* now actually fork the threads */
2163     KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
2164     if (!root->r.r_active) // Only do assignment if it prevents cache ping-pong
2165       root->r.r_active = TRUE;
2166 
2167     __kmp_fork_team_threads(root, team, master_th, gtid);
2168     __kmp_setup_icv_copy(team, nthreads,
2169                          &master_th->th.th_current_task->td_icvs, loc);
2170 
2171 #if OMPT_SUPPORT
2172     master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
2173 #endif
2174 
2175     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2176 
2177 #if USE_ITT_BUILD
2178     if (team->t.t_active_level == 1 // only report frames at level 1
2179 #if OMP_40_ENABLED
2180         && !master_th->th.th_teams_microtask // not in teams construct
2181 #endif /* OMP_40_ENABLED */
2182         ) {
2183 #if USE_ITT_NOTIFY
2184       if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2185           (__kmp_forkjoin_frames_mode == 3 ||
2186            __kmp_forkjoin_frames_mode == 1)) {
2187         kmp_uint64 tmp_time = 0;
2188         if (__itt_get_timestamp_ptr)
2189           tmp_time = __itt_get_timestamp();
2190         // Internal fork - report frame begin
2191         master_th->th.th_frame_time = tmp_time;
2192         if (__kmp_forkjoin_frames_mode == 3)
2193           team->t.t_region_time = tmp_time;
2194       } else
2195 // only one notification scheme (either "submit" or "forking/joined", not both)
2196 #endif /* USE_ITT_NOTIFY */
2197           if ((__itt_frame_begin_v3_ptr || KMP_ITT_DEBUG) &&
2198               __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode) {
2199         // Mark start of "parallel" region for VTune.
2200         __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
2201       }
2202     }
2203 #endif /* USE_ITT_BUILD */
2204 
2205     /* now go on and do the work */
2206     KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
2207     KMP_MB();
2208     KF_TRACE(10,
2209              ("__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2210               root, team, master_th, gtid));
2211 
2212 #if USE_ITT_BUILD
2213     if (__itt_stack_caller_create_ptr) {
2214       team->t.t_stack_id =
2215           __kmp_itt_stack_caller_create(); // create new stack stitching id
2216       // before entering fork barrier
2217     }
2218 #endif /* USE_ITT_BUILD */
2219 
2220 #if OMP_40_ENABLED
    // AC: skip __kmp_internal_fork at the teams construct; let only the
    // master threads execute
2223     if (ap)
2224 #endif /* OMP_40_ENABLED */
2225     {
2226       __kmp_internal_fork(loc, gtid, team);
2227       KF_TRACE(10, ("__kmp_internal_fork : after : root=%p, team=%p, "
2228                     "master_th=%p, gtid=%d\n",
2229                     root, team, master_th, gtid));
2230     }
2231 
2232     if (call_context == fork_context_gnu) {
2233       KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2234       return TRUE;
2235     }
2236 
2237     /* Invoke microtask for MASTER thread */
2238     KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
2239                   team->t.t_id, team->t.t_pkfn));
2240   } // END of timer KMP_fork_call block
2241 
2242   {
2243     KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
2244     KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
2245     if (!team->t.t_invoke(gtid)) {
2246       KMP_ASSERT2(0, "cannot invoke microtask for MASTER thread");
2247     }
2248   }
2249   KA_TRACE(20, ("__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
2250                 team->t.t_id, team->t.t_pkfn));
2251   KMP_MB(); /* Flush all pending memory write invalidates.  */
2252 
2253   KA_TRACE(20, ("__kmp_fork_call: parallel exit T#%d\n", gtid));
2254 
2255 #if OMPT_SUPPORT
2256   if (ompt_enabled) {
2257     master_th->th.ompt_thread_info.state = ompt_state_overhead;
2258   }
2259 #endif
2260 
2261   return TRUE;
2262 }
2263 
2264 #if OMPT_SUPPORT
2265 static inline void __kmp_join_restore_state(kmp_info_t *thread,
2266                                             kmp_team_t *team) {
2267   // restore state outside the region
2268   thread->th.ompt_thread_info.state =
2269       ((team->t.t_serialized) ? ompt_state_work_serial
2270                               : ompt_state_work_parallel);
2271 }
2272 
2273 static inline void __kmp_join_ompt(kmp_info_t *thread, kmp_team_t *team,
2274                                    ompt_parallel_id_t parallel_id,
2275                                    fork_context_e fork_context) {
2276   ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
2277   if (ompt_callbacks.ompt_callback(ompt_event_parallel_end)) {
2278     ompt_callbacks.ompt_callback(ompt_event_parallel_end)(
2279         parallel_id, task_info->task_id, OMPT_INVOKER(fork_context));
2280   }
2281 
2282   task_info->frame.reenter_runtime_frame = NULL;
2283   __kmp_join_restore_state(thread, team);
2284 }
2285 #endif
2286 
2287 void __kmp_join_call(ident_t *loc, int gtid
2288 #if OMPT_SUPPORT
2289                      ,
2290                      enum fork_context_e fork_context
2291 #endif
2292 #if OMP_40_ENABLED
2293                      ,
2294                      int exit_teams
2295 #endif /* OMP_40_ENABLED */
2296                      ) {
2297   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call);
2298   kmp_team_t *team;
2299   kmp_team_t *parent_team;
2300   kmp_info_t *master_th;
2301   kmp_root_t *root;
2302   int master_active;
2303   int i;
2304 
2305   KA_TRACE(20, ("__kmp_join_call: enter T#%d\n", gtid));
2306 
2307   /* setup current data */
2308   master_th = __kmp_threads[gtid];
2309   root = master_th->th.th_root;
2310   team = master_th->th.th_team;
2311   parent_team = team->t.t_parent;
2312 
2313   master_th->th.th_ident = loc;
2314 
2315 #if OMPT_SUPPORT
2316   if (ompt_enabled) {
2317     master_th->th.ompt_thread_info.state = ompt_state_overhead;
2318   }
2319 #endif
2320 
2321 #if KMP_DEBUG
2322   if (__kmp_tasking_mode != tskm_immediate_exec && !exit_teams) {
2323     KA_TRACE(20, ("__kmp_join_call: T#%d, old team = %p old task_team = %p, "
2324                   "th_task_team = %p\n",
2325                   __kmp_gtid_from_thread(master_th), team,
2326                   team->t.t_task_team[master_th->th.th_task_state],
2327                   master_th->th.th_task_team));
2328     KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2329                      team->t.t_task_team[master_th->th.th_task_state]);
2330   }
2331 #endif
2332 
2333   if (team->t.t_serialized) {
2334 #if OMP_40_ENABLED
2335     if (master_th->th.th_teams_microtask) {
2336       // We are in teams construct
2337       int level = team->t.t_level;
2338       int tlevel = master_th->th.th_teams_level;
2339       if (level == tlevel) {
        // AC: we did not increment it earlier at the start of the teams
        //     construct, so do it here, at the end of the teams construct
2342         team->t.t_level++;
2343       } else if (level == tlevel + 1) {
2344         // AC: we are exiting parallel inside teams, need to increment
2345         // serialization in order to restore it in the next call to
2346         // __kmpc_end_serialized_parallel
2347         team->t.t_serialized++;
2348       }
2349     }
2350 #endif /* OMP_40_ENABLED */
2351     __kmpc_end_serialized_parallel(loc, gtid);
2352 
2353 #if OMPT_SUPPORT
2354     if (ompt_enabled) {
2355       __kmp_join_restore_state(master_th, parent_team);
2356     }
2357 #endif
2358 
2359     return;
2360   }
2361 
2362   master_active = team->t.t_master_active;
2363 
2364 #if OMP_40_ENABLED
2365   if (!exit_teams)
2366 #endif /* OMP_40_ENABLED */
2367   {
2368     // AC: No barrier for internal teams at exit from teams construct.
2369     //     But there is barrier for external team (league).
2370     __kmp_internal_join(loc, gtid, team);
2371   }
2372 #if OMP_40_ENABLED
2373   else {
2374     master_th->th.th_task_state =
2375         0; // AC: no tasking in teams (out of any parallel)
2376   }
2377 #endif /* OMP_40_ENABLED */
2378 
2379   KMP_MB();
2380 
2381 #if OMPT_SUPPORT
2382   ompt_parallel_id_t parallel_id = team->t.ompt_team_info.parallel_id;
2383 #endif
2384 
2385 #if USE_ITT_BUILD
2386   if (__itt_stack_caller_create_ptr) {
2387     __kmp_itt_stack_caller_destroy(
2388         (__itt_caller)team->t
2389             .t_stack_id); // destroy the stack stitching id after join barrier
2390   }
2391 
2392   // Mark end of "parallel" region for VTune.
2393   if (team->t.t_active_level == 1
2394 #if OMP_40_ENABLED
2395       && !master_th->th.th_teams_microtask /* not in teams construct */
2396 #endif /* OMP_40_ENABLED */
2397       ) {
2398     master_th->th.th_ident = loc;
2399     // only one notification scheme (either "submit" or "forking/joined", not
2400     // both)
2401     if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2402         __kmp_forkjoin_frames_mode == 3)
2403       __kmp_itt_frame_submit(gtid, team->t.t_region_time,
2404                              master_th->th.th_frame_time, 0, loc,
2405                              master_th->th.th_team_nproc, 1);
2406     else if ((__itt_frame_end_v3_ptr || KMP_ITT_DEBUG) &&
2407              !__kmp_forkjoin_frames_mode && __kmp_forkjoin_frames)
2408       __kmp_itt_region_joined(gtid);
2409   } // active_level == 1
2410 #endif /* USE_ITT_BUILD */
2411 
2412 #if OMP_40_ENABLED
2413   if (master_th->th.th_teams_microtask && !exit_teams &&
2414       team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
2415       team->t.t_level == master_th->th.th_teams_level + 1) {
    // AC: We need to leave the team structure intact at the end of a parallel
    // inside the teams construct, so that the same (hot) team works at the
    // next parallel region; only adjust the nesting levels.
2419 
2420     /* Decrement our nested depth level */
2421     team->t.t_level--;
2422     team->t.t_active_level--;
2423     KMP_TEST_THEN_DEC32((kmp_int32 *)&root->r.r_in_parallel);
2424 
2425     /* Restore number of threads in the team if needed */
2426     if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) {
2427       int old_num = master_th->th.th_team_nproc;
2428       int new_num = master_th->th.th_teams_size.nth;
2429       kmp_info_t **other_threads = team->t.t_threads;
2430       team->t.t_nproc = new_num;
2431       for (i = 0; i < old_num; ++i) {
2432         other_threads[i]->th.th_team_nproc = new_num;
2433       }
      // Adjust the states of the unused threads of the team
2435       for (i = old_num; i < new_num; ++i) {
2436         // Re-initialize thread's barrier data.
2437         int b;
2438         kmp_balign_t *balign = other_threads[i]->th.th_bar;
2439         for (b = 0; b < bs_last_barrier; ++b) {
2440           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
2441           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
2442 #if USE_DEBUGGER
2443           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
2444 #endif
2445         }
2446         if (__kmp_tasking_mode != tskm_immediate_exec) {
2447           // Synchronize thread's task state
2448           other_threads[i]->th.th_task_state = master_th->th.th_task_state;
2449         }
2450       }
2451     }
2452 
2453 #if OMPT_SUPPORT
2454     if (ompt_enabled) {
2455       __kmp_join_ompt(master_th, parent_team, parallel_id, fork_context);
2456     }
2457 #endif
2458 
2459     return;
2460   }
2461 #endif /* OMP_40_ENABLED */
2462 
2463   /* do cleanup and restore the parent team */
2464   master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
2465   master_th->th.th_local.this_construct = team->t.t_master_this_cons;
2466 
2467   master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2468 
2469   /* jc: The following lock has instructions with REL and ACQ semantics,
2470      separating the parallel user code called in this parallel region
2471      from the serial user code called after this function returns. */
2472   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2473 
2474 #if OMP_40_ENABLED
2475   if (!master_th->th.th_teams_microtask ||
2476       team->t.t_level > master_th->th.th_teams_level)
2477 #endif /* OMP_40_ENABLED */
2478   {
2479     /* Decrement our nested depth level */
2480     KMP_TEST_THEN_DEC32((kmp_int32 *)&root->r.r_in_parallel);
2481   }
2482   KMP_DEBUG_ASSERT(root->r.r_in_parallel >= 0);
2483 
2484 #if OMPT_SUPPORT && OMPT_TRACE
2485   if (ompt_enabled) {
2486     ompt_task_info_t *task_info = __ompt_get_taskinfo(0);
2487     if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
2488       ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
2489           parallel_id, task_info->task_id);
2490     }
2491     task_info->frame.exit_runtime_frame = NULL;
2492     task_info->task_id = 0;
2493   }
2494 #endif
2495 
2496   KF_TRACE(10, ("__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2497                 master_th, team));
2498   __kmp_pop_current_task_from_thread(master_th);
2499 
2500 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
2501   // Restore master thread's partition.
2502   master_th->th.th_first_place = team->t.t_first_place;
2503   master_th->th.th_last_place = team->t.t_last_place;
2504 #endif /* OMP_40_ENABLED */
2505 
2506   updateHWFPControl(team);
2507 
2508   if (root->r.r_active != master_active)
2509     root->r.r_active = master_active;
2510 
2511   __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2512                             master_th)); // this will free worker threads
2513 
2514   /* this race was fun to find. make sure the following is in the critical
2515      region otherwise assertions may fail occasionally since the old team may be
2516      reallocated and the hierarchy appears inconsistent. it is actually safe to
2517      run and won't cause any bugs, but will cause those assertion failures. it's
2518      only one deref&assign so might as well put this in the critical region */
2519   master_th->th.th_team = parent_team;
2520   master_th->th.th_team_nproc = parent_team->t.t_nproc;
2521   master_th->th.th_team_master = parent_team->t.t_threads[0];
2522   master_th->th.th_team_serialized = parent_team->t.t_serialized;
2523 
2524   /* restore serialized team, if need be */
2525   if (parent_team->t.t_serialized &&
2526       parent_team != master_th->th.th_serial_team &&
2527       parent_team != root->r.r_root_team) {
2528     __kmp_free_team(root,
2529                     master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
2530     master_th->th.th_serial_team = parent_team;
2531   }
2532 
2533   if (__kmp_tasking_mode != tskm_immediate_exec) {
2534     if (master_th->th.th_task_state_top >
2535         0) { // Restore task state from memo stack
2536       KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2537       // Remember master's state if we re-use this nested hot team
2538       master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] =
2539           master_th->th.th_task_state;
2540       --master_th->th.th_task_state_top; // pop
2541       // Now restore state at this level
2542       master_th->th.th_task_state =
2543           master_th->th
2544               .th_task_state_memo_stack[master_th->th.th_task_state_top];
2545     }
2546     // Copy the task team from the parent team to the master thread
2547     master_th->th.th_task_team =
2548         parent_team->t.t_task_team[master_th->th.th_task_state];
2549     KA_TRACE(20,
2550              ("__kmp_join_call: Master T#%d restoring task_team %p / team %p\n",
2551               __kmp_gtid_from_thread(master_th), master_th->th.th_task_team,
2552               parent_team));
2553   }
2554 
2555   // TODO: GEH - cannot do this assertion because root thread not set up as
2556   // executing
2557   // KMP_ASSERT( master_th->th.th_current_task->td_flags.executing == 0 );
2558   master_th->th.th_current_task->td_flags.executing = 1;
2559 
2560   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2561 
2562 #if OMPT_SUPPORT
2563   if (ompt_enabled) {
2564     __kmp_join_ompt(master_th, parent_team, parallel_id, fork_context);
2565   }
2566 #endif
2567 
2568   KMP_MB();
2569   KA_TRACE(20, ("__kmp_join_call: exit T#%d\n", gtid));
2570 }
2571 
2572 /* Check whether we should push an internal control record onto the
2573    serial team stack.  If so, do it.  */
2574 void __kmp_save_internal_controls(kmp_info_t *thread) {
2575 
2576   if (thread->th.th_team != thread->th.th_serial_team) {
2577     return;
2578   }
2579   if (thread->th.th_team->t.t_serialized > 1) {
2580     int push = 0;
2581 
2582     if (thread->th.th_team->t.t_control_stack_top == NULL) {
2583       push = 1;
2584     } else {
2585       if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
2586           thread->th.th_team->t.t_serialized) {
2587         push = 1;
2588       }
2589     }
2590     if (push) { /* push a record on the serial team's stack */
2591       kmp_internal_control_t *control =
2592           (kmp_internal_control_t *)__kmp_allocate(
2593               sizeof(kmp_internal_control_t));
2594 
2595       copy_icvs(control, &thread->th.th_current_task->td_icvs);
2596 
2597       control->serial_nesting_level = thread->th.th_team->t.t_serialized;
2598 
2599       control->next = thread->th.th_team->t.t_control_stack_top;
2600       thread->th.th_team->t.t_control_stack_top = control;
2601     }
2602   }
2603 }
2604 
2605 /* Changes set_nproc */
2606 void __kmp_set_num_threads(int new_nth, int gtid) {
2607   kmp_info_t *thread;
2608   kmp_root_t *root;
2609 
2610   KF_TRACE(10, ("__kmp_set_num_threads: new __kmp_nth = %d\n", new_nth));
2611   KMP_DEBUG_ASSERT(__kmp_init_serial);
2612 
2613   if (new_nth < 1)
2614     new_nth = 1;
2615   else if (new_nth > __kmp_max_nth)
2616     new_nth = __kmp_max_nth;
2617 
2618   KMP_COUNT_VALUE(OMP_set_numthreads, new_nth);
2619   thread = __kmp_threads[gtid];
2620 
2621   __kmp_save_internal_controls(thread);
2622 
2623   set__nproc(thread, new_nth);
2624 
2625   // If this omp_set_num_threads() call will cause the hot team size to be
2626   // reduced (in the absence of a num_threads clause), then reduce it now,
2627   // rather than waiting for the next parallel region.
2628   root = thread->th.th_root;
2629   if (__kmp_init_parallel && (!root->r.r_active) &&
2630       (root->r.r_hot_team->t.t_nproc > new_nth)
2631 #if KMP_NESTED_HOT_TEAMS
2632       && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode
2633 #endif
2634       ) {
2635     kmp_team_t *hot_team = root->r.r_hot_team;
2636     int f;
2637 
2638     __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2639 
2640     // Release the extra threads we don't need any more.
2641     for (f = new_nth; f < hot_team->t.t_nproc; f++) {
2642       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2643       if (__kmp_tasking_mode != tskm_immediate_exec) {
2644         // When decreasing team size, threads no longer in the team should unref
2645         // task team.
2646         hot_team->t.t_threads[f]->th.th_task_team = NULL;
2647       }
2648       __kmp_free_thread(hot_team->t.t_threads[f]);
2649       hot_team->t.t_threads[f] = NULL;
2650     }
2651     hot_team->t.t_nproc = new_nth;
2652 #if KMP_NESTED_HOT_TEAMS
2653     if (thread->th.th_hot_teams) {
2654       KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team);
2655       thread->th.th_hot_teams[0].hot_team_nth = new_nth;
2656     }
2657 #endif
2658 
2659     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2660 
2661     // Update the t_nproc field in the threads that are still active.
2662     for (f = 0; f < new_nth; f++) {
2663       KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
2664       hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
2665     }
    // Special flag to mark a size change via omp_set_num_threads()
2667     hot_team->t.t_size_changed = -1;
2668   }
2669 }
2670 
2671 /* Changes max_active_levels */
2672 void __kmp_set_max_active_levels(int gtid, int max_active_levels) {
2673   kmp_info_t *thread;
2674 
2675   KF_TRACE(10, ("__kmp_set_max_active_levels: new max_active_levels for thread "
2676                 "%d = (%d)\n",
2677                 gtid, max_active_levels));
2678   KMP_DEBUG_ASSERT(__kmp_init_serial);
2679 
2680   // validate max_active_levels
2681   if (max_active_levels < 0) {
2682     KMP_WARNING(ActiveLevelsNegative, max_active_levels);
2683     // We ignore this call if the user has specified a negative value.
2684     // The current setting won't be changed. The last valid setting will be
2685     // used. A warning will be issued (if warnings are allowed as controlled by
2686     // the KMP_WARNINGS env var).
2687     KF_TRACE(10, ("__kmp_set_max_active_levels: the call is ignored: new "
2688                   "max_active_levels for thread %d = (%d)\n",
2689                   gtid, max_active_levels));
2690     return;
2691   }
2692   if (max_active_levels <= KMP_MAX_ACTIVE_LEVELS_LIMIT) {
2693     // it's OK, the max_active_levels is within the valid range: [ 0;
2694     // KMP_MAX_ACTIVE_LEVELS_LIMIT ]
2695     // We allow a zero value. (implementation defined behavior)
2696   } else {
2697     KMP_WARNING(ActiveLevelsExceedLimit, max_active_levels,
2698                 KMP_MAX_ACTIVE_LEVELS_LIMIT);
2699     max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT;
2700     // Current upper limit is MAX_INT. (implementation defined behavior)
2701     // If the input exceeds the upper limit, we correct the input to be the
2702     // upper limit. (implementation defined behavior)
    // Actually, the flow should never get here as long as the limit stays at
    // MAX_INT.
2704   }
2705   KF_TRACE(10, ("__kmp_set_max_active_levels: after validation: new "
2706                 "max_active_levels for thread %d = (%d)\n",
2707                 gtid, max_active_levels));
2708 
2709   thread = __kmp_threads[gtid];
2710 
2711   __kmp_save_internal_controls(thread);
2712 
2713   set__max_active_levels(thread, max_active_levels);
2714 }
2715 
2716 /* Gets max_active_levels */
2717 int __kmp_get_max_active_levels(int gtid) {
2718   kmp_info_t *thread;
2719 
2720   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d\n", gtid));
2721   KMP_DEBUG_ASSERT(__kmp_init_serial);
2722 
2723   thread = __kmp_threads[gtid];
2724   KMP_DEBUG_ASSERT(thread->th.th_current_task);
2725   KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d, curtask=%p, "
2726                 "curtask_maxaclevel=%d\n",
2727                 gtid, thread->th.th_current_task,
2728                 thread->th.th_current_task->td_icvs.max_active_levels));
2729   return thread->th.th_current_task->td_icvs.max_active_levels;
2730 }
2731 
2732 /* Changes def_sched_var ICV values (run-time schedule kind and chunk) */
2733 void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) {
2734   kmp_info_t *thread;
2735   //    kmp_team_t *team;
2736 
2737   KF_TRACE(10, ("__kmp_set_schedule: new schedule for thread %d = (%d, %d)\n",
2738                 gtid, (int)kind, chunk));
2739   KMP_DEBUG_ASSERT(__kmp_init_serial);
2740 
2741   // Check if the kind parameter is valid, correct if needed.
2742   // Valid parameters should fit in one of two intervals - standard or extended:
2743   //       <lower>, <valid>, <upper_std>, <lower_ext>, <valid>, <upper>
2744   // 2008-01-25: 0,  1 - 4,       5,         100,     101 - 102, 103
2745   if (kind <= kmp_sched_lower || kind >= kmp_sched_upper ||
2746       (kind <= kmp_sched_lower_ext && kind >= kmp_sched_upper_std)) {
2747     // TODO: Hint needs attention in case we change the default schedule.
2748     __kmp_msg(kmp_ms_warning, KMP_MSG(ScheduleKindOutOfRange, kind),
2749               KMP_HNT(DefaultScheduleKindUsed, "static, no chunk"),
2750               __kmp_msg_null);
2751     kind = kmp_sched_default;
2752     chunk = 0; // ignore chunk value in case of bad kind
2753   }
2754 
2755   thread = __kmp_threads[gtid];
2756 
2757   __kmp_save_internal_controls(thread);
2758 
2759   if (kind < kmp_sched_upper_std) {
2760     if (kind == kmp_sched_static && chunk < KMP_DEFAULT_CHUNK) {
      // differentiate static chunked vs. unchunked: the chunk should be
      // invalid to indicate the unchunked schedule (which is the default)
2763       thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static;
2764     } else {
2765       thread->th.th_current_task->td_icvs.sched.r_sched_type =
2766           __kmp_sch_map[kind - kmp_sched_lower - 1];
2767     }
2768   } else {
2769     //    __kmp_sch_map[ kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2770     //    kmp_sched_lower - 2 ];
2771     thread->th.th_current_task->td_icvs.sched.r_sched_type =
2772         __kmp_sch_map[kind - kmp_sched_lower_ext + kmp_sched_upper_std -
2773                       kmp_sched_lower - 2];
2774   }
2775   if (kind == kmp_sched_auto || chunk < 1) {
2776     // ignore parameter chunk for schedule auto
2777     thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK;
2778   } else {
2779     thread->th.th_current_task->td_icvs.sched.chunk = chunk;
2780   }
2781 }
2782 
2783 /* Gets def_sched_var ICV values */
2784 void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) {
2785   kmp_info_t *thread;
2786   enum sched_type th_type;
2787 
2788   KF_TRACE(10, ("__kmp_get_schedule: thread %d\n", gtid));
2789   KMP_DEBUG_ASSERT(__kmp_init_serial);
2790 
2791   thread = __kmp_threads[gtid];
2792 
2793   th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type;
2794 
2795   switch (th_type) {
2796   case kmp_sch_static:
2797   case kmp_sch_static_greedy:
2798   case kmp_sch_static_balanced:
2799     *kind = kmp_sched_static;
2800     *chunk = 0; // chunk was not set; report zero to indicate that
2801     return;
2802   case kmp_sch_static_chunked:
2803     *kind = kmp_sched_static;
2804     break;
2805   case kmp_sch_dynamic_chunked:
2806     *kind = kmp_sched_dynamic;
2807     break;
2808   case kmp_sch_guided_chunked:
2809   case kmp_sch_guided_iterative_chunked:
2810   case kmp_sch_guided_analytical_chunked:
2811     *kind = kmp_sched_guided;
2812     break;
2813   case kmp_sch_auto:
2814     *kind = kmp_sched_auto;
2815     break;
2816   case kmp_sch_trapezoidal:
2817     *kind = kmp_sched_trapezoidal;
2818     break;
2819 #if KMP_STATIC_STEAL_ENABLED
2820   case kmp_sch_static_steal:
2821     *kind = kmp_sched_static_steal;
2822     break;
2823 #endif
2824   default:
2825     KMP_FATAL(UnknownSchedulingType, th_type);
2826   }
2827 
2828   *chunk = thread->th.th_current_task->td_icvs.sched.chunk;
2829 }
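
// A hedged round-trip sketch of the two routines above (gtid obtained via
// __kmp_entry_gtid(); enum values as listed in the interval comment in
// __kmp_set_schedule):
//
//   int gtid = __kmp_entry_gtid();
//   __kmp_set_schedule(gtid, kmp_sched_guided, 8); // store a guided kind, chunk 8
//   kmp_sched_t kind;
//   int chunk;
//   __kmp_get_schedule(gtid, &kind, &chunk); // kind == kmp_sched_guided, chunk == 8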
2830 
2831 int __kmp_get_ancestor_thread_num(int gtid, int level) {
2832 
2833   int ii, dd;
2834   kmp_team_t *team;
2835   kmp_info_t *thr;
2836 
2837   KF_TRACE(10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level));
2838   KMP_DEBUG_ASSERT(__kmp_init_serial);
2839 
2840   // validate level
2841   if (level == 0)
2842     return 0;
2843   if (level < 0)
2844     return -1;
2845   thr = __kmp_threads[gtid];
2846   team = thr->th.th_team;
2847   ii = team->t.t_level;
2848   if (level > ii)
2849     return -1;
2850 
2851 #if OMP_40_ENABLED
2852   if (thr->th.th_teams_microtask) {
2853     // AC: we are in teams region where multiple nested teams have same level
2854     int tlevel = thr->th.th_teams_level; // the level of the teams construct
2855     if (level <=
2856         tlevel) { // otherwise usual algorithm works (will not touch the teams)
2857       KMP_DEBUG_ASSERT(ii >= tlevel);
2858       // AC: since we have to walk past the teams construct, artificially
2859       // increase ii
2860       if (ii == tlevel) {
2861         ii += 2; // three teams have same level
2862       } else {
2863         ii++; // two teams have same level
2864       }
2865     }
2866   }
2867 #endif
2868 
2869   if (ii == level)
2870     return __kmp_tid_from_gtid(gtid);
2871 
2872   dd = team->t.t_serialized;
2873   level++;
2874   while (ii > level) {
2875     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2876     }
2877     if ((team->t.t_serialized) && (!dd)) {
2878       team = team->t.t_parent;
2879       continue;
2880     }
2881     if (ii > level) {
2882       team = team->t.t_parent;
2883       dd = team->t.t_serialized;
2884       ii--;
2885     }
2886   }
2887 
2888   return (dd > 1) ? (0) : (team->t.t_master_tid);
2889 }
2890 
2891 int __kmp_get_team_size(int gtid, int level) {
2892 
2893   int ii, dd;
2894   kmp_team_t *team;
2895   kmp_info_t *thr;
2896 
2897   KF_TRACE(10, ("__kmp_get_team_size: thread %d %d\n", gtid, level));
2898   KMP_DEBUG_ASSERT(__kmp_init_serial);
2899 
2900   // validate level
2901   if (level == 0)
2902     return 1;
2903   if (level < 0)
2904     return -1;
2905   thr = __kmp_threads[gtid];
2906   team = thr->th.th_team;
2907   ii = team->t.t_level;
2908   if (level > ii)
2909     return -1;
2910 
2911 #if OMP_40_ENABLED
2912   if (thr->th.th_teams_microtask) {
2913     // AC: we are in teams region where multiple nested teams have same level
2914     int tlevel = thr->th.th_teams_level; // the level of the teams construct
2915     if (level <=
2916         tlevel) { // otherwise usual algorithm works (will not touch the teams)
2917       KMP_DEBUG_ASSERT(ii >= tlevel);
2918       // AC: since we have to walk past the teams construct, artificially
2919       // increase ii
2920       if (ii == tlevel) {
2921         ii += 2; // three teams have same level
2922       } else {
2923         ii++; // two teams have same level
2924       }
2925     }
2926   }
2927 #endif
2928 
2929   while (ii > level) {
2930     for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
2931     }
2932     if (team->t.t_serialized && (!dd)) {
2933       team = team->t.t_parent;
2934       continue;
2935     }
2936     if (ii > level) {
2937       team = team->t.t_parent;
2938       ii--;
2939     }
2940   }
2941 
2942   return team->t.t_nproc;
2943 }
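
// Both routines above walk up the t_parent chain from the current team,
// counting each serialized nesting level (t_serialized) as a level of its own,
// until ii reaches the requested level. Level 0 is the outermost (implicit)
// team, so the answers there are thread 0 and size 1; at the current level the
// answers are the caller's own tid and the innermost team's t_nproc.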
2944 
2945 kmp_r_sched_t __kmp_get_schedule_global() {
2946   // This routine exists because the pairs (__kmp_sched, __kmp_chunk) and
2947   // (__kmp_static, __kmp_guided) may be changed by kmp_set_defaults
2948   // independently, so the up-to-date schedule can be obtained here.
2949 
2950   kmp_r_sched_t r_sched;
2951 
2952   // create the schedule from 4 globals: __kmp_sched, __kmp_chunk, __kmp_static,
2953   // __kmp_guided. __kmp_sched should keep its original value, so that the user
2954   // can set KMP_SCHEDULE multiple times and thus have different run-time
2955   // schedules in different roots (even in OMP 2.5)
2956   if (__kmp_sched == kmp_sch_static) {
2957     r_sched.r_sched_type = __kmp_static; // replace STATIC with more detailed
2958     // schedule (balanced or greedy)
2959   } else if (__kmp_sched == kmp_sch_guided_chunked) {
2960     r_sched.r_sched_type = __kmp_guided; // replace GUIDED with more detailed
2961     // schedule (iterative or analytical)
2962   } else {
2963     r_sched.r_sched_type =
2964         __kmp_sched; // (STATIC_CHUNKED), or (DYNAMIC_CHUNKED), or other
2965   }
2966 
2967   if (__kmp_chunk < KMP_DEFAULT_CHUNK) { // __kmp_chunk may be wrong here (if it
2968     // was never set)
2969     r_sched.chunk = KMP_DEFAULT_CHUNK;
2970   } else {
2971     r_sched.chunk = __kmp_chunk;
2972   }
2973 
2974   return r_sched;
2975 }
2976 
2977 /* Allocate (realloc == FALSE) or reallocate (realloc == TRUE)
2978    at least argc number of *t_argv entries for the requested team. */
2979 static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {
2980 
2981   KMP_DEBUG_ASSERT(team);
2982   if (!realloc || argc > team->t.t_max_argc) {
2983 
2984     KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
2985                    "current entries=%d\n",
2986                    team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
2987     /* if previously allocated heap space for args, free them */
2988     if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
2989       __kmp_free((void *)team->t.t_argv);
2990 
2991     if (argc <= KMP_INLINE_ARGV_ENTRIES) {
2992       /* use unused space in the cache line for arguments */
2993       team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
2994       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
2995                      "argv entries\n",
2996                      team->t.t_id, team->t.t_max_argc));
2997       team->t.t_argv = &team->t.t_inline_argv[0];
2998       if (__kmp_storage_map) {
2999         __kmp_print_storage_map_gtid(
3000             -1, &team->t.t_inline_argv[0],
3001             &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
3002             (sizeof(void *) * KMP_INLINE_ARGV_ENTRIES), "team_%d.t_inline_argv",
3003             team->t.t_id);
3004       }
3005     } else {
3006       /* allocate space for arguments in the heap */
3007       team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
3008                                ? KMP_MIN_MALLOC_ARGV_ENTRIES
3009                                : 2 * argc;
3010       KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
3011                      "argv entries\n",
3012                      team->t.t_id, team->t.t_max_argc));
3013       team->t.t_argv =
3014           (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
3015       if (__kmp_storage_map) {
3016         __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
3017                                      &team->t.t_argv[team->t.t_max_argc],
3018                                      sizeof(void *) * team->t.t_max_argc,
3019                                      "team_%d.t_argv", team->t.t_id);
3020       }
3021     }
3022   }
3023 }
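
// Sizing sketch for the routine above, assuming for illustration that
// KMP_INLINE_ARGV_ENTRIES is 10 and KMP_MIN_MALLOC_ARGV_ENTRIES is 100 (the
// real constants are defined elsewhere in the runtime):
//   argc ==  4  ->  t_argv points at t_inline_argv, t_max_argc == 10
//   argc == 40  ->  heap allocation, t_max_argc == 100 (the minimum)
//   argc == 80  ->  heap allocation, t_max_argc == 160 (2 * argc)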
3024 
3025 static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
3026   int i;
3027   int num_disp_buff = max_nth > 1 ? __kmp_dispatch_num_buffers : 2;
3028   team->t.t_threads =
3029       (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth);
3030   team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
3031       sizeof(dispatch_shared_info_t) * num_disp_buff);
3032   team->t.t_dispatch =
3033       (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth);
3034   team->t.t_implicit_task_taskdata =
3035       (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth);
3036   team->t.t_max_nproc = max_nth;
3037 
3038   /* setup dispatch buffers */
3039   for (i = 0; i < num_disp_buff; ++i) {
3040     team->t.t_disp_buffer[i].buffer_index = i;
3041 #if OMP_45_ENABLED
3042     team->t.t_disp_buffer[i].doacross_buf_idx = i;
3043 #endif
3044   }
3045 }
3046 
3047 static void __kmp_free_team_arrays(kmp_team_t *team) {
3048   /* Note: this does not free the threads in t_threads (__kmp_free_threads) */
3049   int i;
3050   for (i = 0; i < team->t.t_max_nproc; ++i) {
3051     if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
3052       __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
3053       team->t.t_dispatch[i].th_disp_buffer = NULL;
3054     }; // if
3055   }; // for
3056   __kmp_free(team->t.t_threads);
3057   __kmp_free(team->t.t_disp_buffer);
3058   __kmp_free(team->t.t_dispatch);
3059   __kmp_free(team->t.t_implicit_task_taskdata);
3060   team->t.t_threads = NULL;
3061   team->t.t_disp_buffer = NULL;
3062   team->t.t_dispatch = NULL;
3063   team->t.t_implicit_task_taskdata = NULL;
3064 }
3065 
3066 static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
3067   kmp_info_t **oldThreads = team->t.t_threads;
3068 
3069   __kmp_free(team->t.t_disp_buffer);
3070   __kmp_free(team->t.t_dispatch);
3071   __kmp_free(team->t.t_implicit_task_taskdata);
3072   __kmp_allocate_team_arrays(team, max_nth);
3073 
3074   KMP_MEMCPY(team->t.t_threads, oldThreads,
3075              team->t.t_nproc * sizeof(kmp_info_t *));
3076 
3077   __kmp_free(oldThreads);
3078 }
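
// Note that only the t_threads contents survive the reallocation above: the
// dispatch buffers, dispatch array and implicit task data are freed and
// re-created empty by __kmp_allocate_team_arrays, while the old thread
// pointers are copied into the new, larger t_threads array.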
3079 
3080 static kmp_internal_control_t __kmp_get_global_icvs(void) {
3081 
3082   kmp_r_sched_t r_sched =
3083       __kmp_get_schedule_global(); // get current state of scheduling globals
3084 
3085 #if OMP_40_ENABLED
3086   KMP_DEBUG_ASSERT(__kmp_nested_proc_bind.used > 0);
3087 #endif /* OMP_40_ENABLED */
3088 
3089   kmp_internal_control_t g_icvs = {
3090     0, // int serial_nesting_level; //corresponds to value of th_team_serialized
3091     (kmp_int8)__kmp_dflt_nested, // int nested; //internal control
3092     // for nested parallelism (per thread)
3093     (kmp_int8)__kmp_global.g.g_dynamic, // internal control for dynamic
3094     // adjustment of threads (per thread)
3095     (kmp_int8)__kmp_env_blocktime, // int bt_set; //internal control for
3096     // whether blocktime is explicitly set
3097     __kmp_dflt_blocktime, // int blocktime; //internal control for blocktime
3098 #if KMP_USE_MONITOR
3099     __kmp_bt_intervals, // int bt_intervals; //internal control for blocktime
3100 // intervals
3101 #endif
3102     __kmp_dflt_team_nth, // int nproc; //internal control for # of threads for
3103     // next parallel region (per thread)
3104     // (use a max ub on value if __kmp_parallel_initialize not called yet)
3105     __kmp_dflt_max_active_levels, // int max_active_levels; //internal control
3106     // for max_active_levels
3107     r_sched, // kmp_r_sched_t sched; //internal control for runtime schedule
3108 // {sched,chunk} pair
3109 #if OMP_40_ENABLED
3110     __kmp_nested_proc_bind.bind_types[0],
3111     __kmp_default_device,
3112 #endif /* OMP_40_ENABLED */
3113     NULL // struct kmp_internal_control *next;
3114   };
3115 
3116   return g_icvs;
3117 }
3118 
3119 static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {
3120 
3121   kmp_internal_control_t gx_icvs;
3122   gx_icvs.serial_nesting_level =
3123       0; // probably = team->t.t_serialized, as in __kmp_save_internal_controls
3124   copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
3125   gx_icvs.next = NULL;
3126 
3127   return gx_icvs;
3128 }
3129 
3130 static void __kmp_initialize_root(kmp_root_t *root) {
3131   int f;
3132   kmp_team_t *root_team;
3133   kmp_team_t *hot_team;
3134   int hot_team_max_nth;
3135   kmp_r_sched_t r_sched =
3136       __kmp_get_schedule_global(); // get current state of scheduling globals
3137   kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3138   KMP_DEBUG_ASSERT(root);
3139   KMP_ASSERT(!root->r.r_begin);
3140 
3141   /* setup the root state structure */
3142   __kmp_init_lock(&root->r.r_begin_lock);
3143   root->r.r_begin = FALSE;
3144   root->r.r_active = FALSE;
3145   root->r.r_in_parallel = 0;
3146   root->r.r_blocktime = __kmp_dflt_blocktime;
3147   root->r.r_nested = __kmp_dflt_nested;
3148   root->r.r_cg_nthreads = 1;
3149 
3150   /* setup the root team for this task */
3151   /* allocate the root team structure */
3152   KF_TRACE(10, ("__kmp_initialize_root: before root_team\n"));
3153 
3154   root_team =
3155       __kmp_allocate_team(root,
3156                           1, // new_nproc
3157                           1, // max_nproc
3158 #if OMPT_SUPPORT
3159                           0, // root parallel id
3160 #endif
3161 #if OMP_40_ENABLED
3162                           __kmp_nested_proc_bind.bind_types[0],
3163 #endif
3164                           &r_icvs,
3165                           0 // argc
3166                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3167                           );
3168 #if USE_DEBUGGER
3169   // Non-NULL value should be assigned to make the debugger display the root
3170   // team.
3171   TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)(~0));
3172 #endif
3173 
3174   KF_TRACE(10, ("__kmp_initialize_root: after root_team = %p\n", root_team));
3175 
3176   root->r.r_root_team = root_team;
3177   root_team->t.t_control_stack_top = NULL;
3178 
3179   /* initialize root team */
3180   root_team->t.t_threads[0] = NULL;
3181   root_team->t.t_nproc = 1;
3182   root_team->t.t_serialized = 1;
3183   // TODO???: root_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3184   root_team->t.t_sched.r_sched_type = r_sched.r_sched_type;
3185   root_team->t.t_sched.chunk = r_sched.chunk;
3186   KA_TRACE(
3187       20,
3188       ("__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3189        root_team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
3190 
3191   /* setup the  hot team for this task */
3192   /* allocate the hot team structure */
3193   KF_TRACE(10, ("__kmp_initialize_root: before hot_team\n"));
3194 
3195   hot_team =
3196       __kmp_allocate_team(root,
3197                           1, // new_nproc
3198                           __kmp_dflt_team_nth_ub * 2, // max_nproc
3199 #if OMPT_SUPPORT
3200                           0, // root parallel id
3201 #endif
3202 #if OMP_40_ENABLED
3203                           __kmp_nested_proc_bind.bind_types[0],
3204 #endif
3205                           &r_icvs,
3206                           0 // argc
3207                           USE_NESTED_HOT_ARG(NULL) // master thread is unknown
3208                           );
3209   KF_TRACE(10, ("__kmp_initialize_root: after hot_team = %p\n", hot_team));
3210 
3211   root->r.r_hot_team = hot_team;
3212   root_team->t.t_control_stack_top = NULL;
3213 
3214   /* first-time initialization */
3215   hot_team->t.t_parent = root_team;
3216 
3217   /* initialize hot team */
3218   hot_team_max_nth = hot_team->t.t_max_nproc;
3219   for (f = 0; f < hot_team_max_nth; ++f) {
3220     hot_team->t.t_threads[f] = NULL;
3221   }; // for
3222   hot_team->t.t_nproc = 1;
3223   // TODO???: hot_team->t.t_max_active_levels = __kmp_dflt_max_active_levels;
3224   hot_team->t.t_sched.r_sched_type = r_sched.r_sched_type;
3225   hot_team->t.t_sched.chunk = r_sched.chunk;
3226   hot_team->t.t_size_changed = 0;
3227 }
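
// After __kmp_initialize_root the root owns two teams: a root team of a single
// (serialized) thread representing the sequential part of the program, and a
// hot team, parented to the root team, whose thread slots (up to
// __kmp_dflt_team_nth_ub * 2) are kept around for reuse by top-level parallel
// regions.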
3228 
3229 #ifdef KMP_DEBUG
3230 
3231 typedef struct kmp_team_list_item {
3232   kmp_team_p const *entry;
3233   struct kmp_team_list_item *next;
3234 } kmp_team_list_item_t;
3235 typedef kmp_team_list_item_t *kmp_team_list_t;
3236 
3237 static void __kmp_print_structure_team_accum( // Add team to list of teams.
3238     kmp_team_list_t list, // List of teams.
3239     kmp_team_p const *team // Team to add.
3240     ) {
3241 
3242   // List must terminate with item where both entry and next are NULL.
3243   // Team is added to the list only once.
3244   // List is sorted in ascending order by team id.
3245   // Team id is *not* a key.
3246 
3247   kmp_team_list_t l;
3248 
3249   KMP_DEBUG_ASSERT(list != NULL);
3250   if (team == NULL) {
3251     return;
3252   }; // if
3253 
3254   __kmp_print_structure_team_accum(list, team->t.t_parent);
3255   __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3256 
3257   // Search list for the team.
3258   l = list;
3259   while (l->next != NULL && l->entry != team) {
3260     l = l->next;
3261   }; // while
3262   if (l->next != NULL) {
3263     return; // Team has been added before, exit.
3264   }; // if
3265 
3266   // Team is not found. Search list again for insertion point.
3267   l = list;
3268   while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3269     l = l->next;
3270   }; // while
3271 
3272   // Insert team.
3273   {
3274     kmp_team_list_item_t *item = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
3275         sizeof(kmp_team_list_item_t));
3276     *item = *l;
3277     l->entry = team;
3278     l->next = item;
3279   }
3280 }
3281 
3282 static void __kmp_print_structure_team(char const *title,
3283                                        kmp_team_p const *team) {
3285   __kmp_printf("%s", title);
3286   if (team != NULL) {
3287     __kmp_printf("%2x %p\n", team->t.t_id, team);
3288   } else {
3289     __kmp_printf(" - (nil)\n");
3290   }; // if
3291 }
3292 
3293 static void __kmp_print_structure_thread(char const *title,
3294                                          kmp_info_p const *thread) {
3295   __kmp_printf("%s", title);
3296   if (thread != NULL) {
3297     __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread);
3298   } else {
3299     __kmp_printf(" - (nil)\n");
3300   }; // if
3301 }
3302 
3303 void __kmp_print_structure(void) {
3304 
3305   kmp_team_list_t list;
3306 
3307   // Initialize list of teams.
3308   list =
3309       (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(sizeof(kmp_team_list_item_t));
3310   list->entry = NULL;
3311   list->next = NULL;
3312 
3313   __kmp_printf("\n------------------------------\nGlobal Thread "
3314                "Table\n------------------------------\n");
3315   {
3316     int gtid;
3317     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3318       __kmp_printf("%2d", gtid);
3319       if (__kmp_threads != NULL) {
3320         __kmp_printf(" %p", __kmp_threads[gtid]);
3321       }; // if
3322       if (__kmp_root != NULL) {
3323         __kmp_printf(" %p", __kmp_root[gtid]);
3324       }; // if
3325       __kmp_printf("\n");
3326     }; // for gtid
3327   }
3328 
3329   // Print out __kmp_threads array.
3330   __kmp_printf("\n------------------------------\nThreads\n--------------------"
3331                "----------\n");
3332   if (__kmp_threads != NULL) {
3333     int gtid;
3334     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3335       kmp_info_t const *thread = __kmp_threads[gtid];
3336       if (thread != NULL) {
3337         __kmp_printf("GTID %2d %p:\n", gtid, thread);
3338         __kmp_printf("    Our Root:        %p\n", thread->th.th_root);
3339         __kmp_print_structure_team("    Our Team:     ", thread->th.th_team);
3340         __kmp_print_structure_team("    Serial Team:  ",
3341                                    thread->th.th_serial_team);
3342         __kmp_printf("    Threads:      %2d\n", thread->th.th_team_nproc);
3343         __kmp_print_structure_thread("    Master:       ",
3344                                      thread->th.th_team_master);
3345         __kmp_printf("    Serialized?:  %2d\n", thread->th.th_team_serialized);
3346         __kmp_printf("    Set NProc:    %2d\n", thread->th.th_set_nproc);
3347 #if OMP_40_ENABLED
3348         __kmp_printf("    Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
3349 #endif
3350         __kmp_print_structure_thread("    Next in pool: ",
3351                                      thread->th.th_next_pool);
3352         __kmp_printf("\n");
3353         __kmp_print_structure_team_accum(list, thread->th.th_team);
3354         __kmp_print_structure_team_accum(list, thread->th.th_serial_team);
3355       }; // if
3356     }; // for gtid
3357   } else {
3358     __kmp_printf("Threads array is not allocated.\n");
3359   }; // if
3360 
3361   // Print out __kmp_root array.
3362   __kmp_printf("\n------------------------------\nUbers\n----------------------"
3363                "--------\n");
3364   if (__kmp_root != NULL) {
3365     int gtid;
3366     for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3367       kmp_root_t const *root = __kmp_root[gtid];
3368       if (root != NULL) {
3369         __kmp_printf("GTID %2d %p:\n", gtid, root);
3370         __kmp_print_structure_team("    Root Team:    ", root->r.r_root_team);
3371         __kmp_print_structure_team("    Hot Team:     ", root->r.r_hot_team);
3372         __kmp_print_structure_thread("    Uber Thread:  ",
3373                                      root->r.r_uber_thread);
3374         __kmp_printf("    Active?:      %2d\n", root->r.r_active);
3375         __kmp_printf("    Nested?:      %2d\n", root->r.r_nested);
3376         __kmp_printf("    In Parallel:  %2d\n", root->r.r_in_parallel);
3377         __kmp_printf("\n");
3378         __kmp_print_structure_team_accum(list, root->r.r_root_team);
3379         __kmp_print_structure_team_accum(list, root->r.r_hot_team);
3380       }; // if
3381     }; // for gtid
3382   } else {
3383     __kmp_printf("Ubers array is not allocated.\n");
3384   }; // if
3385 
3386   __kmp_printf("\n------------------------------\nTeams\n----------------------"
3387                "--------\n");
3388   while (list->next != NULL) {
3389     kmp_team_p const *team = list->entry;
3390     int i;
3391     __kmp_printf("Team %2x %p:\n", team->t.t_id, team);
3392     __kmp_print_structure_team("    Parent Team:      ", team->t.t_parent);
3393     __kmp_printf("    Master TID:       %2d\n", team->t.t_master_tid);
3394     __kmp_printf("    Max threads:      %2d\n", team->t.t_max_nproc);
3395     __kmp_printf("    Levels of serial: %2d\n", team->t.t_serialized);
3396     __kmp_printf("    Number threads:   %2d\n", team->t.t_nproc);
3397     for (i = 0; i < team->t.t_nproc; ++i) {
3398       __kmp_printf("    Thread %2d:      ", i);
3399       __kmp_print_structure_thread("", team->t.t_threads[i]);
3400     }; // for i
3401     __kmp_print_structure_team("    Next in pool:     ", team->t.t_next_pool);
3402     __kmp_printf("\n");
3403     list = list->next;
3404   }; // while
3405 
3406   // Print out __kmp_thread_pool and __kmp_team_pool.
3407   __kmp_printf("\n------------------------------\nPools\n----------------------"
3408                "--------\n");
3409   __kmp_print_structure_thread("Thread pool:          ",
3410                                CCAST(kmp_info_t *, __kmp_thread_pool));
3411   __kmp_print_structure_team("Team pool:            ",
3412                              CCAST(kmp_team_t *, __kmp_team_pool));
3413   __kmp_printf("\n");
3414 
3415   // Free team list.
3416   while (list != NULL) {
3417     kmp_team_list_item_t *item = list;
3418     list = list->next;
3419     KMP_INTERNAL_FREE(item);
3420   }; // while
3421 }
3422 
3423 #endif
3424 
3425 //---------------------------------------------------------------------------
3426 //  Stuff for per-thread fast random number generator
3427 //  Table of primes
3428 static const unsigned __kmp_primes[] = {
3429     0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, 0xba5703f5, 0xb495a877,
3430     0xe1626741, 0x79695e6b, 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
3431     0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, 0xbe4d6fe9, 0x5f15e201,
3432     0x99afc3fd, 0xf3f16801, 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
3433     0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, 0x085a3d61, 0x46eb5ea7,
3434     0x3d9910ed, 0x2e687b5b, 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
3435     0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, 0x54581edb, 0xf2480f45,
3436     0x0bb9288f, 0xef1affc7, 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
3437     0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, 0xfc411073, 0xc3749363,
3438     0xb892d829, 0x3549366b, 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
3439     0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f};
3440 
3441 //---------------------------------------------------------------------------
3442 //  __kmp_get_random: Get a random number using a linear congruential method.
3443 unsigned short __kmp_get_random(kmp_info_t *thread) {
3444   unsigned x = thread->th.th_x;
3445   unsigned short r = x >> 16;
3446 
3447   thread->th.th_x = x * thread->th.th_a + 1;
3448 
3449   KA_TRACE(30, ("__kmp_get_random: THREAD: %d, RETURN: %u\n",
3450                 thread->th.th_info.ds.ds_tid, r));
3451 
3452   return r;
3453 }
3454 //--------------------------------------------------------
3455 // __kmp_init_random: Initialize a random number generator
3456 void __kmp_init_random(kmp_info_t *thread) {
3457   unsigned seed = thread->th.th_info.ds.ds_tid;
3458 
3459   thread->th.th_a =
3460       __kmp_primes[seed % (sizeof(__kmp_primes) / sizeof(__kmp_primes[0]))];
3461   thread->th.th_x = (seed + 1) * thread->th.th_a + 1;
3462   KA_TRACE(30,
3463            ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a));
3464 }
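
// Per-thread PRNG sketch: __kmp_init_random picks the multiplier th_a from the
// prime table (indexed by tid) and seeds th_x, and __kmp_get_random then
// iterates the linear congruential recurrence
//   x_{n+1} = a * x_n + 1   (mod 2^32, via unsigned wrap-around)
// returning the upper 16 bits of the current x as the random value, so each
// thread advances its own stream.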
3465 
3466 #if KMP_OS_WINDOWS
3467 /* reclaim array entries for root threads that are already dead, returns number
3468  * reclaimed */
3469 static int __kmp_reclaim_dead_roots(void) {
3470   int i, r = 0;
3471 
3472   for (i = 0; i < __kmp_threads_capacity; ++i) {
3473     if (KMP_UBER_GTID(i) &&
3474         !__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])) &&
3475         !__kmp_root[i]
3476              ->r.r_active) { // AC: reclaim only roots died in non-active state
3477       r += __kmp_unregister_root_other_thread(i);
3478     }
3479   }
3480   return r;
3481 }
3482 #endif
3483 
3484 /* This function attempts to create free entries in __kmp_threads and
3485    __kmp_root, and returns the number of free entries generated.
3486 
3487    For Windows* OS static library, the first mechanism used is to reclaim array
3488    entries for root threads that are already dead.
3489 
3490    On all platforms, expansion is attempted on the arrays __kmp_threads and
3491    __kmp_root, with appropriate update to __kmp_threads_capacity. Array
3492    capacity is increased by doubling with clipping to __kmp_tp_capacity, if
3493    threadprivate cache array has been created. Synchronization with
3494    __kmpc_threadprivate_cached is done using __kmp_tp_cached_lock.
3495 
3496    After any dead root reclamation, if the clipping value allows array expansion
3497    to result in the generation of a total of nWish free slots, the function does
3498    that expansion. If not, but the clipping value allows array expansion to
3499    result in the generation of a total of nNeed free slots, the function does
3500    that expansion. Otherwise, nothing is done beyond the possible initial root
3501    thread reclamation. However, if nNeed is zero, a best-effort attempt is made
3502    to fulfil nWish as far as possible, i.e. the function will attempt to create
3503    as many free slots as possible up to nWish.
3504 
3505    If any argument is negative, the behavior is undefined. */
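
/* A worked example of the sizing loop below, assuming __kmp_actual_max_nth is
   large enough not to clip: with __kmp_threads_capacity == 8, nWish == 16 and
   nNeed == 4, nTarget starts at 16, minimumRequiredCapacity is 8 + 16 == 24,
   and the capacity doubles 8 -> 16 -> 32 before the new arrays are allocated.
   If only 10 more slots were available, nWish could not be met and the smaller
   nNeed == 4 target would be used instead. */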
3506 static int __kmp_expand_threads(int nWish, int nNeed) {
3507   int added = 0;
3508   int old_tp_cached;
3509   int __kmp_actual_max_nth;
3510 
3511   if (nNeed > nWish) /* normalize the arguments */
3512     nWish = nNeed;
3513 #if KMP_OS_WINDOWS && !defined KMP_DYNAMIC_LIB
3514   /* only for Windows static library */
3515   /* reclaim array entries for root threads that are already dead */
3516   added = __kmp_reclaim_dead_roots();
3517 
3518   if (nNeed) {
3519     nNeed -= added;
3520     if (nNeed < 0)
3521       nNeed = 0;
3522   }
3523   if (nWish) {
3524     nWish -= added;
3525     if (nWish < 0)
3526       nWish = 0;
3527   }
3528 #endif
3529   if (nWish <= 0)
3530     return added;
3531 
3532   while (1) {
3533     int nTarget;
3534     int minimumRequiredCapacity;
3535     int newCapacity;
3536     kmp_info_t **newThreads;
3537     kmp_root_t **newRoot;
3538 
3539     // Note that __kmp_threads_capacity is not bounded by __kmp_max_nth. If
3540     // __kmp_max_nth is set to some value less than __kmp_sys_max_nth by the
3541     // user via KMP_DEVICE_THREAD_LIMIT, then __kmp_threads_capacity may become
3542     // > __kmp_max_nth in one of two ways:
3543     //
3544     // 1) The initialization thread (gtid = 0) exits.  __kmp_threads[0]
3545     //    may not be reused by another thread, so we may need to increase
3546     //    __kmp_threads_capacity to __kmp_max_nth + 1.
3547     //
3548     // 2) New foreign root(s) are encountered.  We always register new foreign
3549     //    roots. This may cause a smaller # of threads to be allocated at
3550     //    subsequent parallel regions, but the worker threads hang around (and
3551     //    eventually go to sleep) and need slots in the __kmp_threads[] array.
3552     //
3553     // Anyway, that is the reason for moving the check to see if
3554     // __kmp_max_nth was exceeded into __kmp_reserve_threads()
3555     // instead of having it performed here. -BB
3556     old_tp_cached = __kmp_tp_cached;
3557     __kmp_actual_max_nth =
3558         old_tp_cached ? __kmp_tp_capacity : __kmp_sys_max_nth;
3559     KMP_DEBUG_ASSERT(__kmp_actual_max_nth >= __kmp_threads_capacity);
3560 
3561     /* compute expansion headroom to check if we can expand and whether to aim
3562        for nWish or nNeed */
3563     nTarget = nWish;
3564     if (__kmp_actual_max_nth - __kmp_threads_capacity < nTarget) {
3565       /* can't fulfil nWish, so try nNeed */
3566       if (nNeed) {
3567         nTarget = nNeed;
3568         if (__kmp_actual_max_nth - __kmp_threads_capacity < nTarget) {
3569           /* possible expansion too small -- give up */
3570           break;
3571         }
3572       } else {
3573         /* best-effort */
3574         nTarget = __kmp_actual_max_nth - __kmp_threads_capacity;
3575         if (!nTarget) {
3576           /* can't expand at all -- give up */
3577           break;
3578         }
3579       }
3580     }
3581     minimumRequiredCapacity = __kmp_threads_capacity + nTarget;
3582 
3583     newCapacity = __kmp_threads_capacity;
3584     do {
3585       newCapacity = newCapacity <= (__kmp_actual_max_nth >> 1)
3586                         ? (newCapacity << 1)
3587                         : __kmp_actual_max_nth;
3588     } while (newCapacity < minimumRequiredCapacity);
3589     newThreads = (kmp_info_t **)__kmp_allocate(
3590         (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * newCapacity +
3591         CACHE_LINE);
3592     newRoot = (kmp_root_t **)((char *)newThreads +
3593                               sizeof(kmp_info_t *) * newCapacity);
3594     KMP_MEMCPY(newThreads, __kmp_threads,
3595                __kmp_threads_capacity * sizeof(kmp_info_t *));
3596     KMP_MEMCPY(newRoot, __kmp_root,
3597                __kmp_threads_capacity * sizeof(kmp_root_t *));
3598     memset(newThreads + __kmp_threads_capacity, 0,
3599            (newCapacity - __kmp_threads_capacity) * sizeof(kmp_info_t *));
3600     memset(newRoot + __kmp_threads_capacity, 0,
3601            (newCapacity - __kmp_threads_capacity) * sizeof(kmp_root_t *));
3602 
3603     if (!old_tp_cached && __kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
3604       /* __kmp_tp_cached has changed, i.e. __kmpc_threadprivate_cached has
3605          allocated a threadprivate cache while we were allocating the expanded
3606          array, and our new capacity is larger than the threadprivate cache
3607          capacity, so we should deallocate the expanded arrays and try again.
3608          This is the first check of a double-check pair. */
3609       __kmp_free(newThreads);
3610       continue; /* start over and try again */
3611     }
3612     __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
3613     if (!old_tp_cached && __kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
3614       /* Same check as above, but this time under the lock, so if it passes we
3615          know we will succeed. */
3616       __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
3617       __kmp_free(newThreads);
3618       continue; /* start over and try again */
3619     } else {
3620       /* success */
3621       // __kmp_free( __kmp_threads ); // ATT: It leads to crash. Need to be
3622       // investigated.
3623       *(kmp_info_t * *volatile *)&__kmp_threads = newThreads;
3624       *(kmp_root_t * *volatile *)&__kmp_root = newRoot;
3625       added += newCapacity - __kmp_threads_capacity;
3626       *(volatile int *)&__kmp_threads_capacity = newCapacity;
3627       __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
3628       break; /* succeeded, so we can exit the loop */
3629     }
3630   }
3631   return added;
3632 }
3633 
3634 /* Register the current thread as a root thread and obtain our gtid. We must
3635    have the __kmp_initz_lock held at this point. Argument TRUE only if are the
3636    thread that calls from __kmp_do_serial_initialize() */
3637 int __kmp_register_root(int initial_thread) {
3638   kmp_info_t *root_thread;
3639   kmp_root_t *root;
3640   int gtid;
3641   int capacity;
3642   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3643   KA_TRACE(20, ("__kmp_register_root: entered\n"));
3644   KMP_MB();
3645 
3646   /* 2007-03-02:
3647      If initial thread did not invoke OpenMP RTL yet, and this thread is not an
3648      initial one, "__kmp_all_nth >= __kmp_threads_capacity" condition does not
3649      work as expected -- it may return false (that means there is at least one
3650      empty slot in __kmp_threads array), but it is possible the only free slot
3651      is #0, which is reserved for initial thread and so cannot be used for this
3652      one. The following code works around this bug.
3653 
3654      However, the right solution seems to be not to reserve slot #0 for the
3655      initial thread, because:
3656      (1) there is nothing magic about slot #0,
3657      (2) we cannot detect the initial thread reliably (the first thread that
3658         performs serial initialization may not be the real initial thread).
3659   */
3660   capacity = __kmp_threads_capacity;
3661   if (!initial_thread && TCR_PTR(__kmp_threads[0]) == NULL) {
3662     --capacity;
3663   }; // if
3664 
3665   /* see if there are too many threads */
3666   if (__kmp_all_nth >= capacity && !__kmp_expand_threads(1, 1)) {
3667     if (__kmp_tp_cached) {
3668       __kmp_msg(kmp_ms_fatal, KMP_MSG(CantRegisterNewThread),
3669                 KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
3670                 KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
3671     } else {
3672       __kmp_msg(kmp_ms_fatal, KMP_MSG(CantRegisterNewThread),
3673                 KMP_HNT(SystemLimitOnThreads), __kmp_msg_null);
3674     }
3675   }; // if
3676 
3677   /* find an available thread slot */
3678   /* Don't reassign the zero slot since we need that to only be used by initial
3679      thread */
3680   for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
3681        gtid++)
3682     ;
3683   KA_TRACE(1,
3684            ("__kmp_register_root: found slot in threads array: T#%d\n", gtid));
3685   KMP_ASSERT(gtid < __kmp_threads_capacity);
3686 
3687   /* update global accounting */
3688   __kmp_all_nth++;
3689   TCW_4(__kmp_nth, __kmp_nth + 1);
3690 
3691   // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for low
3692   // numbers of procs, and method #2 (keyed API call) for higher numbers.
3693   if (__kmp_adjust_gtid_mode) {
3694     if (__kmp_all_nth >= __kmp_tls_gtid_min) {
3695       if (TCR_4(__kmp_gtid_mode) != 2) {
3696         TCW_4(__kmp_gtid_mode, 2);
3697       }
3698     } else {
3699       if (TCR_4(__kmp_gtid_mode) != 1) {
3700         TCW_4(__kmp_gtid_mode, 1);
3701       }
3702     }
3703   }
3704 
3705 #ifdef KMP_ADJUST_BLOCKTIME
3706   /* Adjust blocktime to zero if necessary            */
3707   /* Middle initialization might not have occurred yet */
3708   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
3709     if (__kmp_nth > __kmp_avail_proc) {
3710       __kmp_zero_bt = TRUE;
3711     }
3712   }
3713 #endif /* KMP_ADJUST_BLOCKTIME */
3714 
3715   /* setup this new hierarchy */
3716   if (!(root = __kmp_root[gtid])) {
3717     root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(sizeof(kmp_root_t));
3718     KMP_DEBUG_ASSERT(!root->r.r_root_team);
3719   }
3720 
3721 #if KMP_STATS_ENABLED
3722   // Initialize stats as soon as possible (right after gtid assignment).
3723   __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid);
3724   KMP_START_EXPLICIT_TIMER(OMP_worker_thread_life);
3725   KMP_SET_THREAD_STATE(SERIAL_REGION);
3726   KMP_INIT_PARTITIONED_TIMERS(OMP_serial);
3727 #endif
3728   __kmp_initialize_root(root);
3729 
3730   /* setup new root thread structure */
3731   if (root->r.r_uber_thread) {
3732     root_thread = root->r.r_uber_thread;
3733   } else {
3734     root_thread = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
3735     if (__kmp_storage_map) {
3736       __kmp_print_thread_storage_map(root_thread, gtid);
3737     }
3738     root_thread->th.th_info.ds.ds_gtid = gtid;
3739     root_thread->th.th_root = root;
3740     if (__kmp_env_consistency_check) {
3741       root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid);
3742     }
3743 #if USE_FAST_MEMORY
3744     __kmp_initialize_fast_memory(root_thread);
3745 #endif /* USE_FAST_MEMORY */
3746 
3747 #if KMP_USE_BGET
3748     KMP_DEBUG_ASSERT(root_thread->th.th_local.bget_data == NULL);
3749     __kmp_initialize_bget(root_thread);
3750 #endif
3751     __kmp_init_random(root_thread); // Initialize random number generator
3752   }
3753 
3754   /* setup the serial team held in reserve by the root thread */
3755   if (!root_thread->th.th_serial_team) {
3756     kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3757     KF_TRACE(10, ("__kmp_register_root: before serial_team\n"));
3758     root_thread->th.th_serial_team =
3759         __kmp_allocate_team(root, 1, 1,
3760 #if OMPT_SUPPORT
3761                             0, // root parallel id
3762 #endif
3763 #if OMP_40_ENABLED
3764                             proc_bind_default,
3765 #endif
3766                             &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
3767   }
3768   KMP_ASSERT(root_thread->th.th_serial_team);
3769   KF_TRACE(10, ("__kmp_register_root: after serial_team = %p\n",
3770                 root_thread->th.th_serial_team));
3771 
3772   /* drop root_thread into place */
3773   TCW_SYNC_PTR(__kmp_threads[gtid], root_thread);
3774 
3775   root->r.r_root_team->t.t_threads[0] = root_thread;
3776   root->r.r_hot_team->t.t_threads[0] = root_thread;
3777   root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
3778   // AC: the team is created in reserve, not for execution (it is unused for now).
3779   root_thread->th.th_serial_team->t.t_serialized = 0;
3780   root->r.r_uber_thread = root_thread;
3781 
3782   /* initialize the thread, get it ready to go */
3783   __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid);
3784   TCW_4(__kmp_init_gtid, TRUE);
3785 
3786   /* prepare the master thread for get_gtid() */
3787   __kmp_gtid_set_specific(gtid);
3788 
3789 #if USE_ITT_BUILD
3790   __kmp_itt_thread_name(gtid);
3791 #endif /* USE_ITT_BUILD */
3792 
3793 #ifdef KMP_TDATA_GTID
3794   __kmp_gtid = gtid;
3795 #endif
3796   __kmp_create_worker(gtid, root_thread, __kmp_stksize);
3797   KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == gtid);
3798 
3799   KA_TRACE(20, ("__kmp_register_root: T#%d init T#%d(%d:%d) arrived: join=%u, "
3800                 "plain=%u\n",
3801                 gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team),
3802                 root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE,
3803                 KMP_INIT_BARRIER_STATE));
3804   { // Initialize barrier data.
3805     int b;
3806     for (b = 0; b < bs_last_barrier; ++b) {
3807       root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE;
3808 #if USE_DEBUGGER
3809       root_thread->th.th_bar[b].bb.b_worker_arrived = 0;
3810 #endif
3811     }; // for
3812   }
3813   KMP_DEBUG_ASSERT(root->r.r_hot_team->t.t_bar[bs_forkjoin_barrier].b_arrived ==
3814                    KMP_INIT_BARRIER_STATE);
3815 
3816 #if KMP_AFFINITY_SUPPORTED
3817 #if OMP_40_ENABLED
3818   root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
3819   root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
3820   root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
3821   root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
3822 #endif
3823 
3824   if (TCR_4(__kmp_init_middle)) {
3825     __kmp_affinity_set_init_mask(gtid, TRUE);
3826   }
3827 #endif /* KMP_AFFINITY_SUPPORTED */
3828 
3829   __kmp_root_counter++;
3830 
3831   KMP_MB();
3832   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3833 
3834   return gtid;
3835 }
3836 
3837 #if KMP_NESTED_HOT_TEAMS
3838 static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr, int level,
3839                                 const int max_level) {
3840   int i, n, nth;
3841   kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
3842   if (!hot_teams || !hot_teams[level].hot_team) {
3843     return 0;
3844   }
3845   KMP_DEBUG_ASSERT(level < max_level);
3846   kmp_team_t *team = hot_teams[level].hot_team;
3847   nth = hot_teams[level].hot_team_nth;
3848   n = nth - 1; // master is not freed
3849   if (level < max_level - 1) {
3850     for (i = 0; i < nth; ++i) {
3851       kmp_info_t *th = team->t.t_threads[i];
3852       n += __kmp_free_hot_teams(root, th, level + 1, max_level);
3853       if (i > 0 && th->th.th_hot_teams) {
3854         __kmp_free(th->th.th_hot_teams);
3855         th->th.th_hot_teams = NULL;
3856       }
3857     }
3858   }
3859   __kmp_free_team(root, team, NULL);
3860   return n;
3861 }
3862 #endif
3863 
3864 // Resets a root thread and clears its root and hot teams.
3865 // Returns the number of __kmp_threads entries directly and indirectly freed.
3866 static int __kmp_reset_root(int gtid, kmp_root_t *root) {
3867   kmp_team_t *root_team = root->r.r_root_team;
3868   kmp_team_t *hot_team = root->r.r_hot_team;
3869   int n = hot_team->t.t_nproc;
3870   int i;
3871 
3872   KMP_DEBUG_ASSERT(!root->r.r_active);
3873 
3874   root->r.r_root_team = NULL;
3875   root->r.r_hot_team = NULL;
3876   // __kmp_free_team() does not free hot teams, so we have to clear r_hot_team
3877   // before call to __kmp_free_team().
3878   __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
3879 #if KMP_NESTED_HOT_TEAMS
3880   if (__kmp_hot_teams_max_level >
3881       0) { // need to free nested hot teams and their threads if any
3882     for (i = 0; i < hot_team->t.t_nproc; ++i) {
3883       kmp_info_t *th = hot_team->t.t_threads[i];
3884       if (__kmp_hot_teams_max_level > 1) {
3885         n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level);
3886       }
3887       if (th->th.th_hot_teams) {
3888         __kmp_free(th->th.th_hot_teams);
3889         th->th.th_hot_teams = NULL;
3890       }
3891     }
3892   }
3893 #endif
3894   __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
3895 
3896   // Before we can reap the thread, we need to make certain that all other
3897   // threads in the teams that had this root as ancestor have stopped trying to
3898   // steal tasks.
3899   if (__kmp_tasking_mode != tskm_immediate_exec) {
3900     __kmp_wait_to_unref_task_teams();
3901   }
3902 
3903 #if KMP_OS_WINDOWS
3904   /* Close Handle of root duplicated in __kmp_create_worker (tr #62919) */
3905   KA_TRACE(
3906       10, ("__kmp_reset_root: free handle, th = %p, handle = %" KMP_UINTPTR_SPEC
3907            "\n",
3908            (LPVOID) & (root->r.r_uber_thread->th),
3909            root->r.r_uber_thread->th.th_info.ds.ds_thread));
3910   __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread);
3911 #endif /* KMP_OS_WINDOWS */
3912 
3913 #if OMPT_SUPPORT
3914   if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_thread_end)) {
3915     int gtid = __kmp_get_gtid();
3916     __ompt_thread_end(ompt_thread_initial, gtid);
3917   }
3918 #endif
3919 
3920   TCW_4(__kmp_nth,
3921         __kmp_nth - 1); // __kmp_reap_thread will decrement __kmp_all_nth.
3922   root->r.r_cg_nthreads--;
3923 
3924   __kmp_reap_thread(root->r.r_uber_thread, 1);
3925 
3926   // We cannot put the root thread into __kmp_thread_pool, so we have to reap
3927   // it instead of freeing it.
3928   root->r.r_uber_thread = NULL;
3929   /* mark root as no longer in use */
3930   root->r.r_begin = FALSE;
3931 
3932   return n;
3933 }
3934 
3935 void __kmp_unregister_root_current_thread(int gtid) {
3936   KA_TRACE(1, ("__kmp_unregister_root_current_thread: enter T#%d\n", gtid));
3937   /* this lock should be ok, since unregister_root_current_thread is never
3938      called during an abort, only during a normal close. furthermore, if you
3939      have the forkjoin lock, you should never try to get the initz lock */
3940   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3941   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
3942     KC_TRACE(10, ("__kmp_unregister_root_current_thread: already finished, "
3943                   "exiting T#%d\n",
3944                   gtid));
3945     __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3946     return;
3947   }
3948   kmp_root_t *root = __kmp_root[gtid];
3949 
3950   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
3951   KMP_ASSERT(KMP_UBER_GTID(gtid));
3952   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
3953   KMP_ASSERT(root->r.r_active == FALSE);
3954 
3955   KMP_MB();
3956 
3957 #if OMP_45_ENABLED
3958   kmp_info_t *thread = __kmp_threads[gtid];
3959   kmp_team_t *team = thread->th.th_team;
3960   kmp_task_team_t *task_team = thread->th.th_task_team;
3961 
3962   // we need to wait for the proxy tasks before finishing the thread
3963   if (task_team != NULL && task_team->tt.tt_found_proxy_tasks) {
3964 #if OMPT_SUPPORT
3965     // the runtime is shutting down so we won't report any events
3966     thread->th.ompt_thread_info.state = ompt_state_undefined;
3967 #endif
3968     __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
3969   }
3970 #endif
3971 
3972   __kmp_reset_root(gtid, root);
3973 
3974   /* free up this thread slot */
3975   __kmp_gtid_set_specific(KMP_GTID_DNE);
3976 #ifdef KMP_TDATA_GTID
3977   __kmp_gtid = KMP_GTID_DNE;
3978 #endif
3979 
3980   KMP_MB();
3981   KC_TRACE(10,
3982            ("__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid));
3983 
3984   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3985 }
3986 
3987 #if KMP_OS_WINDOWS
3988 /* __kmp_forkjoin_lock must be already held
3989    Unregisters a root thread that is not the current thread.  Returns the number
3990    of __kmp_threads entries freed as a result. */
3991 static int __kmp_unregister_root_other_thread(int gtid) {
3992   kmp_root_t *root = __kmp_root[gtid];
3993   int r;
3994 
3995   KA_TRACE(1, ("__kmp_unregister_root_other_thread: enter T#%d\n", gtid));
3996   KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
3997   KMP_ASSERT(KMP_UBER_GTID(gtid));
3998   KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
3999   KMP_ASSERT(root->r.r_active == FALSE);
4000 
4001   r = __kmp_reset_root(gtid, root);
4002   KC_TRACE(10,
4003            ("__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid));
4004   return r;
4005 }
4006 #endif
4007 
4008 #if KMP_DEBUG
4009 void __kmp_task_info() {
4010 
4011   kmp_int32 gtid = __kmp_entry_gtid();
4012   kmp_int32 tid = __kmp_tid_from_gtid(gtid);
4013   kmp_info_t *this_thr = __kmp_threads[gtid];
4014   kmp_team_t *steam = this_thr->th.th_serial_team;
4015   kmp_team_t *team = this_thr->th.th_team;
4016 
4017   __kmp_printf("__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p curtask=%p "
4018                "ptask=%p\n",
4019                gtid, tid, this_thr, team, this_thr->th.th_current_task,
4020                team->t.t_implicit_task_taskdata[tid].td_parent);
4021 }
4022 #endif // KMP_DEBUG
4023 
4024 /* TODO optimize with one big memclr, take out what isn't needed, split
4025    responsibility to workers as much as possible, and delay initialization of
4026    features as much as possible  */
4027 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
4028                                   int tid, int gtid) {
4029   /* this_thr->th.th_info.ds.ds_gtid is setup in
4030      kmp_allocate_thread/create_worker.
4031      this_thr->th.th_serial_team is setup in __kmp_allocate_thread */
4032   kmp_info_t *master = team->t.t_threads[0];
4033   KMP_DEBUG_ASSERT(this_thr != NULL);
4034   KMP_DEBUG_ASSERT(this_thr->th.th_serial_team);
4035   KMP_DEBUG_ASSERT(team);
4036   KMP_DEBUG_ASSERT(team->t.t_threads);
4037   KMP_DEBUG_ASSERT(team->t.t_dispatch);
4038   KMP_DEBUG_ASSERT(master);
4039   KMP_DEBUG_ASSERT(master->th.th_root);
4040 
4041   KMP_MB();
4042 
4043   TCW_SYNC_PTR(this_thr->th.th_team, team);
4044 
4045   this_thr->th.th_info.ds.ds_tid = tid;
4046   this_thr->th.th_set_nproc = 0;
4047   if (__kmp_tasking_mode != tskm_immediate_exec)
4048     // When tasking is possible, threads are not safe to reap until they are
4049     // done tasking; this will be set when tasking code is exited in wait
4050     this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
4051   else // no tasking --> always safe to reap
4052     this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
4053 #if OMP_40_ENABLED
4054   this_thr->th.th_set_proc_bind = proc_bind_default;
4055 #if KMP_AFFINITY_SUPPORTED
4056   this_thr->th.th_new_place = this_thr->th.th_current_place;
4057 #endif
4058 #endif
4059   this_thr->th.th_root = master->th.th_root;
4060 
4061   /* setup the thread's cache of the team structure */
4062   this_thr->th.th_team_nproc = team->t.t_nproc;
4063   this_thr->th.th_team_master = master;
4064   this_thr->th.th_team_serialized = team->t.t_serialized;
4065   TCW_PTR(this_thr->th.th_sleep_loc, NULL);
4066 
4067   KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);
4068 
4069   KF_TRACE(10, ("__kmp_initialize_info1: T#%d:%d this_thread=%p curtask=%p\n",
4070                 tid, gtid, this_thr, this_thr->th.th_current_task));
4071 
4072   __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr,
4073                            team, tid, TRUE);
4074 
4075   KF_TRACE(10, ("__kmp_initialize_info2: T#%d:%d this_thread=%p curtask=%p\n",
4076                 tid, gtid, this_thr, this_thr->th.th_current_task));
4077   // TODO: Initialize ICVs from parent; GEH - isn't that already done in
4078   // __kmp_initialize_team()?
4079 
4080   /* TODO no worksharing in speculative threads */
4081   this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
4082 
4083   this_thr->th.th_local.this_construct = 0;
4084 
4085 #ifdef BUILD_TV
4086   this_thr->th.th_local.tv_data = 0;
4087 #endif
4088 
4089   if (!this_thr->th.th_pri_common) {
4090     this_thr->th.th_pri_common =
4091         (struct common_table *)__kmp_allocate(sizeof(struct common_table));
4092     if (__kmp_storage_map) {
4093       __kmp_print_storage_map_gtid(
4094           gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
4095           sizeof(struct common_table), "th_%d.th_pri_common\n", gtid);
4096     }; // if
4097     this_thr->th.th_pri_head = NULL;
4098   }; // if
4099 
4100   /* Initialize dynamic dispatch */
4101   {
4102     volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
4103     // Use team max_nproc since this will never change for the team.
4104     size_t disp_size =
4105         sizeof(dispatch_private_info_t) *
4106         (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
4107     KD_TRACE(10, ("__kmp_initialize_info: T#%d max_nproc: %d\n", gtid,
4108                   team->t.t_max_nproc));
4109     KMP_ASSERT(dispatch);
4110     KMP_DEBUG_ASSERT(team->t.t_dispatch);
4111     KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
4112 
4113     dispatch->th_disp_index = 0;
4114 #if OMP_45_ENABLED
4115     dispatch->th_doacross_buf_idx = 0;
4116 #endif
4117     if (!dispatch->th_disp_buffer) {
4118       dispatch->th_disp_buffer =
4119           (dispatch_private_info_t *)__kmp_allocate(disp_size);
4120 
4121       if (__kmp_storage_map) {
4122         __kmp_print_storage_map_gtid(
4123             gtid, &dispatch->th_disp_buffer[0],
4124             &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
4125                                           ? 1
4126                                           : __kmp_dispatch_num_buffers],
4127             disp_size, "th_%d.th_dispatch.th_disp_buffer "
4128                        "(team_%d.t_dispatch[%d].th_disp_buffer)",
4129             gtid, team->t.t_id, gtid);
4130       }
4131     } else {
4132       memset(&dispatch->th_disp_buffer[0], '\0', disp_size);
4133     }
4134 
4135     dispatch->th_dispatch_pr_current = 0;
4136     dispatch->th_dispatch_sh_current = 0;
4137 
4138     dispatch->th_deo_fcn = 0; /* ORDERED     */
4139     dispatch->th_dxo_fcn = 0; /* END ORDERED */
4140   }
4141 
4142   this_thr->th.th_next_pool = NULL;
4143 
4144   if (!this_thr->th.th_task_state_memo_stack) {
4145     size_t i;
4146     this_thr->th.th_task_state_memo_stack =
4147         (kmp_uint8 *)__kmp_allocate(4 * sizeof(kmp_uint8));
4148     this_thr->th.th_task_state_top = 0;
4149     this_thr->th.th_task_state_stack_sz = 4;
4150     for (i = 0; i < this_thr->th.th_task_state_stack_sz;
4151          ++i) // zero init the stack
4152       this_thr->th.th_task_state_memo_stack[i] = 0;
4153   }
4154 
4155   KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
4156   KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
4157 
4158   KMP_MB();
4159 }
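
// On return from __kmp_initialize_info the thread caches the team size, master
// and serialization level, owns an implicit task, has its private dispatch
// buffer(s) allocated (one, or __kmp_dispatch_num_buffers when the team's
// max_nproc exceeds one), and (if it did not already have one) a 4-entry
// task-state memo stack ready for use.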
4160 
4161 /* Allocate a new thread for the requesting team. This is only called from
4162    within a forkjoin critical section. We will first try to get an available
4163    thread from the thread pool; if none is available, we will fork a new one,
4164    assuming we are able to create one. This should be assured, as the caller
4165    should have checked on this first. */
4166 kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
4167                                   int new_tid) {
4168   kmp_team_t *serial_team;
4169   kmp_info_t *new_thr;
4170   int new_gtid;
4171 
4172   KA_TRACE(20, ("__kmp_allocate_thread: T#%d\n", __kmp_get_gtid()));
4173   KMP_DEBUG_ASSERT(root && team);
4174 #if !KMP_NESTED_HOT_TEAMS
4175   KMP_DEBUG_ASSERT(KMP_MASTER_GTID(__kmp_get_gtid()));
4176 #endif
4177   KMP_MB();
4178 
4179   /* first, try to get one from the thread pool */
4180   if (__kmp_thread_pool) {
4181 
4182     new_thr = CCAST(kmp_info_t *, __kmp_thread_pool);
4183     __kmp_thread_pool = (volatile kmp_info_t *)new_thr->th.th_next_pool;
4184     if (new_thr == __kmp_thread_pool_insert_pt) {
4185       __kmp_thread_pool_insert_pt = NULL;
4186     }
4187     TCW_4(new_thr->th.th_in_pool, FALSE);
4188     // Don't touch th_active_in_pool or th_active.
4189     // The worker thread adjusts those flags as it sleeps/awakens.
4190     __kmp_thread_pool_nth--;
4191 
4192     KA_TRACE(20, ("__kmp_allocate_thread: T#%d using thread T#%d\n",
4193                   __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid));
4194     KMP_ASSERT(!new_thr->th.th_team);
4195     KMP_DEBUG_ASSERT(__kmp_nth < __kmp_threads_capacity);
4196     KMP_DEBUG_ASSERT(__kmp_thread_pool_nth >= 0);
4197 
4198     /* setup the thread structure */
4199     __kmp_initialize_info(new_thr, team, new_tid,
4200                           new_thr->th.th_info.ds.ds_gtid);
4201     KMP_DEBUG_ASSERT(new_thr->th.th_serial_team);
4202 
4203     TCW_4(__kmp_nth, __kmp_nth + 1);
4204     root->r.r_cg_nthreads++;
4205 
4206     new_thr->th.th_task_state = 0;
4207     new_thr->th.th_task_state_top = 0;
4208     new_thr->th.th_task_state_stack_sz = 4;
4209 
4210 #ifdef KMP_ADJUST_BLOCKTIME
4211     /* Adjust blocktime back to zero if necessary */
4212     /* Middle initialization might not have occurred yet */
4213     if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4214       if (__kmp_nth > __kmp_avail_proc) {
4215         __kmp_zero_bt = TRUE;
4216       }
4217     }
4218 #endif /* KMP_ADJUST_BLOCKTIME */
4219 
4220 #if KMP_DEBUG
4221     // If the thread entered the pool via __kmp_free_thread, wait_flag
4222     // should not be KMP_BARRIER_PARENT_FLAG.
4223     int b;
4224     kmp_balign_t *balign = new_thr->th.th_bar;
4225     for (b = 0; b < bs_last_barrier; ++b)
4226       KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
4227 #endif
4228 
4229     KF_TRACE(10, ("__kmp_allocate_thread: T#%d using thread %p T#%d\n",
4230                   __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid));
4231 
4232     KMP_MB();
4233     return new_thr;
4234   }
4235 
4236   /* no thread available in the pool, so fork a new one */
4237   KMP_ASSERT(__kmp_nth == __kmp_all_nth);
4238   KMP_ASSERT(__kmp_all_nth < __kmp_threads_capacity);
4239 
4240 #if KMP_USE_MONITOR
4241   // If this is the first worker thread the RTL is creating, then also
4242   // launch the monitor thread.  We try to do this as early as possible.
4243   if (!TCR_4(__kmp_init_monitor)) {
4244     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
4245     if (!TCR_4(__kmp_init_monitor)) {
4246       KF_TRACE(10, ("before __kmp_create_monitor\n"));
4247       TCW_4(__kmp_init_monitor, 1);
4248       __kmp_create_monitor(&__kmp_monitor);
4249       KF_TRACE(10, ("after __kmp_create_monitor\n"));
4250 #if KMP_OS_WINDOWS
4251       // AC: wait until the monitor has started. This is a fix for CQ232808.
4252       // The reason is that if the library is loaded/unloaded in a loop with
4253       // small (parallel) work in between, then there is a high probability
4254       // that the monitor thread starts only after the library shutdown. At
4255       // shutdown it is too late to cope with the problem, because when the
4256       // master is in DllMain (process detach) the monitor has no chance to
4257       // start (it is blocked), and the master has no means to inform the
4258       // monitor that the library has gone, because all the memory which the
4259       // monitor can access is going to be released/reset.
4260       while (TCR_4(__kmp_init_monitor) < 2) {
4261         KMP_YIELD(TRUE);
4262       }
4263       KF_TRACE(10, ("after monitor thread has started\n"));
4264 #endif
4265     }
4266     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
4267   }
4268 #endif
4269 
4270   KMP_MB();
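  /* Find the first unused gtid slot with a linear scan; the caller has already
     ensured there is capacity, so the assert below must hold. */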
4271   for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
4272     KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
4273   }
4274 
4275   /* allocate space for it. */
4276   new_thr = (kmp_info_t *)__kmp_allocate(sizeof(kmp_info_t));
4277 
4278   TCW_SYNC_PTR(__kmp_threads[new_gtid], new_thr);
4279 
4280   if (__kmp_storage_map) {
4281     __kmp_print_thread_storage_map(new_thr, new_gtid);
4282   }
4283 
4284   // add the reserve serialized team, initialized from the team's master thread
4285   {
4286     kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
4287     KF_TRACE(10, ("__kmp_allocate_thread: before th_serial/serial_team\n"));
4288     new_thr->th.th_serial_team = serial_team =
4289         (kmp_team_t *)__kmp_allocate_team(root, 1, 1,
4290 #if OMPT_SUPPORT
4291                                           0, // root parallel id
4292 #endif
4293 #if OMP_40_ENABLED
4294                                           proc_bind_default,
4295 #endif
4296                                           &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
4297   }
4298   KMP_ASSERT(serial_team);
4299   serial_team->t.t_serialized = 0; // AC: the team is created in reserve, not
4300   // for execution (it is unused for now).
4301   serial_team->t.t_threads[0] = new_thr;
4302   KF_TRACE(10,
4303            ("__kmp_allocate_thread: after th_serial/serial_team : new_thr=%p\n",
4304             new_thr));
4305 
4306   /* setup the thread structures */
4307   __kmp_initialize_info(new_thr, team, new_tid, new_gtid);
4308 
4309 #if USE_FAST_MEMORY
4310   __kmp_initialize_fast_memory(new_thr);
4311 #endif /* USE_FAST_MEMORY */
4312 
4313 #if KMP_USE_BGET
4314   KMP_DEBUG_ASSERT(new_thr->th.th_local.bget_data == NULL);
4315   __kmp_initialize_bget(new_thr);
4316 #endif
4317 
4318   __kmp_init_random(new_thr); // Initialize random number generator
4319 
4320   /* Initialize these only once when thread is grabbed for a team allocation */
4321   KA_TRACE(20,
4322            ("__kmp_allocate_thread: T#%d init go fork=%u, plain=%u\n",
4323             __kmp_get_gtid(), KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
4324 
4325   int b;
4326   kmp_balign_t *balign = new_thr->th.th_bar;
4327   for (b = 0; b < bs_last_barrier; ++b) {
4328     balign[b].bb.b_go = KMP_INIT_BARRIER_STATE;
4329     balign[b].bb.team = NULL;
4330     balign[b].bb.wait_flag = KMP_BARRIER_NOT_WAITING;
4331     balign[b].bb.use_oncore_barrier = 0;
4332   }
4333 
4334   new_thr->th.th_spin_here = FALSE;
4335   new_thr->th.th_next_waiting = 0;
4336 
4337 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
4338   new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
4339   new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
4340   new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
4341   new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
4342 #endif
4343 
4344   TCW_4(new_thr->th.th_in_pool, FALSE);
4345   new_thr->th.th_active_in_pool = FALSE;
4346   TCW_4(new_thr->th.th_active, TRUE);
4347 
4348   /* adjust the global counters */
4349   __kmp_all_nth++;
4350   __kmp_nth++;
4351 
4352   root->r.r_cg_nthreads++;
4353 
4354   // if __kmp_adjust_gtid_mode is set, then we use method #1 (sp search) for low
4355   // numbers of procs, and method #2 (keyed API call) for higher numbers.
4356   if (__kmp_adjust_gtid_mode) {
4357     if (__kmp_all_nth >= __kmp_tls_gtid_min) {
4358       if (TCR_4(__kmp_gtid_mode) != 2) {
4359         TCW_4(__kmp_gtid_mode, 2);
4360       }
4361     } else {
4362       if (TCR_4(__kmp_gtid_mode) != 1) {
4363         TCW_4(__kmp_gtid_mode, 1);
4364       }
4365     }
4366   }
4367 
4368 #ifdef KMP_ADJUST_BLOCKTIME
4369   /* Adjust blocktime back to zero if necessary       */
4370   /* Middle initialization might not have occurred yet */
4371   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4372     if (__kmp_nth > __kmp_avail_proc) {
4373       __kmp_zero_bt = TRUE;
4374     }
4375   }
4376 #endif /* KMP_ADJUST_BLOCKTIME */
4377 
4378   /* actually fork it and create the new worker thread */
4379   KF_TRACE(
4380       10, ("__kmp_allocate_thread: before __kmp_create_worker: %p\n", new_thr));
4381   __kmp_create_worker(new_gtid, new_thr, __kmp_stksize);
4382   KF_TRACE(10,
4383            ("__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr));
4384 
4385   KA_TRACE(20, ("__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(),
4386                 new_gtid));
4387   KMP_MB();
4388   return new_thr;
4389 }
4390 
4391 /* Reinitialize team for reuse.
4392    The hot team code calls this routine at every fork barrier, so EPCC barrier
4393    tests are extremely sensitive to changes in it, esp. writes to the team
4394    struct, which cause a cache invalidation in all threads.
4395    IF YOU TOUCH THIS ROUTINE, RUN EPCC C SYNCBENCH ON A BIG-IRON MACHINE!!! */
4396 static void __kmp_reinitialize_team(kmp_team_t *team,
4397                                     kmp_internal_control_t *new_icvs,
4398                                     ident_t *loc) {
4399   KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
4400                 team->t.t_threads[0], team));
4401   KMP_DEBUG_ASSERT(team && new_icvs);
4402   KMP_DEBUG_ASSERT((!TCR_4(__kmp_init_parallel)) || new_icvs->nproc);
4403   KMP_CHECK_UPDATE(team->t.t_ident, loc);
4404 
4405   KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());
4406   // Copy ICVs to the master thread's implicit taskdata
4407   __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
4408   copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);
4409 
4410   KF_TRACE(10, ("__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
4411                 team->t.t_threads[0], team));
4412 }
4413 
4414 /* Initialize the team data structure.
4415    This assumes the t_threads and t_max_nproc are already set.
4416    Also, we don't touch the arguments */
4417 static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
4418                                   kmp_internal_control_t *new_icvs,
4419                                   ident_t *loc) {
4420   KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));
4421 
4422   /* verify */
4423   KMP_DEBUG_ASSERT(team);
4424   KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
4425   KMP_DEBUG_ASSERT(team->t.t_threads);
4426   KMP_MB();
4427 
4428   team->t.t_master_tid = 0; /* not needed */
4429   /* team->t.t_master_bar;        not needed */
4430   team->t.t_serialized = new_nproc > 1 ? 0 : 1;
4431   team->t.t_nproc = new_nproc;
4432 
4433   /* team->t.t_parent     = NULL; TODO not needed & would mess up hot team */
4434   team->t.t_next_pool = NULL;
4435   /* memset( team->t.t_threads, 0, sizeof(kmp_info_t*)*new_nproc ); would mess
4436    * up hot team */
4437 
4438   TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
4439   team->t.t_invoke = NULL; /* not needed */
4440 
4441   // TODO???: team->t.t_max_active_levels       = new_max_active_levels;
4442   team->t.t_sched = new_icvs->sched;
4443 
4444 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
4445   team->t.t_fp_control_saved = FALSE; /* not needed */
4446   team->t.t_x87_fpu_control_word = 0; /* not needed */
4447   team->t.t_mxcsr = 0; /* not needed */
4448 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
4449 
4450   team->t.t_construct = 0;
4451   __kmp_init_lock(&team->t.t_single_lock);
4452 
4453   team->t.t_ordered.dt.t_value = 0;
4454   team->t.t_master_active = FALSE;
4455 
4456   memset(&team->t.t_taskq, '\0', sizeof(kmp_taskq_t));
4457 
4458 #ifdef KMP_DEBUG
4459   team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */
4460 #endif
4461   team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */
4462 
4463   team->t.t_control_stack_top = NULL;
4464 
4465   __kmp_reinitialize_team(team, new_icvs, loc);
4466 
4467   KMP_MB();
4468   KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
4469 }
4470 
4471 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
4472 /* Sets the full mask for the thread and saves the old mask via old_mask; no changes to structures. */
4473 static void
4474 __kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
4475   if (KMP_AFFINITY_CAPABLE()) {
4476     int status;
4477     if (old_mask != NULL) {
4478       status = __kmp_get_system_affinity(old_mask, TRUE);
4479       int error = errno;
4480       if (status != 0) {
4481         __kmp_msg(kmp_ms_fatal, KMP_MSG(ChangeThreadAffMaskError),
4482                   KMP_ERR(error), __kmp_msg_null);
4483       }
4484     }
4485     __kmp_set_system_affinity(__kmp_affin_fullMask, TRUE);
4486   }
4487 }
4488 #endif
4489 
4490 #if OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED
4491 
4492 // __kmp_partition_places() is the heart of the OpenMP 4.0 affinity mechanism.
4493 // It calculates the worker + master thread's partition based upon the parent
4494 // thread's partition, and binds each worker to a place in that partition.
4495 // The master thread's partition should already include its current binding.
4496 static void __kmp_partition_places(kmp_team_t *team, int update_master_only) {
4497   // Copy the master thread's place partition to the team struct
4498   kmp_info_t *master_th = team->t.t_threads[0];
4499   KMP_DEBUG_ASSERT(master_th != NULL);
4500   kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
4501   int first_place = master_th->th.th_first_place;
4502   int last_place = master_th->th.th_last_place;
4503   int masters_place = master_th->th.th_current_place;
4504   team->t.t_first_place = first_place;
4505   team->t.t_last_place = last_place;
4506 
4507   KA_TRACE(20, ("__kmp_partition_places: enter: proc_bind = %d T#%d(%d:0) "
4508                 "bound to place %d partition = [%d,%d]\n",
4509                 proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
4510                 team->t.t_id, masters_place, first_place, last_place));
4511 
4512   switch (proc_bind) {
4513 
4514   case proc_bind_default:
4515     // serial teams might have the proc_bind policy set to proc_bind_default. It
4516     // doesn't matter, as we don't rebind the master thread for any proc_bind policy
4517     KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
4518     break;
4519 
4520   case proc_bind_master: {
4521     int f;
4522     int n_th = team->t.t_nproc;
4523     for (f = 1; f < n_th; f++) {
4524       kmp_info_t *th = team->t.t_threads[f];
4525       KMP_DEBUG_ASSERT(th != NULL);
4526       th->th.th_first_place = first_place;
4527       th->th.th_last_place = last_place;
4528       th->th.th_new_place = masters_place;
4529 
4530       KA_TRACE(100, ("__kmp_partition_places: master: T#%d(%d:%d) place %d "
4531                      "partition = [%d,%d]\n",
4532                      __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4533                      f, masters_place, first_place, last_place));
4534     }
4535   } break;
4536 
4537   case proc_bind_close: {
4538     int f;
4539     int n_th = team->t.t_nproc;
4540     int n_places;
4541     if (first_place <= last_place) {
4542       n_places = last_place - first_place + 1;
4543     } else {
4544       n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4545     }
4546     if (n_th <= n_places) {
4547       int place = masters_place;
4548       for (f = 1; f < n_th; f++) {
4549         kmp_info_t *th = team->t.t_threads[f];
4550         KMP_DEBUG_ASSERT(th != NULL);
4551 
4552         if (place == last_place) {
4553           place = first_place;
4554         } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4555           place = 0;
4556         } else {
4557           place++;
4558         }
4559         th->th.th_first_place = first_place;
4560         th->th.th_last_place = last_place;
4561         th->th.th_new_place = place;
4562 
4563         KA_TRACE(100, ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
4564                        "partition = [%d,%d]\n",
4565                        __kmp_gtid_from_thread(team->t.t_threads[f]),
4566                        team->t.t_id, f, place, first_place, last_place));
4567       }
4568     } else {
4569       int S, rem, gap, s_count;
4570       S = n_th / n_places;
4571       s_count = 0;
4572       rem = n_th - (S * n_places);
4573       gap = rem > 0 ? n_places / rem : n_places;
4574       int place = masters_place;
4575       int gap_ct = gap;
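      // Block-distribute n_th threads over n_places places: each place gets S
      // threads, and rem of the places (spaced roughly 'gap' apart) get one
      // extra thread.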
4576       for (f = 0; f < n_th; f++) {
4577         kmp_info_t *th = team->t.t_threads[f];
4578         KMP_DEBUG_ASSERT(th != NULL);
4579 
4580         th->th.th_first_place = first_place;
4581         th->th.th_last_place = last_place;
4582         th->th.th_new_place = place;
4583         s_count++;
4584 
4585         if ((s_count == S) && rem && (gap_ct == gap)) {
4586           // do nothing; an extra thread is added to this place on the next iteration
4587         } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4588           // we added an extra thread to this place; move to next place
4589           if (place == last_place) {
4590             place = first_place;
4591           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4592             place = 0;
4593           } else {
4594             place++;
4595           }
4596           s_count = 0;
4597           gap_ct = 1;
4598           rem--;
4599         } else if (s_count == S) { // place full; don't add extra
4600           if (place == last_place) {
4601             place = first_place;
4602           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4603             place = 0;
4604           } else {
4605             place++;
4606           }
4607           gap_ct++;
4608           s_count = 0;
4609         }
4610 
4611         KA_TRACE(100,
4612                  ("__kmp_partition_places: close: T#%d(%d:%d) place %d "
4613                   "partition = [%d,%d]\n",
4614                   __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
4615                   th->th.th_new_place, first_place, last_place));
4616       }
4617       KMP_DEBUG_ASSERT(place == masters_place);
4618     }
4619   } break;
4620 
4621   case proc_bind_spread: {
4622     int f;
4623     int n_th = team->t.t_nproc;
4624     int n_places;
4625     int thidx;
4626     if (first_place <= last_place) {
4627       n_places = last_place - first_place + 1;
4628     } else {
4629       n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4630     }
4631     if (n_th <= n_places) {
4632       int place = -1;
4633 
4634       if (n_places != static_cast<int>(__kmp_affinity_num_masks)) {
4635         int S = n_places / n_th;
4636         int s_count, rem, gap, gap_ct;
4637 
4638         place = masters_place;
4639         rem = n_places - n_th * S;
4640         gap = rem ? n_th / rem : 1;
4641         gap_ct = gap;
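        // Give each thread a sub-partition of S consecutive places; rem of the
        // sub-partitions (spaced 'gap' threads apart) get one extra place.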
4642         thidx = n_th;
4643         if (update_master_only == 1)
4644           thidx = 1;
4645         for (f = 0; f < thidx; f++) {
4646           kmp_info_t *th = team->t.t_threads[f];
4647           KMP_DEBUG_ASSERT(th != NULL);
4648 
4649           th->th.th_first_place = place;
4650           th->th.th_new_place = place;
4651           s_count = 1;
4652           while (s_count < S) {
4653             if (place == last_place) {
4654               place = first_place;
4655             } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4656               place = 0;
4657             } else {
4658               place++;
4659             }
4660             s_count++;
4661           }
4662           if (rem && (gap_ct == gap)) {
4663             if (place == last_place) {
4664               place = first_place;
4665             } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4666               place = 0;
4667             } else {
4668               place++;
4669             }
4670             rem--;
4671             gap_ct = 0;
4672           }
4673           th->th.th_last_place = place;
4674           gap_ct++;
4675 
4676           if (place == last_place) {
4677             place = first_place;
4678           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4679             place = 0;
4680           } else {
4681             place++;
4682           }
4683 
4684           KA_TRACE(100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4685                          "partition = [%d,%d], __kmp_affinity_num_masks: %u\n",
4686                          __kmp_gtid_from_thread(team->t.t_threads[f]),
4687                          team->t.t_id, f, th->th.th_new_place,
4688                          th->th.th_first_place, th->th.th_last_place,
4689                          __kmp_affinity_num_masks));
4690         }
4691       } else {
4692         /* With a uniform space of available computation places we can create
4693            T partitions of roughly P/T places each and put a thread into the
4694            first place of each partition. */
4695         double current = static_cast<double>(masters_place);
4696         double spacing =
4697                 (static_cast<double>(n_places + 1) / static_cast<double>(n_th));
4698         int first, last;
4699         kmp_info_t *th;
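        // Illustration (assuming masters_place == 0): with 8 places and 4
        // threads, spacing = 2.25, so the threads land on places 0, 2, 4 and 6
        // with partitions [0,1], [2,3], [4,5] and [6,7].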
4700 
4701         thidx = n_th + 1;
4702         if (update_master_only == 1)
4703           thidx = 1;
4704         for (f = 0; f < thidx; f++) {
4705           first = static_cast<int>(current);
4706           last = static_cast<int>(current + spacing) - 1;
4707           KMP_DEBUG_ASSERT(last >= first);
4708           if (first >= n_places) {
4709             if (masters_place) {
4710               first -= n_places;
4711               last -= n_places;
4712               if (first == (masters_place + 1)) {
4713                 KMP_DEBUG_ASSERT(f == n_th);
4714                 first--;
4715               }
4716               if (last == masters_place) {
4717                 KMP_DEBUG_ASSERT(f == (n_th - 1));
4718                 last--;
4719               }
4720             } else {
4721               KMP_DEBUG_ASSERT(f == n_th);
4722               first = 0;
4723               last = 0;
4724             }
4725           }
4726           if (last >= n_places) {
4727             last = (n_places - 1);
4728           }
4729           place = first;
4730           current += spacing;
4731           if (f < n_th) {
4732             KMP_DEBUG_ASSERT(0 <= first);
4733             KMP_DEBUG_ASSERT(n_places > first);
4734             KMP_DEBUG_ASSERT(0 <= last);
4735             KMP_DEBUG_ASSERT(n_places > last);
4736             KMP_DEBUG_ASSERT(last_place >= first_place);
4737             th = team->t.t_threads[f];
4738             KMP_DEBUG_ASSERT(th);
4739             th->th.th_first_place = first;
4740             th->th.th_new_place = place;
4741             th->th.th_last_place = last;
4742 
4743             KA_TRACE(100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4744                            "partition = [%d,%d], spacing = %.4f\n",
4745                            __kmp_gtid_from_thread(team->t.t_threads[f]),
4746                            team->t.t_id, f, th->th.th_new_place,
4747                            th->th.th_first_place, th->th.th_last_place,
4748                            spacing));
4749           }
4750         }
4751       }
4752       KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4753     } else {
4754       int S, rem, gap, s_count;
4755       S = n_th / n_places;
4756       s_count = 0;
4757       rem = n_th - (S * n_places);
4758       gap = rem > 0 ? n_places / rem : n_places;
4759       int place = masters_place;
4760       int gap_ct = gap;
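      // Same block distribution as in the close policy above: S threads per
      // place, with rem places receiving one extra thread.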
4761       thidx = n_th;
4762       if (update_master_only == 1)
4763         thidx = 1;
4764       for (f = 0; f < thidx; f++) {
4765         kmp_info_t *th = team->t.t_threads[f];
4766         KMP_DEBUG_ASSERT(th != NULL);
4767 
4768         th->th.th_first_place = place;
4769         th->th.th_last_place = place;
4770         th->th.th_new_place = place;
4771         s_count++;
4772 
4773         if ((s_count == S) && rem && (gap_ct == gap)) {
4774           // do nothing; an extra thread is added to this place on the next iteration
4775         } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4776           // we added an extra thread to this place; move on to next place
4777           if (place == last_place) {
4778             place = first_place;
4779           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4780             place = 0;
4781           } else {
4782             place++;
4783           }
4784           s_count = 0;
4785           gap_ct = 1;
4786           rem--;
4787         } else if (s_count == S) { // place is full; don't add extra thread
4788           if (place == last_place) {
4789             place = first_place;
4790           } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4791             place = 0;
4792           } else {
4793             place++;
4794           }
4795           gap_ct++;
4796           s_count = 0;
4797         }
4798 
4799         KA_TRACE(100, ("__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4800                        "partition = [%d,%d]\n",
4801                        __kmp_gtid_from_thread(team->t.t_threads[f]),
4802                        team->t.t_id, f, th->th.th_new_place,
4803                        th->th.th_first_place, th->th.th_last_place));
4804       }
4805       KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4806     }
4807   } break;
4808 
4809   default:
4810     break;
4811   }
4812 
4813   KA_TRACE(20, ("__kmp_partition_places: exit T#%d\n", team->t.t_id));
4814 }
4815 
4816 #endif /* OMP_40_ENABLED && KMP_AFFINITY_SUPPORTED */
4817 
4818 /* Allocate a new team data structure to use. Take one off the free pool if
4819    available. */
4820 kmp_team_t *
4821 __kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
4822 #if OMPT_SUPPORT
4823                     ompt_parallel_id_t ompt_parallel_id,
4824 #endif
4825 #if OMP_40_ENABLED
4826                     kmp_proc_bind_t new_proc_bind,
4827 #endif
4828                     kmp_internal_control_t *new_icvs,
4829                     int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
4830   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
4831   int f;
4832   kmp_team_t *team;
4833   int use_hot_team = !root->r.r_active;
4834   int level = 0;
4835 
4836   KA_TRACE(20, ("__kmp_allocate_team: called\n"));
4837   KMP_DEBUG_ASSERT(new_nproc >= 1 && argc >= 0);
4838   KMP_DEBUG_ASSERT(max_nproc >= new_nproc);
4839   KMP_MB();
4840 
4841 #if KMP_NESTED_HOT_TEAMS
4842   kmp_hot_team_ptr_t *hot_teams;
4843   if (master) {
4844     team = master->th.th_team;
4845     level = team->t.t_active_level;
4846     if (master->th.th_teams_microtask) { // in teams construct?
4847       if (master->th.th_teams_size.nteams > 1 &&
4848           ( // #teams > 1
4849               team->t.t_pkfn ==
4850                   (microtask_t)__kmp_teams_master || // inner fork of the teams
4851               master->th.th_teams_level <
4852                   team->t.t_level)) { // or nested parallel inside the teams
4853         ++level; // do not increment if #teams==1 or for the outer fork of the
4854         // teams; increment otherwise
4855       }
4856     }
4857     hot_teams = master->th.th_hot_teams;
4858     if (level < __kmp_hot_teams_max_level && hot_teams &&
4859         hot_teams[level]
4860             .hot_team) { // hot team has already been allocated for given level
4861       use_hot_team = 1;
4862     } else {
4863       use_hot_team = 0;
4864     }
4865   }
4866 #endif
4867   // Optimization to use a "hot" team
4868   if (use_hot_team && new_nproc > 1) {
4869     KMP_DEBUG_ASSERT(new_nproc == max_nproc);
4870 #if KMP_NESTED_HOT_TEAMS
4871     team = hot_teams[level].hot_team;
4872 #else
4873     team = root->r.r_hot_team;
4874 #endif
4875 #if KMP_DEBUG
4876     if (__kmp_tasking_mode != tskm_immediate_exec) {
4877       KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
4878                     "task_team[1] = %p before reinit\n",
4879                     team->t.t_task_team[0], team->t.t_task_team[1]));
4880     }
4881 #endif
4882 
4883     // Has the number of threads changed?
4884     /* Let's assume the most common case is that the number of threads is
4885        unchanged, and put that case first. */
4886     if (team->t.t_nproc == new_nproc) { // Check changes in number of threads
4887       KA_TRACE(20, ("__kmp_allocate_team: reusing hot team\n"));
4888       // This case can mean that omp_set_num_threads() was called and the hot
4889       // team size was already reduced, so we check the special flag
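      // (A value of -1 in t_size_changed marks a hot team whose size was
      //  reduced elsewhere, e.g. via omp_set_num_threads(); treat that as a
      //  size change here.)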
4890       if (team->t.t_size_changed == -1) {
4891         team->t.t_size_changed = 1;
4892       } else {
4893         KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
4894       }
4895 
4896       // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4897       kmp_r_sched_t new_sched = new_icvs->sched;
4898       if (team->t.t_sched.r_sched_type != new_sched.r_sched_type ||
4899           team->t.t_sched.chunk != new_sched.chunk)
4900         team->t.t_sched =
4901             new_sched; // set master's schedule as new run-time schedule
4902 
4903       __kmp_reinitialize_team(team, new_icvs,
4904                               root->r.r_uber_thread->th.th_ident);
4905 
4906       KF_TRACE(10, ("__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
4907                     team->t.t_threads[0], team));
4908       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
4909 
4910 #if OMP_40_ENABLED
4911 #if KMP_AFFINITY_SUPPORTED
4912       if ((team->t.t_size_changed == 0) &&
4913           (team->t.t_proc_bind == new_proc_bind)) {
4914         if (new_proc_bind == proc_bind_spread) {
4915           __kmp_partition_places(
4916               team, 1); // add flag to update only master for spread
4917         }
4918         KA_TRACE(200, ("__kmp_allocate_team: reusing hot team #%d bindings: "
4919                        "proc_bind = %d, partition = [%d,%d]\n",
4920                        team->t.t_id, new_proc_bind, team->t.t_first_place,
4921                        team->t.t_last_place));
4922       } else {
4923         KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4924         __kmp_partition_places(team);
4925       }
4926 #else
4927       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4928 #endif /* KMP_AFFINITY_SUPPORTED */
4929 #endif /* OMP_40_ENABLED */
4930     } else if (team->t.t_nproc > new_nproc) {
4931       KA_TRACE(20,
4932                ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
4933                 new_nproc));
4934 
4935       team->t.t_size_changed = 1;
4936 #if KMP_NESTED_HOT_TEAMS
4937       if (__kmp_hot_teams_mode == 0) {
4938         // AC: saved number of threads should correspond to team's value in this
4939         // mode, can be bigger in mode 1, when hot team has threads in reserve
4940         KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
4941         hot_teams[level].hot_team_nth = new_nproc;
4942 #endif // KMP_NESTED_HOT_TEAMS
4943         /* release the extra threads we don't need any more */
4944         for (f = new_nproc; f < team->t.t_nproc; f++) {
4945           KMP_DEBUG_ASSERT(team->t.t_threads[f]);
4946           if (__kmp_tasking_mode != tskm_immediate_exec) {
4947             // When decreasing team size, threads no longer in the team should
4948             // unref task team.
4949             team->t.t_threads[f]->th.th_task_team = NULL;
4950           }
4951           __kmp_free_thread(team->t.t_threads[f]);
4952           team->t.t_threads[f] = NULL;
4953         }
4954 #if KMP_NESTED_HOT_TEAMS
4955       } // (__kmp_hot_teams_mode == 0)
4956       else {
4957         // When keeping extra threads in team, switch threads to wait on own
4958         // b_go flag
4959         for (f = new_nproc; f < team->t.t_nproc; ++f) {
4960           KMP_DEBUG_ASSERT(team->t.t_threads[f]);
4961           kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
4962           for (int b = 0; b < bs_last_barrier; ++b) {
4963             if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG) {
4964               balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
4965             }
4966             KMP_CHECK_UPDATE(balign[b].bb.leaf_kids, 0);
4967           }
4968         }
4969       }
4970 #endif // KMP_NESTED_HOT_TEAMS
4971       team->t.t_nproc = new_nproc;
4972       // TODO???: team->t.t_max_active_levels = new_max_active_levels;
4973       if (team->t.t_sched.r_sched_type != new_icvs->sched.r_sched_type ||
4974           team->t.t_sched.chunk != new_icvs->sched.chunk)
4975         team->t.t_sched = new_icvs->sched;
4976       __kmp_reinitialize_team(team, new_icvs,
4977                               root->r.r_uber_thread->th.th_ident);
4978 
4979       /* update the remaining threads */
4980       for (f = 0; f < new_nproc; ++f) {
4981         team->t.t_threads[f]->th.th_team_nproc = new_nproc;
4982       }
4983       // restore the current task state of the master thread: should be the
4984       // implicit task
4985       KF_TRACE(10, ("__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
4986                     team->t.t_threads[0], team));
4987 
4988       __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
4989 
4990 #ifdef KMP_DEBUG
4991       for (f = 0; f < team->t.t_nproc; f++) {
4992         KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
4993                          team->t.t_threads[f]->th.th_team_nproc ==
4994                              team->t.t_nproc);
4995       }
4996 #endif
4997 
4998 #if OMP_40_ENABLED
4999       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5000 #if KMP_AFFINITY_SUPPORTED
5001       __kmp_partition_places(team);
5002 #endif
5003 #endif
5004     } else { // team->t.t_nproc < new_nproc
5005 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5006       kmp_affin_mask_t *old_mask;
5007       if (KMP_AFFINITY_CAPABLE()) {
5008         KMP_CPU_ALLOC(old_mask);
5009       }
5010 #endif
5011 
5012       KA_TRACE(20,
5013                ("__kmp_allocate_team: increasing hot team thread count to %d\n",
5014                 new_nproc));
5015 
5016       team->t.t_size_changed = 1;
5017 
5018 #if KMP_NESTED_HOT_TEAMS
5019       int avail_threads = hot_teams[level].hot_team_nth;
5020       if (new_nproc < avail_threads)
5021         avail_threads = new_nproc;
5022       kmp_info_t **other_threads = team->t.t_threads;
5023       for (f = team->t.t_nproc; f < avail_threads; ++f) {
5024         // Adjust barrier data of reserved threads (if any) of the team
5025         // Other data will be set in __kmp_initialize_info() below.
5026         int b;
5027         kmp_balign_t *balign = other_threads[f]->th.th_bar;
5028         for (b = 0; b < bs_last_barrier; ++b) {
5029           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5030           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5031 #if USE_DEBUGGER
5032           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5033 #endif
5034         }
5035       }
5036       if (hot_teams[level].hot_team_nth >= new_nproc) {
5037         // we have all needed threads in reserve, no need to allocate any
5038         // this is only possible in mode 1; there cannot be reserved threads in mode 0
5039         KMP_DEBUG_ASSERT(__kmp_hot_teams_mode == 1);
5040         team->t.t_nproc = new_nproc; // just get reserved threads involved
5041       } else {
5042         // we may have some threads in reserve, but not enough
5043         team->t.t_nproc =
5044             hot_teams[level]
5045                 .hot_team_nth; // get reserved threads involved if any
5046         hot_teams[level].hot_team_nth = new_nproc; // adjust hot team max size
5047 #endif // KMP_NESTED_HOT_TEAMS
5048         if (team->t.t_max_nproc < new_nproc) {
5049           /* reallocate larger arrays */
5050           __kmp_reallocate_team_arrays(team, new_nproc);
5051           __kmp_reinitialize_team(team, new_icvs, NULL);
5052         }
5053 
5054 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5055         /* Temporarily set full mask for master thread before creation of
5056            workers. The reason is that workers inherit the affinity from the
5057            master, so if a lot of workers are created on a single core quickly, they
5058            don't get a chance to set their own affinity for a long time. */
5059         __kmp_set_thread_affinity_mask_full_tmp(old_mask);
5060 #endif
5061 
5062         /* allocate new threads for the hot team */
5063         for (f = team->t.t_nproc; f < new_nproc; f++) {
5064           kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5065           KMP_DEBUG_ASSERT(new_worker);
5066           team->t.t_threads[f] = new_worker;
5067 
5068           KA_TRACE(20,
5069                    ("__kmp_allocate_team: team %d init T#%d arrived: "
5070                     "join=%llu, plain=%llu\n",
5071                     team->t.t_id, __kmp_gtid_from_tid(f, team), team->t.t_id, f,
5072                     team->t.t_bar[bs_forkjoin_barrier].b_arrived,
5073                     team->t.t_bar[bs_plain_barrier].b_arrived));
5074 
5075           { // Initialize barrier data for new threads.
5076             int b;
5077             kmp_balign_t *balign = new_worker->th.th_bar;
5078             for (b = 0; b < bs_last_barrier; ++b) {
5079               balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5080               KMP_DEBUG_ASSERT(balign[b].bb.wait_flag !=
5081                                KMP_BARRIER_PARENT_FLAG);
5082 #if USE_DEBUGGER
5083               balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5084 #endif
5085             }
5086           }
5087         }
5088 
5089 #if KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
5090         if (KMP_AFFINITY_CAPABLE()) {
5091           /* Restore initial master thread's affinity mask */
5092           __kmp_set_system_affinity(old_mask, TRUE);
5093           KMP_CPU_FREE(old_mask);
5094         }
5095 #endif
5096 #if KMP_NESTED_HOT_TEAMS
5097       } // end of check of t_nproc vs. new_nproc vs. hot_team_nth
5098 #endif // KMP_NESTED_HOT_TEAMS
5099       /* make sure everyone is synchronized */
5100       int old_nproc = team->t.t_nproc; // save old value and use to update only
5101       // new threads below
5102       __kmp_initialize_team(team, new_nproc, new_icvs,
5103                             root->r.r_uber_thread->th.th_ident);
5104 
5105       /* reinitialize the threads */
5106       KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
5107       for (f = 0; f < team->t.t_nproc; ++f)
5108         __kmp_initialize_info(team->t.t_threads[f], team, f,
5109                               __kmp_gtid_from_tid(f, team));
5110       if (level) { // set th_task_state for new threads in nested hot team
5111         // __kmp_initialize_info() no longer zeroes th_task_state, so we should
5112         // only need to set the th_task_state for the new threads. th_task_state
5113         // for master thread will not be accurate until after this in
5114         // __kmp_fork_call(), so we look to the master's memo_stack to get the
5115         // correct value.
5116         for (f = old_nproc; f < team->t.t_nproc; ++f)
5117           team->t.t_threads[f]->th.th_task_state =
5118               team->t.t_threads[0]->th.th_task_state_memo_stack[level];
5119       } else { // set th_task_state for new threads in non-nested hot team
5120         int old_state =
5121             team->t.t_threads[0]->th.th_task_state; // copy master's state
5122         for (f = old_nproc; f < team->t.t_nproc; ++f)
5123           team->t.t_threads[f]->th.th_task_state = old_state;
5124       }
5125 
5126 #ifdef KMP_DEBUG
5127       for (f = 0; f < team->t.t_nproc; ++f) {
5128         KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5129                          team->t.t_threads[f]->th.th_team_nproc ==
5130                              team->t.t_nproc);
5131       }
5132 #endif
5133 
5134 #if OMP_40_ENABLED
5135       KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5136 #if KMP_AFFINITY_SUPPORTED
5137       __kmp_partition_places(team);
5138 #endif
5139 #endif
5140     } // Check changes in number of threads
5141 
5142 #if OMP_40_ENABLED
5143     kmp_info_t *master = team->t.t_threads[0];
5144     if (master->th.th_teams_microtask) {
5145       for (f = 1; f < new_nproc; ++f) {
5146         // propagate teams construct specific info to workers
5147         kmp_info_t *thr = team->t.t_threads[f];
5148         thr->th.th_teams_microtask = master->th.th_teams_microtask;
5149         thr->th.th_teams_level = master->th.th_teams_level;
5150         thr->th.th_teams_size = master->th.th_teams_size;
5151       }
5152     }
5153 #endif /* OMP_40_ENABLED */
5154 #if KMP_NESTED_HOT_TEAMS
5155     if (level) {
5156       // Sync barrier state for nested hot teams, not needed for outermost hot
5157       // team.
5158       for (f = 1; f < new_nproc; ++f) {
5159         kmp_info_t *thr = team->t.t_threads[f];
5160         int b;
5161         kmp_balign_t *balign = thr->th.th_bar;
5162         for (b = 0; b < bs_last_barrier; ++b) {
5163           balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5164           KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5165 #if USE_DEBUGGER
5166           balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5167 #endif
5168         }
5169       }
5170     }
5171 #endif // KMP_NESTED_HOT_TEAMS
5172 
5173     /* reallocate space for arguments if necessary */
5174     __kmp_alloc_argv_entries(argc, team, TRUE);
5175     KMP_CHECK_UPDATE(team->t.t_argc, argc);
5176     // The hot team re-uses the previous task team,
5177     // if untouched during the previous release->gather phase.
5178 
5179     KF_TRACE(10, (" hot_team = %p\n", team));
5180 
5181 #if KMP_DEBUG
5182     if (__kmp_tasking_mode != tskm_immediate_exec) {
5183       KA_TRACE(20, ("__kmp_allocate_team: hot team task_team[0] = %p "
5184                     "task_team[1] = %p after reinit\n",
5185                     team->t.t_task_team[0], team->t.t_task_team[1]));
5186     }
5187 #endif
5188 
5189 #if OMPT_SUPPORT
5190     __ompt_team_assign_id(team, ompt_parallel_id);
5191 #endif
5192 
5193     KMP_MB();
5194 
5195     return team;
5196   }
5197 
5198   /* next, let's try to take one from the team pool */
5199   KMP_MB();
5200   for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
5201     /* TODO: consider resizing undersized teams instead of reaping them, now
5202        that we have a resizing mechanism */
5203     if (team->t.t_max_nproc >= max_nproc) {
5204       /* take this team from the team pool */
5205       __kmp_team_pool = team->t.t_next_pool;
5206 
5207       /* setup the team for fresh use */
5208       __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5209 
5210       KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and "
5211                     "task_team[1] %p to NULL\n",
5212                     &team->t.t_task_team[0], &team->t.t_task_team[1]));
5213       team->t.t_task_team[0] = NULL;
5214       team->t.t_task_team[1] = NULL;
5215 
5216       /* reallocate space for arguments if necessary */
5217       __kmp_alloc_argv_entries(argc, team, TRUE);
5218       KMP_CHECK_UPDATE(team->t.t_argc, argc);
5219 
5220       KA_TRACE(
5221           20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5222                team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5223       { // Initialize barrier data.
5224         int b;
5225         for (b = 0; b < bs_last_barrier; ++b) {
5226           team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5227 #if USE_DEBUGGER
5228           team->t.t_bar[b].b_master_arrived = 0;
5229           team->t.t_bar[b].b_team_arrived = 0;
5230 #endif
5231         }
5232       }
5233 
5234 #if OMP_40_ENABLED
5235       team->t.t_proc_bind = new_proc_bind;
5236 #endif
5237 
5238       KA_TRACE(20, ("__kmp_allocate_team: using team from pool %d.\n",
5239                     team->t.t_id));
5240 
5241 #if OMPT_SUPPORT
5242       __ompt_team_assign_id(team, ompt_parallel_id);
5243 #endif
5244 
5245       KMP_MB();
5246 
5247       return team;
5248     }
5249 
5250     /* reap the team if it is too small, then loop back and check the next one */
5251     // not sure if this is wise, but it will be redone during the hot-teams rewrite.
5252     /* TODO: Use a technique to find the right-size hot team; don't reap them */
5253     team = __kmp_reap_team(team);
5254     __kmp_team_pool = team;
5255   }
5256 
5257   /* nothing available in the pool, no matter, make a new team! */
5258   KMP_MB();
5259   team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));
5260 
5261   /* and set it up */
5262   team->t.t_max_nproc = max_nproc;
5263   /* NOTE well, for some reason allocating one big buffer and dividing it up
5264      seems to really hurt performance a lot on the P4, so let's not use this. */
5265   __kmp_allocate_team_arrays(team, max_nproc);
5266 
5267   KA_TRACE(20, ("__kmp_allocate_team: making a new team\n"));
5268   __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5269 
5270   KA_TRACE(20, ("__kmp_allocate_team: setting task_team[0] %p and task_team[1] "
5271                 "%p to NULL\n",
5272                 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5273   team->t.t_task_team[0] = NULL; // to be removed, as __kmp_allocate zeroes
5274   // memory, no need to duplicate
5275   team->t.t_task_team[1] = NULL; // to be removed, as __kmp_allocate zeroes
5276   // memory, no need to duplicate
5277 
5278   if (__kmp_storage_map) {
5279     __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
5280   }
5281 
5282   /* allocate space for arguments */
5283   __kmp_alloc_argv_entries(argc, team, FALSE);
5284   team->t.t_argc = argc;
5285 
5286   KA_TRACE(20,
5287            ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5288             team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5289   { // Initialize barrier data.
5290     int b;
5291     for (b = 0; b < bs_last_barrier; ++b) {
5292       team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5293 #if USE_DEBUGGER
5294       team->t.t_bar[b].b_master_arrived = 0;
5295       team->t.t_bar[b].b_team_arrived = 0;
5296 #endif
5297     }
5298   }
5299 
5300 #if OMP_40_ENABLED
5301   team->t.t_proc_bind = new_proc_bind;
5302 #endif
5303 
5304 #if OMPT_SUPPORT
5305   __ompt_team_assign_id(team, ompt_parallel_id);
5306   team->t.ompt_serialized_team_info = NULL;
5307 #endif
5308 
5309   KMP_MB();
5310 
5311   KA_TRACE(20, ("__kmp_allocate_team: done creating a new team %d.\n",
5312                 team->t.t_id));
5313 
5314   return team;
5315 }
5316 
5317 /* TODO implement hot-teams at all levels */
5318 /* TODO implement lazy thread release on demand (disband request) */
5319 
5320 /* Free the team. Return it to the team pool. Release all the threads
5321  * associated with it. */
5322 void __kmp_free_team(kmp_root_t *root,
5323                      kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
5324   int f;
5325   KA_TRACE(20, ("__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
5326                 team->t.t_id));
5327 
5328   /* verify state */
5329   KMP_DEBUG_ASSERT(root);
5330   KMP_DEBUG_ASSERT(team);
5331   KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
5332   KMP_DEBUG_ASSERT(team->t.t_threads);
5333 
5334   int use_hot_team = team == root->r.r_hot_team;
5335 #if KMP_NESTED_HOT_TEAMS
5336   int level;
5337   kmp_hot_team_ptr_t *hot_teams;
5338   if (master) {
5339     level = team->t.t_active_level - 1;
5340     if (master->th.th_teams_microtask) { // in teams construct?
5341       if (master->th.th_teams_size.nteams > 1) {
5342         ++level; // level was not increased in teams construct for
5343         // team_of_masters
5344       }
5345       if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
5346           master->th.th_teams_level == team->t.t_level) {
5347         ++level; // level was not increased in teams construct for
5348         // team_of_workers before the parallel
5349       } // team->t.t_level will be increased inside parallel
5350     }
5351     hot_teams = master->th.th_hot_teams;
5352     if (level < __kmp_hot_teams_max_level) {
5353       KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
5354       use_hot_team = 1;
5355     }
5356   }
5357 #endif // KMP_NESTED_HOT_TEAMS
5358 
5359   /* team is done working */
5360   TCW_SYNC_PTR(team->t.t_pkfn,
5361                NULL); // Important for Debugging Support Library.
5362   team->t.t_copyin_counter = 0; // init counter for possible reuse
5363   // Do not reset pointer to parent team to NULL for hot teams.
5364 
5365   /* if we are non-hot team, release our threads */
5366   if (!use_hot_team) {
5367     if (__kmp_tasking_mode != tskm_immediate_exec) {
5368       // Wait for threads to reach reapable state
5369       for (f = 1; f < team->t.t_nproc; ++f) {
5370         KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5371         kmp_info_t *th = team->t.t_threads[f];
5372         volatile kmp_uint32 *state = &th->th.th_reap_state;
5373         while (*state != KMP_SAFE_TO_REAP) {
5374 #if KMP_OS_WINDOWS
5375           // On Windows a thread can be killed at any time, check this
5376           DWORD ecode;
5377           if (!__kmp_is_thread_alive(th, &ecode)) {
5378             *state = KMP_SAFE_TO_REAP; // reset the flag for dead thread
5379             break;
5380           }
5381 #endif
5382           // first check if thread is sleeping
5383           kmp_flag_64 fl(&th->th.th_bar[bs_forkjoin_barrier].bb.b_go, th);
5384           if (fl.is_sleeping())
5385             fl.resume(__kmp_gtid_from_thread(th));
5386           KMP_CPU_PAUSE();
5387         }
5388       }
5389 
5390       // Delete task teams
5391       int tt_idx;
5392       for (tt_idx = 0; tt_idx < 2; ++tt_idx) {
5393         kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
5394         if (task_team != NULL) {
5395           for (f = 0; f < team->t.t_nproc;
5396                ++f) { // Have all threads unref task teams
5397             team->t.t_threads[f]->th.th_task_team = NULL;
5398           }
5399           KA_TRACE(
5400               20,
5401               ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
5402                __kmp_get_gtid(), task_team, team->t.t_id));
5403 #if KMP_NESTED_HOT_TEAMS
5404           __kmp_free_task_team(master, task_team);
5405 #endif
5406           team->t.t_task_team[tt_idx] = NULL;
5407         }
5408       }
5409     }
5410 
5411     // Reset pointer to parent team only for non-hot teams.
5412     team->t.t_parent = NULL;
5413     team->t.t_level = 0;
5414     team->t.t_active_level = 0;
5415 
5416     /* free the worker threads */
5417     for (f = 1; f < team->t.t_nproc; ++f) {
5418       KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5419       __kmp_free_thread(team->t.t_threads[f]);
5420       team->t.t_threads[f] = NULL;
5421     }
5422 
5423     /* put the team back in the team pool */
5424     /* TODO limit size of team pool, call reap_team if pool too large */
5425     team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
5426     __kmp_team_pool = (volatile kmp_team_t *)team;
5427   }
5428 
5429   KMP_MB();
5430 }
5431 
5432 /* reap the team.  destroy it, reclaim all its resources and free its memory */
5433 kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
5434   kmp_team_t *next_pool = team->t.t_next_pool;
5435 
5436   KMP_DEBUG_ASSERT(team);
5437   KMP_DEBUG_ASSERT(team->t.t_dispatch);
5438   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
5439   KMP_DEBUG_ASSERT(team->t.t_threads);
5440   KMP_DEBUG_ASSERT(team->t.t_argv);
5441 
5442   /* TODO clean the threads that are a part of this? */
5443 
5444   /* free stuff */
5445   __kmp_free_team_arrays(team);
5446   if (team->t.t_argv != &team->t.t_inline_argv[0])
5447     __kmp_free((void *)team->t.t_argv);
5448   __kmp_free(team);
5449 
5450   KMP_MB();
5451   return next_pool;
5452 }
5453 
5454 // Free the thread.  Don't reap it, just place it on the pool of available
5455 // threads.
5456 //
5457 // Changes for Quad issue 527845: We need a predictable OMP tid <-> gtid
5458 // binding for the affinity mechanism to be useful.
5459 //
5460 // Now, we always keep the free list (__kmp_thread_pool) sorted by gtid.
5461 // However, we want to avoid a potential performance problem by always
5462 // scanning through the list to find the correct point at which to insert
5463 // the thread (potential N**2 behavior).  To do this we keep track of the
5464 // last place a thread struct was inserted (__kmp_thread_pool_insert_pt).
5465 // With single-level parallelism, threads will always be added to the tail
5466 // of the list, kept track of by __kmp_thread_pool_insert_pt.  With nested
5467 // parallelism, all bets are off and we may need to scan through the entire
5468 // free list.
5469 //
5470 // This change also has a potentially large performance benefit, for some
5471 // applications.  Previously, as threads were freed from the hot team, they
5472 // would be placed back on the free list in inverse order.  If the hot team
5473 // grew back to its original size, then the freed threads would be placed
5474 // back on the hot team in reverse order.  This could cause bad cache
5475 // locality problems on programs where the size of the hot team regularly
5476 // grew and shrunk.
5477 //
5478 // Now, for single-level parallelism, the OMP tid is always == gtid.
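// In short, __kmp_thread_pool_insert_pt caches the last insertion link so the
// common case (threads freed in ascending gtid order) avoids rescanning the
// list.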
5479 void __kmp_free_thread(kmp_info_t *this_th) {
5480   int gtid;
5481   kmp_info_t **scan;
5482   kmp_root_t *root = this_th->th.th_root;
5483 
5484   KA_TRACE(20, ("__kmp_free_thread: T#%d putting T#%d back on free pool.\n",
5485                 __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid));
5486 
5487   KMP_DEBUG_ASSERT(this_th);
5488 
5489   // When moving a thread to the pool, switch it to wait on its own b_go flag
5490   // and an uninitialized (NULL) team.
5491   int b;
5492   kmp_balign_t *balign = this_th->th.th_bar;
5493   for (b = 0; b < bs_last_barrier; ++b) {
5494     if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG)
5495       balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5496     balign[b].bb.team = NULL;
5497     balign[b].bb.leaf_kids = 0;
5498   }
5499   this_th->th.th_task_state = 0;
5500 
5501   /* put thread back on the free pool */
5502   TCW_PTR(this_th->th.th_team, NULL);
5503   TCW_PTR(this_th->th.th_root, NULL);
5504   TCW_PTR(this_th->th.th_dispatch, NULL); /* NOT NEEDED */
5505 
5506   // If the __kmp_thread_pool_insert_pt is already past the new insert
5507   // point, then we need to re-scan the entire list.
5508   gtid = this_th->th.th_info.ds.ds_gtid;
5509   if (__kmp_thread_pool_insert_pt != NULL) {
5510     KMP_DEBUG_ASSERT(__kmp_thread_pool != NULL);
5511     if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) {
5512       __kmp_thread_pool_insert_pt = NULL;
5513     }
5514   }
5515 
5516   // Scan down the list to find the place to insert the thread.
5517   // scan is the address of a link in the list, possibly the address of
5518   // __kmp_thread_pool itself.
5519   //
5520   // In the absence of nested parallelism, the for loop will have 0 iterations.
5521   if (__kmp_thread_pool_insert_pt != NULL) {
5522     scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
5523   } else {
5524     scan = CCAST(kmp_info_t **, &__kmp_thread_pool);
5525   }
5526   for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid);
5527        scan = &((*scan)->th.th_next_pool))
5528     ;
5529 
5530   // Insert the new element on the list, and set __kmp_thread_pool_insert_pt
5531   // to its address.
5532   TCW_PTR(this_th->th.th_next_pool, *scan);
5533   __kmp_thread_pool_insert_pt = *scan = this_th;
5534   KMP_DEBUG_ASSERT((this_th->th.th_next_pool == NULL) ||
5535                    (this_th->th.th_info.ds.ds_gtid <
5536                     this_th->th.th_next_pool->th.th_info.ds.ds_gtid));
5537   TCW_4(this_th->th.th_in_pool, TRUE);
5538   __kmp_thread_pool_nth++;
5539 
5540   TCW_4(__kmp_nth, __kmp_nth - 1);
5541   root->r.r_cg_nthreads--;
5542 
5543 #ifdef KMP_ADJUST_BLOCKTIME
5544   /* Adjust blocktime back to user setting or default if necessary */
5545   /* Middle initialization might never have occurred                */
5546   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5547     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5548     if (__kmp_nth <= __kmp_avail_proc) {
5549       __kmp_zero_bt = FALSE;
5550     }
5551   }
5552 #endif /* KMP_ADJUST_BLOCKTIME */
5553 
5554   KMP_MB();
5555 }
5556 
5557 /* ------------------------------------------------------------------------ */
5558 
5559 void *__kmp_launch_thread(kmp_info_t *this_thr) {
5560   int gtid = this_thr->th.th_info.ds.ds_gtid;
5561   /*    void                 *stack_data;*/
5562   kmp_team_t *(*volatile pteam);
5563 
5564   KMP_MB();
5565   KA_TRACE(10, ("__kmp_launch_thread: T#%d start\n", gtid));
5566 
5567   if (__kmp_env_consistency_check) {
5568     this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid); // ATT: Memory leak?
5569   }
5570 
5571 #if OMPT_SUPPORT
5572   if (ompt_enabled) {
5573     this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5574     this_thr->th.ompt_thread_info.wait_id = 0;
5575     this_thr->th.ompt_thread_info.idle_frame = __builtin_frame_address(0);
5576     if (ompt_callbacks.ompt_callback(ompt_event_thread_begin)) {
5577       __ompt_thread_begin(ompt_thread_worker, gtid);
5578     }
5579   }
5580 #endif
5581 
5582   /* This is the place where threads wait for work */
5583   while (!TCR_4(__kmp_global.g.g_done)) {
5584     KMP_DEBUG_ASSERT(this_thr == __kmp_threads[gtid]);
5585     KMP_MB();
5586 
5587     /* wait for work to do */
5588     KA_TRACE(20, ("__kmp_launch_thread: T#%d waiting for work\n", gtid));
5589 
5590 #if OMPT_SUPPORT
5591     if (ompt_enabled) {
5592       this_thr->th.ompt_thread_info.state = ompt_state_idle;
5593     }
5594 #endif
5595 
5596     /* No tid yet since not part of a team */
5597     __kmp_fork_barrier(gtid, KMP_GTID_DNE);
5598 
5599 #if OMPT_SUPPORT
5600     if (ompt_enabled) {
5601       this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5602     }
5603 #endif
5604 
5605     pteam = (kmp_team_t * (*))(&this_thr->th.th_team);
5606 
5607     /* have we been allocated? */
5608     if (TCR_SYNC_PTR(*pteam) && !TCR_4(__kmp_global.g.g_done)) {
5609 #if OMPT_SUPPORT
5610       ompt_task_info_t *task_info;
5611       ompt_parallel_id_t my_parallel_id;
5612       if (ompt_enabled) {
5613         task_info = __ompt_get_taskinfo(0);
5614         my_parallel_id = (*pteam)->t.ompt_team_info.parallel_id;
5615       }
5616 #endif
5617       /* we were just woken up, so run our new task */
5618       if (TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL) {
5619         int rc;
5620         KA_TRACE(20,
5621                  ("__kmp_launch_thread: T#%d(%d:%d) invoke microtask = %p\n",
5622                   gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5623                   (*pteam)->t.t_pkfn));
5624 
5625         updateHWFPControl(*pteam);
5626 
5627 #if OMPT_SUPPORT
5628         if (ompt_enabled) {
5629           this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
5630           // Initialize OMPT task id for implicit task.
5631           int tid = __kmp_tid_from_gtid(gtid);
5632           task_info->task_id = __ompt_task_id_new(tid);
5633         }
5634 #endif
5635 
5636         {
5637           KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
5638           KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
5639           rc = (*pteam)->t.t_invoke(gtid);
5640         }
5641         KMP_ASSERT(rc);
5642 
5643 #if OMPT_SUPPORT
5644         if (ompt_enabled) {
5645           /* no frame set while outside task */
5646           task_info->frame.exit_runtime_frame = NULL;
5647 
5648           this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5649         }
5650 #endif
5651         KMP_MB();
5652         KA_TRACE(20, ("__kmp_launch_thread: T#%d(%d:%d) done microtask = %p\n",
5653                       gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5654                       (*pteam)->t.t_pkfn));
5655       }
5656       /* join barrier after parallel region */
5657       __kmp_join_barrier(gtid);
5658 #if OMPT_SUPPORT && OMPT_TRACE
5659       if (ompt_enabled) {
5660         if (ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)) {
5661           // don't access *pteam here: it may have already been freed
5662           // by the master thread behind the barrier (possible race)
5663           ompt_callbacks.ompt_callback(ompt_event_implicit_task_end)(
5664               my_parallel_id, task_info->task_id);
5665         }
5666         task_info->frame.exit_runtime_frame = NULL;
5667         task_info->task_id = 0;
5668       }
5669 #endif
5670     }
5671   }
5672   TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);
5673 
5674 #if OMPT_SUPPORT
5675   if (ompt_enabled && ompt_callbacks.ompt_callback(ompt_event_thread_end)) {
5676     __ompt_thread_end(ompt_thread_worker, gtid);
5677   }
5678 #endif
5679 
5680   this_thr->th.th_task_team = NULL;
5681   /* run the destructors for the threadprivate data for this thread */
5682   __kmp_common_destroy_gtid(gtid);
5683 
5684   KA_TRACE(10, ("__kmp_launch_thread: T#%d done\n", gtid));
5685   KMP_MB();
5686   return this_thr;
5687 }
5688 
5689 /* ------------------------------------------------------------------------ */
5690 
5691 void __kmp_internal_end_dest(void *specific_gtid) {
5692 #if KMP_COMPILER_ICC
5693 #pragma warning(push)
5694 #pragma warning(disable : 810) // conversion from "void *" to "int" may lose
5695 // significant bits
5696 #endif
5697   // Make sure no significant bits are lost
5698   int gtid = (kmp_intptr_t)specific_gtid - 1;
5699 #if KMP_COMPILER_ICC
5700 #pragma warning(pop)
5701 #endif
5702 
5703   KA_TRACE(30, ("__kmp_internal_end_dest: T#%d\n", gtid));
  /* NOTE: the gtid is stored as gtid+1 in the thread-local storage because 0
   * is reserved for the nothing-stored case */
5706 
5707   /* josh: One reason for setting the gtid specific data even when it is being
5708      destroyed by pthread is to allow gtid lookup through thread specific data
5709      (__kmp_gtid_get_specific).  Some of the code, especially stat code,
5710      that gets executed in the call to __kmp_internal_end_thread, actually
5711      gets the gtid through the thread specific data.  Setting it here seems
5712      rather inelegant and perhaps wrong, but allows __kmp_internal_end_thread
5713      to run smoothly.
5714      todo: get rid of this after we remove the dependence on
5715      __kmp_gtid_get_specific  */
5716   if (gtid >= 0 && KMP_UBER_GTID(gtid))
5717     __kmp_gtid_set_specific(gtid);
5718 #ifdef KMP_TDATA_GTID
5719   __kmp_gtid = gtid;
5720 #endif
5721   __kmp_internal_end_thread(gtid);
5722 }
5723 
5724 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
5725 
// 2009-09-08 (lev): It looks like the destructor does not work. In simple test
// cases destructors work perfectly, but in the real libomp.so I have no
// evidence it is ever called. However, the -fini linker option in makefile.mk
// works fine.
5729 
5730 __attribute__((destructor)) void __kmp_internal_end_dtor(void) {
5731   __kmp_internal_end_atexit();
5732 }
5733 
5734 void __kmp_internal_end_fini(void) { __kmp_internal_end_atexit(); }
5735 
5736 #endif
5737 
5738 /* [Windows] josh: when the atexit handler is called, there may still be more
5739    than one thread alive */
5740 void __kmp_internal_end_atexit(void) {
5741   KA_TRACE(30, ("__kmp_internal_end_atexit\n"));
5742   /* [Windows]
5743      josh: ideally, we want to completely shutdown the library in this atexit
5744      handler, but stat code that depends on thread specific data for gtid fails
5745      because that data becomes unavailable at some point during the shutdown, so
5746      we call __kmp_internal_end_thread instead. We should eventually remove the
5747      dependency on __kmp_get_specific_gtid in the stat code and use
5748      __kmp_internal_end_library to cleanly shutdown the library.
5749 
5750      // TODO: Can some of this comment about GVS be removed?
5751      I suspect that the offending stat code is executed when the calling thread
5752      tries to clean up a dead root thread's data structures, resulting in GVS
5753      code trying to close the GVS structures for that thread, but since the stat
5754      code uses __kmp_get_specific_gtid to get the gtid with the assumption that
     the calling thread is cleaning up itself instead of another thread, it
     gets confused. This happens because allowing a thread to unregister and
     clean up another thread is a recent modification for addressing an issue.
5758      Based on the current design (20050722), a thread may end up
5759      trying to unregister another thread only if thread death does not trigger
5760      the calling of __kmp_internal_end_thread.  For Linux* OS, there is the
5761      thread specific data destructor function to detect thread death. For
5762      Windows dynamic, there is DllMain(THREAD_DETACH). For Windows static, there
     is nothing.  Thus, the workaround is applicable only for the Windows
     static stat library. */
5765   __kmp_internal_end_library(-1);
5766 #if KMP_OS_WINDOWS
5767   __kmp_close_console();
5768 #endif
5769 }
5770 
5771 static void __kmp_reap_thread(kmp_info_t *thread, int is_root) {
5772   // It is assumed __kmp_forkjoin_lock is acquired.
5773 
5774   int gtid;
5775 
5776   KMP_DEBUG_ASSERT(thread != NULL);
5777 
5778   gtid = thread->th.th_info.ds.ds_gtid;
5779 
5780   if (!is_root) {
5781 
5782     if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
5783       /* Assume the threads are at the fork barrier here */
5784       KA_TRACE(
5785           20, ("__kmp_reap_thread: releasing T#%d from fork barrier for reap\n",
5786                gtid));
5787       /* Need release fence here to prevent seg faults for tree forkjoin barrier
5788        * (GEH) */
5789       ANNOTATE_HAPPENS_BEFORE(thread);
5790       kmp_flag_64 flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
5791       __kmp_release_64(&flag);
5792     }; // if
5793 
5794     // Terminate OS thread.
5795     __kmp_reap_worker(thread);
5796 
5797     // The thread was killed asynchronously.  If it was actively
5798     // spinning in the thread pool, decrement the global count.
5799     //
5800     // There is a small timing hole here - if the worker thread was just waking
    // up after sleeping in the pool, had reset its th_active_in_pool flag but
5802     // not decremented the global counter __kmp_thread_pool_active_nth yet, then
5803     // the global counter might not get updated.
5804     //
5805     // Currently, this can only happen as the library is unloaded,
5806     // so there are no harmful side effects.
5807     if (thread->th.th_active_in_pool) {
5808       thread->th.th_active_in_pool = FALSE;
5809       KMP_TEST_THEN_DEC32(&__kmp_thread_pool_active_nth);
5810       KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
5811     }
5812 
5813     // Decrement # of [worker] threads in the pool.
5814     KMP_DEBUG_ASSERT(__kmp_thread_pool_nth > 0);
5815     --__kmp_thread_pool_nth;
5816   }; // if
5817 
5818   __kmp_free_implicit_task(thread);
5819 
5820 // Free the fast memory for tasking
5821 #if USE_FAST_MEMORY
5822   __kmp_free_fast_memory(thread);
5823 #endif /* USE_FAST_MEMORY */
5824 
5825   __kmp_suspend_uninitialize_thread(thread);
5826 
5827   KMP_DEBUG_ASSERT(__kmp_threads[gtid] == thread);
5828   TCW_SYNC_PTR(__kmp_threads[gtid], NULL);
5829 
5830   --__kmp_all_nth;
// __kmp_nth was decremented when the thread was added to the pool.
5832 
5833 #ifdef KMP_ADJUST_BLOCKTIME
5834   /* Adjust blocktime back to user setting or default if necessary */
5835   /* Middle initialization might never have occurred                */
5836   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5837     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5838     if (__kmp_nth <= __kmp_avail_proc) {
5839       __kmp_zero_bt = FALSE;
5840     }
5841   }
5842 #endif /* KMP_ADJUST_BLOCKTIME */
5843 
5844   /* free the memory being used */
5845   if (__kmp_env_consistency_check) {
5846     if (thread->th.th_cons) {
5847       __kmp_free_cons_stack(thread->th.th_cons);
5848       thread->th.th_cons = NULL;
5849     }; // if
5850   }
5851 
5852   if (thread->th.th_pri_common != NULL) {
5853     __kmp_free(thread->th.th_pri_common);
5854     thread->th.th_pri_common = NULL;
5855   }; // if
5856 
5857   if (thread->th.th_task_state_memo_stack != NULL) {
5858     __kmp_free(thread->th.th_task_state_memo_stack);
5859     thread->th.th_task_state_memo_stack = NULL;
5860   }
5861 
5862 #if KMP_USE_BGET
5863   if (thread->th.th_local.bget_data != NULL) {
5864     __kmp_finalize_bget(thread);
5865   }; // if
5866 #endif
5867 
5868 #if KMP_AFFINITY_SUPPORTED
5869   if (thread->th.th_affin_mask != NULL) {
5870     KMP_CPU_FREE(thread->th.th_affin_mask);
5871     thread->th.th_affin_mask = NULL;
5872   }; // if
5873 #endif /* KMP_AFFINITY_SUPPORTED */
5874 
5875   __kmp_reap_team(thread->th.th_serial_team);
5876   thread->th.th_serial_team = NULL;
5877   __kmp_free(thread);
5878 
5879   KMP_MB();
5880 
5881 } // __kmp_reap_thread
5882 
5883 static void __kmp_internal_end(void) {
5884   int i;
5885 
5886   /* First, unregister the library */
5887   __kmp_unregister_library();
5888 
5889 #if KMP_OS_WINDOWS
5890   /* In Win static library, we can't tell when a root actually dies, so we
5891      reclaim the data structures for any root threads that have died but not
5892      unregistered themselves, in order to shut down cleanly.
5893      In Win dynamic library we also can't tell when a thread dies.  */
5894   __kmp_reclaim_dead_roots(); // AC: moved here to always clean resources of
5895 // dead roots
5896 #endif
5897 
5898   for (i = 0; i < __kmp_threads_capacity; i++)
5899     if (__kmp_root[i])
5900       if (__kmp_root[i]->r.r_active)
5901         break;
5902   KMP_MB(); /* Flush all pending memory write invalidates.  */
5903   TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
5904 
5905   if (i < __kmp_threads_capacity) {
5906 #if KMP_USE_MONITOR
5907     // 2009-09-08 (lev): Other alive roots found. Why do we kill the monitor??
5908     KMP_MB(); /* Flush all pending memory write invalidates.  */
5909 
5910 // Need to check that monitor was initialized before reaping it. If we are
// called from __kmp_atfork_child (which sets __kmp_init_parallel = 0), then
5912 // __kmp_monitor will appear to contain valid data, but it is only valid in the
5913 // parent process, not the child.
5914     // New behavior (201008): instead of keying off of the flag
5915     // __kmp_init_parallel, the monitor thread creation is keyed off
5916     // of the new flag __kmp_init_monitor.
5917     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
5918     if (TCR_4(__kmp_init_monitor)) {
5919       __kmp_reap_monitor(&__kmp_monitor);
5920       TCW_4(__kmp_init_monitor, 0);
5921     }
5922     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
5923     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
5924 #endif // KMP_USE_MONITOR
5925   } else {
5926 /* TODO move this to cleanup code */
5927 #ifdef KMP_DEBUG
5928     /* make sure that everything has properly ended */
5929     for (i = 0; i < __kmp_threads_capacity; i++) {
5930       if (__kmp_root[i]) {
        // KMP_ASSERT(!KMP_UBER_GTID(i)); // AC: there can be uber
        // threads alive here
5933         KMP_ASSERT(!__kmp_root[i]->r.r_active); // TODO: can they be active?
5934       }
5935     }
5936 #endif
5937 
5938     KMP_MB();
5939 
5940     // Reap the worker threads.
5941     // This is valid for now, but be careful if threads are reaped sooner.
    while (__kmp_thread_pool != NULL) { // Loop over all threads in the pool.
5943       // Get the next thread from the pool.
5944       kmp_info_t *thread = CCAST(kmp_info_t *, __kmp_thread_pool);
5945       __kmp_thread_pool = thread->th.th_next_pool;
5946       // Reap it.
5947       KMP_DEBUG_ASSERT(thread->th.th_reap_state == KMP_SAFE_TO_REAP);
5948       thread->th.th_next_pool = NULL;
5949       thread->th.th_in_pool = FALSE;
5950       __kmp_reap_thread(thread, 0);
5951     }; // while
5952     __kmp_thread_pool_insert_pt = NULL;
5953 
5954     // Reap teams.
    while (__kmp_team_pool != NULL) { // Loop over all teams in the pool.
5956       // Get the next team from the pool.
5957       kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
5958       __kmp_team_pool = team->t.t_next_pool;
5959       // Reap it.
5960       team->t.t_next_pool = NULL;
5961       __kmp_reap_team(team);
5962     }; // while
5963 
5964     __kmp_reap_task_teams();
5965 
5966     for (i = 0; i < __kmp_threads_capacity; ++i) {
5967       // TBD: Add some checking...
5968       // Something like KMP_DEBUG_ASSERT( __kmp_thread[ i ] == NULL );
5969     }
5970 
5971     /* Make sure all threadprivate destructors get run by joining with all
5972        worker threads before resetting this flag */
5973     TCW_SYNC_4(__kmp_init_common, FALSE);
5974 
5975     KA_TRACE(10, ("__kmp_internal_end: all workers reaped\n"));
5976     KMP_MB();
5977 
5978 #if KMP_USE_MONITOR
5979     // See note above: One of the possible fixes for CQ138434 / CQ140126
5980     //
5981     // FIXME: push both code fragments down and CSE them?
5982     // push them into __kmp_cleanup() ?
5983     __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
5984     if (TCR_4(__kmp_init_monitor)) {
5985       __kmp_reap_monitor(&__kmp_monitor);
5986       TCW_4(__kmp_init_monitor, 0);
5987     }
5988     __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
5989     KA_TRACE(10, ("__kmp_internal_end: monitor reaped\n"));
5990 #endif
5991   } /* else !__kmp_global.t_active */
5992   TCW_4(__kmp_init_gtid, FALSE);
5993   KMP_MB(); /* Flush all pending memory write invalidates.  */
5994 
5995   __kmp_cleanup();
5996 #if OMPT_SUPPORT
5997   ompt_fini();
5998 #endif
5999 }
6000 
6001 void __kmp_internal_end_library(int gtid_req) {
6002   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
6003   /* this shouldn't be a race condition because __kmp_internal_end() is the
6004      only place to clear __kmp_serial_init */
6005   /* we'll check this later too, after we get the lock */
6006   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
  // redundant, because the next check will work in any case.
6008   if (__kmp_global.g.g_abort) {
6009     KA_TRACE(11, ("__kmp_internal_end_library: abort, exiting\n"));
6010     /* TODO abort? */
6011     return;
6012   }
6013   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6014     KA_TRACE(10, ("__kmp_internal_end_library: already finished\n"));
6015     return;
6016   }
6017 
6018   KMP_MB(); /* Flush all pending memory write invalidates.  */
6019 
6020   /* find out who we are and what we should do */
6021   {
6022     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6023     KA_TRACE(
6024         10, ("__kmp_internal_end_library: enter T#%d  (%d)\n", gtid, gtid_req));
6025     if (gtid == KMP_GTID_SHUTDOWN) {
6026       KA_TRACE(10, ("__kmp_internal_end_library: !__kmp_init_runtime, system "
6027                     "already shutdown\n"));
6028       return;
6029     } else if (gtid == KMP_GTID_MONITOR) {
6030       KA_TRACE(10, ("__kmp_internal_end_library: monitor thread, gtid not "
6031                     "registered, or system shutdown\n"));
6032       return;
6033     } else if (gtid == KMP_GTID_DNE) {
6034       KA_TRACE(10, ("__kmp_internal_end_library: gtid not registered or system "
6035                     "shutdown\n"));
      /* we don't know who we are, but we may still shut down the library */
6037     } else if (KMP_UBER_GTID(gtid)) {
6038       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6039       if (__kmp_root[gtid]->r.r_active) {
6040         __kmp_global.g.g_abort = -1;
6041         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6042         KA_TRACE(10,
6043                  ("__kmp_internal_end_library: root still active, abort T#%d\n",
6044                   gtid));
6045         return;
6046       } else {
6047         KA_TRACE(
6048             10,
6049             ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid));
6050         __kmp_unregister_root_current_thread(gtid);
6051       }
6052     } else {
6053 /* worker threads may call this function through the atexit handler, if they
6054  * call exit() */
6055 /* For now, skip the usual subsequent processing and just dump the debug buffer.
6056    TODO: do a thorough shutdown instead */
6057 #ifdef DUMP_DEBUG_ON_EXIT
6058       if (__kmp_debug_buf)
6059         __kmp_dump_debug_buffer();
6060 #endif
6061       return;
6062     }
6063   }
6064   /* synchronize the termination process */
6065   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6066 
6067   /* have we already finished */
6068   if (__kmp_global.g.g_abort) {
6069     KA_TRACE(10, ("__kmp_internal_end_library: abort, exiting\n"));
6070     /* TODO abort? */
6071     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6072     return;
6073   }
6074   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6075     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6076     return;
6077   }
6078 
6079   /* We need this lock to enforce mutex between this reading of
6080      __kmp_threads_capacity and the writing by __kmp_register_root.
6081      Alternatively, we can use a counter of roots that is atomically updated by
6082      __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
6083      __kmp_internal_end_*.  */
6084   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6085 
6086   /* now we can safely conduct the actual termination */
6087   __kmp_internal_end();
6088 
6089   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6090   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6091 
6092   KA_TRACE(10, ("__kmp_internal_end_library: exit\n"));
6093 
6094 #ifdef DUMP_DEBUG_ON_EXIT
6095   if (__kmp_debug_buf)
6096     __kmp_dump_debug_buffer();
6097 #endif
6098 
6099 #if KMP_OS_WINDOWS
6100   __kmp_close_console();
6101 #endif
6102 
6103   __kmp_fini_allocator();
6104 
6105 } // __kmp_internal_end_library
6106 
6107 void __kmp_internal_end_thread(int gtid_req) {
6108   int i;
6109 
6110   /* if we have already cleaned up, don't try again, it wouldn't be pretty */
6111   /* this shouldn't be a race condition because __kmp_internal_end() is the
6112    * only place to clear __kmp_serial_init */
6113   /* we'll check this later too, after we get the lock */
6114   // 2009-09-06: We do not set g_abort without setting g_done. This check looks
6115   // redundant, because the next check will work in any case.
6116   if (__kmp_global.g.g_abort) {
6117     KA_TRACE(11, ("__kmp_internal_end_thread: abort, exiting\n"));
6118     /* TODO abort? */
6119     return;
6120   }
6121   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6122     KA_TRACE(10, ("__kmp_internal_end_thread: already finished\n"));
6123     return;
6124   }
6125 
6126   KMP_MB(); /* Flush all pending memory write invalidates.  */
6127 
6128   /* find out who we are and what we should do */
6129   {
6130     int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6131     KA_TRACE(10,
6132              ("__kmp_internal_end_thread: enter T#%d  (%d)\n", gtid, gtid_req));
6133     if (gtid == KMP_GTID_SHUTDOWN) {
6134       KA_TRACE(10, ("__kmp_internal_end_thread: !__kmp_init_runtime, system "
6135                     "already shutdown\n"));
6136       return;
6137     } else if (gtid == KMP_GTID_MONITOR) {
6138       KA_TRACE(10, ("__kmp_internal_end_thread: monitor thread, gtid not "
6139                     "registered, or system shutdown\n"));
6140       return;
6141     } else if (gtid == KMP_GTID_DNE) {
6142       KA_TRACE(10, ("__kmp_internal_end_thread: gtid not registered or system "
6143                     "shutdown\n"));
6144       return;
6145       /* we don't know who we are */
6146     } else if (KMP_UBER_GTID(gtid)) {
6147       /* unregister ourselves as an uber thread.  gtid is no longer valid */
6148       if (__kmp_root[gtid]->r.r_active) {
6149         __kmp_global.g.g_abort = -1;
6150         TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6151         KA_TRACE(10,
6152                  ("__kmp_internal_end_thread: root still active, abort T#%d\n",
6153                   gtid));
6154         return;
6155       } else {
6156         KA_TRACE(10, ("__kmp_internal_end_thread: unregistering sibling T#%d\n",
6157                       gtid));
6158         __kmp_unregister_root_current_thread(gtid);
6159       }
6160     } else {
6161       /* just a worker thread, let's leave */
6162       KA_TRACE(10, ("__kmp_internal_end_thread: worker thread T#%d\n", gtid));
6163 
6164       if (gtid >= 0) {
6165         __kmp_threads[gtid]->th.th_task_team = NULL;
6166       }
6167 
6168       KA_TRACE(10,
6169                ("__kmp_internal_end_thread: worker thread done, exiting T#%d\n",
6170                 gtid));
6171       return;
6172     }
6173   }
6174 #if defined KMP_DYNAMIC_LIB
  // AC: let's not shut down the Linux* OS dynamic library at the exit of an
  // uber thread, because we will better shut down later in the library
  // destructor. The reason for this change is a performance problem when a
  // non-OpenMP thread in a loop forks and joins many OpenMP threads. We can
  // save a lot of time by keeping worker threads alive until program shutdown.
  // OM: Removed Linux* OS restriction to fix the crash on OS X* (DPD200239966)
  // and Windows (DPD200287443) that occurs when using critical sections from
  // foreign threads.
6183   KA_TRACE(10, ("__kmp_internal_end_thread: exiting T#%d\n", gtid_req));
6184   return;
6185 #endif
6186   /* synchronize the termination process */
6187   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6188 
6189   /* have we already finished */
6190   if (__kmp_global.g.g_abort) {
6191     KA_TRACE(10, ("__kmp_internal_end_thread: abort, exiting\n"));
6192     /* TODO abort? */
6193     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6194     return;
6195   }
6196   if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6197     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6198     return;
6199   }
6200 
6201   /* We need this lock to enforce mutex between this reading of
6202      __kmp_threads_capacity and the writing by __kmp_register_root.
6203      Alternatively, we can use a counter of roots that is atomically updated by
6204      __kmp_get_global_thread_id_reg, __kmp_do_serial_initialize and
6205      __kmp_internal_end_*.  */
6206 
6207   /* should we finish the run-time?  are all siblings done? */
6208   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6209 
6210   for (i = 0; i < __kmp_threads_capacity; ++i) {
6211     if (KMP_UBER_GTID(i)) {
6212       KA_TRACE(
6213           10,
6214           ("__kmp_internal_end_thread: remaining sibling task: gtid==%d\n", i));
6215       __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6216       __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6217       return;
6218     };
6219   }
6220 
6221   /* now we can safely conduct the actual termination */
6222 
6223   __kmp_internal_end();
6224 
6225   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6226   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6227 
6228   KA_TRACE(10, ("__kmp_internal_end_thread: exit T#%d\n", gtid_req));
6229 
6230 #ifdef DUMP_DEBUG_ON_EXIT
6231   if (__kmp_debug_buf)
6232     __kmp_dump_debug_buffer();
6233 #endif
6234 } // __kmp_internal_end_thread
6235 
6236 // -----------------------------------------------------------------------------
6237 // Library registration stuff.
6238 
6239 static long __kmp_registration_flag = 0;
6240 // Random value used to indicate library initialization.
6241 static char *__kmp_registration_str = NULL;
6242 // Value to be saved in env var __KMP_REGISTERED_LIB_<pid>.
6243 
6244 static inline char *__kmp_reg_status_name() {
6245   /* On RHEL 3u5 if linked statically, getpid() returns different values in
6246      each thread. If registration and unregistration go in different threads
     (omp_misc_other_root_exit.cpp test case), the name of the
     registered_lib_env env var cannot be found, because the name will contain
     a different pid. */
6249   return __kmp_str_format("__KMP_REGISTERED_LIB_%d", (int)getpid());
} // __kmp_reg_status_name
6251 
6252 void __kmp_register_library_startup(void) {
6253 
6254   char *name = __kmp_reg_status_name(); // Name of the environment variable.
6255   int done = 0;
6256   union {
6257     double dtime;
6258     long ltime;
6259   } time;
6260 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6261   __kmp_initialize_system_tick();
6262 #endif
6263   __kmp_read_system_time(&time.dtime);
6264   __kmp_registration_flag = 0xCAFE0000L | (time.ltime & 0x0000FFFFL);
6265   __kmp_registration_str =
6266       __kmp_str_format("%p-%lx-%s", &__kmp_registration_flag,
6267                        __kmp_registration_flag, KMP_LIBRARY_FILE);
6268 
6269   KA_TRACE(50, ("__kmp_register_library_startup: %s=\"%s\"\n", name,
6270                 __kmp_registration_str));
6271 
6272   while (!done) {
6273 
6274     char *value = NULL; // Actual value of the environment variable.
6275 
    // Set the environment variable, but do not overwrite an existing value.
6277     __kmp_env_set(name, __kmp_registration_str, 0);
    // Check that the variable was actually written.
6279     value = __kmp_env_get(name);
6280     if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6281 
6282       done = 1; // Ok, environment variable set successfully, exit the loop.
6283 
6284     } else {
6285 
6286       // Oops. Write failed. Another copy of OpenMP RTL is in memory.
      // Check whether it is alive or dead.
6288       int neighbor = 0; // 0 -- unknown status, 1 -- alive, 2 -- dead.
6289       char *tail = value;
6290       char *flag_addr_str = NULL;
6291       char *flag_val_str = NULL;
6292       char const *file_name = NULL;
6293       __kmp_str_split(tail, '-', &flag_addr_str, &tail);
6294       __kmp_str_split(tail, '-', &flag_val_str, &tail);
6295       file_name = tail;
6296       if (tail != NULL) {
6297         long *flag_addr = 0;
6298         long flag_val = 0;
6299         KMP_SSCANF(flag_addr_str, "%p", &flag_addr);
6300         KMP_SSCANF(flag_val_str, "%lx", &flag_val);
6301         if (flag_addr != 0 && flag_val != 0 && strcmp(file_name, "") != 0) {
6302           // First, check whether environment-encoded address is mapped into
6303           // addr space.
6304           // If so, dereference it to see if it still has the right value.
6305           if (__kmp_is_address_mapped(flag_addr) && *flag_addr == flag_val) {
6306             neighbor = 1;
6307           } else {
6308             // If not, then we know the other copy of the library is no longer
6309             // running.
6310             neighbor = 2;
6311           }; // if
6312         }; // if
6313       }; // if
6314       switch (neighbor) {
6315       case 0: // Cannot parse environment variable -- neighbor status unknown.
        // Assume it is an incompatible format from a future version of the
        // library. Assume the other library is alive.
6318         // WARN( ... ); // TODO: Issue a warning.
6319         file_name = "unknown library";
      // Attention! Falling through to the next case. That's intentional.
6321       case 1: { // Neighbor is alive.
6322         // Check it is allowed.
6323         char *duplicate_ok = __kmp_env_get("KMP_DUPLICATE_LIB_OK");
6324         if (!__kmp_str_match_true(duplicate_ok)) {
6325           // That's not allowed. Issue fatal error.
6326           __kmp_msg(kmp_ms_fatal,
6327                     KMP_MSG(DuplicateLibrary, KMP_LIBRARY_FILE, file_name),
6328                     KMP_HNT(DuplicateLibrary), __kmp_msg_null);
6329         }; // if
6330         KMP_INTERNAL_FREE(duplicate_ok);
6331         __kmp_duplicate_library_ok = 1;
6332         done = 1; // Exit the loop.
6333       } break;
6334       case 2: { // Neighbor is dead.
6335         // Clear the variable and try to register library again.
6336         __kmp_env_unset(name);
6337       } break;
6338       default: { KMP_DEBUG_ASSERT(0); } break;
6339       }; // switch
6340 
6341     }; // if
6342     KMP_INTERNAL_FREE((void *)value);
6343 
6344   }; // while
6345   KMP_INTERNAL_FREE((void *)name);
6346 
6347 } // func __kmp_register_library_startup
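
/* The value registered above has the form "<flag address>-<flag value in
   hex>-<library file>", e.g. something like
   "0x7f2a5c0e1010-cafe1234-libomp.so" (the exact pointer formatting is
   platform dependent). A standalone sketch of the decode step, using only
   standard C and a hypothetical helper name, condensing the per-field sscanf
   calls in the loop above into a single call:

     #include <stdio.h>

     // Returns 1 and fills addr/val if "value" parses as a registration
     // string; the caller then checks whether *addr is still mapped and still
     // holds the flag value to decide if the other copy of the library is
     // alive.
     static int parse_registration(const char *value, void **addr,
                                   unsigned long *val) {
       return sscanf(value, "%p-%lx-", addr, val) == 2;
     }
*/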
6348 
6349 void __kmp_unregister_library(void) {
6350 
6351   char *name = __kmp_reg_status_name();
6352   char *value = __kmp_env_get(name);
6353 
6354   KMP_DEBUG_ASSERT(__kmp_registration_flag != 0);
6355   KMP_DEBUG_ASSERT(__kmp_registration_str != NULL);
6356   if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6357     // Ok, this is our variable. Delete it.
6358     __kmp_env_unset(name);
6359   }; // if
6360 
6361   KMP_INTERNAL_FREE(__kmp_registration_str);
6362   KMP_INTERNAL_FREE(value);
6363   KMP_INTERNAL_FREE(name);
6364 
6365   __kmp_registration_flag = 0;
6366   __kmp_registration_str = NULL;
6367 
6368 } // __kmp_unregister_library
6369 
6370 // End of Library registration stuff.
6371 // -----------------------------------------------------------------------------
6372 
6373 #if KMP_MIC_SUPPORTED
6374 
6375 static void __kmp_check_mic_type() {
6376   kmp_cpuid_t cpuid_state = {0};
6377   kmp_cpuid_t *cs_p = &cpuid_state;
6378   __kmp_x86_cpuid(1, 0, cs_p);
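  // CPUID leaf 1 reports the processor signature in EAX:
  //   bits  3:0 stepping,  7:4 model,  11:8 family,
  //   bits 19:16 extended model,  27:20 extended family.
  // The masks below compare family+model (0xff0), or extended model together
  // with family+model (0xf0ff0), against the known mic2/mic3 signatures.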
6379   // We don't support mic1 at the moment
6380   if ((cs_p->eax & 0xff0) == 0xB10) {
6381     __kmp_mic_type = mic2;
6382   } else if ((cs_p->eax & 0xf0ff0) == 0x50670) {
6383     __kmp_mic_type = mic3;
6384   } else {
6385     __kmp_mic_type = non_mic;
6386   }
6387 }
6388 
6389 #endif /* KMP_MIC_SUPPORTED */
6390 
6391 static void __kmp_do_serial_initialize(void) {
6392   int i, gtid;
6393   int size;
6394 
6395   KA_TRACE(10, ("__kmp_do_serial_initialize: enter\n"));
6396 
6397   KMP_DEBUG_ASSERT(sizeof(kmp_int32) == 4);
6398   KMP_DEBUG_ASSERT(sizeof(kmp_uint32) == 4);
6399   KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);
6400   KMP_DEBUG_ASSERT(sizeof(kmp_uint64) == 8);
6401   KMP_DEBUG_ASSERT(sizeof(kmp_intptr_t) == sizeof(void *));
6402 
6403 #if OMPT_SUPPORT
6404   ompt_pre_init();
6405 #endif
6406 
6407   __kmp_validate_locks();
6408 
6409   /* Initialize internal memory allocator */
6410   __kmp_init_allocator();
6411 
6412   /* Register the library startup via an environment variable and check to see
6413      whether another copy of the library is already registered. */
6414 
6415   __kmp_register_library_startup();
6416 
6417   /* TODO reinitialization of library */
6418   if (TCR_4(__kmp_global.g.g_done)) {
6419     KA_TRACE(10, ("__kmp_do_serial_initialize: reinitialization of library\n"));
6420   }
6421 
6422   __kmp_global.g.g_abort = 0;
6423   TCW_SYNC_4(__kmp_global.g.g_done, FALSE);
6424 
6425 /* initialize the locks */
6426 #if KMP_USE_ADAPTIVE_LOCKS
6427 #if KMP_DEBUG_ADAPTIVE_LOCKS
6428   __kmp_init_speculative_stats();
6429 #endif
6430 #endif
6431 #if KMP_STATS_ENABLED
6432   __kmp_stats_init();
6433 #endif
6434   __kmp_init_lock(&__kmp_global_lock);
6435   __kmp_init_queuing_lock(&__kmp_dispatch_lock);
6436   __kmp_init_lock(&__kmp_debug_lock);
6437   __kmp_init_atomic_lock(&__kmp_atomic_lock);
6438   __kmp_init_atomic_lock(&__kmp_atomic_lock_1i);
6439   __kmp_init_atomic_lock(&__kmp_atomic_lock_2i);
6440   __kmp_init_atomic_lock(&__kmp_atomic_lock_4i);
6441   __kmp_init_atomic_lock(&__kmp_atomic_lock_4r);
6442   __kmp_init_atomic_lock(&__kmp_atomic_lock_8i);
6443   __kmp_init_atomic_lock(&__kmp_atomic_lock_8r);
6444   __kmp_init_atomic_lock(&__kmp_atomic_lock_8c);
6445   __kmp_init_atomic_lock(&__kmp_atomic_lock_10r);
6446   __kmp_init_atomic_lock(&__kmp_atomic_lock_16r);
6447   __kmp_init_atomic_lock(&__kmp_atomic_lock_16c);
6448   __kmp_init_atomic_lock(&__kmp_atomic_lock_20c);
6449   __kmp_init_atomic_lock(&__kmp_atomic_lock_32c);
6450   __kmp_init_bootstrap_lock(&__kmp_forkjoin_lock);
6451   __kmp_init_bootstrap_lock(&__kmp_exit_lock);
6452 #if KMP_USE_MONITOR
6453   __kmp_init_bootstrap_lock(&__kmp_monitor_lock);
6454 #endif
6455   __kmp_init_bootstrap_lock(&__kmp_tp_cached_lock);
6456 
6457   /* conduct initialization and initial setup of configuration */
6458 
6459   __kmp_runtime_initialize();
6460 
6461 #if KMP_MIC_SUPPORTED
6462   __kmp_check_mic_type();
6463 #endif
6464 
6465 // Some global variable initialization moved here from kmp_env_initialize()
6466 #ifdef KMP_DEBUG
6467   kmp_diag = 0;
6468 #endif
6469   __kmp_abort_delay = 0;
6470 
6471   // From __kmp_init_dflt_team_nth()
6472   /* assume the entire machine will be used */
6473   __kmp_dflt_team_nth_ub = __kmp_xproc;
6474   if (__kmp_dflt_team_nth_ub < KMP_MIN_NTH) {
6475     __kmp_dflt_team_nth_ub = KMP_MIN_NTH;
6476   }
6477   if (__kmp_dflt_team_nth_ub > __kmp_sys_max_nth) {
6478     __kmp_dflt_team_nth_ub = __kmp_sys_max_nth;
6479   }
6480   __kmp_max_nth = __kmp_sys_max_nth;
6481   __kmp_cg_max_nth = __kmp_sys_max_nth;
6482   __kmp_teams_max_nth = __kmp_xproc; // set a "reasonable" default
6483   if (__kmp_teams_max_nth > __kmp_sys_max_nth) {
6484     __kmp_teams_max_nth = __kmp_sys_max_nth;
6485   }
6486 
6487   // Three vars below moved here from __kmp_env_initialize() "KMP_BLOCKTIME"
6488   // part
6489   __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
6490 #if KMP_USE_MONITOR
6491   __kmp_monitor_wakeups =
6492       KMP_WAKEUPS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6493   __kmp_bt_intervals =
6494       KMP_INTERVALS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6495 #endif
6496   // From "KMP_LIBRARY" part of __kmp_env_initialize()
6497   __kmp_library = library_throughput;
6498   // From KMP_SCHEDULE initialization
6499   __kmp_static = kmp_sch_static_balanced;
// AC: do not use analytical here, because it is non-monotonic
6501 //__kmp_guided = kmp_sch_guided_iterative_chunked;
6502 //__kmp_auto = kmp_sch_guided_analytical_chunked; // AC: it is the default, no
6503 // need to repeat assignment
6504 // Barrier initialization. Moved here from __kmp_env_initialize() Barrier branch
6505 // bit control and barrier method control parts
6506 #if KMP_FAST_REDUCTION_BARRIER
6507 #define kmp_reduction_barrier_gather_bb ((int)1)
6508 #define kmp_reduction_barrier_release_bb ((int)1)
6509 #define kmp_reduction_barrier_gather_pat bp_hyper_bar
6510 #define kmp_reduction_barrier_release_pat bp_hyper_bar
6511 #endif // KMP_FAST_REDUCTION_BARRIER
6512   for (i = bs_plain_barrier; i < bs_last_barrier; i++) {
6513     __kmp_barrier_gather_branch_bits[i] = __kmp_barrier_gather_bb_dflt;
6514     __kmp_barrier_release_branch_bits[i] = __kmp_barrier_release_bb_dflt;
6515     __kmp_barrier_gather_pattern[i] = __kmp_barrier_gather_pat_dflt;
6516     __kmp_barrier_release_pattern[i] = __kmp_barrier_release_pat_dflt;
6517 #if KMP_FAST_REDUCTION_BARRIER
    if (i == bs_reduction_barrier) { // tested and confirmed on ALTIX only
      // (lin_64): hyper,1
6520       __kmp_barrier_gather_branch_bits[i] = kmp_reduction_barrier_gather_bb;
6521       __kmp_barrier_release_branch_bits[i] = kmp_reduction_barrier_release_bb;
6522       __kmp_barrier_gather_pattern[i] = kmp_reduction_barrier_gather_pat;
6523       __kmp_barrier_release_pattern[i] = kmp_reduction_barrier_release_pat;
6524     }
6525 #endif // KMP_FAST_REDUCTION_BARRIER
6526   }
6527 #if KMP_FAST_REDUCTION_BARRIER
6528 #undef kmp_reduction_barrier_release_pat
6529 #undef kmp_reduction_barrier_gather_pat
6530 #undef kmp_reduction_barrier_release_bb
6531 #undef kmp_reduction_barrier_gather_bb
6532 #endif // KMP_FAST_REDUCTION_BARRIER
6533 #if KMP_MIC_SUPPORTED
6534   if (__kmp_mic_type == mic2) { // KNC
    // AC: plain=3,2, forkjoin=2,1 are optimal for 240 threads on KNC
6536     __kmp_barrier_gather_branch_bits[bs_plain_barrier] = 3; // plain gather
6537     __kmp_barrier_release_branch_bits[bs_forkjoin_barrier] =
6538         1; // forkjoin release
6539     __kmp_barrier_gather_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6540     __kmp_barrier_release_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6541   }
6542 #if KMP_FAST_REDUCTION_BARRIER
6543   if (__kmp_mic_type == mic2) { // KNC
6544     __kmp_barrier_gather_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6545     __kmp_barrier_release_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6546   }
6547 #endif // KMP_FAST_REDUCTION_BARRIER
6548 #endif // KMP_MIC_SUPPORTED
6549 
6550 // From KMP_CHECKS initialization
6551 #ifdef KMP_DEBUG
6552   __kmp_env_checks = TRUE; /* development versions have the extra checks */
6553 #else
6554   __kmp_env_checks = FALSE; /* port versions do not have the extra checks */
6555 #endif
6556 
6557   // From "KMP_FOREIGN_THREADS_THREADPRIVATE" initialization
6558   __kmp_foreign_tp = TRUE;
6559 
6560   __kmp_global.g.g_dynamic = FALSE;
6561   __kmp_global.g.g_dynamic_mode = dynamic_default;
6562 
6563   __kmp_env_initialize(NULL);
6564 
6565 // Print all messages in message catalog for testing purposes.
6566 #ifdef KMP_DEBUG
6567   char const *val = __kmp_env_get("KMP_DUMP_CATALOG");
6568   if (__kmp_str_match_true(val)) {
6569     kmp_str_buf_t buffer;
6570     __kmp_str_buf_init(&buffer);
6571     __kmp_i18n_dump_catalog(&buffer);
6572     __kmp_printf("%s", buffer.str);
6573     __kmp_str_buf_free(&buffer);
6574   }; // if
6575   __kmp_env_free(&val);
6576 #endif
6577 
6578   __kmp_threads_capacity =
6579       __kmp_initial_threads_capacity(__kmp_dflt_team_nth_ub);
6580   // Moved here from __kmp_env_initialize() "KMP_ALL_THREADPRIVATE" part
6581   __kmp_tp_capacity = __kmp_default_tp_capacity(
6582       __kmp_dflt_team_nth_ub, __kmp_max_nth, __kmp_allThreadsSpecified);
6583 
6584   // If the library is shut down properly, both pools must be NULL. Just in
6585   // case, set them to NULL -- some memory may leak, but subsequent code will
6586   // work even if pools are not freed.
6587   KMP_DEBUG_ASSERT(__kmp_thread_pool == NULL);
6588   KMP_DEBUG_ASSERT(__kmp_thread_pool_insert_pt == NULL);
6589   KMP_DEBUG_ASSERT(__kmp_team_pool == NULL);
6590   __kmp_thread_pool = NULL;
6591   __kmp_thread_pool_insert_pt = NULL;
6592   __kmp_team_pool = NULL;
6593 
6594   /* Allocate all of the variable sized records */
6595   /* NOTE: __kmp_threads_capacity entries are allocated, but the arrays are
6596    * expandable */
6597   /* Since allocation is cache-aligned, just add extra padding at the end */
6598   size =
6599       (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * __kmp_threads_capacity +
6600       CACHE_LINE;
6601   __kmp_threads = (kmp_info_t **)__kmp_allocate(size);
6602   __kmp_root = (kmp_root_t **)((char *)__kmp_threads +
6603                                sizeof(kmp_info_t *) * __kmp_threads_capacity);
6604 
6605   /* init thread counts */
6606   KMP_DEBUG_ASSERT(__kmp_all_nth ==
6607                    0); // Asserts fail if the library is reinitializing and
6608   KMP_DEBUG_ASSERT(__kmp_nth == 0); // something was wrong in termination.
6609   __kmp_all_nth = 0;
6610   __kmp_nth = 0;
6611 
6612   /* setup the uber master thread and hierarchy */
6613   gtid = __kmp_register_root(TRUE);
6614   KA_TRACE(10, ("__kmp_do_serial_initialize  T#%d\n", gtid));
6615   KMP_ASSERT(KMP_UBER_GTID(gtid));
6616   KMP_ASSERT(KMP_INITIAL_GTID(gtid));
6617 
6618   KMP_MB(); /* Flush all pending memory write invalidates.  */
6619 
6620   __kmp_common_initialize();
6621 
6622 #if KMP_OS_UNIX
6623   /* invoke the child fork handler */
6624   __kmp_register_atfork();
6625 #endif
6626 
6627 #if !defined KMP_DYNAMIC_LIB
6628   {
    /* Invoke the exit handler when the program finishes, but only for the
       static library. For the dynamic library, we already have _fini and
       DllMain. */
6631     int rc = atexit(__kmp_internal_end_atexit);
6632     if (rc != 0) {
6633       __kmp_msg(kmp_ms_fatal, KMP_MSG(FunctionError, "atexit()"), KMP_ERR(rc),
6634                 __kmp_msg_null);
6635     }; // if
6636   }
6637 #endif
6638 
6639 #if KMP_HANDLE_SIGNALS
6640 #if KMP_OS_UNIX
6641   /* NOTE: make sure that this is called before the user installs their own
6642      signal handlers so that the user handlers are called first. this way they
6643      can return false, not call our handler, avoid terminating the library, and
6644      continue execution where they left off. */
6645   __kmp_install_signals(FALSE);
6646 #endif /* KMP_OS_UNIX */
6647 #if KMP_OS_WINDOWS
6648   __kmp_install_signals(TRUE);
6649 #endif /* KMP_OS_WINDOWS */
6650 #endif
6651 
6652   /* we have finished the serial initialization */
6653   __kmp_init_counter++;
6654 
6655   __kmp_init_serial = TRUE;
6656 
6657   if (__kmp_settings) {
6658     __kmp_env_print();
6659   }
6660 
6661 #if OMP_40_ENABLED
6662   if (__kmp_display_env || __kmp_display_env_verbose) {
6663     __kmp_env_print_2();
6664   }
6665 #endif // OMP_40_ENABLED
6666 
6667 #if OMPT_SUPPORT
6668   ompt_post_init();
6669 #endif
6670 
6671   KMP_MB();
6672 
6673   KA_TRACE(10, ("__kmp_do_serial_initialize: exit\n"));
6674 }
6675 
6676 void __kmp_serial_initialize(void) {
6677   if (__kmp_init_serial) {
6678     return;
6679   }
6680   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6681   if (__kmp_init_serial) {
6682     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6683     return;
6684   }
6685   __kmp_do_serial_initialize();
6686   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6687 }
6688 
6689 static void __kmp_do_middle_initialize(void) {
6690   int i, j;
6691   int prev_dflt_team_nth;
6692 
6693   if (!__kmp_init_serial) {
6694     __kmp_do_serial_initialize();
6695   }
6696 
6697   KA_TRACE(10, ("__kmp_middle_initialize: enter\n"));
6698 
6699   // Save the previous value for the __kmp_dflt_team_nth so that
6700   // we can avoid some reinitialization if it hasn't changed.
6701   prev_dflt_team_nth = __kmp_dflt_team_nth;
6702 
6703 #if KMP_AFFINITY_SUPPORTED
6704   // __kmp_affinity_initialize() will try to set __kmp_ncores to the
6705   // number of cores on the machine.
6706   __kmp_affinity_initialize();
6707 
6708   // Run through the __kmp_threads array and set the affinity mask
6709   // for each root thread that is currently registered with the RTL.
6710   for (i = 0; i < __kmp_threads_capacity; i++) {
6711     if (TCR_PTR(__kmp_threads[i]) != NULL) {
6712       __kmp_affinity_set_init_mask(i, TRUE);
6713     }
6714   }
6715 #endif /* KMP_AFFINITY_SUPPORTED */
6716 
6717   KMP_ASSERT(__kmp_xproc > 0);
6718   if (__kmp_avail_proc == 0) {
6719     __kmp_avail_proc = __kmp_xproc;
6720   }
6721 
6722   // If there were empty places in num_threads list (OMP_NUM_THREADS=,,2,3),
6723   // correct them now
6724   j = 0;
6725   while ((j < __kmp_nested_nth.used) && !__kmp_nested_nth.nth[j]) {
6726     __kmp_nested_nth.nth[j] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub =
6727         __kmp_avail_proc;
6728     j++;
6729   }
6730 
6731   if (__kmp_dflt_team_nth == 0) {
6732 #ifdef KMP_DFLT_NTH_CORES
6733     // Default #threads = #cores
6734     __kmp_dflt_team_nth = __kmp_ncores;
6735     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6736                   "__kmp_ncores (%d)\n",
6737                   __kmp_dflt_team_nth));
6738 #else
6739     // Default #threads = #available OS procs
6740     __kmp_dflt_team_nth = __kmp_avail_proc;
6741     KA_TRACE(20, ("__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6742                   "__kmp_avail_proc(%d)\n",
6743                   __kmp_dflt_team_nth));
6744 #endif /* KMP_DFLT_NTH_CORES */
6745   }
6746 
6747   if (__kmp_dflt_team_nth < KMP_MIN_NTH) {
6748     __kmp_dflt_team_nth = KMP_MIN_NTH;
6749   }
6750   if (__kmp_dflt_team_nth > __kmp_sys_max_nth) {
6751     __kmp_dflt_team_nth = __kmp_sys_max_nth;
6752   }
6753 
6754   // There's no harm in continuing if the following check fails,
6755   // but it indicates an error in the previous logic.
6756   KMP_DEBUG_ASSERT(__kmp_dflt_team_nth <= __kmp_dflt_team_nth_ub);
6757 
6758   if (__kmp_dflt_team_nth != prev_dflt_team_nth) {
6759     // Run through the __kmp_threads array and set the num threads icv for each
6760     // root thread that is currently registered with the RTL (which has not
6761     // already explicitly set its nthreads-var with a call to
6762     // omp_set_num_threads()).
6763     for (i = 0; i < __kmp_threads_capacity; i++) {
6764       kmp_info_t *thread = __kmp_threads[i];
6765       if (thread == NULL)
6766         continue;
6767       if (thread->th.th_current_task->td_icvs.nproc != 0)
6768         continue;
6769 
6770       set__nproc(__kmp_threads[i], __kmp_dflt_team_nth);
6771     }
6772   }
6773   KA_TRACE(
6774       20,
6775       ("__kmp_middle_initialize: final value for __kmp_dflt_team_nth = %d\n",
6776        __kmp_dflt_team_nth));
6777 
6778 #ifdef KMP_ADJUST_BLOCKTIME
6779   /* Adjust blocktime to zero if necessary  now that __kmp_avail_proc is set */
6780   if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
6781     KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
6782     if (__kmp_nth > __kmp_avail_proc) {
6783       __kmp_zero_bt = TRUE;
6784     }
6785   }
6786 #endif /* KMP_ADJUST_BLOCKTIME */
6787 
6788   /* we have finished middle initialization */
6789   TCW_SYNC_4(__kmp_init_middle, TRUE);
6790 
6791   KA_TRACE(10, ("__kmp_do_middle_initialize: exit\n"));
6792 }
6793 
6794 void __kmp_middle_initialize(void) {
6795   if (__kmp_init_middle) {
6796     return;
6797   }
6798   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6799   if (__kmp_init_middle) {
6800     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6801     return;
6802   }
6803   __kmp_do_middle_initialize();
6804   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6805 }
6806 
6807 void __kmp_parallel_initialize(void) {
6808   int gtid = __kmp_entry_gtid(); // this might be a new root
6809 
6810   /* synchronize parallel initialization (for sibling) */
6811   if (TCR_4(__kmp_init_parallel))
6812     return;
6813   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6814   if (TCR_4(__kmp_init_parallel)) {
6815     __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6816     return;
6817   }
6818 
6819   /* TODO reinitialization after we have already shut down */
6820   if (TCR_4(__kmp_global.g.g_done)) {
6821     KA_TRACE(
6822         10,
6823         ("__kmp_parallel_initialize: attempt to init while shutting down\n"));
6824     __kmp_infinite_loop();
6825   }
6826 
6827   /* jc: The lock __kmp_initz_lock is already held, so calling
6828      __kmp_serial_initialize would cause a deadlock.  So we call
6829      __kmp_do_serial_initialize directly. */
6830   if (!__kmp_init_middle) {
6831     __kmp_do_middle_initialize();
6832   }
6833 
6834   /* begin initialization */
6835   KA_TRACE(10, ("__kmp_parallel_initialize: enter\n"));
6836   KMP_ASSERT(KMP_UBER_GTID(gtid));
6837 
6838 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6839   // Save the FP control regs.
6840   // Worker threads will set theirs to these values at thread startup.
6841   __kmp_store_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
6842   __kmp_store_mxcsr(&__kmp_init_mxcsr);
6843   __kmp_init_mxcsr &= KMP_X86_MXCSR_MASK;
6844 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
6845 
6846 #if KMP_OS_UNIX
6847 #if KMP_HANDLE_SIGNALS
6848   /*  must be after __kmp_serial_initialize  */
6849   __kmp_install_signals(TRUE);
6850 #endif
6851 #endif
6852 
6853   __kmp_suspend_initialize();
6854 
6855 #if defined(USE_LOAD_BALANCE)
6856   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6857     __kmp_global.g.g_dynamic_mode = dynamic_load_balance;
6858   }
6859 #else
6860   if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6861     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
6862   }
6863 #endif
6864 
6865   if (__kmp_version) {
6866     __kmp_print_version_2();
6867   }
6868 
6869   /* we have finished parallel initialization */
6870   TCW_SYNC_4(__kmp_init_parallel, TRUE);
6871 
6872   KMP_MB();
6873   KA_TRACE(10, ("__kmp_parallel_initialize: exit\n"));
6874 
6875   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6876 }
6877 
6878 /* ------------------------------------------------------------------------ */
6879 
6880 void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
6881                                    kmp_team_t *team) {
6882   kmp_disp_t *dispatch;
6883 
6884   KMP_MB();
6885 
6886   /* none of the threads have encountered any constructs, yet. */
6887   this_thr->th.th_local.this_construct = 0;
6888 #if KMP_CACHE_MANAGE
6889   KMP_CACHE_PREFETCH(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_arrived);
6890 #endif /* KMP_CACHE_MANAGE */
6891   dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch);
6892   KMP_DEBUG_ASSERT(dispatch);
6893   KMP_DEBUG_ASSERT(team->t.t_dispatch);
6894   // KMP_DEBUG_ASSERT( this_thr->th.th_dispatch == &team->t.t_dispatch[
6895   // this_thr->th.th_info.ds.ds_tid ] );
6896 
6897   dispatch->th_disp_index = 0; /* reset the dispatch buffer counter */
6898 #if OMP_45_ENABLED
6899   dispatch->th_doacross_buf_idx =
6900       0; /* reset the doacross dispatch buffer counter */
6901 #endif
6902   if (__kmp_env_consistency_check)
6903     __kmp_push_parallel(gtid, team->t.t_ident);
6904 
6905   KMP_MB(); /* Flush all pending memory write invalidates.  */
6906 }
6907 
6908 void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
6909                                   kmp_team_t *team) {
6910   if (__kmp_env_consistency_check)
6911     __kmp_pop_parallel(gtid, team->t.t_ident);
6912 
6913   __kmp_finish_implicit_task(this_thr);
6914 }
6915 
6916 int __kmp_invoke_task_func(int gtid) {
6917   int rc;
6918   int tid = __kmp_tid_from_gtid(gtid);
6919   kmp_info_t *this_thr = __kmp_threads[gtid];
6920   kmp_team_t *team = this_thr->th.th_team;
6921 
6922   __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
6923 #if USE_ITT_BUILD
6924   if (__itt_stack_caller_create_ptr) {
6925     __kmp_itt_stack_callee_enter(
6926         (__itt_caller)
6927             team->t.t_stack_id); // inform ittnotify about entering user's code
6928   }
6929 #endif /* USE_ITT_BUILD */
6930 #if INCLUDE_SSC_MARKS
6931   SSC_MARK_INVOKING();
6932 #endif
6933 
6934 #if OMPT_SUPPORT
6935   void *dummy;
6936   void **exit_runtime_p;
6937   ompt_task_id_t my_task_id;
6938   ompt_parallel_id_t my_parallel_id;
6939 
6940   if (ompt_enabled) {
6941     exit_runtime_p = &(team->t.t_implicit_task_taskdata[tid]
6942                            .ompt_task_info.frame.exit_runtime_frame);
6943   } else {
6944     exit_runtime_p = &dummy;
6945   }
6946 
6947 #if OMPT_TRACE
6948   my_task_id = team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_id;
6949   my_parallel_id = team->t.ompt_team_info.parallel_id;
6950   if (ompt_enabled &&
6951       ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)) {
6952     ompt_callbacks.ompt_callback(ompt_event_implicit_task_begin)(my_parallel_id,
6953                                                                  my_task_id);
6954   }
6955 #endif
6956 #endif
6957 
6958   {
6959     KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
6960     KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
6961     rc =
6962         __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
6963                                tid, (int)team->t.t_argc, (void **)team->t.t_argv
6964 #if OMPT_SUPPORT
6965                                ,
6966                                exit_runtime_p
6967 #endif
6968                                );
6969 #if OMPT_SUPPORT
6970     *exit_runtime_p = NULL;
6971 #endif
6972   }
6973 
6974 #if USE_ITT_BUILD
6975   if (__itt_stack_caller_create_ptr) {
6976     __kmp_itt_stack_callee_leave(
6977         (__itt_caller)
6978             team->t.t_stack_id); // inform ittnotify about leaving user's code
6979   }
6980 #endif /* USE_ITT_BUILD */
6981   __kmp_run_after_invoked_task(gtid, tid, this_thr, team);
6982 
6983   return rc;
6984 }
6985 
6986 #if OMP_40_ENABLED
6987 void __kmp_teams_master(int gtid) {
6988   // This routine is called by all master threads in teams construct
6989   kmp_info_t *thr = __kmp_threads[gtid];
6990   kmp_team_t *team = thr->th.th_team;
6991   ident_t *loc = team->t.t_ident;
6992   thr->th.th_set_nproc = thr->th.th_teams_size.nth;
6993   KMP_DEBUG_ASSERT(thr->th.th_teams_microtask);
6994   KMP_DEBUG_ASSERT(thr->th.th_set_nproc);
6995   KA_TRACE(20, ("__kmp_teams_master: T#%d, Tid %d, microtask %p\n", gtid,
6996                 __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask));
// Launch the league of teams now, but do not let the workers execute
// (they hang on the fork barrier until the next parallel region)
6999 #if INCLUDE_SSC_MARKS
7000   SSC_MARK_FORKING();
7001 #endif
7002   __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
7003 #if OMPT_SUPPORT
7004                   (void *)thr->th.th_teams_microtask, // "unwrapped" task
7005 #endif
7006                   (microtask_t)thr->th.th_teams_microtask, // "wrapped" task
7007                   VOLATILE_CAST(launch_t) __kmp_invoke_task_func, NULL);
7008 #if INCLUDE_SSC_MARKS
7009   SSC_MARK_JOINING();
7010 #endif
7011 
7012   // AC: the last parameter "1" eliminates the join barrier, which won't work
7013   // because worker threads are in a fork barrier waiting for more parallel regions
7014   __kmp_join_call(loc, gtid
7015 #if OMPT_SUPPORT
7016                   ,
7017                   fork_context_intel
7018 #endif
7019                   ,
7020                   1);
7021 }
7022 
7023 int __kmp_invoke_teams_master(int gtid) {
7024   kmp_info_t *this_thr = __kmp_threads[gtid];
7025   kmp_team_t *team = this_thr->th.th_team;
7026 #if KMP_DEBUG
7027   if (!__kmp_threads[gtid]->th.th_team->t.t_serialized)
7028     KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn ==
7029                      (void *)__kmp_teams_master);
7030 #endif
7031   __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
7032   __kmp_teams_master(gtid);
7033   __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
7034   return 1;
7035 }
7036 #endif /* OMP_40_ENABLED */
7037 
7038 /* This sets the requested number of threads for the next parallel region
7039    encountered by this team. Since this should be enclosed in the fork/join
7040    critical section, it should avoid race conditions with asymmetrical nested
7041    parallelism. */
7042 
7043 void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) {
7044   kmp_info_t *thr = __kmp_threads[gtid];
7045 
7046   if (num_threads > 0)
7047     thr->th.th_set_nproc = num_threads;
7048 }
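
// Illustrative note (a sketch of the usual lowering, not authoritative): a
// num_threads clause is normally turned into a push call immediately before
// the fork, so the request stored above is consumed by the very next parallel
// region. User code such as
//
//   #pragma omp parallel num_threads(4)
//   { /* body */ }
//
// becomes, roughly:
//
//   __kmpc_push_num_threads(&loc, gtid, 4); // records 4 in th_set_nproc
//   __kmpc_fork_call(&loc, 0, body_microtask); // the fork picks it up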
7049 
7050 #if OMP_40_ENABLED
7051 
7052 /* This sets the requested number of teams for the teams region and/or
7053    the number of threads for the next parallel region encountered. */
7054 void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
7055                           int num_threads) {
7056   kmp_info_t *thr = __kmp_threads[gtid];
7057   KMP_DEBUG_ASSERT(num_teams >= 0);
7058   KMP_DEBUG_ASSERT(num_threads >= 0);
7059 
7060   if (num_teams == 0)
7061     num_teams = 1; // default number of teams is 1.
7062   if (num_teams > __kmp_teams_max_nth) { // too many teams requested?
7063     if (!__kmp_reserve_warn) {
7064       __kmp_reserve_warn = 1;
7065       __kmp_msg(kmp_ms_warning,
7066                 KMP_MSG(CantFormThrTeam, num_teams, __kmp_teams_max_nth),
7067                 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7068     }
7069     num_teams = __kmp_teams_max_nth;
7070   }
7071   // Set number of teams (number of threads in the outer "parallel" of the
7072   // teams)
7073   thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;
7074 
7075   // Remember the number of threads for inner parallel regions
7076   if (num_threads == 0) {
7077     if (!TCR_4(__kmp_init_middle))
7078       __kmp_middle_initialize(); // get __kmp_avail_proc calculated
7079     num_threads = __kmp_avail_proc / num_teams;
7080     if (num_teams * num_threads > __kmp_teams_max_nth) {
7081       // adjust num_threads w/o warning as it is not a user setting
7082       num_threads = __kmp_teams_max_nth / num_teams;
7083     }
7084   } else {
7085     if (num_teams * num_threads > __kmp_teams_max_nth) {
7086       int new_threads = __kmp_teams_max_nth / num_teams;
7087       if (!__kmp_reserve_warn) { // user asked for too many threads
7088         __kmp_reserve_warn = 1; // that conflicts with KMP_TEAMS_THREAD_LIMIT
7089         __kmp_msg(kmp_ms_warning,
7090                   KMP_MSG(CantFormThrTeam, num_threads, new_threads),
7091                   KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7092       }
7093       num_threads = new_threads;
7094     }
7095   }
7096   thr->th.th_teams_size.nth = num_threads;
7097 }
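
// A minimal sketch (hypothetical helper, not part of the runtime) of the
// clamping policy above, restated with plain ints: e.g. with 64 available
// procs, a teams limit of 32 and num_teams(8) with no thread_limit, each team
// ends up with min(64 / 8, 32 / 8) = 4 threads.
static int __kmp_example_teams_nth(int num_teams, int num_threads,
                                   int avail_proc, int teams_max_nth) {
  if (num_teams <= 0)
    num_teams = 1; // default number of teams is 1
  if (num_teams > teams_max_nth)
    num_teams = teams_max_nth; // too many teams requested
  if (num_threads <= 0) {
    num_threads = avail_proc / num_teams; // spread the procs across the teams
    if (num_teams * num_threads > teams_max_nth)
      num_threads = teams_max_nth / num_teams; // silent adjustment
  } else if (num_teams * num_threads > teams_max_nth) {
    num_threads = teams_max_nth / num_teams; // user request exceeds the limit
  }
  return num_threads;
}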
7098 
7099 // Set the proc_bind var to use in the following parallel region.
7100 void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) {
7101   kmp_info_t *thr = __kmp_threads[gtid];
7102   thr->th.th_set_proc_bind = proc_bind;
7103 }
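
// Illustrative note (assumption about the usual lowering): a proc_bind clause
// such as proc_bind(spread) is lowered to the matching __kmpc_push_proc_bind()
// entry point, which forwards here; the stored value is then consumed when the
// next parallel region partitions places for the new team.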
7104 
7105 #endif /* OMP_40_ENABLED */
7106 
7107 /* Launch the worker threads into the microtask. */
7108 
7109 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
7110   kmp_info_t *this_thr = __kmp_threads[gtid];
7111 
7112 #ifdef KMP_DEBUG
7113   int f;
7114 #endif /* KMP_DEBUG */
7115 
7116   KMP_DEBUG_ASSERT(team);
7117   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7118   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7119   KMP_MB(); /* Flush all pending memory write invalidates.  */
7120 
7121   team->t.t_construct = 0; /* no single directives seen yet */
7122   team->t.t_ordered.dt.t_value =
7123       0; /* thread 0 enters the ordered section first */
7124 
7125   /* Reset the identifiers on the dispatch buffer */
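  /* Descriptive note: the team keeps __kmp_dispatch_num_buffers dispatch
     buffers so that several dynamically scheduled (and doacross) loops can be
     in flight without a barrier between them; re-seeding the per-buffer
     indices here gives every new parallel region a consistent starting
     point. */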
7126   KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
7127   if (team->t.t_max_nproc > 1) {
7128     int i;
7129     for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
7130       team->t.t_disp_buffer[i].buffer_index = i;
7131 #if OMP_45_ENABLED
7132       team->t.t_disp_buffer[i].doacross_buf_idx = i;
7133 #endif
7134     }
7135   } else {
7136     team->t.t_disp_buffer[0].buffer_index = 0;
7137 #if OMP_45_ENABLED
7138     team->t.t_disp_buffer[0].doacross_buf_idx = 0;
7139 #endif
7140   }
7141 
7142   KMP_MB(); /* Flush all pending memory write invalidates.  */
7143   KMP_ASSERT(this_thr->th.th_team == team);
7144 
7145 #ifdef KMP_DEBUG
7146   for (f = 0; f < team->t.t_nproc; f++) {
7147     KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
7148                      team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
7149   }
7150 #endif /* KMP_DEBUG */
7151 
7152   /* release the worker threads so they may begin working */
7153   __kmp_fork_barrier(gtid, 0);
7154 }
7155 
7156 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
7157   kmp_info_t *this_thr = __kmp_threads[gtid];
7158 
7159   KMP_DEBUG_ASSERT(team);
7160   KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7161   KMP_ASSERT(KMP_MASTER_GTID(gtid));
7162   KMP_MB(); /* Flush all pending memory write invalidates.  */
7163 
7164 /* Join barrier after fork */
7165 
7166 #ifdef KMP_DEBUG
7167   if (__kmp_threads[gtid] &&
7168       __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
7169     __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n", gtid, gtid,
7170                  __kmp_threads[gtid]);
7171     __kmp_printf("__kmp_threads[%d]->th.th_team_nproc=%d, TEAM: %p, "
7172                  "team->t.t_nproc=%d\n",
7173                  gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
7174                  team->t.t_nproc);
7175     __kmp_print_structure();
7176   }
7177   KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
7178                    __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
7179 #endif /* KMP_DEBUG */
7180 
7181   __kmp_join_barrier(gtid); /* wait for everyone */
7182 
7183   KMP_MB(); /* Flush all pending memory write invalidates.  */
7184   KMP_ASSERT(this_thr->th.th_team == team);
7185 }
7186 
7187 /* ------------------------------------------------------------------------ */
7188 
7189 #ifdef USE_LOAD_BALANCE
7190 
7191 // Return the number of worker threads actively spinning in the hot team,
7192 // if we are at the outermost level of parallelism. Otherwise, return 0.
7193 static int __kmp_active_hot_team_nproc(kmp_root_t *root) {
7194   int i;
7195   int retval;
7196   kmp_team_t *hot_team;
7197 
7198   if (root->r.r_active) {
7199     return 0;
7200   }
7201   hot_team = root->r.r_hot_team;
7202   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
7203     return hot_team->t.t_nproc - 1; // Don't count master thread
7204   }
7205 
7206   // Skip the master thread - it is accounted for elsewhere.
7207   retval = 0;
7208   for (i = 1; i < hot_team->t.t_nproc; i++) {
7209     if (hot_team->t.t_threads[i]->th.th_active) {
7210       retval++;
7211     }
7212   }
7213   return retval;
7214 }
7215 
7216 // Perform an automatic adjustment to the number of
7217 // threads used by the next parallel region.
7218 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) {
7219   int retval;
7220   int pool_active;
7221   int hot_team_active;
7222   int team_curr_active;
7223   int system_active;
7224 
7225   KB_TRACE(20, ("__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", root,
7226                 set_nproc));
7227   KMP_DEBUG_ASSERT(root);
7228   KMP_DEBUG_ASSERT(root->r.r_root_team->t.t_threads[0]
7229                        ->th.th_current_task->td_icvs.dynamic == TRUE);
7230   KMP_DEBUG_ASSERT(set_nproc > 1);
7231 
7232   if (set_nproc == 1) {
7233     KB_TRACE(20, ("__kmp_load_balance_nproc: serial execution.\n"));
7234     return 1;
7235   }
7236 
7237   // Threads that are active in the thread pool, active in the hot team for this
7238   // particular root (if we are at the outer par level), and the currently
7239   // executing thread (to become the master) are available to add to the new
7240   // team, but are currently contributing to the system load, and must be
7241   // accounted for.
7242   pool_active = TCR_4(__kmp_thread_pool_active_nth);
7243   hot_team_active = __kmp_active_hot_team_nproc(root);
7244   team_curr_active = pool_active + hot_team_active + 1;
7245 
7246   // Check the system load.
7247   system_active = __kmp_get_load_balance(__kmp_avail_proc + team_curr_active);
7248   KB_TRACE(30, ("__kmp_load_balance_nproc: system active = %d pool active = %d "
7249                 "hot team active = %d\n",
7250                 system_active, pool_active, hot_team_active));
7251 
7252   if (system_active < 0) {
7253     // There was an error reading the necessary info from /proc, so use the
7254     // thread limit algorithm instead. Once we set __kmp_global.g.g_dynamic_mode
7255     // = dynamic_thread_limit, we shouldn't wind up getting back here.
7256     __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
7257     KMP_WARNING(CantLoadBalUsing, "KMP_DYNAMIC_MODE=thread limit");
7258 
7259     // Make this call behave like the thread limit algorithm.
7260     retval = __kmp_avail_proc - __kmp_nth +
7261              (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
7262     if (retval > set_nproc) {
7263       retval = set_nproc;
7264     }
7265     if (retval < KMP_MIN_NTH) {
7266       retval = KMP_MIN_NTH;
7267     }
7268 
7269     KB_TRACE(20, ("__kmp_load_balance_nproc: thread limit exit. retval:%d\n",
7270                   retval));
7271     return retval;
7272   }
7273 
7274   // There is a slight delay in the load balance algorithm in detecting new
7275   // running procs. The real system load at this instant should be at least
7276   // as large as the number of active OMP threads available to add to the team.
7277   if (system_active < team_curr_active) {
7278     system_active = team_curr_active;
7279   }
7280   retval = __kmp_avail_proc - system_active + team_curr_active;
7281   if (retval > set_nproc) {
7282     retval = set_nproc;
7283   }
7284   if (retval < KMP_MIN_NTH) {
7285     retval = KMP_MIN_NTH;
7286   }
7287 
7288   KB_TRACE(20, ("__kmp_load_balance_nproc: exit. retval:%d\n", retval));
7289   return retval;
7290 } // __kmp_load_balance_nproc()
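
// A minimal sketch (hypothetical helper, not part of the runtime) of the core
// heuristic above: e.g. with 16 available procs, a measured system load of 10
// and 3 threads already committed to this team, the region gets
// 16 - 10 + 3 = 9 threads, clamped to [min_nth, set_nproc].
static int __kmp_example_load_balance(int avail_proc, int system_active,
                                      int team_curr_active, int set_nproc,
                                      int min_nth) {
  if (system_active < team_curr_active)
    system_active = team_curr_active; // load is never below our own threads
  int retval = avail_proc - system_active + team_curr_active;
  if (retval > set_nproc)
    retval = set_nproc; // never exceed the requested team size
  if (retval < min_nth)
    retval = min_nth; // never drop below the runtime minimum
  return retval;
}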
7291 
7292 #endif /* USE_LOAD_BALANCE */
7293 
7294 /* ------------------------------------------------------------------------ */
7295 
7296 /* NOTE: this is called with the __kmp_init_lock held */
7297 void __kmp_cleanup(void) {
7298   int f;
7299 
7300   KA_TRACE(10, ("__kmp_cleanup: enter\n"));
7301 
7302   if (TCR_4(__kmp_init_parallel)) {
7303 #if KMP_HANDLE_SIGNALS
7304     __kmp_remove_signals();
7305 #endif
7306     TCW_4(__kmp_init_parallel, FALSE);
7307   }
7308 
7309   if (TCR_4(__kmp_init_middle)) {
7310 #if KMP_AFFINITY_SUPPORTED
7311     __kmp_affinity_uninitialize();
7312 #endif /* KMP_AFFINITY_SUPPORTED */
7313     __kmp_cleanup_hierarchy();
7314     TCW_4(__kmp_init_middle, FALSE);
7315   }
7316 
7317   KA_TRACE(10, ("__kmp_cleanup: go serial cleanup\n"));
7318 
7319   if (__kmp_init_serial) {
7320     __kmp_runtime_destroy();
7321     __kmp_init_serial = FALSE;
7322   }
7323 
7324   for (f = 0; f < __kmp_threads_capacity; f++) {
7325     if (__kmp_root[f] != NULL) {
7326       __kmp_free(__kmp_root[f]);
7327       __kmp_root[f] = NULL;
7328     }
7329   }
7330   __kmp_free(__kmp_threads);
7331   // __kmp_threads and __kmp_root were allocated at once, as a single block, so
7332   // there is no need to free __kmp_root separately.
7333   __kmp_threads = NULL;
7334   __kmp_root = NULL;
7335   __kmp_threads_capacity = 0;
7336 
7337 #if KMP_USE_DYNAMIC_LOCK
7338   __kmp_cleanup_indirect_user_locks();
7339 #else
7340   __kmp_cleanup_user_locks();
7341 #endif
7342 
7343 #if KMP_AFFINITY_SUPPORTED
7344   KMP_INTERNAL_FREE(CCAST(char *, __kmp_cpuinfo_file));
7345   __kmp_cpuinfo_file = NULL;
7346 #endif /* KMP_AFFINITY_SUPPORTED */
7347 
7348 #if KMP_USE_ADAPTIVE_LOCKS
7349 #if KMP_DEBUG_ADAPTIVE_LOCKS
7350   __kmp_print_speculative_stats();
7351 #endif
7352 #endif
7353   KMP_INTERNAL_FREE(__kmp_nested_nth.nth);
7354   __kmp_nested_nth.nth = NULL;
7355   __kmp_nested_nth.size = 0;
7356   __kmp_nested_nth.used = 0;
7357   KMP_INTERNAL_FREE(__kmp_nested_proc_bind.bind_types);
7358   __kmp_nested_proc_bind.bind_types = NULL;
7359   __kmp_nested_proc_bind.size = 0;
7360   __kmp_nested_proc_bind.used = 0;
7361 
7362   __kmp_i18n_catclose();
7363 
7364 #if KMP_STATS_ENABLED
7365   __kmp_stats_fini();
7366 #endif
7367 
7368   KA_TRACE(10, ("__kmp_cleanup: exit\n"));
7369 }
7370 
7371 /* ------------------------------------------------------------------------ */
7372 
7373 int __kmp_ignore_mppbeg(void) {
7374   char *env;
7375 
7376   if ((env = getenv("KMP_IGNORE_MPPBEG")) != NULL) {
7377     if (__kmp_str_match_false(env))
7378       return FALSE;
7379   }
7380   // By default __kmpc_begin() is no-op.
7381   return TRUE;
7382 }
7383 
7384 int __kmp_ignore_mppend(void) {
7385   char *env;
7386 
7387   if ((env = getenv("KMP_IGNORE_MPPEND")) != NULL) {
7388     if (__kmp_str_match_false(env))
7389       return FALSE;
7390   }
7391   // By default __kmpc_end() is no-op.
7392   return TRUE;
7393 }
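
// Illustrative note: with the defaults both probes return TRUE, so
// __kmpc_begin() and __kmpc_end() remain no-ops; exporting, for example,
// KMP_IGNORE_MPPEND=0 makes __kmp_ignore_mppend() return FALSE so that
// __kmpc_end() can do real work (inferred from the checks above).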
7394 
7395 void __kmp_internal_begin(void) {
7396   int gtid;
7397   kmp_root_t *root;
7398 
7399   /* This is a very important step, as it will register new sibling threads
7400      and assign these new uber threads a new gtid. */
7401   gtid = __kmp_entry_gtid();
7402   root = __kmp_threads[gtid]->th.th_root;
7403   KMP_ASSERT(KMP_UBER_GTID(gtid));
7404 
7405   if (root->r.r_begin)
7406     return;
7407   __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
7408   if (root->r.r_begin) {
7409     __kmp_release_lock(&root->r.r_begin_lock, gtid);
7410     return;
7411   }
7412 
7413   root->r.r_begin = TRUE;
7414 
7415   __kmp_release_lock(&root->r.r_begin_lock, gtid);
7416 }
7417 
7418 /* ------------------------------------------------------------------------ */
7419 
7420 void __kmp_user_set_library(enum library_type arg) {
7421   int gtid;
7422   kmp_root_t *root;
7423   kmp_info_t *thread;
7424 
7425   /* first, make sure we are initialized so we can get our gtid */
7426 
7427   gtid = __kmp_entry_gtid();
7428   thread = __kmp_threads[gtid];
7429 
7430   root = thread->th.th_root;
7431 
7432   KA_TRACE(20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg,
7433                 library_serial));
7434   if (root->r.r_in_parallel) { /* Must be called in serial section of top-level
7435                                   thread */
7436     KMP_WARNING(SetLibraryIncorrectCall);
7437     return;
7438   }
7439 
7440   switch (arg) {
7441   case library_serial:
7442     thread->th.th_set_nproc = 0;
7443     set__nproc(thread, 1);
7444     break;
7445   case library_turnaround:
7446     thread->th.th_set_nproc = 0;
7447     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7448                                            : __kmp_dflt_team_nth_ub);
7449     break;
7450   case library_throughput:
7451     thread->th.th_set_nproc = 0;
7452     set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
7453                                            : __kmp_dflt_team_nth_ub);
7454     break;
7455   default:
7456     KMP_FATAL(UnknownLibraryType, arg);
7457   }
7458 
7459   __kmp_aux_set_library(arg);
7460 }
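
// Illustrative note (assumption): this routine backs the user-visible
// kmp_set_library() / kmp_set_library_*() extensions; e.g. a call such as
//   kmp_set_library_throughput();
// from a serial part of the program lands here with arg == library_throughput,
// clears any pending th_set_nproc request, and then records the mode via
// __kmp_aux_set_library().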
7461 
7462 void __kmp_aux_set_stacksize(size_t arg) {
7463   if (!__kmp_init_serial)
7464     __kmp_serial_initialize();
7465 
7466 #if KMP_OS_DARWIN
7467   if (arg & (0x1000 - 1)) {
7468     arg &= ~(0x1000 - 1);
7469     if (arg + 0x1000) /* check for overflow if we round up */
7470       arg += 0x1000;
7471   }
7472 #endif
7473   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
7474 
7475   /* only change the default stacksize before the first parallel region */
7476   if (!TCR_4(__kmp_init_parallel)) {
7477     size_t value = arg; /* argument is in bytes */
7478 
7479     if (value < __kmp_sys_min_stksize)
7480       value = __kmp_sys_min_stksize;
7481     else if (value > KMP_MAX_STKSIZE)
7482       value = KMP_MAX_STKSIZE;
7483 
7484     __kmp_stksize = value;
7485 
7486     __kmp_env_stksize = TRUE; /* was KMP_STACKSIZE specified? */
7487   }
7488 
7489   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
7490 }
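
// A minimal sketch (hypothetical helper, not part of the runtime) of the
// Darwin page rounding above: a request of 0x12345 bytes becomes 0x13000 (the
// next 4 KiB boundary), while a request so large that rounding up would wrap
// around is left truncated to the boundary below instead of overflowing.
static size_t __kmp_example_round_stacksize(size_t bytes) {
  const size_t page = 0x1000; // assume 4 KiB pages
  if (bytes & (page - 1)) {   // not already page-aligned
    bytes &= ~(page - 1);     // round down to a page boundary
    if (bytes + page)         // round up only if that does not wrap around
      bytes += page;
  }
  return bytes;
}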
7491 
7492 /* set the behaviour of the runtime library */
7493 /* TODO this can cause some odd behaviour with sibling parallelism... */
7494 void __kmp_aux_set_library(enum library_type arg) {
7495   __kmp_library = arg;
7496 
7497   switch (__kmp_library) {
7498   case library_serial: {
7499     KMP_INFORM(LibraryIsSerial);
7500     (void)__kmp_change_library(TRUE);
7501   } break;
7502   case library_turnaround:
7503     (void)__kmp_change_library(TRUE);
7504     break;
7505   case library_throughput:
7506     (void)__kmp_change_library(FALSE);
7507     break;
7508   default:
7509     KMP_FATAL(UnknownLibraryType, arg);
7510   }
7511 }
7512 
7513 /* ------------------------------------------------------------------------ */
7514 
7515 void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid) {
7516   int blocktime = arg; /* argument is in milliseconds */
7517 #if KMP_USE_MONITOR
7518   int bt_intervals;
7519 #endif
7520   int bt_set;
7521 
7522   __kmp_save_internal_controls(thread);
7523 
7524   /* Normalize and set blocktime for the teams */
7525   if (blocktime < KMP_MIN_BLOCKTIME)
7526     blocktime = KMP_MIN_BLOCKTIME;
7527   else if (blocktime > KMP_MAX_BLOCKTIME)
7528     blocktime = KMP_MAX_BLOCKTIME;
7529 
7530   set__blocktime_team(thread->th.th_team, tid, blocktime);
7531   set__blocktime_team(thread->th.th_serial_team, 0, blocktime);
7532 
7533 #if KMP_USE_MONITOR
7534   /* Calculate and set blocktime intervals for the teams */
7535   bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups);
7536 
7537   set__bt_intervals_team(thread->th.th_team, tid, bt_intervals);
7538   set__bt_intervals_team(thread->th.th_serial_team, 0, bt_intervals);
7539 #endif
7540 
7541   /* Record that blocktime has been explicitly set (bt_set = TRUE) for the teams */
7542   bt_set = TRUE;
7543 
7544   set__bt_set_team(thread->th.th_team, tid, bt_set);
7545   set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
7546 #if KMP_USE_MONITOR
7547   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, "
7548                 "bt_intervals=%d, monitor_updates=%d\n",
7549                 __kmp_gtid_from_tid(tid, thread->th.th_team),
7550                 thread->th.th_team->t.t_id, tid, blocktime, bt_intervals,
7551                 __kmp_monitor_wakeups));
7552 #else
7553   KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d\n",
7554                 __kmp_gtid_from_tid(tid, thread->th.th_team),
7555                 thread->th.th_team->t.t_id, tid, blocktime));
7556 #endif
7557 }
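
// Usage note (illustrative, based on the public blocktime controls): this is
// the backend of kmp_set_blocktime(); e.g. kmp_set_blocktime(0) asks idle
// workers to sleep as soon as possible, while out-of-range requests are
// clamped to [KMP_MIN_BLOCKTIME, KMP_MAX_BLOCKTIME] as shown above.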
7558 
7559 void __kmp_aux_set_defaults(char const *str, int len) {
7560   if (!__kmp_init_serial) {
7561     __kmp_serial_initialize();
7562   }
7563   __kmp_env_initialize(str);
7564 
7565   if (__kmp_settings
7566 #if OMP_40_ENABLED
7567       || __kmp_display_env || __kmp_display_env_verbose
7568 #endif // OMP_40_ENABLED
7569       ) {
7570     __kmp_env_print();
7571   }
7572 } // __kmp_aux_set_defaults
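
// Usage note (illustrative, assuming the usual entry-point wiring): this is
// the backend of kmp_set_defaults(), so a call such as
//   kmp_set_defaults("KMP_BLOCKTIME=0");
// re-runs the environment parser on that string and, when KMP_SETTINGS or
// OMP_DISPLAY_ENV requests it, reprints the effective settings.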
7573 
7574 /* ------------------------------------------------------------------------ */
7575 /* internal fast reduction routines */
7576 
7577 PACKED_REDUCTION_METHOD_T
7578 __kmp_determine_reduction_method(
7579     ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
7580     void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
7581     kmp_critical_name *lck) {
7582 
7583   // Default reduction method: critical construct (lck != NULL, as in the
7584   // current PAROPT).
7585   // If (reduce_data != NULL && reduce_func != NULL): the tree-reduction method
7586   // can be selected by the RTL.
7587   // If loc->flags contains KMP_IDENT_ATOMIC_REDUCE, the atomic reduce method
7588   // can be selected by the RTL.
7589   // Finally, it is up to the OpenMP RTL to decide which method to select among
7590   // those generated by PAROPT.
7591 
7592   PACKED_REDUCTION_METHOD_T retval;
7593 
7594   int team_size;
7595 
7596   KMP_DEBUG_ASSERT(loc); // it would be nice to test ( loc != 0 )
7597   KMP_DEBUG_ASSERT(lck); // it would be nice to test ( lck != 0 )
7598 
7599 #define FAST_REDUCTION_ATOMIC_METHOD_GENERATED                                 \
7600   ((loc->flags & (KMP_IDENT_ATOMIC_REDUCE)) == (KMP_IDENT_ATOMIC_REDUCE))
7601 #define FAST_REDUCTION_TREE_METHOD_GENERATED ((reduce_data) && (reduce_func))
7602 
7603   retval = critical_reduce_block;
7604 
7605   // another way of getting the team size (with 1 dynamic dereference) is slower
7606   team_size = __kmp_get_team_num_threads(global_tid);
7607   if (team_size == 1) {
7608 
7609     retval = empty_reduce_block;
7610 
7611   } else {
7612 
7613     int atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
7614     int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
7615 
7616 #if KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 || KMP_ARCH_MIPS64
7617 
7618 #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_WINDOWS ||       \
7619     KMP_OS_DARWIN
7620 
7621     int teamsize_cutoff = 4;
7622 
7623 #if KMP_MIC_SUPPORTED
7624     if (__kmp_mic_type != non_mic) {
7625       teamsize_cutoff = 8;
7626     }
7627 #endif
7628     if (tree_available) {
7629       if (team_size <= teamsize_cutoff) {
7630         if (atomic_available) {
7631           retval = atomic_reduce_block;
7632         }
7633       } else {
7634         retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
7635       }
7636     } else if (atomic_available) {
7637       retval = atomic_reduce_block;
7638     }
7639 #else
7640 #error "Unknown or unsupported OS"
7641 #endif // KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_WINDOWS ||
7642 // KMP_OS_DARWIN
7643 
7644 #elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS
7645 
7646 #if KMP_OS_LINUX || KMP_OS_WINDOWS
7647 
7648     // basic tuning
7649 
7650     if (atomic_available) {
7651       if (num_vars <= 2) { // && ( team_size <= 8 ) due to false-sharing ???
7652         retval = atomic_reduce_block;
7653       }
7654     } // otherwise: use critical section
7655 
7656 #elif KMP_OS_DARWIN
7657 
7658     if (atomic_available && (num_vars <= 3)) {
7659       retval = atomic_reduce_block;
7660     } else if (tree_available) {
7661       if ((reduce_size > (9 * sizeof(kmp_real64))) &&
7662           (reduce_size < (2000 * sizeof(kmp_real64)))) {
7663         retval = TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER;
7664       }
7665     } // otherwise: use critical section
7666 
7667 #else
7668 #error "Unknown or unsupported OS"
7669 #endif
7670 
7671 #else
7672 #error "Unknown or unsupported architecture"
7673 #endif
7674   }
7675 
7676   // KMP_FORCE_REDUCTION
7677 
7678   // If the team is serialized (team_size == 1), ignore the forced reduction
7679   // method and stay with the unsynchronized method (empty_reduce_block)
7680   if (__kmp_force_reduction_method != reduction_method_not_defined &&
7681       team_size != 1) {
7682 
7683     PACKED_REDUCTION_METHOD_T forced_retval = critical_reduce_block;
7684 
7685     int atomic_available, tree_available;
7686 
7687     switch ((forced_retval = __kmp_force_reduction_method)) {
7688     case critical_reduce_block:
7689       KMP_ASSERT(lck); // lck should be != 0
7690       break;
7691 
7692     case atomic_reduce_block:
7693       atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
7694       if (!atomic_available) {
7695         KMP_WARNING(RedMethodNotSupported, "atomic");
7696         forced_retval = critical_reduce_block;
7697       }
7698       break;
7699 
7700     case tree_reduce_block:
7701       tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
7702       if (!tree_available) {
7703         KMP_WARNING(RedMethodNotSupported, "tree");
7704         forced_retval = critical_reduce_block;
7705       } else {
7706 #if KMP_FAST_REDUCTION_BARRIER
7707         forced_retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
7708 #endif
7709       }
7710       break;
7711 
7712     default:
7713       KMP_ASSERT(0); // "unsupported method specified"
7714     }
7715 
7716     retval = forced_retval;
7717   }
7718 
7719   KA_TRACE(10, ("reduction method selected=%08x\n", retval));
7720 
7721 #undef FAST_REDUCTION_TREE_METHOD_GENERATED
7722 #undef FAST_REDUCTION_ATOMIC_METHOD_GENERATED
7723 
7724   return (retval);
7725 }
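
// A minimal sketch (hypothetical helper, not part of the runtime) of the
// precedence used on the 64-bit Linux-like path above, ignoring the
// serialized-team and forced-method special cases: with a cutoff of 4, a team
// of 4 with both methods generated picks the atomic block, while a team of 16
// with a tree reduction generated picks the tree block.
enum __kmp_example_red_kind {
  __kmp_example_red_critical,
  __kmp_example_red_atomic,
  __kmp_example_red_tree
};
static __kmp_example_red_kind
__kmp_example_pick_reduction(int team_size, int atomic_available,
                             int tree_available, int teamsize_cutoff) {
  if (tree_available && team_size > teamsize_cutoff)
    return __kmp_example_red_tree; // large teams prefer the tree reduction
  if (atomic_available)
    return __kmp_example_red_atomic; // otherwise use atomics when generated
  return __kmp_example_red_critical; // fall back to the critical section
}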
7726 
7727 // This function is for testing the set/get/determine reduce method support.
7728 kmp_int32 __kmp_get_reduce_method(void) {
7729   return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
7730 }
7731