1 /*
2  * z_Linux_util.cpp -- platform specific routines.
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "kmp.h"
14 #include "kmp_affinity.h"
15 #include "kmp_i18n.h"
16 #include "kmp_io.h"
17 #include "kmp_itt.h"
18 #include "kmp_lock.h"
19 #include "kmp_stats.h"
20 #include "kmp_str.h"
21 #include "kmp_wait_release.h"
22 #include "kmp_wrapper_getpid.h"
23 
24 #if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
25 #include <alloca.h>
26 #endif
27 #include <math.h> // HUGE_VAL.
28 #if KMP_OS_LINUX
29 #include <semaphore.h>
30 #endif // KMP_OS_LINUX
31 #include <sys/resource.h>
32 #include <sys/syscall.h>
33 #include <sys/time.h>
34 #include <sys/times.h>
35 #include <unistd.h>
36 
37 #if KMP_OS_LINUX
38 #include <sys/sysinfo.h>
39 #if KMP_USE_FUTEX
// We should really include <futex.h>, but that causes compatibility problems on
// different Linux* OS distributions that either require you to include (or
// break when you try to include) <pci/types.h>. Since all we need is the two
// macros below (which are part of the kernel ABI, so can't change), we just
// define the constants here and don't include <futex.h>.
45 #ifndef FUTEX_WAIT
46 #define FUTEX_WAIT 0
47 #endif
48 #ifndef FUTEX_WAKE
49 #define FUTEX_WAKE 1
50 #endif
51 #endif
52 #elif KMP_OS_DARWIN
53 #include <mach/mach.h>
54 #include <sys/sysctl.h>
55 #elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
56 #include <sys/types.h>
57 #include <sys/sysctl.h>
58 #include <sys/user.h>
59 #include <pthread_np.h>
60 #elif KMP_OS_NETBSD || KMP_OS_OPENBSD
61 #include <sys/types.h>
62 #include <sys/sysctl.h>
63 #endif
64 
65 #include <ctype.h>
66 #include <dirent.h>
67 #include <fcntl.h>
68 
69 struct kmp_sys_timer {
70   struct timespec start;
71 };
72 
73 // Convert timespec to nanoseconds.
74 #define TS2NS(timespec)                                                        \
75   (((timespec).tv_sec * (long int)1e9) + (timespec).tv_nsec)
76 
77 static struct kmp_sys_timer __kmp_sys_timer_data;
78 
79 #if KMP_HANDLE_SIGNALS
80 typedef void (*sig_func_t)(int);
81 STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
82 static sigset_t __kmp_sigset;
83 #endif
84 
85 static int __kmp_init_runtime = FALSE;
86 
87 static int __kmp_fork_count = 0;
88 
89 static pthread_condattr_t __kmp_suspend_cond_attr;
90 static pthread_mutexattr_t __kmp_suspend_mutex_attr;
91 
92 static kmp_cond_align_t __kmp_wait_cv;
93 static kmp_mutex_align_t __kmp_wait_mx;
94 
95 kmp_uint64 __kmp_ticks_per_msec = 1000000;
96 
97 #ifdef DEBUG_SUSPEND
98 static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
99   KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
100                cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
101                cond->c_cond.__c_waiting);
102 }
103 #endif
104 
105 #if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)
106 
107 /* Affinity support */
108 
109 void __kmp_affinity_bind_thread(int which) {
110   KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
111               "Illegal set affinity operation when not capable");
112 
113   kmp_affin_mask_t *mask;
114   KMP_CPU_ALLOC_ON_STACK(mask);
115   KMP_CPU_ZERO(mask);
116   KMP_CPU_SET(which, mask);
117   __kmp_set_system_affinity(mask, TRUE);
118   KMP_CPU_FREE_FROM_STACK(mask);
119 }
120 
121 /* Determine if we can access affinity functionality on this version of
122  * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
123  * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
124 void __kmp_affinity_determine_capable(const char *env_var) {
125   // Check and see if the OS supports thread affinity.
126 
127 #if KMP_OS_LINUX
128 #define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
129 #define KMP_CPU_SET_TRY_SIZE CACHE_LINE
130 #elif KMP_OS_FREEBSD
131 #define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
132 #endif
133 
134 #if KMP_OS_LINUX
135   long gCode;
136   unsigned char *buf;
137   buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
138 
139   // If the syscall returns a suggestion for the size,
140   // then we don't have to search for an appropriate size.
141   gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_TRY_SIZE, buf);
142   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
143                 "initial getaffinity call returned %ld errno = %d\n",
144                 gCode, errno));
145 
146   if (gCode < 0 && errno != EINVAL) {
147     // System call not supported
148     if (__kmp_affinity_verbose ||
149         (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
150          (__kmp_affinity_type != affinity_default) &&
151          (__kmp_affinity_type != affinity_disabled))) {
152       int error = errno;
153       kmp_msg_t err_code = KMP_ERR(error);
154       __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
155                 err_code, __kmp_msg_null);
156       if (__kmp_generate_warnings == kmp_warnings_off) {
157         __kmp_str_free(&err_code.str);
158       }
159     }
160     KMP_AFFINITY_DISABLE();
161     KMP_INTERNAL_FREE(buf);
162     return;
163   } else if (gCode > 0) {
164     // The optimal situation: the OS returns the size of the buffer it expects.
165     KMP_AFFINITY_ENABLE(gCode);
166     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
167                   "affinity supported (mask size %d)\n",
168                   (int)__kmp_affin_mask_size));
169     KMP_INTERNAL_FREE(buf);
170     return;
171   }
172 
173   // Call the getaffinity system call repeatedly with increasing set sizes
174   // until we succeed, or reach an upper bound on the search.
175   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
176                 "searching for proper set size\n"));
177   int size;
178   for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
179     gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
180     KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %ld errno = %d\n",
182                   size, gCode, errno));
183 
184     if (gCode < 0) {
185       if (errno == ENOSYS) {
186         // We shouldn't get here
187         KA_TRACE(30, ("__kmp_affinity_determine_capable: "
188                       "inconsistent OS call behavior: errno == ENOSYS for mask "
189                       "size %d\n",
190                       size));
191         if (__kmp_affinity_verbose ||
192             (__kmp_affinity_warnings &&
193              (__kmp_affinity_type != affinity_none) &&
194              (__kmp_affinity_type != affinity_default) &&
195              (__kmp_affinity_type != affinity_disabled))) {
196           int error = errno;
197           kmp_msg_t err_code = KMP_ERR(error);
198           __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
199                     err_code, __kmp_msg_null);
200           if (__kmp_generate_warnings == kmp_warnings_off) {
201             __kmp_str_free(&err_code.str);
202           }
203         }
204         KMP_AFFINITY_DISABLE();
205         KMP_INTERNAL_FREE(buf);
206         return;
207       }
208       continue;
209     }
210 
211     KMP_AFFINITY_ENABLE(gCode);
212     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
213                   "affinity supported (mask size %d)\n",
214                   (int)__kmp_affin_mask_size));
215     KMP_INTERNAL_FREE(buf);
216     return;
217   }
218 #elif KMP_OS_FREEBSD
219   long gCode;
220   unsigned char *buf;
221   buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
222   gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
223                                  reinterpret_cast<cpuset_t *>(buf));
224   KA_TRACE(30, ("__kmp_affinity_determine_capable: "
225                 "initial getaffinity call returned %d errno = %d\n",
226                 gCode, errno));
227   if (gCode == 0) {
228     KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
229     KA_TRACE(10, ("__kmp_affinity_determine_capable: "
230                   "affinity supported (mask size %d)\n",
231                   (int)__kmp_affin_mask_size));
232     KMP_INTERNAL_FREE(buf);
233     return;
234   }
235 #endif
236   KMP_INTERNAL_FREE(buf);
237 
238   // Affinity is not supported
239   KMP_AFFINITY_DISABLE();
240   KA_TRACE(10, ("__kmp_affinity_determine_capable: "
241                 "cannot determine mask size - affinity not supported\n"));
242   if (__kmp_affinity_verbose ||
243       (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
244        (__kmp_affinity_type != affinity_default) &&
245        (__kmp_affinity_type != affinity_disabled))) {
246     KMP_WARNING(AffCantGetMaskSize, env_var);
247   }
248 }
249 
250 #endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
251 
252 #if KMP_USE_FUTEX
253 
254 int __kmp_futex_determine_capable() {
255   int loc = 0;
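  // Probe the futex syscall with a FUTEX_WAKE on a dummy variable; if the
  // kernel does not support futexes, the call fails with errno == ENOSYS.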
256   long rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
257   int retval = (rc == 0) || (errno != ENOSYS);
258 
  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %ld errno = %d\n", rc, errno));
261   KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
262                 retval ? "" : " not"));
263 
264   return retval;
265 }
266 
267 #endif // KMP_USE_FUTEX
268 
269 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* The IA-32 architecture has only a 32-bit "add-exchange" instruction, so we
   use compare_and_store loops for these routines. */
272 
273 kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
274   kmp_int8 old_value, new_value;
275 
276   old_value = TCR_1(*p);
277   new_value = old_value | d;
278 
279   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
280     KMP_CPU_PAUSE();
281     old_value = TCR_1(*p);
282     new_value = old_value | d;
283   }
284   return old_value;
285 }
286 
287 kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
288   kmp_int8 old_value, new_value;
289 
290   old_value = TCR_1(*p);
291   new_value = old_value & d;
292 
293   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
294     KMP_CPU_PAUSE();
295     old_value = TCR_1(*p);
296     new_value = old_value & d;
297   }
298   return old_value;
299 }
300 
301 kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
302   kmp_uint32 old_value, new_value;
303 
304   old_value = TCR_4(*p);
305   new_value = old_value | d;
306 
307   while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
308     KMP_CPU_PAUSE();
309     old_value = TCR_4(*p);
310     new_value = old_value | d;
311   }
312   return old_value;
313 }
314 
315 kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
316   kmp_uint32 old_value, new_value;
317 
318   old_value = TCR_4(*p);
319   new_value = old_value & d;
320 
321   while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
322     KMP_CPU_PAUSE();
323     old_value = TCR_4(*p);
324     new_value = old_value & d;
325   }
326   return old_value;
327 }
328 
329 #if KMP_ARCH_X86
330 kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
331   kmp_int8 old_value, new_value;
332 
333   old_value = TCR_1(*p);
334   new_value = old_value + d;
335 
336   while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
337     KMP_CPU_PAUSE();
338     old_value = TCR_1(*p);
339     new_value = old_value + d;
340   }
341   return old_value;
342 }
343 
344 kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
345   kmp_int64 old_value, new_value;
346 
347   old_value = TCR_8(*p);
348   new_value = old_value + d;
349 
350   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
351     KMP_CPU_PAUSE();
352     old_value = TCR_8(*p);
353     new_value = old_value + d;
354   }
355   return old_value;
356 }
357 #endif /* KMP_ARCH_X86 */
358 
359 kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
360   kmp_uint64 old_value, new_value;
361 
362   old_value = TCR_8(*p);
363   new_value = old_value | d;
364   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
365     KMP_CPU_PAUSE();
366     old_value = TCR_8(*p);
367     new_value = old_value | d;
368   }
369   return old_value;
370 }
371 
372 kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
373   kmp_uint64 old_value, new_value;
374 
375   old_value = TCR_8(*p);
376   new_value = old_value & d;
377   while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
378     KMP_CPU_PAUSE();
379     old_value = TCR_8(*p);
380     new_value = old_value & d;
381   }
382   return old_value;
383 }
384 
385 #endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
386 
387 void __kmp_terminate_thread(int gtid) {
388   int status;
389   kmp_info_t *th = __kmp_threads[gtid];
390 
391   if (!th)
392     return;
393 
394 #ifdef KMP_CANCEL_THREADS
395   KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
396   status = pthread_cancel(th->th.th_info.ds.ds_thread);
397   if (status != 0 && status != ESRCH) {
398     __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
399                 __kmp_msg_null);
400   }
401 #endif
402   KMP_YIELD(TRUE);
} // __kmp_terminate_thread
404 
405 /* Set thread stack info according to values returned by pthread_getattr_np().
406    If values are unreasonable, assume call failed and use incremental stack
407    refinement method instead. Returns TRUE if the stack parameters could be
408    determined exactly, FALSE if incremental refinement is necessary. */
409 static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
410   int stack_data;
411 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
412     KMP_OS_HURD
413   pthread_attr_t attr;
414   int status;
415   size_t size = 0;
416   void *addr = 0;
417 
  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation, so
     pthread_attr_getstack may cause thread gtid aliasing. */
421   if (!KMP_UBER_GTID(gtid)) {
422 
423     /* Fetch the real thread attributes */
424     status = pthread_attr_init(&attr);
425     KMP_CHECK_SYSFAIL("pthread_attr_init", status);
426 #if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
427     status = pthread_attr_get_np(pthread_self(), &attr);
428     KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
429 #else
430     status = pthread_getattr_np(pthread_self(), &attr);
431     KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
432 #endif
433     status = pthread_attr_getstack(&attr, &addr, &size);
434     KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
435     KA_TRACE(60,
436              ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
437               " %lu, low addr: %p\n",
438               gtid, size, addr));
439     status = pthread_attr_destroy(&attr);
440     KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
441   }
442 
443   if (size != 0 && addr != 0) { // was stack parameter determination successful?
444     /* Store the correct base and size */
445     TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
446     TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
447     TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
448     return TRUE;
449   }
450 #endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD  \
451           || KMP_OS_HURD */
452   /* Use incremental refinement starting from initial conservative estimate */
453   TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
454   TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
455   TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
456   return FALSE;
457 }
458 
459 static void *__kmp_launch_worker(void *thr) {
460   int status, old_type, old_state;
461 #ifdef KMP_BLOCK_SIGNALS
462   sigset_t new_set, old_set;
463 #endif /* KMP_BLOCK_SIGNALS */
464   void *exit_val;
465 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
466     KMP_OS_OPENBSD || KMP_OS_HURD
467   void *volatile padding = 0;
468 #endif
469   int gtid;
470 
471   gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
472   __kmp_gtid_set_specific(gtid);
473 #ifdef KMP_TDATA_GTID
474   __kmp_gtid = gtid;
475 #endif
476 #if KMP_STATS_ENABLED
477   // set thread local index to point to thread-specific stats
478   __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
479   __kmp_stats_thread_ptr->startLife();
480   KMP_SET_THREAD_STATE(IDLE);
481   KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
482 #endif
483 
484 #if USE_ITT_BUILD
485   __kmp_itt_thread_name(gtid);
486 #endif /* USE_ITT_BUILD */
487 
488 #if KMP_AFFINITY_SUPPORTED
489   __kmp_affinity_set_init_mask(gtid, FALSE);
490 #endif
491 
492 #ifdef KMP_CANCEL_THREADS
493   status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
494   KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
495   // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
496   status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
497   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
498 #endif
499 
500 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
501   // Set FP control regs to be a copy of the parallel initialization thread's.
502   __kmp_clear_x87_fpu_status_word();
503   __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
504   __kmp_load_mxcsr(&__kmp_init_mxcsr);
505 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
506 
507 #ifdef KMP_BLOCK_SIGNALS
508   status = sigfillset(&new_set);
509   KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
510   status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
511   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
512 #endif /* KMP_BLOCK_SIGNALS */
513 
514 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
515     KMP_OS_OPENBSD
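  // Shift this worker's stack by gtid * __kmp_stkoffset bytes with a dummy
  // alloca() so that worker stacks are staggered relative to one another;
  // the padding itself is intentionally never used.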
516   if (__kmp_stkoffset > 0 && gtid > 0) {
517     padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
518     (void)padding;
519   }
520 #endif
521 
522   KMP_MB();
523   __kmp_set_stack_info(gtid, (kmp_info_t *)thr);
524 
525   __kmp_check_stack_overlap((kmp_info_t *)thr);
526 
527   exit_val = __kmp_launch_thread((kmp_info_t *)thr);
528 
529 #ifdef KMP_BLOCK_SIGNALS
530   status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
531   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
532 #endif /* KMP_BLOCK_SIGNALS */
533 
534   return exit_val;
535 }
536 
537 #if KMP_USE_MONITOR
538 /* The monitor thread controls all of the threads in the complex */
539 
540 static void *__kmp_launch_monitor(void *thr) {
541   int status, old_type, old_state;
542 #ifdef KMP_BLOCK_SIGNALS
543   sigset_t new_set;
544 #endif /* KMP_BLOCK_SIGNALS */
545   struct timespec interval;
546 
547   KMP_MB(); /* Flush all pending memory write invalidates.  */
548 
549   KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));
550 
551   /* register us as the monitor thread */
552   __kmp_gtid_set_specific(KMP_GTID_MONITOR);
553 #ifdef KMP_TDATA_GTID
554   __kmp_gtid = KMP_GTID_MONITOR;
555 #endif
556 
557   KMP_MB();
558 
559 #if USE_ITT_BUILD
560   // Instruct Intel(R) Threading Tools to ignore monitor thread.
561   __kmp_itt_thread_ignore();
562 #endif /* USE_ITT_BUILD */
563 
564   __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
565                        (kmp_info_t *)thr);
566 
567   __kmp_check_stack_overlap((kmp_info_t *)thr);
568 
569 #ifdef KMP_CANCEL_THREADS
570   status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
571   KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
572   // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
573   status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
574   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
575 #endif
576 
577 #if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with a real-time
  // scheduling policy to work. However, a decision about the fix has not been
  // made yet, so it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
582     int sched = sched_getscheduler(0);
583     if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are part of a real-time application. Try to increase the
      // priority of the monitor.
586       struct sched_param param;
587       int max_priority = sched_get_priority_max(sched);
588       int rc;
589       KMP_WARNING(RealTimeSchedNotSupported);
590       sched_getparam(0, &param);
591       if (param.sched_priority < max_priority) {
592         param.sched_priority += 1;
593         rc = sched_setscheduler(0, sched, &param);
594         if (rc != 0) {
595           int error = errno;
596           kmp_msg_t err_code = KMP_ERR(error);
597           __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
598                     err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
599           if (__kmp_generate_warnings == kmp_warnings_off) {
600             __kmp_str_free(&err_code.str);
601           }
602         }
603       } else {
        // We cannot abort here because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
607         __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
608                   KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
609                   __kmp_msg_null);
610       }
611     }
    // AC: free the thread that waits for the monitor to start
613     TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
614   }
615 #endif // KMP_REAL_TIME_FIX
616 
617   KMP_MB(); /* Flush all pending memory write invalidates.  */
618 
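  // Convert the monitor wakeup rate (wakeups per second) into a timespec
  // interval between wakeups.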
619   if (__kmp_monitor_wakeups == 1) {
620     interval.tv_sec = 1;
621     interval.tv_nsec = 0;
622   } else {
623     interval.tv_sec = 0;
624     interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
625   }
626 
627   KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));
628 
629   while (!TCR_4(__kmp_global.g.g_done)) {
630     struct timespec now;
631     struct timeval tval;
632 
633     /*  This thread monitors the state of the system */
634 
635     KA_TRACE(15, ("__kmp_launch_monitor: update\n"));
636 
637     status = gettimeofday(&tval, NULL);
638     KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
639     TIMEVAL_TO_TIMESPEC(&tval, &now);
640 
641     now.tv_sec += interval.tv_sec;
642     now.tv_nsec += interval.tv_nsec;
643 
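    // Normalize the timespec in case tv_nsec overflowed past one second.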
644     if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
645       now.tv_sec += 1;
646       now.tv_nsec -= KMP_NSEC_PER_SEC;
647     }
648 
649     status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
650     KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
651     // AC: the monitor should not fall asleep if g_done has been set
652     if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
653       status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
654                                       &__kmp_wait_mx.m_mutex, &now);
655       if (status != 0) {
656         if (status != ETIMEDOUT && status != EINTR) {
657           KMP_SYSFAIL("pthread_cond_timedwait", status);
658         }
659       }
660     }
661     status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
662     KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
663 
664     TCW_4(__kmp_global.g.g_time.dt.t_value,
665           TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);
666 
667     KMP_MB(); /* Flush all pending memory write invalidates.  */
668   }
669 
670   KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));
671 
672 #ifdef KMP_BLOCK_SIGNALS
673   status = sigfillset(&new_set);
674   KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
675   status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
676   KMP_CHECK_SYSFAIL("pthread_sigmask", status);
677 #endif /* KMP_BLOCK_SIGNALS */
678 
679   KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));
680 
681   if (__kmp_global.g.g_abort != 0) {
682     /* now we need to terminate the worker threads  */
683     /* the value of t_abort is the signal we caught */
684 
685     int gtid;
686 
687     KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
688                   __kmp_global.g.g_abort));
689 
690     /* terminate the OpenMP worker threads */
691     /* TODO this is not valid for sibling threads!!
692      * the uber master might not be 0 anymore.. */
693     for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
694       __kmp_terminate_thread(gtid);
695 
696     __kmp_cleanup();
697 
698     KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
699                   __kmp_global.g.g_abort));
700 
701     if (__kmp_global.g.g_abort > 0)
702       raise(__kmp_global.g.g_abort);
703   }
704 
705   KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));
706 
707   return thr;
708 }
709 #endif // KMP_USE_MONITOR
710 
711 void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
712   pthread_t handle;
713   pthread_attr_t thread_attr;
714   int status;
715 
716   th->th.th_info.ds.ds_gtid = gtid;
717 
718 #if KMP_STATS_ENABLED
719   // sets up worker thread stats
720   __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);
721 
  // th->th.th_stats is used to transfer the thread-specific stats pointer to
  // __kmp_launch_worker. So when the thread is created (i.e., enters
  // __kmp_launch_worker) it will set its thread-local pointer to
  // th->th.th_stats.
726   if (!KMP_UBER_GTID(gtid)) {
727     th->th.th_stats = __kmp_stats_list->push_back(gtid);
728   } else {
729     // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
730     // so set the th->th.th_stats field to it.
731     th->th.th_stats = __kmp_stats_thread_ptr;
732   }
733   __kmp_release_tas_lock(&__kmp_stats_lock, gtid);
734 
735 #endif // KMP_STATS_ENABLED
736 
737   if (KMP_UBER_GTID(gtid)) {
738     KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
739     th->th.th_info.ds.ds_thread = pthread_self();
740     __kmp_set_stack_info(gtid, th);
741     __kmp_check_stack_overlap(th);
742     return;
743   }
744 
745   KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));
746 
747   KMP_MB(); /* Flush all pending memory write invalidates.  */
748 
749 #ifdef KMP_THREAD_ATTR
750   status = pthread_attr_init(&thread_attr);
751   if (status != 0) {
752     __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
753   }
754   status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
755   if (status != 0) {
756     __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
757   }
758 
  /* Set the stack size for this thread now.
     The multiplier of 2 is there because, on some machines, requesting an
     unusual stacksize causes the thread to have an offset before the dummy
     alloca() takes place to create the offset. Since we want the user to have
     a sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade
     offset, and also gives the user the stack space they requested for all
     threads. */
766   stack_size += gtid * __kmp_stkoffset * 2;
767 
768 #if defined(__ANDROID__) && __ANDROID_API__ < 19
769   // Round the stack size to a multiple of the page size. Older versions of
770   // Android (until KitKat) would fail pthread_attr_setstacksize with EINVAL
771   // if the stack size was not a multiple of the page size.
772   stack_size = (stack_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
773 #endif
774 
775   KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
776                 "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
777                 gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
778 
779 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
780   status = pthread_attr_setstacksize(&thread_attr, stack_size);
781 #ifdef KMP_BACKUP_STKSIZE
782   if (status != 0) {
783     if (!__kmp_env_stksize) {
784       stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
785       __kmp_stksize = KMP_BACKUP_STKSIZE;
786       KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
787                     "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
788                     "bytes\n",
789                     gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
790       status = pthread_attr_setstacksize(&thread_attr, stack_size);
791     }
792   }
793 #endif /* KMP_BACKUP_STKSIZE */
794   if (status != 0) {
795     __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
796                 KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
797   }
798 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
799 
800 #endif /* KMP_THREAD_ATTR */
801 
802   status =
803       pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
804   if (status != 0 || !handle) { // ??? Why do we check handle??
805 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
806     if (status == EINVAL) {
807       __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
808                   KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
809     }
810     if (status == ENOMEM) {
811       __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
812                   KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
813     }
814 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
815     if (status == EAGAIN) {
816       __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
817                   KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
818     }
819     KMP_SYSFAIL("pthread_create", status);
820   }
821 
822   th->th.th_info.ds.ds_thread = handle;
823 
824 #ifdef KMP_THREAD_ATTR
825   status = pthread_attr_destroy(&thread_attr);
826   if (status) {
827     kmp_msg_t err_code = KMP_ERR(status);
828     __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
829               __kmp_msg_null);
830     if (__kmp_generate_warnings == kmp_warnings_off) {
831       __kmp_str_free(&err_code.str);
832     }
833   }
834 #endif /* KMP_THREAD_ATTR */
835 
836   KMP_MB(); /* Flush all pending memory write invalidates.  */
837 
838   KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));
839 
840 } // __kmp_create_worker
841 
842 #if KMP_USE_MONITOR
843 void __kmp_create_monitor(kmp_info_t *th) {
844   pthread_t handle;
845   pthread_attr_t thread_attr;
846   size_t size;
847   int status;
848   int auto_adj_size = FALSE;
849 
850   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need a monitor thread in the case of MAX_BLOCKTIME
852     KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
853                   "MAX blocktime\n"));
854     th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
855     th->th.th_info.ds.ds_gtid = 0;
856     return;
857   }
858   KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));
859 
860   KMP_MB(); /* Flush all pending memory write invalidates.  */
861 
862   th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
863   th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
864 #if KMP_REAL_TIME_FIX
865   TCW_4(__kmp_global.g.g_time.dt.t_value,
866         -1); // Will use it for synchronization a bit later.
867 #else
868   TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
869 #endif // KMP_REAL_TIME_FIX
870 
871 #ifdef KMP_THREAD_ATTR
872   if (__kmp_monitor_stksize == 0) {
873     __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
874     auto_adj_size = TRUE;
875   }
876   status = pthread_attr_init(&thread_attr);
877   if (status != 0) {
878     __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
879   }
880   status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
881   if (status != 0) {
882     __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
883   }
884 
885 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
886   status = pthread_attr_getstacksize(&thread_attr, &size);
887   KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
888 #else
889   size = __kmp_sys_min_stksize;
890 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
891 #endif /* KMP_THREAD_ATTR */
892 
893   if (__kmp_monitor_stksize == 0) {
894     __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
895   }
896   if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
897     __kmp_monitor_stksize = __kmp_sys_min_stksize;
898   }
899 
  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));
903 
904 retry:
905 
906 /* Set stack size for this thread now. */
907 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes\n",
                __kmp_monitor_stksize));
910   status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
911   if (status != 0) {
912     if (auto_adj_size) {
913       __kmp_monitor_stksize *= 2;
914       goto retry;
915     }
916     kmp_msg_t err_code = KMP_ERR(status);
917     __kmp_msg(kmp_ms_warning, // should this be fatal?  BB
918               KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
919               err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
920     if (__kmp_generate_warnings == kmp_warnings_off) {
921       __kmp_str_free(&err_code.str);
922     }
923   }
924 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
925 
926   status =
927       pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);
928 
929   if (status != 0) {
930 #ifdef _POSIX_THREAD_ATTR_STACKSIZE
931     if (status == EINVAL) {
932       if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
933         __kmp_monitor_stksize *= 2;
934         goto retry;
935       }
936       __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
937                   KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
938                   __kmp_msg_null);
939     }
940     if (status == ENOMEM) {
941       __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
942                   KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
943                   __kmp_msg_null);
944     }
945 #endif /* _POSIX_THREAD_ATTR_STACKSIZE */
946     if (status == EAGAIN) {
947       __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
948                   KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
949     }
950     KMP_SYSFAIL("pthread_create", status);
951   }
952 
953   th->th.th_info.ds.ds_thread = handle;
954 
955 #if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its *priority*.
957   KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
958                    sizeof(__kmp_global.g.g_time.dt.t_value));
959   __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
960                &__kmp_neq_4, NULL);
961 #endif // KMP_REAL_TIME_FIX
962 
963 #ifdef KMP_THREAD_ATTR
964   status = pthread_attr_destroy(&thread_attr);
965   if (status != 0) {
966     kmp_msg_t err_code = KMP_ERR(status);
967     __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
968               __kmp_msg_null);
969     if (__kmp_generate_warnings == kmp_warnings_off) {
970       __kmp_str_free(&err_code.str);
971     }
972   }
973 #endif
974 
975   KMP_MB(); /* Flush all pending memory write invalidates.  */
976 
977   KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
978                 th->th.th_info.ds.ds_thread));
979 
980 } // __kmp_create_monitor
981 #endif // KMP_USE_MONITOR
982 
983 void __kmp_exit_thread(int exit_status) {
984   pthread_exit((void *)(intptr_t)exit_status);
985 } // __kmp_exit_thread
986 
987 #if KMP_USE_MONITOR
988 void __kmp_resume_monitor();
989 
990 void __kmp_reap_monitor(kmp_info_t *th) {
991   int status;
992   void *exit_val;
993 
994   KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
995                 " %#.8lx\n",
996                 th->th.th_info.ds.ds_thread));
997 
998   // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
999   // If both tid and gtid are 0, it means the monitor did not ever start.
1000   // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
1001   KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
1002   if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
1003     KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
1004     return;
1005   }
1006 
1007   KMP_MB(); /* Flush all pending memory write invalidates.  */
1008 
  /* First, check to see whether the monitor thread exists in order to wake it
     up. This is to avoid a performance problem when the monitor sleeps during
     a blocktime-sized interval. */
1012 
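  // pthread_kill with signal 0 performs no action; it only checks whether the
  // monitor thread still exists.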
1013   status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
1014   if (status != ESRCH) {
1015     __kmp_resume_monitor(); // Wake up the monitor thread
1016   }
1017   KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
1018   status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1019   if (exit_val != th) {
1020     __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
1021   }
1022 
1023   th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
1024   th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;
1025 
1026   KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
1027                 " %#.8lx\n",
1028                 th->th.th_info.ds.ds_thread));
1029 
1030   KMP_MB(); /* Flush all pending memory write invalidates.  */
1031 }
1032 #endif // KMP_USE_MONITOR
1033 
1034 void __kmp_reap_worker(kmp_info_t *th) {
1035   int status;
1036   void *exit_val;
1037 
1038   KMP_MB(); /* Flush all pending memory write invalidates.  */
1039 
1040   KA_TRACE(
1041       10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));
1042 
1043   status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
1044 #ifdef KMP_DEBUG
1045   /* Don't expose these to the user until we understand when they trigger */
1046   if (status != 0) {
1047     __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
1048   }
1049   if (exit_val != th) {
1050     KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
1051                   "exit_val = %p\n",
1052                   th->th.th_info.ds.ds_gtid, exit_val));
1053   }
1054 #else
1055   (void)status; // unused variable
1056 #endif /* KMP_DEBUG */
1057 
1058   KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
1059                 th->th.th_info.ds.ds_gtid));
1060 
1061   KMP_MB(); /* Flush all pending memory write invalidates.  */
1062 }
1063 
1064 #if KMP_HANDLE_SIGNALS
1065 
1066 static void __kmp_null_handler(int signo) {
1067   //  Do nothing, for doing SIG_IGN-type actions.
1068 } // __kmp_null_handler
1069 
1070 static void __kmp_team_handler(int signo) {
1071   if (__kmp_global.g.g_abort == 0) {
1072 /* Stage 1 signal handler, let's shut down all of the threads */
1073 #ifdef KMP_DEBUG
1074     __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
1075 #endif
1076     switch (signo) {
1077     case SIGHUP:
1078     case SIGINT:
1079     case SIGQUIT:
1080     case SIGILL:
1081     case SIGABRT:
1082     case SIGFPE:
1083     case SIGBUS:
1084     case SIGSEGV:
1085 #ifdef SIGSYS
1086     case SIGSYS:
1087 #endif
1088     case SIGTERM:
1089       if (__kmp_debug_buf) {
1090         __kmp_dump_debug_buffer();
1091       }
1092       __kmp_unregister_library(); // cleanup shared memory
1093       KMP_MB(); // Flush all pending memory write invalidates.
1094       TCW_4(__kmp_global.g.g_abort, signo);
1095       KMP_MB(); // Flush all pending memory write invalidates.
1096       TCW_4(__kmp_global.g.g_done, TRUE);
1097       KMP_MB(); // Flush all pending memory write invalidates.
1098       break;
1099     default:
1100 #ifdef KMP_DEBUG
1101       __kmp_debug_printf("__kmp_team_handler: unknown signal type");
1102 #endif
1103       break;
1104     }
1105   }
1106 } // __kmp_team_handler
1107 
1108 static void __kmp_sigaction(int signum, const struct sigaction *act,
1109                             struct sigaction *oldact) {
1110   int rc = sigaction(signum, act, oldact);
1111   KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
1112 }
1113 
1114 static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
1115                                       int parallel_init) {
1116   KMP_MB(); // Flush all pending memory write invalidates.
1117   KB_TRACE(60,
1118            ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
1119   if (parallel_init) {
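    // Install our handler, then check whether the handler it replaced still
    // matches the one saved during the non-parallel_init pass; if not, the
    // user installed their own handler, so restore it.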
1120     struct sigaction new_action;
1121     struct sigaction old_action;
1122     new_action.sa_handler = handler_func;
1123     new_action.sa_flags = 0;
1124     sigfillset(&new_action.sa_mask);
1125     __kmp_sigaction(sig, &new_action, &old_action);
1126     if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
1127       sigaddset(&__kmp_sigset, sig);
1128     } else {
1129       // Restore/keep user's handler if one previously installed.
1130       __kmp_sigaction(sig, &old_action, NULL);
1131     }
1132   } else {
1133     // Save initial/system signal handlers to see if user handlers installed.
1134     __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
1135   }
1136   KMP_MB(); // Flush all pending memory write invalidates.
1137 } // __kmp_install_one_handler
1138 
1139 static void __kmp_remove_one_handler(int sig) {
1140   KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
1141   if (sigismember(&__kmp_sigset, sig)) {
1142     struct sigaction old;
1143     KMP_MB(); // Flush all pending memory write invalidates.
1144     __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
1145     if ((old.sa_handler != __kmp_team_handler) &&
1146         (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
1148       KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
1149                     "restoring: sig=%d\n",
1150                     sig));
1151       __kmp_sigaction(sig, &old, NULL);
1152     }
1153     sigdelset(&__kmp_sigset, sig);
1154     KMP_MB(); // Flush all pending memory write invalidates.
1155   }
1156 } // __kmp_remove_one_handler
1157 
1158 void __kmp_install_signals(int parallel_init) {
1159   KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
1160   if (__kmp_handle_signals || !parallel_init) {
    // If !parallel_init, we do not install handlers, just save the original
    // handlers. Let us do it even if __kmp_handle_signals is 0.
1163     sigemptyset(&__kmp_sigset);
1164     __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
1165     __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
1166     __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
1167     __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
1168     __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
1169     __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
1170     __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
1171     __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
1172 #ifdef SIGSYS
1173     __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
1174 #endif // SIGSYS
1175     __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
1176 #ifdef SIGPIPE
1177     __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
1178 #endif // SIGPIPE
1179   }
1180 } // __kmp_install_signals
1181 
1182 void __kmp_remove_signals(void) {
1183   int sig;
1184   KB_TRACE(10, ("__kmp_remove_signals()\n"));
1185   for (sig = 1; sig < NSIG; ++sig) {
1186     __kmp_remove_one_handler(sig);
1187   }
1188 } // __kmp_remove_signals
1189 
1190 #endif // KMP_HANDLE_SIGNALS
1191 
1192 void __kmp_enable(int new_state) {
1193 #ifdef KMP_CANCEL_THREADS
1194   int status, old_state;
1195   status = pthread_setcancelstate(new_state, &old_state);
1196   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1197   KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
1198 #endif
1199 }
1200 
1201 void __kmp_disable(int *old_state) {
1202 #ifdef KMP_CANCEL_THREADS
1203   int status;
1204   status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
1205   KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
1206 #endif
1207 }
1208 
1209 static void __kmp_atfork_prepare(void) {
1210   __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
1211   __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1212 }
1213 
1214 static void __kmp_atfork_parent(void) {
1215   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1216   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1217 }
1218 
1219 /* Reset the library so execution in the child starts "all over again" with
1220    clean data structures in initial states.  Don't worry about freeing memory
1221    allocated by parent, just abandon it to be safe. */
1222 static void __kmp_atfork_child(void) {
1223   __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1224   __kmp_release_bootstrap_lock(&__kmp_initz_lock);
1225   /* TODO make sure this is done right for nested/sibling */
1226   // ATT:  Memory leaks are here? TODO: Check it and fix.
1227   /* KMP_ASSERT( 0 ); */
1228 
1229   ++__kmp_fork_count;
1230 
1231 #if KMP_AFFINITY_SUPPORTED
1232 #if KMP_OS_LINUX || KMP_OS_FREEBSD
1233   // reset the affinity in the child to the initial thread
1234   // affinity in the parent
1235   kmp_set_thread_affinity_mask_initial();
1236 #endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
1240   __kmp_affinity_type = affinity_none;
1241   if (__kmp_nested_proc_bind.bind_types != NULL) {
1242     __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
1243   }
1244   __kmp_affinity_masks = NULL;
1245   __kmp_affinity_num_masks = 0;
1246 #endif // KMP_AFFINITY_SUPPORTED
1247 
1248 #if KMP_USE_MONITOR
1249   __kmp_init_monitor = 0;
1250 #endif
1251   __kmp_init_parallel = FALSE;
1252   __kmp_init_middle = FALSE;
1253   __kmp_init_serial = FALSE;
1254   TCW_4(__kmp_init_gtid, FALSE);
1255   __kmp_init_common = FALSE;
1256 
1257   TCW_4(__kmp_init_user_locks, FALSE);
1258 #if !KMP_USE_DYNAMIC_LOCK
1259   __kmp_user_lock_table.used = 1;
1260   __kmp_user_lock_table.allocated = 0;
1261   __kmp_user_lock_table.table = NULL;
1262   __kmp_lock_blocks = NULL;
1263 #endif
1264 
1265   __kmp_all_nth = 0;
1266   TCW_4(__kmp_nth, 0);
1267 
1268   __kmp_thread_pool = NULL;
1269   __kmp_thread_pool_insert_pt = NULL;
1270   __kmp_team_pool = NULL;
1271 
1272   /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
1273      here so threadprivate doesn't use stale data */
1274   KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
1275                 __kmp_threadpriv_cache_list));
1276 
1277   while (__kmp_threadpriv_cache_list != NULL) {
1278 
1279     if (*__kmp_threadpriv_cache_list->addr != NULL) {
1280       KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
1281                     &(*__kmp_threadpriv_cache_list->addr)));
1282 
1283       *__kmp_threadpriv_cache_list->addr = NULL;
1284     }
1285     __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
1286   }
1287 
1288   __kmp_init_runtime = FALSE;
1289 
1290   /* reset statically initialized locks */
1291   __kmp_init_bootstrap_lock(&__kmp_initz_lock);
1292   __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
1293   __kmp_init_bootstrap_lock(&__kmp_console_lock);
1294   __kmp_init_bootstrap_lock(&__kmp_task_team_lock);
1295 
1296 #if USE_ITT_BUILD
1297   __kmp_itt_reset(); // reset ITT's global state
1298 #endif /* USE_ITT_BUILD */
1299 
1300   __kmp_serial_initialize();
1301 
1302   /* This is necessary to make sure no stale data is left around */
1303   /* AC: customers complain that we use unsafe routines in the atfork
1304      handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen
     in dynamic_link when checking the presence of the shared tbbmalloc
     library. The suggestion is to make the library initialization lazier,
     similar to what is done for __kmpc_begin(). */
1308   // TODO: synchronize all static initializations with regular library
1309   //       startup; look at kmp_global.cpp and etc.
1310   //__kmp_internal_begin ();
1311 }
1312 
1313 void __kmp_register_atfork(void) {
1314   if (__kmp_need_register_atfork) {
1315     int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
1316                                 __kmp_atfork_child);
1317     KMP_CHECK_SYSFAIL("pthread_atfork", status);
1318     __kmp_need_register_atfork = FALSE;
1319   }
1320 }
1321 
1322 void __kmp_suspend_initialize(void) {
1323   int status;
1324   status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
1325   KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1326   status = pthread_condattr_init(&__kmp_suspend_cond_attr);
1327   KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1328 }
1329 
1330 void __kmp_suspend_initialize_thread(kmp_info_t *th) {
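  // th_suspend_init_count encodes the state of the suspend cv/mutex for this
  // process instance: __kmp_fork_count + 1 means initialized, and -1 means
  // another thread is currently performing the initialization.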
1331   int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
1332   int new_value = __kmp_fork_count + 1;
1333   // Return if already initialized
1334   if (old_value == new_value)
1335     return;
1336   // Wait, then return if being initialized
1337   if (old_value == -1 || !__kmp_atomic_compare_store(
1338                              &th->th.th_suspend_init_count, old_value, -1)) {
1339     while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
1340       KMP_CPU_PAUSE();
1341     }
1342   } else {
1343     // Claim to be the initializer and do initializations
1344     int status;
1345     status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
1346                                &__kmp_suspend_cond_attr);
1347     KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1348     status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
1349                                 &__kmp_suspend_mutex_attr);
1350     KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1351     KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
1352   }
1353 }
1354 
1355 void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
1356   if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
1359     int status;
1360 
1361     status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
1362     if (status != 0 && status != EBUSY) {
1363       KMP_SYSFAIL("pthread_cond_destroy", status);
1364     }
1365     status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
1366     if (status != 0 && status != EBUSY) {
1367       KMP_SYSFAIL("pthread_mutex_destroy", status);
1368     }
1369     --th->th.th_suspend_init_count;
1370     KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
1371                      __kmp_fork_count);
1372   }
1373 }
1374 
1375 // return true if lock obtained, false otherwise
1376 int __kmp_try_suspend_mx(kmp_info_t *th) {
1377   return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
1378 }
1379 
1380 void __kmp_lock_suspend_mx(kmp_info_t *th) {
1381   int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
1382   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1383 }
1384 
1385 void __kmp_unlock_suspend_mx(kmp_info_t *th) {
1386   int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
1387   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1388 }
1389 
1390 /* This routine puts the calling thread to sleep after setting the
1391    sleep bit for the indicated flag variable to true. */
1392 template <class C>
1393 static inline void __kmp_suspend_template(int th_gtid, C *flag) {
1394   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
1395   kmp_info_t *th = __kmp_threads[th_gtid];
1396   int status;
1397   typename C::flag_t old_spin;
1398 
1399   KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
1400                 flag->get()));
1401 
1402   __kmp_suspend_initialize_thread(th);
1403 
1404   __kmp_lock_suspend_mx(th);
1405 
1406   KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
1407                 th_gtid, flag->get()));
1408 
1409   /* TODO: shouldn't this use release semantics to ensure that
1410      __kmp_suspend_initialize_thread gets called first? */
1411   old_spin = flag->set_sleeping();
1412   TCW_PTR(th->th.th_sleep_loc, (void *)flag);
1413   th->th.th_sleep_loc_type = flag->get_type();
1414   if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
1415       __kmp_pause_status != kmp_soft_paused) {
1416     flag->unset_sleeping();
1417     TCW_PTR(th->th.th_sleep_loc, NULL);
1418     th->th.th_sleep_loc_type = flag_unset;
1419     __kmp_unlock_suspend_mx(th);
1420     return;
1421   }
1422   KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
1423                " was %x\n",
1424                th_gtid, flag->get(), flag->load(), old_spin));
1425 
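  // The wait condition may already have been satisfied between setting the
  // sleep bit and now; if so, reset the sleep bit instead of sleeping.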
1426   if (flag->done_check_val(old_spin) || flag->done_check()) {
1427     flag->unset_sleeping();
1428     TCW_PTR(th->th.th_sleep_loc, NULL);
1429     th->th.th_sleep_loc_type = flag_unset;
1430     KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
1431                  "for spin(%p)\n",
1432                  th_gtid, flag->get()));
1433   } else {
1434     /* Encapsulate in a loop as the documentation states that this may
1435        "with low probability" return when the condition variable has
1436        not been signaled or broadcast */
1437     int deactivated = FALSE;
1438 
1439     while (flag->is_sleeping()) {
1440 #ifdef DEBUG_SUSPEND
1441       char buffer[128];
1442       __kmp_suspend_count++;
1443       __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1444       __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
1445                    buffer);
1446 #endif
1447       // Mark the thread as no longer active (only in the first iteration of the
1448       // loop).
1449       if (!deactivated) {
1450         th->th.th_active = FALSE;
1451         if (th->th.th_active_in_pool) {
1452           th->th.th_active_in_pool = FALSE;
1453           KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
1454           KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
1455         }
1456         deactivated = TRUE;
1457       }
1458 
1459       KMP_DEBUG_ASSERT(th->th.th_sleep_loc);
1460       KMP_DEBUG_ASSERT(flag->get_type() == th->th.th_sleep_loc_type);
1461 
1462 #if USE_SUSPEND_TIMEOUT
1463       struct timespec now;
1464       struct timeval tval;
1465       int msecs;
1466 
1467       status = gettimeofday(&tval, NULL);
1468       KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1469       TIMEVAL_TO_TIMESPEC(&tval, &now);
1470 
      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000000; // milliseconds to nanoseconds
      if (now.tv_nsec >= KMP_NSEC_PER_SEC) { // normalize the timespec
        now.tv_sec += 1;
        now.tv_nsec -= KMP_NSEC_PER_SEC;
      }
1474 
1475       KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
1476                     "pthread_cond_timedwait\n",
1477                     th_gtid));
1478       status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
1479                                       &th->th.th_suspend_mx.m_mutex, &now);
1480 #else
1481       KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
1482                     " pthread_cond_wait\n",
1483                     th_gtid));
1484       status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
1485                                  &th->th.th_suspend_mx.m_mutex);
1486 #endif // USE_SUSPEND_TIMEOUT
1487 
1488       if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
1489         KMP_SYSFAIL("pthread_cond_wait", status);
1490       }
1491 
1492       KMP_DEBUG_ASSERT(flag->get_type() == flag->get_ptr_type());
1493 
1494       if (!flag->is_sleeping() &&
1495           ((status == EINTR) || (status == ETIMEDOUT))) {
        // If interrupted or timed out while the thread is no longer sleeping,
        // we need to make sure sleep_loc gets reset; however, this shouldn't
        // be needed if we woke up via resume.
1499         flag->unset_sleeping();
1500         TCW_PTR(th->th.th_sleep_loc, NULL);
1501         th->th.th_sleep_loc_type = flag_unset;
1502       }
1503 #ifdef KMP_DEBUG
1504       if (status == ETIMEDOUT) {
1505         if (flag->is_sleeping()) {
1506           KF_TRACE(100,
1507                    ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
1508         } else {
1509           KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
1510                        "not set!\n",
1511                        th_gtid));
1512           TCW_PTR(th->th.th_sleep_loc, NULL);
1513           th->th.th_sleep_loc_type = flag_unset;
1514         }
1515       } else if (flag->is_sleeping()) {
1516         KF_TRACE(100,
1517                  ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
1518       }
1519 #endif
1520     } // while
1521 
    // Mark the thread as active again (if it was previously marked as inactive)
1523     if (deactivated) {
1524       th->th.th_active = TRUE;
1525       if (TCR_4(th->th.th_in_pool)) {
1526         KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
1527         th->th.th_active_in_pool = TRUE;
1528       }
1529     }
1530   }
  // The sleep location may have been set before we entered the loop body, so
  // we need to reset sleep_loc here.
1533   TCW_PTR(th->th.th_sleep_loc, NULL);
1534   th->th.th_sleep_loc_type = flag_unset;
1535 
1536   KMP_DEBUG_ASSERT(!flag->is_sleeping());
1537   KMP_DEBUG_ASSERT(!th->th.th_sleep_loc);
1538 #ifdef DEBUG_SUSPEND
1539   {
1540     char buffer[128];
1541     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1542     __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
1543                  buffer);
1544   }
1545 #endif
1546 
1547   __kmp_unlock_suspend_mx(th);
1548   KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
1549 }
1550 
1551 template <bool C, bool S>
1552 void __kmp_suspend_32(int th_gtid, kmp_flag_32<C, S> *flag) {
1553   __kmp_suspend_template(th_gtid, flag);
1554 }
1555 template <bool C, bool S>
1556 void __kmp_suspend_64(int th_gtid, kmp_flag_64<C, S> *flag) {
1557   __kmp_suspend_template(th_gtid, flag);
1558 }
1559 template <bool C, bool S>
1560 void __kmp_atomic_suspend_64(int th_gtid, kmp_atomic_flag_64<C, S> *flag) {
1561   __kmp_suspend_template(th_gtid, flag);
1562 }
1563 void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
1564   __kmp_suspend_template(th_gtid, flag);
1565 }
1566 
1567 template void __kmp_suspend_32<false, false>(int, kmp_flag_32<false, false> *);
1568 template void __kmp_suspend_64<false, true>(int, kmp_flag_64<false, true> *);
1569 template void __kmp_suspend_64<true, false>(int, kmp_flag_64<true, false> *);
1570 template void
1571 __kmp_atomic_suspend_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1572 template void
1573 __kmp_atomic_suspend_64<true, false>(int, kmp_atomic_flag_64<true, false> *);
1574 
1575 /* This routine signals the thread specified by target_gtid to wake up
1576    after setting the sleep bit indicated by the flag argument to FALSE.
1577    The target thread must already have called __kmp_suspend_template() */
1578 template <class C>
1579 static inline void __kmp_resume_template(int target_gtid, C *flag) {
1580   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1581   kmp_info_t *th = __kmp_threads[target_gtid];
1582   int status;
1583 
1584 #ifdef KMP_DEBUG
1585   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1586 #endif
1587 
1588   KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
1589                 gtid, target_gtid));
1590   KMP_DEBUG_ASSERT(gtid != target_gtid);
1591 
1592   __kmp_suspend_initialize_thread(th);
1593 
1594   __kmp_lock_suspend_mx(th);
1595 
1596   if (!flag || flag != th->th.th_sleep_loc) {
1597     // coming from __kmp_null_resume_wrapper, or thread is now sleeping on a
1598     // different location; wake up at new location
1599     flag = (C *)CCAST(void *, th->th.th_sleep_loc);
1600   }
1601 
1602   // First, check if the flag is null or its type has changed. If so, someone
1603   // else woke it up.
1604   if (!flag) { // Thread doesn't appear to be sleeping on anything
1605     KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1606                  "awake: flag(%p)\n",
1607                  gtid, target_gtid, (void *)NULL));
1608     __kmp_unlock_suspend_mx(th);
1609     return;
1610   } else if (flag->get_type() != th->th.th_sleep_loc_type) {
1611     // Flag type does not appear to match this function template; possibly the
1612     // thread is sleeping on something else. Try null resume again.
1613     KF_TRACE(
1614         5,
1615         ("__kmp_resume_template: T#%d retrying, thread T#%d Mismatch flag(%p), "
1616          "spin(%p) type=%d ptr_type=%d\n",
1617          gtid, target_gtid, flag, flag->get(), flag->get_type(),
1618          th->th.th_sleep_loc_type));
1619     __kmp_unlock_suspend_mx(th);
1620     __kmp_null_resume_wrapper(th);
1621     return;
1622   } else { // if multiple threads are sleeping, flag should be internally
1623     // referring to a specific thread here
1624     if (!flag->is_sleeping()) {
1625       KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
1626                    "awake: flag(%p): %u\n",
1627                    gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1628       __kmp_unlock_suspend_mx(th);
1629       return;
1630     }
1631   }
1632   KMP_DEBUG_ASSERT(flag);
1633   flag->unset_sleeping();
1634   TCW_PTR(th->th.th_sleep_loc, NULL);
1635   th->th.th_sleep_loc_type = flag_unset;
1636 
1637   KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
1638                "sleep bit for flag's loc(%p): %u\n",
1639                gtid, target_gtid, flag->get(), (unsigned int)flag->load()));
1640 
1641 #ifdef DEBUG_SUSPEND
1642   {
1643     char buffer[128];
1644     __kmp_print_cond(buffer, &th->th.th_suspend_cv);
1645     __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
1646                  target_gtid, buffer);
1647   }
1648 #endif
1649   status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
1650   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1651   __kmp_unlock_suspend_mx(th);
1652   KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
1653                 " for T#%d\n",
1654                 gtid, target_gtid));
1655 }
1656 
1657 template <bool C, bool S>
1658 void __kmp_resume_32(int target_gtid, kmp_flag_32<C, S> *flag) {
1659   __kmp_resume_template(target_gtid, flag);
1660 }
1661 template <bool C, bool S>
1662 void __kmp_resume_64(int target_gtid, kmp_flag_64<C, S> *flag) {
1663   __kmp_resume_template(target_gtid, flag);
1664 }
1665 template <bool C, bool S>
1666 void __kmp_atomic_resume_64(int target_gtid, kmp_atomic_flag_64<C, S> *flag) {
1667   __kmp_resume_template(target_gtid, flag);
1668 }
1669 void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
1670   __kmp_resume_template(target_gtid, flag);
1671 }
1672 
1673 template void __kmp_resume_32<false, true>(int, kmp_flag_32<false, true> *);
1674 template void __kmp_resume_32<false, false>(int, kmp_flag_32<false, false> *);
1675 template void __kmp_resume_64<false, true>(int, kmp_flag_64<false, true> *);
1676 template void
1677 __kmp_atomic_resume_64<false, true>(int, kmp_atomic_flag_64<false, true> *);
1678 
1679 #if KMP_USE_MONITOR
1680 void __kmp_resume_monitor() {
1681   KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
1682   int status;
1683 #ifdef KMP_DEBUG
1684   int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
1685   KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
1686                 KMP_GTID_MONITOR));
1687   KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
1688 #endif
1689   status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
1690   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
1691 #ifdef DEBUG_SUSPEND
1692   {
1693     char buffer[128];
1694     __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
1695     __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
1696                  KMP_GTID_MONITOR, buffer);
1697   }
1698 #endif
1699   status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
1700   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
1701   status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
1702   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
1703   KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
1704                 " for T#%d\n",
1705                 gtid, KMP_GTID_MONITOR));
1706 }
1707 #endif // KMP_USE_MONITOR
1708 
1709 void __kmp_yield() { sched_yield(); }
1710 
1711 void __kmp_gtid_set_specific(int gtid) {
1712   if (__kmp_init_gtid) {
1713     int status;
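    // Store gtid + 1 rather than gtid so that a pthread_getspecific() result
    // of 0 (the value of an unset key) can be distinguished from gtid 0;
    // __kmp_gtid_get_specific() undoes this offset.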
1714     status = pthread_setspecific(__kmp_gtid_threadprivate_key,
1715                                  (void *)(intptr_t)(gtid + 1));
1716     KMP_CHECK_SYSFAIL("pthread_setspecific", status);
1717   } else {
1718     KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
1719   }
1720 }
1721 
1722 int __kmp_gtid_get_specific() {
1723   int gtid;
1724   if (!__kmp_init_gtid) {
1725     KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
1726                   "KMP_GTID_SHUTDOWN\n"));
1727     return KMP_GTID_SHUTDOWN;
1728   }
1729   gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
1730   if (gtid == 0) {
1731     gtid = KMP_GTID_DNE;
1732   } else {
1733     gtid--;
1734   }
1735   KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
1736                 __kmp_gtid_threadprivate_key, gtid));
1737   return gtid;
1738 }
1739 
1740 double __kmp_read_cpu_time(void) {
1741   /*clock_t   t;*/
1742   struct tms buffer;
1743 
1744   /*t =*/times(&buffer);
1745 
1746   return (double)(buffer.tms_utime + buffer.tms_cutime) /
1747          (double)CLOCKS_PER_SEC;
1748 }
1749 
1750 int __kmp_read_system_info(struct kmp_sys_info *info) {
1751   int status;
1752   struct rusage r_usage;
1753 
1754   memset(info, 0, sizeof(*info));
1755 
1756   status = getrusage(RUSAGE_SELF, &r_usage);
1757   KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);
1758 
1759   // The maximum resident set size utilized (in kilobytes)
1760   info->maxrss = r_usage.ru_maxrss;
1761   // The number of page faults serviced without any I/O
1762   info->minflt = r_usage.ru_minflt;
1763   // The number of page faults serviced that required I/O
1764   info->majflt = r_usage.ru_majflt;
1765   // The number of times a process was "swapped" out of memory
1766   info->nswap = r_usage.ru_nswap;
1767   // The number of times the file system had to perform input
1768   info->inblock = r_usage.ru_inblock;
1769   // The number of times the file system had to perform output
1770   info->oublock = r_usage.ru_oublock;
  // The number of voluntary context switches
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of involuntary (forced) context switches
  info->nivcsw = r_usage.ru_nivcsw;
1775 
1776   return (status != 0);
1777 }
1778 
1779 void __kmp_read_system_time(double *delta) {
1780   double t_ns;
1781   struct timeval tval;
1782   struct timespec stop;
1783   int status;
1784 
1785   status = gettimeofday(&tval, NULL);
1786   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1787   TIMEVAL_TO_TIMESPEC(&tval, &stop);
1788   t_ns = (double)(TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start));
1789   *delta = (t_ns * 1e-9);
1790 }
1791 
1792 void __kmp_clear_system_time(void) {
1793   struct timeval tval;
1794   int status;
1795   status = gettimeofday(&tval, NULL);
1796   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1797   TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
1798 }
1799 
1800 static int __kmp_get_xproc(void) {
1801 
1802   int r = 0;
1803 
1804 #if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
1805     KMP_OS_OPENBSD || KMP_OS_HURD
1806 
1807   __kmp_type_convert(sysconf(_SC_NPROCESSORS_ONLN), &(r));
1808 
1809 #elif KMP_OS_DARWIN
1810 
1811   // Bug C77011 High "OpenMP Threads and number of active cores".
1812 
1813   // Find the number of available CPUs.
1814   kern_return_t rc;
1815   host_basic_info_data_t info;
1816   mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
1817   rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
1818   if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
1819     // Cannot use KA_TRACE() here because this code works before trace support
1820     // is initialized.
1821     r = info.avail_cpus;
1822   } else {
1823     KMP_WARNING(CantGetNumAvailCPU);
1824     KMP_INFORM(AssumedNumCPU);
1825   }
1826 
1827 #else
1828 
1829 #error "Unknown or unsupported OS."
1830 
1831 #endif
1832 
1833   return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
1834 
1835 } // __kmp_get_xproc
1836 
1837 int __kmp_read_from_file(char const *path, char const *format, ...) {
1838   int result;
1839   va_list args;
1840 
  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args);
    return 0;
  }
  result = vfscanf(f, format, args);
  va_end(args);
  fclose(f);
1847 
1848   return result;
1849 }
1850 
1851 void __kmp_runtime_initialize(void) {
1852   int status;
1853   pthread_mutexattr_t mutex_attr;
1854   pthread_condattr_t cond_attr;
1855 
1856   if (__kmp_init_runtime) {
1857     return;
1858   }
1859 
1860 #if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
1861   if (!__kmp_cpuinfo.initialized) {
1862     __kmp_query_cpuid(&__kmp_cpuinfo);
1863   }
1864 #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */
1865 
1866   __kmp_xproc = __kmp_get_xproc();
1867 
1868 #if !KMP_32_BIT_ARCH
1869   struct rlimit rlim;
1870   // read stack size of calling thread, save it as default for worker threads;
1871   // this should be done before reading environment variables
1872   status = getrlimit(RLIMIT_STACK, &rlim);
1873   if (status == 0) { // success?
1874     __kmp_stksize = rlim.rlim_cur;
1875     __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
1876   }
1877 #endif /* KMP_32_BIT_ARCH */
1878 
1879   if (sysconf(_SC_THREADS)) {
1880 
1881     /* Query the maximum number of threads */
1882     __kmp_type_convert(sysconf(_SC_THREAD_THREADS_MAX), &(__kmp_sys_max_nth));
1883     if (__kmp_sys_max_nth == -1) {
1884       /* Unlimited threads for NPTL */
1885       __kmp_sys_max_nth = INT_MAX;
1886     } else if (__kmp_sys_max_nth <= 1) {
1887       /* Can't tell, just use PTHREAD_THREADS_MAX */
1888       __kmp_sys_max_nth = KMP_MAX_NTH;
1889     }
1890 
1891     /* Query the minimum stack size */
1892     __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
1893     if (__kmp_sys_min_stksize <= 1) {
1894       __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
1895     }
1896   }
1897 
1898   /* Set up minimum number of threads to switch to TLS gtid */
1899   __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;
1900 
1901   status = pthread_key_create(&__kmp_gtid_threadprivate_key,
1902                               __kmp_internal_end_dest);
1903   KMP_CHECK_SYSFAIL("pthread_key_create", status);
1904   status = pthread_mutexattr_init(&mutex_attr);
1905   KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
1906   status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
1907   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
1908   status = pthread_mutexattr_destroy(&mutex_attr);
1909   KMP_CHECK_SYSFAIL("pthread_mutexattr_destroy", status);
1910   status = pthread_condattr_init(&cond_attr);
1911   KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
1912   status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
1913   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
1914   status = pthread_condattr_destroy(&cond_attr);
1915   KMP_CHECK_SYSFAIL("pthread_condattr_destroy", status);
1916 #if USE_ITT_BUILD
1917   __kmp_itt_initialize();
1918 #endif /* USE_ITT_BUILD */
1919 
1920   __kmp_init_runtime = TRUE;
1921 }
1922 
1923 void __kmp_runtime_destroy(void) {
1924   int status;
1925 
1926   if (!__kmp_init_runtime) {
1927     return; // Nothing to do.
1928   }
1929 
1930 #if USE_ITT_BUILD
1931   __kmp_itt_destroy();
1932 #endif /* USE_ITT_BUILD */
1933 
1934   status = pthread_key_delete(__kmp_gtid_threadprivate_key);
1935   KMP_CHECK_SYSFAIL("pthread_key_delete", status);
1936 
1937   status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
1938   if (status != 0 && status != EBUSY) {
1939     KMP_SYSFAIL("pthread_mutex_destroy", status);
1940   }
1941   status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
1942   if (status != 0 && status != EBUSY) {
1943     KMP_SYSFAIL("pthread_cond_destroy", status);
1944   }
1945 #if KMP_AFFINITY_SUPPORTED
1946   __kmp_affinity_uninitialize();
1947 #endif
1948 
1949   __kmp_init_runtime = FALSE;
1950 }
1951 
1952 /* Put the thread to sleep for a time period */
1953 /* NOTE: not currently used anywhere */
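/* (millis + 500) / 1000 rounds to the nearest whole second, since sleep()
   only has one-second resolution. */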
1954 void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }
1955 
1956 /* Calculate the elapsed wall clock time for the user */
1957 void __kmp_elapsed(double *t) {
1958   int status;
1959 #ifdef FIX_SGI_CLOCK
1960   struct timespec ts;
1961 
1962   status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
1963   KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
1964   *t =
1965       (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
1966 #else
1967   struct timeval tv;
1968 
1969   status = gettimeofday(&tv, NULL);
1970   KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
1971   *t =
1972       (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
1973 #endif
1974 }
1975 
1976 /* Calculate the elapsed wall clock tick for the user */
1977 void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
1978 
1979 /* Return the current time stamp in nsec */
1980 kmp_uint64 __kmp_now_nsec() {
1981   struct timeval t;
1982   gettimeofday(&t, NULL);
1983   kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
1984                     (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
1985   return nsec;
1986 }
1987 
1988 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1989 /* Measure clock ticks per millisecond */
1990 void __kmp_initialize_system_tick() {
1991   kmp_uint64 now, nsec2, diff;
1992   kmp_uint64 delay = 100000; // 50~100 usec on most machines.
1993   kmp_uint64 nsec = __kmp_now_nsec();
1994   kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
1995   while ((now = __kmp_hardware_timestamp()) < goal)
1996     ;
1997   nsec2 = __kmp_now_nsec();
1998   diff = nsec2 - nsec;
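  // Ticks per millisecond = elapsed ticks / elapsed milliseconds
  //                       = (delay + (now - goal)) / (diff nsec / 1e6)
  //                       = 1e6 * (delay + (now - goal)) / diff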
1999   if (diff > 0) {
2000     kmp_uint64 tpms = ((kmp_uint64)1e6 * (delay + (now - goal)) / diff);
2001     if (tpms > 0)
2002       __kmp_ticks_per_msec = tpms;
2003   }
2004 }
2005 #endif
2006 
2007 /* Determine whether the given address is mapped into the current address
2008    space. */
2009 
2010 int __kmp_is_address_mapped(void *addr) {
2011 
2012   int found = 0;
2013   int rc;
2014 
2015 #if KMP_OS_LINUX || KMP_OS_HURD
2016 
2017   /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
2018      address ranges mapped into the address space. */
2019 
2020   char *name = __kmp_str_format("/proc/%d/maps", getpid());
2021   FILE *file = NULL;
2022 
2023   file = fopen(name, "r");
2024   KMP_ASSERT(file != NULL);
2025 
2026   for (;;) {
2027 
2028     void *beginning = NULL;
2029     void *ending = NULL;
2030     char perms[5];
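    // A line of /proc/<pid>/maps has the form, e.g.:
    //   559f3a4b1000-559f3a4b2000 rw-p 00000000 00:00 0    [heap]
    // Only the begin/end addresses and the permission string are parsed below.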
2031 
2032     rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
2033     if (rc == EOF) {
2034       break;
2035     }
2036     KMP_ASSERT(rc == 3 &&
2037                KMP_STRLEN(perms) == 4); // Make sure all fields are read.
2038 
2039     // Ending address is not included in the region, but beginning is.
2040     if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th characters do not matter.
2042       if (strcmp(perms, "rw") == 0) {
2043         // Memory we are looking for should be readable and writable.
2044         found = 1;
2045       }
2046       break;
2047     }
2048   }
2049 
2050   // Free resources.
2051   fclose(file);
2052   KMP_INTERNAL_FREE(name);
2053 #elif KMP_OS_FREEBSD
2054   char *buf;
2055   size_t lstsz;
2056   int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
2057   rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
2058   if (rc < 0)
2059     return 0;
  // Pad the size reported by the first sysctl call by 4/3 to leave room for
  // entries added to the map between the two sysctl calls.
  lstsz = lstsz * 4 / 3;
2063   buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
2064   rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
2065   if (rc < 0) {
2066     kmpc_free(buf);
2067     return 0;
2068   }
2069 
2070   char *lw = buf;
2071   char *up = buf + lstsz;
2072 
2073   while (lw < up) {
2074     struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
2075     size_t cursz = cur->kve_structsize;
2076     if (cursz == 0)
2077       break;
2078     void *start = reinterpret_cast<void *>(cur->kve_start);
2079     void *end = reinterpret_cast<void *>(cur->kve_end);
2080     // Readable/Writable addresses within current map entry
2081     if ((addr >= start) && (addr < end)) {
2082       if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
2083           (cur->kve_protection & KVME_PROT_WRITE) != 0) {
2084         found = 1;
2085         break;
2086       }
2087     }
2088     lw += cursz;
2089   }
2090   kmpc_free(buf);
2091 
2092 #elif KMP_OS_DARWIN
2093 
2094   /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
2095      using vm interface. */
2096 
2097   int buffer;
2098   vm_size_t count;
2099   rc = vm_read_overwrite(
2100       mach_task_self(), // Task to read memory of.
2101       (vm_address_t)(addr), // Address to read from.
2102       1, // Number of bytes to be read.
2103       (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
2104       &count // Address of var to save number of read bytes in.
2105   );
2106   if (rc == 0) {
2107     // Memory successfully read.
2108     found = 1;
2109   }
2110 
2111 #elif KMP_OS_NETBSD
2112 
2113   int mib[5];
2114   mib[0] = CTL_VM;
2115   mib[1] = VM_PROC;
2116   mib[2] = VM_PROC_MAP;
2117   mib[3] = getpid();
2118   mib[4] = sizeof(struct kinfo_vmentry);
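  // The last name element passes the expected size of each kinfo_vmentry
  // record, so the returned buffer can be walked in steps of that size.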
2119 
2120   size_t size;
2121   rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
2122   KMP_ASSERT(!rc);
2123   KMP_ASSERT(size);
2124 
2125   size = size * 4 / 3;
2126   struct kinfo_vmentry *kiv = (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
2127   KMP_ASSERT(kiv);
2128 
2129   rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
2130   KMP_ASSERT(!rc);
2131   KMP_ASSERT(size);
2132 
  for (size_t i = 0; i < size / sizeof(struct kinfo_vmentry); i++) {
    if (kiv[i].kve_start <= (uint64_t)addr &&
        (uint64_t)addr < kiv[i].kve_end) {
2136       found = 1;
2137       break;
2138     }
2139   }
2140   KMP_INTERNAL_FREE(kiv);
2141 #elif KMP_OS_OPENBSD
2142 
2143   int mib[3];
2144   mib[0] = CTL_KERN;
2145   mib[1] = KERN_PROC_VMMAP;
2146   mib[2] = getpid();
2147 
2148   size_t size;
2149   uint64_t end;
2150   rc = sysctl(mib, 3, NULL, &size, NULL, 0);
2151   KMP_ASSERT(!rc);
2152   KMP_ASSERT(size);
2153   end = size;
2154 
2155   struct kinfo_vmentry kiv = {.kve_start = 0};
2156 
2157   while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
2158     KMP_ASSERT(size);
2159     if (kiv.kve_end == end)
2160       break;
2161 
    if (kiv.kve_start <= (uint64_t)addr && (uint64_t)addr < kiv.kve_end) {
2163       found = 1;
2164       break;
2165     }
2166     kiv.kve_start += 1;
2167   }
2168 #elif KMP_OS_DRAGONFLY
2169 
2170   // FIXME(DragonFly): Implement this
2171   found = 1;
2172 
2173 #else
2174 
2175 #error "Unknown or unsupported OS"
2176 
2177 #endif
2178 
2179   return found;
2180 
2181 } // __kmp_is_address_mapped
2182 
2183 #ifdef USE_LOAD_BALANCE
2184 
2185 #if KMP_OS_DARWIN || KMP_OS_NETBSD
2186 
// The function returns the rounded value of the system load average over a
// given time interval, which depends on the value of the
// __kmp_load_balance_interval variable (the default is 60 sec; other values
// may be 300 sec or 900 sec).
// It returns -1 in case of error.
2192 int __kmp_get_load_balance(int max) {
2193   double averages[3];
2194   int ret_avg = 0;
2195 
2196   int res = getloadavg(averages, 3);
2197 
  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested, i.e. fewer
  // than 3.
2201   if (__kmp_load_balance_interval < 180 && (res >= 1)) {
2202     ret_avg = (int)averages[0]; // 1 min
2203   } else if ((__kmp_load_balance_interval >= 180 &&
2204               __kmp_load_balance_interval < 600) &&
2205              (res >= 2)) {
2206     ret_avg = (int)averages[1]; // 5 min
2207   } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
2208     ret_avg = (int)averages[2]; // 15 min
2209   } else { // Error occurred
2210     return -1;
2211   }
2212 
2213   return ret_avg;
2214 }
2215 
2216 #else // Linux* OS
2217 
// The function returns the number of running (not sleeping) threads, or -1 in
// case of error. An error can be reported if the Linux* OS kernel is too old
// (no "/proc" support). Counting of running threads stops once max running
// threads have been encountered.
2222 int __kmp_get_load_balance(int max) {
2223   static int permanent_error = 0;
2224   static int glb_running_threads = 0; // Saved count of the running threads for
2225   // the thread balance algorithm
2226   static double glb_call_time = 0; /* Thread balance algorithm call time */
2227 
2228   int running_threads = 0; // Number of running threads in the system.
2229 
2230   DIR *proc_dir = NULL; // Handle of "/proc/" directory.
2231   struct dirent *proc_entry = NULL;
2232 
2233   kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
2234   DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
2235   struct dirent *task_entry = NULL;
2236   int task_path_fixed_len;
2237 
2238   kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
2239   int stat_file = -1;
2240   int stat_path_fixed_len;
2241 
2242   int total_processes = 0; // Total number of processes in system.
2243   int total_threads = 0; // Total number of threads in system.
2244 
2245   double call_time = 0.0;
2246 
2247   __kmp_str_buf_init(&task_path);
2248   __kmp_str_buf_init(&stat_path);
2249 
2250   __kmp_elapsed(&call_time);
2251 
2252   if (glb_call_time &&
2253       (call_time - glb_call_time < __kmp_load_balance_interval)) {
2254     running_threads = glb_running_threads;
2255     goto finish;
2256   }
2257 
2258   glb_call_time = call_time;
2259 
2260   // Do not spend time on scanning "/proc/" if we have a permanent error.
2261   if (permanent_error) {
2262     running_threads = -1;
2263     goto finish;
2264   }
2265 
2266   if (max <= 0) {
2267     max = INT_MAX;
2268   }
2269 
2270   // Open "/proc/" directory.
2271   proc_dir = opendir("/proc");
2272   if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return an
2274     // error now and in subsequent calls.
2275     running_threads = -1;
2276     permanent_error = 1;
2277     goto finish;
2278   }
2279 
2280   // Initialize fixed part of task_path. This part will not change.
2281   __kmp_str_buf_cat(&task_path, "/proc/", 6);
2282   task_path_fixed_len = task_path.used; // Remember number of used characters.
2283 
2284   proc_entry = readdir(proc_dir);
2285   while (proc_entry != NULL) {
2286     // Proc entry is a directory and name starts with a digit. Assume it is a
2287     // process' directory.
2288     if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {
2289 
2290       ++total_processes;
2291       // Make sure init process is the very first in "/proc", so we can replace
2292       // strcmp( proc_entry->d_name, "1" ) == 0 with simpler total_processes ==
2293       // 1. We are going to check that total_processes == 1 => d_name == "1" is
2294       // true (where "=>" is implication). Since C++ does not have => operator,
2295       // let us replace it with its equivalent: a => b == ! a || b.
2296       KMP_DEBUG_ASSERT(total_processes != 1 ||
2297                        strcmp(proc_entry->d_name, "1") == 0);
2298 
2299       // Construct task_path.
2300       task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
2301       __kmp_str_buf_cat(&task_path, proc_entry->d_name,
2302                         KMP_STRLEN(proc_entry->d_name));
2303       __kmp_str_buf_cat(&task_path, "/task", 5);
2304 
2305       task_dir = opendir(task_path.str);
2306       if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" directory entry
        // and opening the process' "task/" directory. So, in the general case,
        // we should not complain, but just skip this process and read the next
        // one. But on systems with no "task/" support we would spend a lot of
        // time rescanning the "/proc/" tree again and again without any
        // benefit. The "init" process (pid 1) should always exist, so if we
        // cannot open "/proc/1/task/", it means "task/" is not supported by
        // the kernel. Report an error now and in subsequent calls.
2315         if (strcmp(proc_entry->d_name, "1") == 0) {
2316           running_threads = -1;
2317           permanent_error = 1;
2318           goto finish;
2319         }
2320       } else {
2321         // Construct fixed part of stat file path.
2322         __kmp_str_buf_clear(&stat_path);
2323         __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
2324         __kmp_str_buf_cat(&stat_path, "/", 1);
2325         stat_path_fixed_len = stat_path.used;
2326 
2327         task_entry = readdir(task_dir);
2328         while (task_entry != NULL) {
          // It is a directory and its name starts with a digit.
          if (task_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
2331             ++total_threads;
2332 
2333             // Construct complete stat file path. Easiest way would be:
2334             //  __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
2335             //  task_entry->d_name );
            // but a series of __kmp_str_buf_cat calls works a bit faster.
2337             stat_path.used =
2338                 stat_path_fixed_len; // Reset stat path to its fixed part.
2339             __kmp_str_buf_cat(&stat_path, task_entry->d_name,
2340                               KMP_STRLEN(task_entry->d_name));
2341             __kmp_str_buf_cat(&stat_path, "/stat", 5);
2342 
2343             // Note: Low-level API (open/read/close) is used. High-level API
            // (fopen/fclose) works ~30% slower.
2345             stat_file = open(stat_path.str, O_RDONLY);
2346             if (stat_file == -1) {
2347               // We cannot report an error because task (thread) can terminate
2348               // just before reading this file.
2349             } else {
              /* Content of "stat" file looks like:
                 24285 (program) S ...

                 It is a single line (if the program name does not include
                 funny symbols). The first number is the thread id, then the
                 name of the executable file in parentheses, then the state of
                 the thread. We need just the thread state.

                 Good news: The length of the program name is 15 characters
                 max. Longer names are truncated.

                 Thus, we need a rather short buffer: 15 chars for the program
                 name + 2 parentheses + 3 spaces + ~7 digits of pid = 37.

                 Bad news: The program name may contain special symbols like a
                 space, a closing parenthesis, or even a newline. This makes
                 parsing the "stat" file not 100% reliable. For funny program
                 names parsing may fail (and report an incorrect thread state).

                 Parsing the "status" file looks more promising (due to its
                 different file structure and escaping of special symbols), but
                 reading and parsing the "status" file works slower.
                  -- ln
              */
2374               char buffer[65];
2375               ssize_t len;
2376               len = read(stat_file, buffer, sizeof(buffer) - 1);
2377               if (len >= 0) {
2378                 buffer[len] = 0;
2379                 // Using scanf:
2380                 //     sscanf( buffer, "%*d (%*s) %c ", & state );
2381                 // looks very nice, but searching for a closing parenthesis
2382                 // works a bit faster.
2383                 char *close_parent = strstr(buffer, ") ");
2384                 if (close_parent != NULL) {
2385                   char state = *(close_parent + 2);
2386                   if (state == 'R') {
2387                     ++running_threads;
2388                     if (running_threads >= max) {
2389                       goto finish;
2390                     }
2391                   }
2392                 }
2393               }
2394               close(stat_file);
2395               stat_file = -1;
2396             }
2397           }
2398           task_entry = readdir(task_dir);
2399         }
2400         closedir(task_dir);
2401         task_dir = NULL;
2402       }
2403     }
2404     proc_entry = readdir(proc_dir);
2405   }
2406 
2407   // There _might_ be a timing hole where the thread executing this
  // code gets skipped in the load balance, and running_threads is 0.
2409   // Assert in the debug builds only!!!
2410   KMP_DEBUG_ASSERT(running_threads > 0);
2411   if (running_threads <= 0) {
2412     running_threads = 1;
2413   }
2414 
2415 finish: // Clean up and exit.
2416   if (proc_dir != NULL) {
2417     closedir(proc_dir);
2418   }
2419   __kmp_str_buf_free(&task_path);
2420   if (task_dir != NULL) {
2421     closedir(task_dir);
2422   }
2423   __kmp_str_buf_free(&stat_path);
2424   if (stat_file != -1) {
2425     close(stat_file);
2426   }
2427 
2428   glb_running_threads = running_threads;
2429 
2430   return running_threads;
2431 
2432 } // __kmp_get_load_balance
2433 
2434 #endif // KMP_OS_DARWIN
2435 
2436 #endif // USE_LOAD_BALANCE
2437 
2438 #if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
2439       ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
2440       KMP_ARCH_PPC64 || KMP_ARCH_RISCV64)
2441 
// We really only need the case with 1 argument, because Clang always builds
// a struct of pointers to the shared variables referenced in the outlined
// function.
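// In that single-argument case the call below reduces to, e.g.:
//   (*pkfn)(&gtid, &tid, p_argv[0]); // p_argv[0] points at the argument struct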
2444 int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
2445                            void *p_argv[]
2446 #if OMPT_SUPPORT
2447                            ,
2448                            void **exit_frame_ptr
2449 #endif
2450 ) {
2451 #if OMPT_SUPPORT
2452   *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
2453 #endif
2454 
2455   switch (argc) {
2456   default:
2457     fprintf(stderr, "Too many args to microtask: %d!\n", argc);
2458     fflush(stderr);
2459     exit(-1);
2460   case 0:
2461     (*pkfn)(&gtid, &tid);
2462     break;
2463   case 1:
2464     (*pkfn)(&gtid, &tid, p_argv[0]);
2465     break;
2466   case 2:
2467     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
2468     break;
2469   case 3:
2470     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
2471     break;
2472   case 4:
2473     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
2474     break;
2475   case 5:
2476     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
2477     break;
2478   case 6:
2479     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2480             p_argv[5]);
2481     break;
2482   case 7:
2483     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2484             p_argv[5], p_argv[6]);
2485     break;
2486   case 8:
2487     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2488             p_argv[5], p_argv[6], p_argv[7]);
2489     break;
2490   case 9:
2491     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2492             p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
2493     break;
2494   case 10:
2495     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2496             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
2497     break;
2498   case 11:
2499     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2500             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
2501     break;
2502   case 12:
2503     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2504             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2505             p_argv[11]);
2506     break;
2507   case 13:
2508     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2509             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2510             p_argv[11], p_argv[12]);
2511     break;
2512   case 14:
2513     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2514             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2515             p_argv[11], p_argv[12], p_argv[13]);
2516     break;
2517   case 15:
2518     (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
2519             p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
2520             p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
2521     break;
2522   }
2523 
2524   return 1;
2525 }
2526 
2527 #endif
2528 
2529 #if KMP_OS_LINUX
2530 // Functions for hidden helper task
2531 namespace {
2532 // Condition variable for initializing hidden helper team
2533 pthread_cond_t hidden_helper_threads_initz_cond_var;
2534 pthread_mutex_t hidden_helper_threads_initz_lock;
2535 volatile int hidden_helper_initz_signaled = FALSE;
2536 
2537 // Condition variable for deinitializing hidden helper team
2538 pthread_cond_t hidden_helper_threads_deinitz_cond_var;
2539 pthread_mutex_t hidden_helper_threads_deinitz_lock;
2540 volatile int hidden_helper_deinitz_signaled = FALSE;
2541 
2542 // Condition variable for the wrapper function of main thread
2543 pthread_cond_t hidden_helper_main_thread_cond_var;
2544 pthread_mutex_t hidden_helper_main_thread_lock;
2545 volatile int hidden_helper_main_thread_signaled = FALSE;
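// Each cond-var/mutex/flag triple above implements a one-shot handshake: the
// waiter checks the *_signaled flag under the lock and blocks on the condition
// variable only if it is not yet set, so a release that happens before the
// wait cannot be lost.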
2546 
// Semaphore for worker threads. We don't use a condition variable here because
// if multiple signals are sent at the same time, only one thread might be
// woken up.
2550 sem_t hidden_helper_task_sem;
2551 } // namespace
2552 
2553 void __kmp_hidden_helper_worker_thread_wait() {
2554   int status = sem_wait(&hidden_helper_task_sem);
2555   KMP_CHECK_SYSFAIL("sem_wait", status);
2556 }
2557 
2558 void __kmp_do_initialize_hidden_helper_threads() {
2559   // Initialize condition variable
2560   int status =
2561       pthread_cond_init(&hidden_helper_threads_initz_cond_var, nullptr);
2562   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2563 
2564   status = pthread_cond_init(&hidden_helper_threads_deinitz_cond_var, nullptr);
2565   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2566 
2567   status = pthread_cond_init(&hidden_helper_main_thread_cond_var, nullptr);
2568   KMP_CHECK_SYSFAIL("pthread_cond_init", status);
2569 
2570   status = pthread_mutex_init(&hidden_helper_threads_initz_lock, nullptr);
2571   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2572 
2573   status = pthread_mutex_init(&hidden_helper_threads_deinitz_lock, nullptr);
2574   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2575 
2576   status = pthread_mutex_init(&hidden_helper_main_thread_lock, nullptr);
2577   KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
2578 
2579   // Initialize the semaphore
2580   status = sem_init(&hidden_helper_task_sem, 0, 0);
2581   KMP_CHECK_SYSFAIL("sem_init", status);
2582 
2583   // Create a new thread to finish initialization
2584   pthread_t handle;
2585   status = pthread_create(
2586       &handle, nullptr,
2587       [](void *) -> void * {
2588         __kmp_hidden_helper_threads_initz_routine();
2589         return nullptr;
2590       },
2591       nullptr);
2592   KMP_CHECK_SYSFAIL("pthread_create", status);
2593 }
2594 
2595 void __kmp_hidden_helper_threads_initz_wait() {
2596   // Initial thread waits here for the completion of the initialization. The
2597   // condition variable will be notified by main thread of hidden helper teams.
2598   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2599   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2600 
2601   if (!TCR_4(hidden_helper_initz_signaled)) {
2602     status = pthread_cond_wait(&hidden_helper_threads_initz_cond_var,
2603                                &hidden_helper_threads_initz_lock);
2604     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2605   }
2606 
2607   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2608   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2609 }
2610 
2611 void __kmp_hidden_helper_initz_release() {
2612   // After all initialization, reset __kmp_init_hidden_helper_threads to false.
2613   int status = pthread_mutex_lock(&hidden_helper_threads_initz_lock);
2614   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2615 
2616   status = pthread_cond_signal(&hidden_helper_threads_initz_cond_var);
2617   KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2618 
2619   TCW_SYNC_4(hidden_helper_initz_signaled, TRUE);
2620 
2621   status = pthread_mutex_unlock(&hidden_helper_threads_initz_lock);
2622   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2623 }
2624 
2625 void __kmp_hidden_helper_main_thread_wait() {
  // The main thread of the hidden helper team will be blocked here. The
  // condition variable can only be signaled in the destructor of the RTL.
2628   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2629   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2630 
2631   if (!TCR_4(hidden_helper_main_thread_signaled)) {
2632     status = pthread_cond_wait(&hidden_helper_main_thread_cond_var,
2633                                &hidden_helper_main_thread_lock);
2634     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2635   }
2636 
2637   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2638   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2639 }
2640 
2641 void __kmp_hidden_helper_main_thread_release() {
  // The initial thread of the OpenMP RTL should call this function to wake up
  // the main thread of the hidden helper team.
2644   int status = pthread_mutex_lock(&hidden_helper_main_thread_lock);
2645   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2646 
2647   status = pthread_cond_signal(&hidden_helper_main_thread_cond_var);
2648   KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
2649 
2650   // The hidden helper team is done here
2651   TCW_SYNC_4(hidden_helper_main_thread_signaled, TRUE);
2652 
2653   status = pthread_mutex_unlock(&hidden_helper_main_thread_lock);
2654   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2655 }
2656 
2657 void __kmp_hidden_helper_worker_thread_signal() {
2658   int status = sem_post(&hidden_helper_task_sem);
2659   KMP_CHECK_SYSFAIL("sem_post", status);
2660 }
2661 
2662 void __kmp_hidden_helper_threads_deinitz_wait() {
2663   // Initial thread waits here for the completion of the deinitialization. The
2664   // condition variable will be notified by main thread of hidden helper teams.
2665   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2666   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2667 
2668   if (!TCR_4(hidden_helper_deinitz_signaled)) {
2669     status = pthread_cond_wait(&hidden_helper_threads_deinitz_cond_var,
2670                                &hidden_helper_threads_deinitz_lock);
2671     KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2672   }
2673 
2674   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2675   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2676 }
2677 
2678 void __kmp_hidden_helper_threads_deinitz_release() {
2679   int status = pthread_mutex_lock(&hidden_helper_threads_deinitz_lock);
2680   KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
2681 
2682   status = pthread_cond_signal(&hidden_helper_threads_deinitz_cond_var);
2683   KMP_CHECK_SYSFAIL("pthread_cond_wait", status);
2684 
2685   TCW_SYNC_4(hidden_helper_deinitz_signaled, TRUE);
2686 
2687   status = pthread_mutex_unlock(&hidden_helper_threads_deinitz_lock);
2688   KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
2689 }
2690 #else // KMP_OS_LINUX
2691 void __kmp_hidden_helper_worker_thread_wait() {
2692   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2693 }
2694 
2695 void __kmp_do_initialize_hidden_helper_threads() {
2696   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2697 }
2698 
2699 void __kmp_hidden_helper_threads_initz_wait() {
2700   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2701 }
2702 
2703 void __kmp_hidden_helper_initz_release() {
2704   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2705 }
2706 
2707 void __kmp_hidden_helper_main_thread_wait() {
2708   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2709 }
2710 
2711 void __kmp_hidden_helper_main_thread_release() {
2712   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2713 }
2714 
2715 void __kmp_hidden_helper_worker_thread_signal() {
2716   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2717 }
2718 
2719 void __kmp_hidden_helper_threads_deinitz_wait() {
2720   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2721 }
2722 
2723 void __kmp_hidden_helper_threads_deinitz_release() {
2724   KMP_ASSERT(0 && "Hidden helper task is not supported on this OS");
2725 }
2726 #endif // KMP_OS_LINUX
2727 
2728 // end of file //
2729