/*
 * z_Linux_util.cpp -- platform specific routines.
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_itt.h"
#include "kmp_lock.h"
#include "kmp_stats.h"
#include "kmp_str.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <math.h> // HUGE_VAL.
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/time.h>
#include <sys/times.h>
#include <unistd.h>

#if KMP_OS_LINUX && !KMP_OS_CNK
#include <sys/sysinfo.h>
#if KMP_USE_FUTEX
// We should really include <futex.h>, but that causes compatibility problems
// on different Linux* OS distributions that either require that you include
// (or break when you try to include) <pci/types.h>. Since all we need is the
// two macros below (which are part of the kernel ABI, so can't change) we
// just define the constants here and don't include <futex.h>
#ifndef FUTEX_WAIT
#define FUTEX_WAIT 0
#endif
#ifndef FUTEX_WAKE
#define FUTEX_WAKE 1
#endif
#endif
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <pthread_np.h>
#elif KMP_OS_NETBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#include <ctype.h>
#include <dirent.h>
#include <fcntl.h>

#include "tsan_annotations.h"

struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;

#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;

#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif

#if (KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
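
/* Illustration only (not part of the runtime's code path): on Linux with
   glibc, the single-CPU pinning that __kmp_affinity_bind_thread performs
   through the KMP_CPU_* macros can be sketched directly with cpu_set_t.
   Assumes _GNU_SOURCE, <sched.h> and <pthread.h>. */
#if 0
static void bind_self_to_cpu_sketch(int which) {
  cpu_set_t set;
  CPU_ZERO(&set);       // start from an empty mask
  CPU_SET(which, &set); // allow exactly one logical CPU
  pthread_setaffinity_np(pthread_self(), sizeof(set), &set);
}
#endif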

/* Determine if we can access affinity functionality on this version of
 * Linux* OS by checking __NR_sched_{get,set}affinity system calls, and set
 * __kmp_affin_mask_size to the appropriate value (0 means not capable). */
void __kmp_affinity_determine_capable(const char *env_var) {
  // Check and see if the OS supports thread affinity.

#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)

  int gCode;
  int sCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);

  // If Linux* OS:
  // If the syscall fails or returns a suggestion for the size,
  // then we don't have to search for an appropriate size.
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));

  // if ((gCode < 0) && (errno == ENOSYS))
  if (gCode < 0) {
    // System call not supported
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
         (__kmp_affinity_type != affinity_default) &&
         (__kmp_affinity_type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  }
  if (gCode > 0) { // Linux* OS only
    // The optimal situation: the OS returns the size of the buffer it expects.
    //
    // A verification of correct behavior is that setaffinity on a NULL
    // buffer with the same size fails with errno set to EFAULT.
    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %d errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) { // Linux* OS only
        // We shouldn't get here
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }
  // save uncaught error code
  // int error = errno;
  KMP_INTERNAL_FREE(buf);
  // restore uncaught error code, will be printed at the next KMP_WARNING below
  // errno = error;

  // Affinity is not supported
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (__kmp_affinity_verbose ||
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
       (__kmp_affinity_type != affinity_default) &&
       (__kmp_affinity_type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}
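
/* Illustration only: the probing technique used above, reduced to its
   essentials. getaffinity is retried with growing buffer sizes; a size is
   accepted once setaffinity on a NULL buffer of that size fails with EFAULT,
   i.e. the kernel accepted the size as valid and only then rejected the
   pointer. Assumes Linux, <errno.h> and <stdlib.h>. */
#if 0
static long probe_affinity_mask_size_sketch(void) {
  enum { limit = 1024 * 1024 };
  unsigned char *buf = (unsigned char *)malloc(limit);
  long found = 0;
  for (long size = 1; size <= limit && buf != NULL; size *= 2) {
    long g = syscall(__NR_sched_getaffinity, 0, size, buf);
    if (g < 0)
      continue; // buffer too small; try the next power of two
    if (syscall(__NR_sched_setaffinity, 0, g, NULL) < 0 && errno == EFAULT) {
      found = g; // g is a usable mask size
      break;
    }
  }
  free(buf);
  return found; // 0 means the size could not be determined
}
#endif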

#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED

#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  int rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}
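
/* Illustration only: the two futex operations this file relies on, in the
   raw syscall form described in futex(2). The address must be a naturally
   aligned 32-bit word shared between the waiter and the waker. */
#if 0
static void futex_wait_sketch(int *addr, int expected) {
  // Blocks only while *addr still equals `expected`; returns on wake-up,
  // on a value mismatch, or spuriously (EINTR), so callers must re-check.
  syscall(__NR_futex, addr, FUTEX_WAIT, expected, NULL, NULL, 0);
}
static void futex_wake_one_sketch(int *addr) {
  // Wakes at most one thread currently blocked in FUTEX_WAIT on `addr`.
  syscall(__NR_futex, addr, FUTEX_WAKE, 1, NULL, NULL, 0);
}
#endif

#endif // KMP_USE_FUTEX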

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
   use compare_and_store for these routines */

kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#if KMP_ARCH_X86
kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}
#endif /* KMP_ARCH_X86 */

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (! KMP_ASM_INTRINS) */
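
/* Illustration only: the retry idiom above restated with C++11 atomics,
   which this spot in the file predates. compare_exchange_weak reloads the
   expected value on failure, so each iteration re-derives old | d exactly
   as the KMP_COMPARE_AND_STORE_REL loops do. Assumes <atomic>. */
#if 0
static int fetch_then_or_sketch(std::atomic<int> *p, int d) {
  int old_value = p->load(std::memory_order_relaxed);
  while (!p->compare_exchange_weak(old_value, old_value | d,
                                   std::memory_order_release,
                                   std::memory_order_relaxed)) {
    // old_value now holds the freshly observed *p; just retry.
  }
  return old_value; // the value seen immediately before our update
}
#endif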

void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
  __kmp_yield(TRUE);
} // __kmp_terminate_thread

/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume call failed and use incremental stack
   refinement method instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif /* KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||
          KMP_OS_HURD */
  /* Use incremental refinement starting from initial conservative estimate */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
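
/* Illustration only: the stack query __kmp_set_stack_info performs on Linux,
   in isolation. pthread_getattr_np (a GNU extension) fills an attr object
   describing the live thread; pthread_attr_getstack then yields the lowest
   stack address and the size, so the base stored above is addr + size
   because stacks grow downward here. Assumes _GNU_SOURCE. */
#if 0
static void query_own_stack_sketch(void **low_addr, size_t *size) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_getattr_np(pthread_self(), &attr); // describe the calling thread
  pthread_attr_getstack(&attr, low_addr, size);
  pthread_attr_destroy(&attr);
}
#endif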

static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
  KMP_INIT_PARTITIONED_TIMERS(OMP_idle);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
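
/* Illustration only: the KMP_BLOCK_SIGNALS pattern from __kmp_launch_worker
   above -- block every signal, run, then restore the caller's mask verbatim
   rather than unblocking a fixed set. */
#if 0
static void run_with_signals_blocked_sketch(void (*work)(void)) {
  sigset_t new_set, old_set;
  sigfillset(&new_set);                           // "all signals"
  pthread_sigmask(SIG_BLOCK, &new_set, &old_set); // save the previous mask
  work();
  pthread_sigmask(SIG_SETMASK, &old_set, NULL);   // put it back exactly
}
#endif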

#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex */

static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;
  int yield_count;
  int yield_cycles = 0;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* register us as the monitor thread */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  // josh todo: isn't PTHREAD_CANCEL_ENABLE default for newly-created threads?
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with real-time
  // scheduling policy to work. However, a decision about the fix has not
  // been made yet, so it is disabled by default.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are a part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor thread, so the application
        // could potentially work...
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free thread that waits for monitor started
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  if (__kmp_yield_cycle) {
    __kmp_yielding_on = 0; /* Start out with yielding shut off */
    yield_count = __kmp_yield_off_count;
  } else {
    __kmp_yielding_on = 1; /* Yielding is on permanently */
  }

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    if (__kmp_yield_cycle) {
      yield_cycles++;
      if ((yield_cycles % yield_count) == 0) {
        if (__kmp_yielding_on) {
          __kmp_yielding_on = 0; /* Turn it off now */
          yield_count = __kmp_yield_off_count;
        } else {
          __kmp_yielding_on = 1; /* Turn it on now */
          yield_count = __kmp_yield_on_count;
        }
        yield_cycles = 0;
      }
    } else {
      __kmp_yielding_on = 1;
    }

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* now we need to terminate the worker threads */
    /* the value of t_abort is the signal we caught */

    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* terminate the OpenMP worker threads */
    /* TODO this is not valid for sibling threads!!
     * the uber master might not be 0 anymore.. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    __kmp_cleanup();

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR
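
/* Illustration only: how the monitor loop above turns a relative wakeup
   interval into the absolute deadline pthread_cond_timedwait requires,
   including the carry that keeps tv_nsec below one second. */
#if 0
static void deadline_after_sketch(struct timespec *out, struct timespec rel) {
  struct timeval tval;
  gettimeofday(&tval, NULL);
  TIMEVAL_TO_TIMESPEC(&tval, out); // "now" as a timespec
  out->tv_sec += rel.tv_sec;
  out->tv_nsec += rel.tv_nsec;
  if (out->tv_nsec >= KMP_NSEC_PER_SEC) { // normalize the nanosecond field
    out->tv_sec += 1;
    out->tv_nsec -= KMP_NSEC_PER_SEC;
  }
}
#endif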

void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // sets up worker thread stats
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats is used to transfer thread-specific stats-pointer to
  // __kmp_launch_worker. So when thread is created (goes into
  // __kmp_launch_worker) it will set its thread local pointer to
  // th->th.th_stats
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr is set in __kmp_register_root(),
    // so set the th->th.th_stats field to it.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Set stack size for this thread now.
     The multiple of 2 is there because on some machines, requesting an unusual
     stacksize causes the thread to have an offset before the dummy alloca()
     takes place to create the offset. Since we want the user to have a
     sufficient stacksize AND support a stack offset, we alloca() twice the
     offset so that the upcoming alloca() does not eliminate any premade offset,
     and also gives the user the stack space they requested for all threads */
  stack_size += gtid * __kmp_stkoffset * 2;

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) { // ??? Why do we check handle??
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
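
/* Illustration only: the attribute sequence __kmp_create_worker runs
   through, reduced to its core -- a joinable thread with an explicit stack
   size. pthread_create's EINVAL/ENOMEM/EAGAIN results are what the hints
   above translate into user-facing messages. */
#if 0
static int create_with_stack_sketch(pthread_t *out, size_t stack_size,
                                    void *(*fn)(void *), void *arg) {
  pthread_attr_t attr;
  pthread_attr_init(&attr);
  pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
  pthread_attr_setstacksize(&attr, stack_size); // may fail for odd sizes
  int status = pthread_create(out, &attr, fn, arg);
  pthread_attr_destroy(&attr); // safe: the new thread keeps its own copy
  return status;
}
#endif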

#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need monitor thread in case of MAX_BLOCKTIME
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes,"
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal? BB
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor thread has really started and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_yield_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value,
                     -1, &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR

void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread

#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First, check to see whether the monitor thread exists to wake it up. This
     is to avoid performance problems when the monitor sleeps during a
     blocktime-sized interval */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#endif // KMP_USE_MONITOR
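
/* Illustration only: signal number 0 delivers nothing; pthread_kill(t, 0)
   just reports whether `t` still names a live thread (ESRCH otherwise).
   That existence probe is what lets __kmp_reap_monitor above skip the
   wakeup when the monitor is already gone. Assumes <signal.h>. */
#if 0
static int thread_is_alive_sketch(pthread_t t) {
  return pthread_kill(t, 0) != ESRCH;
}
#endif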

void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}

#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler

static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_abort, signo);
      KMP_MB(); // Flush all pending memory write invalidates.
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep the user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers were
    // installed.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler
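
/* Illustration only: the decide-after-installing pattern of
   __kmp_install_one_handler above. The old action is captured atomically by
   the same sigaction call that installs ours; if it differs from the saved
   system default, the application had its own handler, so we put it back. */
#if 0
static void install_unless_user_handler_sketch(int sig, void (*fn)(int),
                                               const struct sigaction *dflt) {
  struct sigaction new_action, old_action;
  new_action.sa_handler = fn;
  new_action.sa_flags = 0;
  sigfillset(&new_action.sa_mask); // block everything while handling
  sigaction(sig, &new_action, &old_action);
  if (old_action.sa_handler != dflt->sa_handler)
    sigaction(sig, &old_action, NULL); // restore the user's handler
}
#endif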

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    // If ! parallel_init, we do not install handlers, just save original
    // handlers. Let us do it even if __kmp_handle_signals is 0.
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS

void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}

static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by parent, just abandon it to be safe. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  /* TODO make sure this is done right for nested/sibling */
  // ATT: Memory leaks are here? TODO: Check it and fix.
  /* KMP_ASSERT( 0 ); */

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX
  // reset the affinity in the child to the initial thread
  // affinity in the parent
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  __kmp_affinity_type = affinity_none;
#if OMP_40_ENABLED
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
#endif // OMP_40_ENABLED
#endif // KMP_AFFINITY_SUPPORTED

  __kmp_init_runtime = FALSE;
#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* reset statically initialized locks */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);

#if USE_ITT_BUILD
  __kmp_itt_reset(); // reset ITT's global state
#endif /* USE_ITT_BUILD */

  /* This is necessary to make sure no stale data is left around */
  /* AC: customers complain that we use unsafe routines in the atfork
     handler. Mathworks: dlsym() is unsafe. We call dlsym and dlopen when
     checking for the presence of the shared tbbmalloc library in
     dynamic_link. The suggestion is to make the library initialization
     lazier, similar to what is done for __kmpc_begin(). */
  // TODO: synchronize all static initializations with regular library
  // startup; look at kmp_global.cpp and etc.
  //__kmp_internal_begin ();
}
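
/* Illustration only: why the three handlers registered below are ordered
   this way. The prepare handler runs in the parent immediately before
   fork() and acquires both bootstrap locks, so no other thread can hold
   them at the instant the address space is duplicated; the parent and
   child handlers each run right after fork() in their own process, where
   the parent simply releases the locks and the child (above) additionally
   resets the runtime to its initial state. */
#if 0
static void register_atfork_sketch(void) {
  pthread_atfork(/*prepare=*/__kmp_atfork_prepare,
                 /*parent=*/__kmp_atfork_parent,
                 /*child=*/__kmp_atfork_child);
}
#endif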

void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}

void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}

static void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
  if (th->th.th_suspend_init_count <= __kmp_fork_count) {
    /* this means we haven't initialized the suspension pthread objects for
       this thread in this instance of the process */
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    *(volatile int *)&th->th.th_suspend_init_count = __kmp_fork_count + 1;
    ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
  }
}

void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (th->th.th_suspend_init_count > __kmp_fork_count) {
    /* this means we have initialized the suspension pthread objects for this
       thread in this instance of the process */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(th->th.th_suspend_init_count == __kmp_fork_count);
  }
}

// return true if lock obtained, false otherwise
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}

/* This routine puts the calling thread to sleep after setting the
   sleep bit for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
#if OMP_50_ENABLED
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
    return;
  }
#endif
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin)) {
    old_spin = flag->unset_sleeping();
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may
       "with low probability" return when the condition variable has
       not been signaled or broadcast */
    int deactivated = FALSE;
    TCW_PTR(th->th.th_sleep_loc, (void *)flag);

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000 * 1000; // milliseconds -> ns
      if (now.tv_nsec >= KMP_NSEC_PER_SEC) { // keep tv_nsec in range
        now.tv_sec += 1;
        now.tv_nsec -= KMP_NSEC_PER_SEC;
      }

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive)
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}

void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
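
/* Illustration only: the shape of the suspend protocol above, stripped of
   the flag templates. Publishing the sleep bit and re-testing it before
   every wait, all under the same mutex the resumer signals with, is what
   makes the handshake immune to lost wakeups and to spurious returns from
   pthread_cond_wait. */
#if 0
static void suspend_sketch(pthread_mutex_t *mx, pthread_cond_t *cv,
                           volatile int *sleep_bit) {
  pthread_mutex_lock(mx);
  *sleep_bit = 1;    // announce intent to sleep before the last check
  while (*sleep_bit) // cond waits may return spuriously: always re-test
    pthread_cond_wait(cv, mx);
  pthread_mutex_unlock(mx);
}
// The resuming side clears the bit and calls pthread_cond_signal while
// holding the same mutex, mirroring __kmp_resume_template below.
#endif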

/* This routine signals the thread specified by target_gtid to wake up
   after setting the sleep bit indicated by the flag argument to FALSE.
   The target thread must already have called __kmp_suspend_template() */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!flag) { // coming from __kmp_null_resume_wrapper
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag || flag->get_type() != flag->get_ptr_type()) { // get_ptr_type
    // simply shows what flag was cast to
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, NULL));
    status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
    return;
  } else { // if multiple threads are sleeping, flag should be internally
    // referring to a specific thread here
    typename C::flag_t old_spin = flag->unset_sleeping();
    if (!flag->is_sleeping_val(old_spin)) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): "
                   "%u => %u\n",
                   gtid, target_gtid, flag->get(), old_spin, flag->load()));
      status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
      KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
      return;
    }
    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): "
                 "%u => %u\n",
                 gtid, target_gtid, flag->get(), old_spin, flag->load()));
  }
  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif
  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}

void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR

void __kmp_yield(int cond) {
  if (!cond)
    return;
#if KMP_USE_MONITOR
  if (!__kmp_yielding_on)
    return;
#else
  if (__kmp_yield_cycle && !KMP_YIELD_NOW())
    return;
#endif
  sched_yield();
}

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    // Store gtid+1: pthread_getspecific() returns 0 (NULL) for a key that was
    // never set, so 0 must stay distinguishable from a valid gtid of 0.
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}

int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE; // key was never set for this thread
  } else {
    gtid--; // undo the +1 applied in __kmp_gtid_set_specific()
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
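// [Illustrative sketch, not part of the runtime] A stripped-down model of the
// gtid-in-TLS encoding used above (hypothetical names): the stored value is
// biased by +1 so that the 0 (NULL) returned by pthread_getspecific() for a
// key this thread never set cannot be confused with a valid gtid of 0.
#if 0
#include <pthread.h>
#include <stdint.h>

static pthread_key_t demo_key; // assume pthread_key_create() succeeded

static void demo_set_gtid(int gtid) {
  pthread_setspecific(demo_key, (void *)(intptr_t)(gtid + 1));
}

static int demo_get_gtid(void) {
  intptr_t v = (intptr_t)pthread_getspecific(demo_key);
  return (v == 0) ? -1 /* never set on this thread */ : (int)(v - 1);
}
#endif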
double __kmp_read_cpu_time(void) {
  /*clock_t t;*/
  struct tms buffer;

  /*t =*/times(&buffer);

  return (buffer.tms_utime + buffer.tms_cutime) / (double)CLOCKS_PER_SEC;
}

int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of voluntary context switches
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of involuntary (forced) context switches
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}

void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}

static int __kmp_get_xproc(void) {

  int r = 0;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD

  r = sysconf(_SC_NPROCESSORS_ONLN);

#elif KMP_OS_DARWIN

  // Bug C77011 High "OpenMP Threads and number of active cores".

  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }

#else

#error "Unknown or unsupported OS."

#endif

  return r > 0 ? r : 2; /* guess a value of 2 if the OS reported 0 */

} // __kmp_get_xproc

int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL) {
    va_end(args); // release the va_list even on the early-exit path
    return 0;
  }
  result = vfscanf(f, format, args);
  fclose(f);
  va_end(args);

  return result;
}
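// [Illustrative sketch, not part of the runtime] __kmp_read_from_file() is a
// thin vfscanf() wrapper: open the pseudo-file, scan it with a caller-supplied
// format, and return the conversion count (0 on open failure). The path below
// is hypothetical; it only stands in for the kind of /proc or /sys file such
// a helper reads.
#if 0
static void demo_read_from_file(void) {
  int value = 0;
  if (__kmp_read_from_file("/sys/kernel/somefile", "%d", &value) == 1) {
    // 'value' holds the parsed integer; any other result means the open or
    // the parse failed.
  }
}
#endif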
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads */
    __kmp_sys_max_nth = sysconf(_SC_THREAD_THREADS_MAX);
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}

void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}

/* Put the thread to sleep for a time period */
/* NOTE: not currently used anywhere */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t = (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) +
       (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t = (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) +
       (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }

/* Return the current time stamp in nsec */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // 50~100 usec on most machines.
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    // Elapsed ticks = delay plus the overshoot past 'goal'; elapsed time is
    // 'diff' nanoseconds, so ticks/ms = ticks * 1e6 / ns.
    kmp_uint64 tpms = (kmp_uint64)(1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif
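// [Illustrative sketch, not part of the runtime] Worked example of the
// calibration arithmetic in __kmp_initialize_system_tick(), with made-up
// numbers: if the spin loop retired 260000 timestamp ticks while 100000 ns of
// wall time elapsed, then ticks-per-millisecond = 260000 * 1e6 / 100000 =
// 2.6e6, i.e. a 2.6 GHz invariant timestamp counter. Only the unit
// conversion is the point:
#if 0
#include <stdint.h>

static uint64_t demo_ticks_per_msec(uint64_t elapsed_ticks,
                                    uint64_t elapsed_ns) {
  // 1 ms == 1e6 ns, so scale ticks by 1e6 / elapsed_ns to get ticks per ms.
  return (uint64_t)(1e6 * elapsed_ticks / elapsed_ns);
}
#endif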
/* Determine whether the given address is mapped into the current address
   space. */

int __kmp_is_address_mapped(void *addr) {

  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th characters do not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);

#elif KMP_OS_DARWIN

  /* On OS X*, /proc pseudo filesystem is not available. Try to read memory
     using vm interface. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
      );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  // 'size' is a byte count after the second sysctl() call, so iterate over
  // whole entries only, and test whether 'addr' falls inside an entry
  // (start inclusive, end exclusive).
  for (size_t i = 0; i < size / sizeof(struct kinfo_vmentry); i++) {
    if ((uint64_t)addr >= kiv[i].kve_start &&
        (uint64_t)addr < kiv[i].kve_end) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_DRAGONFLY || KMP_OS_OPENBSD

  // FIXME(DragonFly, OpenBSD): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;

} // __kmp_is_address_mapped
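// [Illustrative sketch, not part of the runtime] Expected behavior of
// __kmp_is_address_mapped() on the /proc-based path, assuming a typical
// Linux layout: a local variable lives in a readable+writable mapping, so it
// reports 1; the first page is normally unmapped, so a near-null address
// reports 0. Hypothetical driver, not a real test:
#if 0
static void demo_is_address_mapped(void) {
  int on_stack = 0;
  KMP_DEBUG_ASSERT(__kmp_is_address_mapped(&on_stack) == 1);
  KMP_DEBUG_ASSERT(__kmp_is_address_mapped((void *)16) == 0);
}
#endif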
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during a
// given time interval, which depends on the value of the
// __kmp_load_balance_interval variable (default is 60 sec; other values may
// be 300 sec or 900 sec). It returns -1 in case of an error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested, i.e. fewer
  // than 3.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}
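// [Illustrative sketch, not part of the runtime] The interval-to-sample
// mapping above in isolation (hypothetical helper): getloadavg() fills up to
// three averages (1, 5, and 15 minutes) and reports how many it produced; the
// balance interval selects the closest one.
#if 0
#include <stdlib.h> // getloadavg() on Linux/BSD/macOS

static int demo_pick_load_average(int interval_sec, double *out) {
  double averages[3];
  int res = getloadavg(averages, 3);
  if (res >= 1 && interval_sec < 180)
    *out = averages[0]; // 1-minute average
  else if (res >= 2 && interval_sec < 600)
    *out = averages[1]; // 5-minute average
  else if (res == 3)
    *out = averages[2]; // 15-minute average
  else
    return -1; // not enough samples for the requested interval
  return 0;
}
#endif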
#else // Linux* OS

// The function returns the number of running (not sleeping) threads, or -1 in
// case of error. An error can be reported if the Linux* OS kernel is too old
// (without "/proc" support). Counting running threads stops if max running
// threads is encountered.
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads
  // for the thread balance algorithm
  static double glb_call_time = 0; /* Thread balance algorithm call time */

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.
  int total_threads = 0; // Total number of threads in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // Proc entry is a directory and name starts with a digit. Assume it is a
    // process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      ++total_processes;
      // Make sure init process is the very first in "/proc", so we can
      // replace strcmp( proc_entry->d_name, "1" ) == 0 with simpler
      // total_processes == 1. We are going to check that
      // total_processes == 1 => d_name == "1" is true (where "=>" is
      // implication). Since C++ does not have an => operator, let us replace
      // it with its equivalent: a => b == !a || b.
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // Process can finish between reading "/proc/" directory entry and
        // opening process' "task/" directory. So, in the general case we
        // should not complain, but have to skip this process and read the
        // next one. But on systems with no "task/" support we will spend a
        // lot of time scanning the "/proc/" tree again and again without any
        // benefit. The "init" process (its pid is 1) should always exist, so
        // if we cannot open "/proc/1/task/", it means "task/" is not
        // supported by the kernel. Report an error now and in the future.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {
            ++total_threads;

            // Construct complete stat file path. The easiest way would be:
            // __kmp_str_buf_print( & stat_path, "%s/%s/stat", task_path.str,
            // task_entry->d_name );
            // but a series of __kmp_str_buf_cat() calls works a bit faster.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: Low-level API (open/read/close) is used. High-level API
            // (fopen/fclose) works ~30% slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because the task (thread) can
              // terminate just before reading this file.
            } else {
              /* Content of the "stat" file looks like:
                 24285 (program) S ...

                 It is a single line (if the program name does not include
                 funny symbols). The first number is the thread id, then the
                 name of the executable file in parentheses, then the state of
                 the thread. We need just the thread state.

                 Good news: The length of the program name is 15 characters
                 max. Longer names are truncated.

                 Thus, we need a rather short buffer: 15 chars for the program
                 name + 2 parentheses + 3 spaces + ~7 digits of pid = 37.

                 Bad news: The program name may contain special symbols like a
                 space, a closing parenthesis, or even a newline. This makes
                 parsing the "stat" file not 100% reliable. In case of funny
                 program names, parsing may fail (report an incorrect thread
                 state).

                 Parsing the "status" file looks more promising (due to a
                 different file structure and escaping of special symbols),
                 but reading and parsing the "status" file works slower.
                 -- ln
              */
              char buffer[65];
              int len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                // Using scanf:
                // sscanf( buffer, "%*d (%*s) %c ", & state );
                // looks very nice, but searching for a closing parenthesis
                // works a bit faster.
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this
  // code gets skipped in the load balance, and running_threads is 0.
  // Assert in the debug builds only!!!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;

} // __kmp_get_load_balance

#endif // KMP_OS_DARWIN || KMP_OS_NETBSD

#endif // USE_LOAD_BALANCE
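// [Illustrative sketch, not part of the runtime] The "/proc/<pid>/stat"
// state-extraction trick above in isolation (hypothetical helper): find the
// first ") " that appears to close the command-name field, then read the
// single state character that follows. The second example shows why the
// comment above calls this parsing "not 100% reliable" for funny names.
#if 0
#include <string.h>

// Returns the state character ('R', 'S', 'D', ...) or 0 on a malformed line.
static char demo_stat_line_state(const char *line) {
  const char *close_paren = strstr(line, ") ");
  return close_paren ? *(close_paren + 2) : 0;
}

// demo_stat_line_state("24285 (program) S 1 ...") == 'S'
// demo_stat_line_state("4711 (a b) x) R 1 ...") == 'x' // name contained ") "
#endif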
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) || KMP_ARCH_PPC64)

// We really only need the case with 1 argument, because clang always builds a
// struct of pointers to the shared variables referenced in the outlined
// function.
int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void *p_argv[]
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
                           ) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

#if OMPT_SUPPORT
  *exit_frame_ptr = 0;
#endif

  return 1;
}

#endif

// end of file //