/*
 * kmp_global.cpp -- KPTS global variables for runtime support library
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif

kmp_key_t __kmp_gtid_threadprivate_key;

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
kmp_cpuinfo_t __kmp_cpuinfo = {0}; // Not initialized
#endif

#if KMP_STATS_ENABLED
#include "kmp_stats.h"
// lock for modifying the global __kmp_stats_list
kmp_tas_lock_t __kmp_stats_lock;

// global list of per thread stats, the head is a sentinel node which
// accumulates all stats produced before __kmp_create_worker is called.
kmp_stats_list *__kmp_stats_list;

// thread local pointer to stats node within list
KMP_THREAD_LOCAL kmp_stats_list *__kmp_stats_thread_ptr = NULL;

// gives reference tick for all events (considered the 0 tick)
tsc_tick_count __kmp_stats_start_time;
#endif
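/* Illustrative sketch of the sentinel-node pattern described above. The types
   and helpers here are hypothetical, not the real kmp_stats_list layout; the
   point is that a sentinel head gives stats recorded before any worker calls
   __kmp_create_worker a valid place to accumulate:

     struct stats_node {
       int counters[8] = {};
       stats_node *next = nullptr;
     };
     static stats_node sentinel; // list head; collects pre-worker stats
     static thread_local stats_node *tls_stats = &sentinel;

     void count_event(int e) { ++tls_stats->counters[e]; }
     void register_worker(stats_node *n) { // once per worker thread
       n->next = sentinel.next;
       sentinel.next = n;
       tls_stats = n; // later events go to the worker's own node
     }
*/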
/* ----------------------------------------------------- */
/* INITIALIZATION VARIABLES */
/* they are synchronized to write during init, but read anytime */
volatile int __kmp_init_serial = FALSE;
volatile int __kmp_init_gtid = FALSE;
volatile int __kmp_init_common = FALSE;
volatile int __kmp_init_middle = FALSE;
volatile int __kmp_init_parallel = FALSE;
volatile int __kmp_init_hidden_helper = FALSE;
volatile int __kmp_init_hidden_helper_threads = FALSE;
volatile int __kmp_hidden_helper_team_done = FALSE;
#if KMP_USE_MONITOR
volatile int __kmp_init_monitor =
    0; /* 1 - launched, 2 - actually started (Windows* OS only) */
#endif
volatile int __kmp_init_user_locks = FALSE;

/* list of addresses of allocated caches for commons */
kmp_cached_addr_t *__kmp_threadpriv_cache_list = NULL;

int __kmp_init_counter = 0;
int __kmp_root_counter = 0;
int __kmp_version = 0;

std::atomic<kmp_int32> __kmp_team_counter = ATOMIC_VAR_INIT(0);
std::atomic<kmp_int32> __kmp_task_counter = ATOMIC_VAR_INIT(0);

size_t __kmp_stksize = KMP_DEFAULT_STKSIZE;
#if KMP_USE_MONITOR
size_t __kmp_monitor_stksize = 0; // auto adjust
#endif
size_t __kmp_stkoffset = KMP_DEFAULT_STKOFFSET;
int __kmp_stkpadding = KMP_MIN_STKPADDING;

size_t __kmp_malloc_pool_incr = KMP_DEFAULT_MALLOC_POOL_INCR;

// Barrier method defaults, settings, and strings.
// branch factor = 2^branch_bits (only relevant for tree & hyper barrier types)
kmp_uint32 __kmp_barrier_gather_bb_dflt = 2;
/* branch_factor = 4 */ /* hyper2: C78980 */
kmp_uint32 __kmp_barrier_release_bb_dflt = 2;
/* branch_factor = 4 */ /* hyper2: C78980 */

kmp_bar_pat_e __kmp_barrier_gather_pat_dflt = bp_hyper_bar;
/* hyper2: C78980 */
kmp_bar_pat_e __kmp_barrier_release_pat_dflt = bp_hyper_bar;
/* hyper2: C78980 */

kmp_uint32 __kmp_barrier_gather_branch_bits[bs_last_barrier] = {0};
kmp_uint32 __kmp_barrier_release_branch_bits[bs_last_barrier] = {0};
kmp_bar_pat_e __kmp_barrier_gather_pattern[bs_last_barrier] = {bp_linear_bar};
kmp_bar_pat_e __kmp_barrier_release_pattern[bs_last_barrier] = {bp_linear_bar};
char const *__kmp_barrier_branch_bit_env_name[bs_last_barrier] = {
    "KMP_PLAIN_BARRIER", "KMP_FORKJOIN_BARRIER"
#if KMP_FAST_REDUCTION_BARRIER
    ,
    "KMP_REDUCTION_BARRIER"
#endif // KMP_FAST_REDUCTION_BARRIER
};
char const *__kmp_barrier_pattern_env_name[bs_last_barrier] = {
    "KMP_PLAIN_BARRIER_PATTERN", "KMP_FORKJOIN_BARRIER_PATTERN"
#if KMP_FAST_REDUCTION_BARRIER
    ,
    "KMP_REDUCTION_BARRIER_PATTERN"
#endif // KMP_FAST_REDUCTION_BARRIER
};
char const *__kmp_barrier_type_name[bs_last_barrier] = {"plain", "forkjoin"
#if KMP_FAST_REDUCTION_BARRIER
                                                        ,
                                                        "reduction"
#endif // KMP_FAST_REDUCTION_BARRIER
};
char const *__kmp_barrier_pattern_name[bp_last_bar] = {"linear", "tree",
                                                       "hyper", "hierarchical"};
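/* Note (illustrative sketch; the helper below is hypothetical): the defaults
   above store the fan-out as bits, so the effective branch factor of a tree or
   hyper barrier is 1 << branch_bits.

     static inline kmp_uint32 branch_factor_from_bits(kmp_uint32 bits) {
       return 1u << bits; // branch factor = 2^branch_bits
     }
     // With the defaults above, branch_factor_from_bits(2) == 4, i.e. each
     // node gathers from (or releases to) four children, matching the
     // "branch_factor = 4" comments.
*/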
int __kmp_allThreadsSpecified = 0;
size_t __kmp_align_alloc = CACHE_LINE;

int __kmp_generate_warnings = kmp_warnings_low;
int __kmp_reserve_warn = 0;
int __kmp_xproc = 0;
int __kmp_avail_proc = 0;
size_t __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
int __kmp_sys_max_nth = KMP_MAX_NTH;
int __kmp_max_nth = 0;
int __kmp_cg_max_nth = 0;
int __kmp_teams_max_nth = 0;
int __kmp_threads_capacity = 0;
int __kmp_dflt_team_nth = 0;
int __kmp_dflt_team_nth_ub = 0;
int __kmp_tp_capacity = 0;
int __kmp_tp_cached = 0;
int __kmp_dispatch_num_buffers = KMP_DFLT_DISP_NUM_BUFF;
int __kmp_dflt_max_active_levels = 1; // Nesting off by default
bool __kmp_dflt_max_active_levels_set = false; // Don't override set value
#if KMP_NESTED_HOT_TEAMS
int __kmp_hot_teams_mode = 0; /* 0 - free extra threads when reduced */
/* 1 - keep extra threads when reduced */
int __kmp_hot_teams_max_level = 1; /* nesting level of hot teams */
#endif
enum library_type __kmp_library = library_none;
enum sched_type __kmp_sched =
    kmp_sch_default; /* scheduling method for runtime scheduling */
enum sched_type __kmp_static =
    kmp_sch_static_greedy; /* default static scheduling method */
enum sched_type __kmp_guided =
    kmp_sch_guided_iterative_chunked; /* default guided scheduling method */
enum sched_type __kmp_auto =
    kmp_sch_guided_analytical_chunked; /* default auto scheduling method */
#if KMP_USE_HIER_SCHED
int __kmp_dispatch_hand_threading = 0;
int __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LAST + 1];
int __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LAST + 1];
kmp_hier_sched_env_t __kmp_hier_scheds = {0, 0, NULL, NULL, NULL};
#endif
int __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
#if KMP_USE_MONITOR
int __kmp_monitor_wakeups = KMP_MIN_MONITOR_WAKEUPS;
int __kmp_bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(KMP_DEFAULT_BLOCKTIME,
                                                      KMP_MIN_MONITOR_WAKEUPS);
#endif
#ifdef KMP_ADJUST_BLOCKTIME
int __kmp_zero_bt = FALSE;
#endif /* KMP_ADJUST_BLOCKTIME */
#ifdef KMP_DFLT_NTH_CORES
int __kmp_ncores = 0;
#endif
int __kmp_chunk = 0;
int __kmp_force_monotonic = 0;
int __kmp_abort_delay = 0;
#if KMP_OS_LINUX && defined(KMP_TDATA_GTID)
int __kmp_gtid_mode = 3; /* use __declspec(thread) TLS to store gtid */
int __kmp_adjust_gtid_mode = FALSE;
#elif KMP_OS_WINDOWS
int __kmp_gtid_mode = 2; /* use TLS functions to store gtid */
int __kmp_adjust_gtid_mode = FALSE;
#else
int __kmp_gtid_mode = 0; /* select method to get gtid based on #threads */
int __kmp_adjust_gtid_mode = TRUE;
#endif /* KMP_OS_LINUX && defined(KMP_TDATA_GTID) */
#ifdef KMP_TDATA_GTID
KMP_THREAD_LOCAL int __kmp_gtid = KMP_GTID_DNE;
#endif /* KMP_TDATA_GTID */
int __kmp_tls_gtid_min = INT_MAX;
int __kmp_foreign_tp = TRUE;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
int __kmp_inherit_fp_control = TRUE;
kmp_int16 __kmp_init_x87_fpu_control_word = 0;
kmp_uint32 __kmp_init_mxcsr = 0;
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef USE_LOAD_BALANCE
double __kmp_load_balance_interval = 1.0;
#endif /* USE_LOAD_BALANCE */

kmp_nested_nthreads_t __kmp_nested_nth = {NULL, 0, 0};

#if KMP_USE_ADAPTIVE_LOCKS

kmp_adaptive_backoff_params_t __kmp_adaptive_backoff_params = {
    1, 1024}; // TODO: tune it!

#if KMP_DEBUG_ADAPTIVE_LOCKS
const char *__kmp_speculative_statsfile = "-";
#endif

#endif // KMP_USE_ADAPTIVE_LOCKS

int __kmp_display_env = FALSE;
int __kmp_display_env_verbose = FALSE;
int __kmp_omp_cancellation = FALSE;
int __kmp_nteams = 0;
int __kmp_teams_thread_limit = 0;

#if KMP_HAVE_MWAIT || KMP_HAVE_UMWAIT
int __kmp_user_level_mwait = FALSE;
int __kmp_umwait_enabled = FALSE;
int __kmp_mwait_enabled = FALSE;
int __kmp_mwait_hints = 0;
#endif

/* map OMP 3.0 schedule types to our internal schedule types */
enum sched_type __kmp_sch_map[kmp_sched_upper - kmp_sched_lower_ext +
                              kmp_sched_upper_std - kmp_sched_lower - 2] = {
    kmp_sch_static_chunked, // ==> kmp_sched_static = 1
    kmp_sch_dynamic_chunked, // ==> kmp_sched_dynamic = 2
    kmp_sch_guided_chunked, // ==> kmp_sched_guided = 3
    kmp_sch_auto, // ==> kmp_sched_auto = 4
    kmp_sch_trapezoidal // ==> kmp_sched_trapezoidal = 101
    // will likely not be used, introduced here just to debug the code
    // of public intel extension schedules
};
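/* Hedged sketch of how a public kmp_sched_* value might be translated through
   __kmp_sch_map (the indexing arithmetic is an illustrative assumption, not
   code taken from this file; the enumerators are those used in the array bound
   above):

     static enum sched_type map_public_sched(int kind) {
       if (kind <= kmp_sched_upper_std) // standard range: static..auto
         return __kmp_sch_map[kind - kmp_sched_lower - 1];
       // extension range (e.g. kmp_sched_trapezoidal) follows the std entries
       return __kmp_sch_map[kind - kmp_sched_lower_ext + kmp_sched_upper_std -
                            kmp_sched_lower - 2];
     }
*/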
#if KMP_OS_LINUX
enum clock_function_type __kmp_clock_function;
int __kmp_clock_function_param;
#endif /* KMP_OS_LINUX */

#if KMP_MIC_SUPPORTED
enum mic_type __kmp_mic_type = non_mic;
#endif

#if KMP_AFFINITY_SUPPORTED

KMPAffinity *__kmp_affinity_dispatch = NULL;

#if KMP_USE_HWLOC
int __kmp_hwloc_error = FALSE;
hwloc_topology_t __kmp_hwloc_topology = NULL;
int __kmp_numa_detected = FALSE;
int __kmp_tile_depth = 0;
#endif

#if KMP_OS_WINDOWS
#if KMP_GROUP_AFFINITY
int __kmp_num_proc_groups = 1;
#endif /* KMP_GROUP_AFFINITY */
kmp_GetActiveProcessorCount_t __kmp_GetActiveProcessorCount = NULL;
kmp_GetActiveProcessorGroupCount_t __kmp_GetActiveProcessorGroupCount = NULL;
kmp_GetThreadGroupAffinity_t __kmp_GetThreadGroupAffinity = NULL;
kmp_SetThreadGroupAffinity_t __kmp_SetThreadGroupAffinity = NULL;
#endif /* KMP_OS_WINDOWS */

size_t __kmp_affin_mask_size = 0;
enum affinity_type __kmp_affinity_type = affinity_default;
enum affinity_gran __kmp_affinity_gran = affinity_gran_default;
int __kmp_affinity_gran_levels = -1;
int __kmp_affinity_dups = TRUE;
enum affinity_top_method __kmp_affinity_top_method =
    affinity_top_method_default;
int __kmp_affinity_compact = 0;
int __kmp_affinity_offset = 0;
int __kmp_affinity_verbose = FALSE;
int __kmp_affinity_warnings = TRUE;
int __kmp_affinity_respect_mask = affinity_respect_mask_default;
char *__kmp_affinity_proclist = NULL;
kmp_affin_mask_t *__kmp_affinity_masks = NULL;
unsigned __kmp_affinity_num_masks = 0;

char *__kmp_cpuinfo_file = NULL;

#endif /* KMP_AFFINITY_SUPPORTED */

kmp_nested_proc_bind_t __kmp_nested_proc_bind = {NULL, 0, 0};
int __kmp_affinity_num_places = 0;
int __kmp_display_affinity = FALSE;
char *__kmp_affinity_format = NULL;

kmp_hws_item_t __kmp_hws_socket = {0, 0};
kmp_hws_item_t __kmp_hws_die = {0, 0};
kmp_hws_item_t __kmp_hws_node = {0, 0};
kmp_hws_item_t __kmp_hws_tile = {0, 0};
kmp_hws_item_t __kmp_hws_core = {0, 0};
kmp_hws_item_t __kmp_hws_proc = {0, 0};
int __kmp_hws_requested = 0;
int __kmp_hws_abs_flag = 0; // absolute or per-item number requested

kmp_int32 __kmp_default_device = 0;

kmp_tasking_mode_t __kmp_tasking_mode = tskm_task_teams;
kmp_int32 __kmp_max_task_priority = 0;
kmp_uint64 __kmp_taskloop_min_tasks = 0;

int __kmp_memkind_available = 0;
omp_allocator_handle_t const omp_null_allocator = NULL;
omp_allocator_handle_t const omp_default_mem_alloc =
    (omp_allocator_handle_t const)1;
omp_allocator_handle_t const omp_large_cap_mem_alloc =
    (omp_allocator_handle_t const)2;
omp_allocator_handle_t const omp_const_mem_alloc =
    (omp_allocator_handle_t const)3;
omp_allocator_handle_t const omp_high_bw_mem_alloc =
    (omp_allocator_handle_t const)4;
omp_allocator_handle_t const omp_low_lat_mem_alloc =
    (omp_allocator_handle_t const)5;
omp_allocator_handle_t const omp_cgroup_mem_alloc =
    (omp_allocator_handle_t const)6;
omp_allocator_handle_t const omp_pteam_mem_alloc =
    (omp_allocator_handle_t const)7;
omp_allocator_handle_t const omp_thread_mem_alloc =
    (omp_allocator_handle_t const)8;
// Preview of target memory support
omp_allocator_handle_t const llvm_omp_target_host_mem_alloc =
    (omp_allocator_handle_t const)100;
omp_allocator_handle_t const llvm_omp_target_shared_mem_alloc =
    (omp_allocator_handle_t const)101;
omp_allocator_handle_t const llvm_omp_target_device_mem_alloc =
    (omp_allocator_handle_t const)102;
omp_allocator_handle_t const kmp_max_mem_alloc =
    (omp_allocator_handle_t const)1024;
omp_allocator_handle_t __kmp_def_allocator = omp_default_mem_alloc;
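/* The predefined allocator handles above are just small integer constants;
   user code reaches them through the standard OpenMP 5.x allocator API. A
   minimal user-side usage sketch (assumes <omp.h>; behavior when the request
   cannot be satisfied depends on the allocator's fallback traits):

     #include <omp.h>

     void *buf = omp_alloc(1024, omp_high_bw_mem_alloc);
     if (buf)
       omp_free(buf, omp_high_bw_mem_alloc);
*/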
omp_memspace_handle_t const omp_default_mem_space =
    (omp_memspace_handle_t const)0;
omp_memspace_handle_t const omp_large_cap_mem_space =
    (omp_memspace_handle_t const)1;
omp_memspace_handle_t const omp_const_mem_space =
    (omp_memspace_handle_t const)2;
omp_memspace_handle_t const omp_high_bw_mem_space =
    (omp_memspace_handle_t const)3;
omp_memspace_handle_t const omp_low_lat_mem_space =
    (omp_memspace_handle_t const)4;
// Preview of target memory support
omp_memspace_handle_t const llvm_omp_target_host_mem_space =
    (omp_memspace_handle_t const)100;
omp_memspace_handle_t const llvm_omp_target_shared_mem_space =
    (omp_memspace_handle_t const)101;
omp_memspace_handle_t const llvm_omp_target_device_mem_space =
    (omp_memspace_handle_t const)102;

/* This check ensures that the compiler is passing the correct data type for
   the flags formal parameter of the function kmpc_omp_task_alloc(). If the
   type is not a 4-byte type, the compiler will give an error message about a
   non-positive length array pointing here. If that happens, the
   kmp_tasking_flags_t structure must be redefined to have exactly 32 bits. */
KMP_BUILD_ASSERT(sizeof(kmp_tasking_flags_t) == 4);
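/* A minimal sketch of the negative-length-array technique the comment above
   refers to (generic illustration, not the actual KMP_BUILD_ASSERT
   definition):

     #define MY_BUILD_ASSERT(expr) typedef char my_assert_t_[(expr) ? 1 : -1]

     MY_BUILD_ASSERT(sizeof(int) == 4); // OK
     // MY_BUILD_ASSERT(sizeof(int) == 8); // error: array has negative size

   In C++11 and later the equivalent check is simply
     static_assert(sizeof(kmp_tasking_flags_t) == 4, "flags must be 32 bits");
*/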
int __kmp_task_stealing_constraint = 1; /* Constrain task stealing by default */
int __kmp_enable_task_throttling = 1;

#ifdef DEBUG_SUSPEND
int __kmp_suspend_count = 0;
#endif

int __kmp_settings = FALSE;
int __kmp_duplicate_library_ok = 0;
#if USE_ITT_BUILD
int __kmp_forkjoin_frames = 1;
int __kmp_forkjoin_frames_mode = 3;
#endif
PACKED_REDUCTION_METHOD_T __kmp_force_reduction_method =
    reduction_method_not_defined;
int __kmp_determ_red = FALSE;

#ifdef KMP_DEBUG
int kmp_a_debug = 0;
int kmp_b_debug = 0;
int kmp_c_debug = 0;
int kmp_d_debug = 0;
int kmp_e_debug = 0;
int kmp_f_debug = 0;
int kmp_diag = 0;
#endif

/* For debug information logging using rotating buffer */
int __kmp_debug_buf =
    FALSE; /* TRUE means use buffer, FALSE means print to stderr */
int __kmp_debug_buf_lines =
    KMP_DEBUG_BUF_LINES_INIT; /* Lines of debug stored in buffer */
int __kmp_debug_buf_chars =
    KMP_DEBUG_BUF_CHARS_INIT; /* Characters allowed per line in buffer */
int __kmp_debug_buf_atomic =
    FALSE; /* TRUE means use atomic update of buffer entry pointer */

char *__kmp_debug_buffer = NULL; /* Debug buffer itself */
std::atomic<int> __kmp_debug_count =
    ATOMIC_VAR_INIT(0); /* number of lines printed in buffer so far */
int __kmp_debug_buf_warn_chars =
    0; /* Keep track of char increase recommended in warnings */
/* end rotating debug buffer */

#ifdef KMP_DEBUG
int __kmp_par_range; /* +1 => only go par for constructs in range */
/* -1 => only go par for constructs outside range */
char __kmp_par_range_routine[KMP_PAR_RANGE_ROUTINE_LEN] = {'\0'};
char __kmp_par_range_filename[KMP_PAR_RANGE_FILENAME_LEN] = {'\0'};
int __kmp_par_range_lb = 0;
int __kmp_par_range_ub = INT_MAX;
#endif /* KMP_DEBUG */

/* For printing out dynamic storage map for threads and teams */
int __kmp_storage_map =
    FALSE; /* True means print storage map for threads and teams */
int __kmp_storage_map_verbose =
    FALSE; /* True means storage map includes placement info */
int __kmp_storage_map_verbose_specified = FALSE;
/* Initialize the library data structures when we fork a child process,
   defaults to TRUE */
int __kmp_need_register_atfork =
    TRUE; /* At initialization, call pthread_atfork to install fork handler */
int __kmp_need_register_atfork_specified = TRUE;

int __kmp_env_stksize = FALSE; /* KMP_STACKSIZE specified? */
int __kmp_env_blocktime = FALSE; /* KMP_BLOCKTIME specified? */
int __kmp_env_checks = FALSE; /* KMP_CHECKS specified? */
int __kmp_env_consistency_check = FALSE; /* KMP_CONSISTENCY_CHECK specified? */

// From KMP_USE_YIELD:
// 0 = never yield;
// 1 = always yield (default);
// 2 = yield only if oversubscribed
kmp_int32 __kmp_use_yield = 1;
// This will be 1 if KMP_USE_YIELD environment variable was set explicitly
kmp_int32 __kmp_use_yield_exp_set = 0;

kmp_uint32 __kmp_yield_init = KMP_INIT_WAIT;
kmp_uint32 __kmp_yield_next = KMP_NEXT_WAIT;

/* ------------------------------------------------------ */
/* STATE mostly synchronized with global lock */
/* data written to rarely by primary threads, read often by workers */
/* TODO: None of this global padding stuff works consistently because the order
   of declaration is not necessarily correlated to storage order. To fix this,
   all the important globals must be put in a big structure instead. */
KMP_ALIGN_CACHE
kmp_info_t **__kmp_threads = NULL;
kmp_root_t **__kmp_root = NULL;

/* data read/written to often by primary threads */
KMP_ALIGN_CACHE
volatile int __kmp_nth = 0;
volatile int __kmp_all_nth = 0;
volatile kmp_info_t *__kmp_thread_pool = NULL;
volatile kmp_team_t *__kmp_team_pool = NULL;

KMP_ALIGN_CACHE
std::atomic<int> __kmp_thread_pool_active_nth = ATOMIC_VAR_INIT(0);
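/* Hedged sketch of the "big structure" fix suggested in the TODO above
   (hypothetical layout, not how the runtime is organized today): putting the
   hot globals in one cache-aligned struct fixes their relative storage order,
   so the padding reliably separates rarely-written and frequently-written
   fields.

     struct alignas(CACHE_LINE) kmp_hot_globals_t {
       kmp_info_t **threads;
       kmp_root_t **root;
       char pad0[CACHE_LINE]; // isolate rarely-written pointers ...
       volatile int nth; // ... from frequently-updated counters
       volatile int all_nth;
       volatile kmp_info_t *thread_pool;
       volatile kmp_team_t *team_pool;
       char pad1[CACHE_LINE];
       std::atomic<int> thread_pool_active_nth;
     };
*/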
/* -------------------------------------------------
 * GLOBAL/ROOT STATE */
KMP_ALIGN_CACHE
kmp_global_t __kmp_global;

/* ----------------------------------------------- */
/* GLOBAL SYNCHRONIZATION LOCKS */
/* TODO verify the need for these locks and if they need to be global */

#if KMP_USE_INTERNODE_ALIGNMENT
/* Multinode systems have larger cache line granularity, which can cause
 * false sharing if the alignment is not large enough for these locks */
KMP_ALIGN_CACHE_INTERNODE

KMP_BOOTSTRAP_LOCK_INIT(__kmp_initz_lock); /* Control initializations */
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_forkjoin_lock); /* control fork/join access */
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_exit_lock); /* exit() is not always thread-safe */
#if KMP_USE_MONITOR
/* control monitor thread creation */
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
#endif
/* used for the hack to allow threadprivate cache and __kmp_threads expansion
   to co-exist */
KMP_ALIGN_CACHE_INTERNODE
KMP_BOOTSTRAP_LOCK_INIT(__kmp_tp_cached_lock);

KMP_ALIGN_CACHE_INTERNODE
KMP_LOCK_INIT(__kmp_global_lock); /* Control OS/global access */
KMP_ALIGN_CACHE_INTERNODE
kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
KMP_ALIGN_CACHE_INTERNODE
KMP_LOCK_INIT(__kmp_debug_lock); /* Control I/O access for KMP_DEBUG */
#else
KMP_ALIGN_CACHE

KMP_BOOTSTRAP_LOCK_INIT(__kmp_initz_lock); /* Control initializations */
KMP_BOOTSTRAP_LOCK_INIT(__kmp_forkjoin_lock); /* control fork/join access */
KMP_BOOTSTRAP_LOCK_INIT(__kmp_exit_lock); /* exit() is not always thread-safe */
#if KMP_USE_MONITOR
/* control monitor thread creation */
KMP_BOOTSTRAP_LOCK_INIT(__kmp_monitor_lock);
#endif
/* used for the hack to allow threadprivate cache and __kmp_threads expansion
   to co-exist */
KMP_BOOTSTRAP_LOCK_INIT(__kmp_tp_cached_lock);

KMP_ALIGN(128)
KMP_LOCK_INIT(__kmp_global_lock); /* Control OS/global access */
KMP_ALIGN(128)
kmp_queuing_lock_t __kmp_dispatch_lock; /* Control dispatch access */
KMP_ALIGN(128)
KMP_LOCK_INIT(__kmp_debug_lock); /* Control I/O access for KMP_DEBUG */
#endif

/* ----------------------------------------------- */

#if KMP_HANDLE_SIGNALS
/* Signal handling is disabled by default, because it confuses users: in case
   of sigsegv (or other trouble) in user code, the signal handler catches the
   signal, which then "appears" in the monitor thread (when the monitor
   executes the raise() function). Users see the signal in the monitor thread
   and blame the OpenMP RTL.

   Grant said signal handling was required on some older OSes (Irix?) supported
   by KAI, because bad applications hung but did not abort. Currently it is not
   a problem for Linux* OS, OS X*, and Windows* OS.

   Grant: Found new hangs for EL4, EL5, and a Fedora Core machine. So I'm
   putting the default back for now to see if that fixes hangs on those
   machines.

   2010-04-13 Lev: It was a bug in the Fortran RTL. The Fortran RTL prints a
   kind of stack backtrace when a program is aborting, but the code is not
   signal-safe. When multiple signals are raised at the same time (which occurs
   in dynamic negative tests because all the worker threads detect the same
   error), the Fortran RTL may hang. The bug was finally fixed in the Fortran
   RTL library provided by Steve R., and will be available soon. */
int __kmp_handle_signals = FALSE;
#endif

#ifdef DEBUG_SUSPEND
int get_suspend_count_(void) {
  int count = __kmp_suspend_count;
  __kmp_suspend_count = 0;
  return count;
}
void set_suspend_count_(int *value) { __kmp_suspend_count = *value; }
#endif

// Symbols for MS mutual detection.
int _You_must_link_with_exactly_one_OpenMP_library = 1;
int _You_must_link_with_Intel_OpenMP_library = 1;
#if KMP_OS_WINDOWS && (KMP_VERSION_MAJOR > 4)
int _You_must_link_with_Microsoft_OpenMP_library = 1;
#endif

kmp_target_offload_kind_t __kmp_target_offload = tgt_default;

// OMP Pause Resources
kmp_pause_status_t __kmp_pause_status = kmp_not_paused;

// end of file //