1 /* 2 * kmp_lock.cpp -- lock-related functions 3 */ 4 5 //===----------------------------------------------------------------------===// 6 // 7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 8 // See https://llvm.org/LICENSE.txt for license information. 9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 10 // 11 //===----------------------------------------------------------------------===// 12 13 #include <stddef.h> 14 #include <atomic> 15 16 #include "kmp.h" 17 #include "kmp_i18n.h" 18 #include "kmp_io.h" 19 #include "kmp_itt.h" 20 #include "kmp_lock.h" 21 #include "kmp_wait_release.h" 22 #include "kmp_wrapper_getpid.h" 23 24 #include "tsan_annotations.h" 25 26 #if KMP_USE_FUTEX 27 #include <sys/syscall.h> 28 #include <unistd.h> 29 // We should really include <futex.h>, but that causes compatibility problems on 30 // different Linux* OS distributions that either require that you include (or 31 // break when you try to include) <pci/types.h>. Since all we need is the two 32 // macros below (which are part of the kernel ABI, so can't change) we just 33 // define the constants here and don't include <futex.h> 34 #ifndef FUTEX_WAIT 35 #define FUTEX_WAIT 0 36 #endif 37 #ifndef FUTEX_WAKE 38 #define FUTEX_WAKE 1 39 #endif 40 #endif 41 42 /* Implement spin locks for internal library use. */ 43 /* The algorithm implemented is Lamport's bakery lock [1974]. */ 44 45 void __kmp_validate_locks(void) { 46 int i; 47 kmp_uint32 x, y; 48 49 /* Check to make sure unsigned arithmetic does wraps properly */ 50 x = ~((kmp_uint32)0) - 2; 51 y = x - 2; 52 53 for (i = 0; i < 8; ++i, ++x, ++y) { 54 kmp_uint32 z = (x - y); 55 KMP_ASSERT(z == 2); 56 } 57 58 KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0); 59 } 60 61 /* ------------------------------------------------------------------------ */ 62 /* test and set locks */ 63 64 // For the non-nested locks, we can only assume that the first 4 bytes were 65 // allocated, since gcc only allocates 4 bytes for omp_lock_t, and the Intel 66 // compiler only allocates a 4 byte pointer on IA-32 architecture. On 67 // Windows* OS on Intel(R) 64, we can assume that all 8 bytes were allocated. 68 // 69 // gcc reserves >= 8 bytes for nested locks, so we can assume that the 70 // entire 8 bytes were allocated for nested locks on all 64-bit platforms. 71 72 static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) { 73 return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1; 74 } 75 76 static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) { 77 return lck->lk.depth_locked != -1; 78 } 79 80 __forceinline static int 81 __kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) { 82 KMP_MB(); 83 84 #ifdef USE_LOCK_PROFILE 85 kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll); 86 if ((curr != 0) && (curr != gtid + 1)) 87 __kmp_printf("LOCK CONTENTION: %p\n", lck); 88 /* else __kmp_printf( "." 
);*/ 89 #endif /* USE_LOCK_PROFILE */ 90 91 kmp_int32 tas_free = KMP_LOCK_FREE(tas); 92 kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas); 93 94 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free && 95 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) { 96 KMP_FSYNC_ACQUIRED(lck); 97 return KMP_LOCK_ACQUIRED_FIRST; 98 } 99 100 kmp_uint32 spins; 101 KMP_FSYNC_PREPARE(lck); 102 KMP_INIT_YIELD(spins); 103 kmp_backoff_t backoff = __kmp_spin_backoff_params; 104 do { 105 __kmp_spin_backoff(&backoff); 106 KMP_YIELD_OVERSUB_ELSE_SPIN(spins); 107 } while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free || 108 !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)); 109 KMP_FSYNC_ACQUIRED(lck); 110 return KMP_LOCK_ACQUIRED_FIRST; 111 } 112 113 int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { 114 int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid); 115 ANNOTATE_TAS_ACQUIRED(lck); 116 return retval; 117 } 118 119 static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck, 120 kmp_int32 gtid) { 121 char const *const func = "omp_set_lock"; 122 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) && 123 __kmp_is_tas_lock_nestable(lck)) { 124 KMP_FATAL(LockNestableUsedAsSimple, func); 125 } 126 if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) { 127 KMP_FATAL(LockIsAlreadyOwned, func); 128 } 129 return __kmp_acquire_tas_lock(lck, gtid); 130 } 131 132 int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { 133 kmp_int32 tas_free = KMP_LOCK_FREE(tas); 134 kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas); 135 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free && 136 __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) { 137 KMP_FSYNC_ACQUIRED(lck); 138 return TRUE; 139 } 140 return FALSE; 141 } 142 143 static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck, 144 kmp_int32 gtid) { 145 char const *const func = "omp_test_lock"; 146 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) && 147 __kmp_is_tas_lock_nestable(lck)) { 148 KMP_FATAL(LockNestableUsedAsSimple, func); 149 } 150 return __kmp_test_tas_lock(lck, gtid); 151 } 152 153 int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { 154 KMP_MB(); /* Flush all pending memory write invalidates. */ 155 156 KMP_FSYNC_RELEASING(lck); 157 ANNOTATE_TAS_RELEASED(lck); 158 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas)); 159 KMP_MB(); /* Flush all pending memory write invalidates. 
*/ 160 161 KMP_YIELD_OVERSUB(); 162 return KMP_LOCK_RELEASED; 163 } 164 165 static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck, 166 kmp_int32 gtid) { 167 char const *const func = "omp_unset_lock"; 168 KMP_MB(); /* in case another processor initialized lock */ 169 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) && 170 __kmp_is_tas_lock_nestable(lck)) { 171 KMP_FATAL(LockNestableUsedAsSimple, func); 172 } 173 if (__kmp_get_tas_lock_owner(lck) == -1) { 174 KMP_FATAL(LockUnsettingFree, func); 175 } 176 if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) && 177 (__kmp_get_tas_lock_owner(lck) != gtid)) { 178 KMP_FATAL(LockUnsettingSetByAnother, func); 179 } 180 return __kmp_release_tas_lock(lck, gtid); 181 } 182 183 void __kmp_init_tas_lock(kmp_tas_lock_t *lck) { 184 lck->lk.poll = KMP_LOCK_FREE(tas); 185 } 186 187 void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; } 188 189 static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) { 190 char const *const func = "omp_destroy_lock"; 191 if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) && 192 __kmp_is_tas_lock_nestable(lck)) { 193 KMP_FATAL(LockNestableUsedAsSimple, func); 194 } 195 if (__kmp_get_tas_lock_owner(lck) != -1) { 196 KMP_FATAL(LockStillOwned, func); 197 } 198 __kmp_destroy_tas_lock(lck); 199 } 200 201 // nested test and set locks 202 203 int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { 204 KMP_DEBUG_ASSERT(gtid >= 0); 205 206 if (__kmp_get_tas_lock_owner(lck) == gtid) { 207 lck->lk.depth_locked += 1; 208 return KMP_LOCK_ACQUIRED_NEXT; 209 } else { 210 __kmp_acquire_tas_lock_timed_template(lck, gtid); 211 ANNOTATE_TAS_ACQUIRED(lck); 212 lck->lk.depth_locked = 1; 213 return KMP_LOCK_ACQUIRED_FIRST; 214 } 215 } 216 217 static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck, 218 kmp_int32 gtid) { 219 char const *const func = "omp_set_nest_lock"; 220 if (!__kmp_is_tas_lock_nestable(lck)) { 221 KMP_FATAL(LockSimpleUsedAsNestable, func); 222 } 223 return __kmp_acquire_nested_tas_lock(lck, gtid); 224 } 225 226 int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { 227 int retval; 228 229 KMP_DEBUG_ASSERT(gtid >= 0); 230 231 if (__kmp_get_tas_lock_owner(lck) == gtid) { 232 retval = ++lck->lk.depth_locked; 233 } else if (!__kmp_test_tas_lock(lck, gtid)) { 234 retval = 0; 235 } else { 236 KMP_MB(); 237 retval = lck->lk.depth_locked = 1; 238 } 239 return retval; 240 } 241 242 static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck, 243 kmp_int32 gtid) { 244 char const *const func = "omp_test_nest_lock"; 245 if (!__kmp_is_tas_lock_nestable(lck)) { 246 KMP_FATAL(LockSimpleUsedAsNestable, func); 247 } 248 return __kmp_test_nested_tas_lock(lck, gtid); 249 } 250 251 int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) { 252 KMP_DEBUG_ASSERT(gtid >= 0); 253 254 KMP_MB(); 255 if (--(lck->lk.depth_locked) == 0) { 256 __kmp_release_tas_lock(lck, gtid); 257 return KMP_LOCK_RELEASED; 258 } 259 return KMP_LOCK_STILL_HELD; 260 } 261 262 static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck, 263 kmp_int32 gtid) { 264 char const *const func = "omp_unset_nest_lock"; 265 KMP_MB(); /* in case another processor initialized lock */ 266 if (!__kmp_is_tas_lock_nestable(lck)) { 267 KMP_FATAL(LockSimpleUsedAsNestable, func); 268 } 269 if (__kmp_get_tas_lock_owner(lck) == -1) { 270 KMP_FATAL(LockUnsettingFree, func); 271 } 272 if (__kmp_get_tas_lock_owner(lck) != gtid) { 273 KMP_FATAL(LockUnsettingSetByAnother, func); 
274 } 275 return __kmp_release_nested_tas_lock(lck, gtid); 276 } 277 278 void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) { 279 __kmp_init_tas_lock(lck); 280 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks 281 } 282 283 void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) { 284 __kmp_destroy_tas_lock(lck); 285 lck->lk.depth_locked = 0; 286 } 287 288 static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) { 289 char const *const func = "omp_destroy_nest_lock"; 290 if (!__kmp_is_tas_lock_nestable(lck)) { 291 KMP_FATAL(LockSimpleUsedAsNestable, func); 292 } 293 if (__kmp_get_tas_lock_owner(lck) != -1) { 294 KMP_FATAL(LockStillOwned, func); 295 } 296 __kmp_destroy_nested_tas_lock(lck); 297 } 298 299 #if KMP_USE_FUTEX 300 301 /* ------------------------------------------------------------------------ */ 302 /* futex locks */ 303 304 // futex locks are really just test and set locks, with a different method 305 // of handling contention. They take the same amount of space as test and 306 // set locks, and are allocated the same way (i.e. use the area allocated by 307 // the compiler for non-nested locks / allocate nested locks on the heap). 308 309 static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) { 310 return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1; 311 } 312 313 static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) { 314 return lck->lk.depth_locked != -1; 315 } 316 317 __forceinline static int 318 __kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) { 319 kmp_int32 gtid_code = (gtid + 1) << 1; 320 321 KMP_MB(); 322 323 #ifdef USE_LOCK_PROFILE 324 kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll)); 325 if ((curr != 0) && (curr != gtid_code)) 326 __kmp_printf("LOCK CONTENTION: %p\n", lck); 327 /* else __kmp_printf( "." );*/ 328 #endif /* USE_LOCK_PROFILE */ 329 330 KMP_FSYNC_PREPARE(lck); 331 KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n", 332 lck, lck->lk.poll, gtid)); 333 334 kmp_int32 poll_val; 335 336 while ((poll_val = KMP_COMPARE_AND_STORE_RET32( 337 &(lck->lk.poll), KMP_LOCK_FREE(futex), 338 KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) { 339 340 kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1; 341 KA_TRACE( 342 1000, 343 ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n", 344 lck, gtid, poll_val, cond)); 345 346 // NOTE: if you try to use the following condition for this branch 347 // 348 // if ( poll_val & 1 == 0 ) 349 // 350 // Then the 12.0 compiler has a bug where the following block will 351 // always be skipped, regardless of the value of the LSB of poll_val. 352 if (!cond) { 353 // Try to set the lsb in the poll to indicate to the owner 354 // thread that they need to wake this thread up. 
355 if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val, 356 poll_val | KMP_LOCK_BUSY(1, futex))) { 357 KA_TRACE( 358 1000, 359 ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n", 360 lck, lck->lk.poll, gtid)); 361 continue; 362 } 363 poll_val |= KMP_LOCK_BUSY(1, futex); 364 365 KA_TRACE(1000, 366 ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", lck, 367 lck->lk.poll, gtid)); 368 } 369 370 KA_TRACE( 371 1000, 372 ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n", 373 lck, gtid, poll_val)); 374 375 long rc; 376 if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL, 377 NULL, 0)) != 0) { 378 KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) " 379 "failed (rc=%ld errno=%d)\n", 380 lck, gtid, poll_val, rc, errno)); 381 continue; 382 } 383 384 KA_TRACE(1000, 385 ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n", 386 lck, gtid, poll_val)); 387 // This thread has now done a successful futex wait call and was entered on 388 // the OS futex queue. We must now perform a futex wake call when releasing 389 // the lock, as we have no idea how many other threads are in the queue. 390 gtid_code |= 1; 391 } 392 393 KMP_FSYNC_ACQUIRED(lck); 394 KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck, 395 lck->lk.poll, gtid)); 396 return KMP_LOCK_ACQUIRED_FIRST; 397 } 398 399 int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { 400 int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid); 401 ANNOTATE_FUTEX_ACQUIRED(lck); 402 return retval; 403 } 404 405 static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck, 406 kmp_int32 gtid) { 407 char const *const func = "omp_set_lock"; 408 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) && 409 __kmp_is_futex_lock_nestable(lck)) { 410 KMP_FATAL(LockNestableUsedAsSimple, func); 411 } 412 if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) { 413 KMP_FATAL(LockIsAlreadyOwned, func); 414 } 415 return __kmp_acquire_futex_lock(lck, gtid); 416 } 417 418 int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { 419 if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex), 420 KMP_LOCK_BUSY((gtid + 1) << 1, futex))) { 421 KMP_FSYNC_ACQUIRED(lck); 422 return TRUE; 423 } 424 return FALSE; 425 } 426 427 static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck, 428 kmp_int32 gtid) { 429 char const *const func = "omp_test_lock"; 430 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) && 431 __kmp_is_futex_lock_nestable(lck)) { 432 KMP_FATAL(LockNestableUsedAsSimple, func); 433 } 434 return __kmp_test_futex_lock(lck, gtid); 435 } 436 437 int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { 438 KMP_MB(); /* Flush all pending memory write invalidates. */ 439 440 KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n", 441 lck, lck->lk.poll, gtid)); 442 443 KMP_FSYNC_RELEASING(lck); 444 ANNOTATE_FUTEX_RELEASED(lck); 445 446 kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex)); 447 448 KA_TRACE(1000, 449 ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n", 450 lck, gtid, poll_val)); 451 452 if (KMP_LOCK_STRIP(poll_val) & 1) { 453 KA_TRACE(1000, 454 ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n", 455 lck, gtid)); 456 syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex), 457 NULL, NULL, 0); 458 } 459 460 KMP_MB(); /* Flush all pending memory write invalidates. 
*/ 461 462 KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck, 463 lck->lk.poll, gtid)); 464 465 KMP_YIELD_OVERSUB(); 466 return KMP_LOCK_RELEASED; 467 } 468 469 static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck, 470 kmp_int32 gtid) { 471 char const *const func = "omp_unset_lock"; 472 KMP_MB(); /* in case another processor initialized lock */ 473 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) && 474 __kmp_is_futex_lock_nestable(lck)) { 475 KMP_FATAL(LockNestableUsedAsSimple, func); 476 } 477 if (__kmp_get_futex_lock_owner(lck) == -1) { 478 KMP_FATAL(LockUnsettingFree, func); 479 } 480 if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) && 481 (__kmp_get_futex_lock_owner(lck) != gtid)) { 482 KMP_FATAL(LockUnsettingSetByAnother, func); 483 } 484 return __kmp_release_futex_lock(lck, gtid); 485 } 486 487 void __kmp_init_futex_lock(kmp_futex_lock_t *lck) { 488 TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex)); 489 } 490 491 void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; } 492 493 static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) { 494 char const *const func = "omp_destroy_lock"; 495 if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) && 496 __kmp_is_futex_lock_nestable(lck)) { 497 KMP_FATAL(LockNestableUsedAsSimple, func); 498 } 499 if (__kmp_get_futex_lock_owner(lck) != -1) { 500 KMP_FATAL(LockStillOwned, func); 501 } 502 __kmp_destroy_futex_lock(lck); 503 } 504 505 // nested futex locks 506 507 int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { 508 KMP_DEBUG_ASSERT(gtid >= 0); 509 510 if (__kmp_get_futex_lock_owner(lck) == gtid) { 511 lck->lk.depth_locked += 1; 512 return KMP_LOCK_ACQUIRED_NEXT; 513 } else { 514 __kmp_acquire_futex_lock_timed_template(lck, gtid); 515 ANNOTATE_FUTEX_ACQUIRED(lck); 516 lck->lk.depth_locked = 1; 517 return KMP_LOCK_ACQUIRED_FIRST; 518 } 519 } 520 521 static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck, 522 kmp_int32 gtid) { 523 char const *const func = "omp_set_nest_lock"; 524 if (!__kmp_is_futex_lock_nestable(lck)) { 525 KMP_FATAL(LockSimpleUsedAsNestable, func); 526 } 527 return __kmp_acquire_nested_futex_lock(lck, gtid); 528 } 529 530 int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { 531 int retval; 532 533 KMP_DEBUG_ASSERT(gtid >= 0); 534 535 if (__kmp_get_futex_lock_owner(lck) == gtid) { 536 retval = ++lck->lk.depth_locked; 537 } else if (!__kmp_test_futex_lock(lck, gtid)) { 538 retval = 0; 539 } else { 540 KMP_MB(); 541 retval = lck->lk.depth_locked = 1; 542 } 543 return retval; 544 } 545 546 static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck, 547 kmp_int32 gtid) { 548 char const *const func = "omp_test_nest_lock"; 549 if (!__kmp_is_futex_lock_nestable(lck)) { 550 KMP_FATAL(LockSimpleUsedAsNestable, func); 551 } 552 return __kmp_test_nested_futex_lock(lck, gtid); 553 } 554 555 int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) { 556 KMP_DEBUG_ASSERT(gtid >= 0); 557 558 KMP_MB(); 559 if (--(lck->lk.depth_locked) == 0) { 560 __kmp_release_futex_lock(lck, gtid); 561 return KMP_LOCK_RELEASED; 562 } 563 return KMP_LOCK_STILL_HELD; 564 } 565 566 static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck, 567 kmp_int32 gtid) { 568 char const *const func = "omp_unset_nest_lock"; 569 KMP_MB(); /* in case another processor initialized lock */ 570 if (!__kmp_is_futex_lock_nestable(lck)) { 571 KMP_FATAL(LockSimpleUsedAsNestable, func); 
572 } 573 if (__kmp_get_futex_lock_owner(lck) == -1) { 574 KMP_FATAL(LockUnsettingFree, func); 575 } 576 if (__kmp_get_futex_lock_owner(lck) != gtid) { 577 KMP_FATAL(LockUnsettingSetByAnother, func); 578 } 579 return __kmp_release_nested_futex_lock(lck, gtid); 580 } 581 582 void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) { 583 __kmp_init_futex_lock(lck); 584 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks 585 } 586 587 void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) { 588 __kmp_destroy_futex_lock(lck); 589 lck->lk.depth_locked = 0; 590 } 591 592 static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) { 593 char const *const func = "omp_destroy_nest_lock"; 594 if (!__kmp_is_futex_lock_nestable(lck)) { 595 KMP_FATAL(LockSimpleUsedAsNestable, func); 596 } 597 if (__kmp_get_futex_lock_owner(lck) != -1) { 598 KMP_FATAL(LockStillOwned, func); 599 } 600 __kmp_destroy_nested_futex_lock(lck); 601 } 602 603 #endif // KMP_USE_FUTEX 604 605 /* ------------------------------------------------------------------------ */ 606 /* ticket (bakery) locks */ 607 608 static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) { 609 return std::atomic_load_explicit(&lck->lk.owner_id, 610 std::memory_order_relaxed) - 611 1; 612 } 613 614 static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) { 615 return std::atomic_load_explicit(&lck->lk.depth_locked, 616 std::memory_order_relaxed) != -1; 617 } 618 619 static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) { 620 return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving, 621 std::memory_order_acquire) == my_ticket; 622 } 623 624 __forceinline static int 625 __kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck, 626 kmp_int32 gtid) { 627 kmp_uint32 my_ticket = std::atomic_fetch_add_explicit( 628 &lck->lk.next_ticket, 1U, std::memory_order_relaxed); 629 630 #ifdef USE_LOCK_PROFILE 631 if (std::atomic_load_explicit(&lck->lk.now_serving, 632 std::memory_order_relaxed) != my_ticket) 633 __kmp_printf("LOCK CONTENTION: %p\n", lck); 634 /* else __kmp_printf( "." 
);*/ 635 #endif /* USE_LOCK_PROFILE */ 636 637 if (std::atomic_load_explicit(&lck->lk.now_serving, 638 std::memory_order_acquire) == my_ticket) { 639 return KMP_LOCK_ACQUIRED_FIRST; 640 } 641 KMP_WAIT_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck); 642 return KMP_LOCK_ACQUIRED_FIRST; 643 } 644 645 int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { 646 int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid); 647 ANNOTATE_TICKET_ACQUIRED(lck); 648 return retval; 649 } 650 651 static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck, 652 kmp_int32 gtid) { 653 char const *const func = "omp_set_lock"; 654 655 if (!std::atomic_load_explicit(&lck->lk.initialized, 656 std::memory_order_relaxed)) { 657 KMP_FATAL(LockIsUninitialized, func); 658 } 659 if (lck->lk.self != lck) { 660 KMP_FATAL(LockIsUninitialized, func); 661 } 662 if (__kmp_is_ticket_lock_nestable(lck)) { 663 KMP_FATAL(LockNestableUsedAsSimple, func); 664 } 665 if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) { 666 KMP_FATAL(LockIsAlreadyOwned, func); 667 } 668 669 __kmp_acquire_ticket_lock(lck, gtid); 670 671 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, 672 std::memory_order_relaxed); 673 return KMP_LOCK_ACQUIRED_FIRST; 674 } 675 676 int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { 677 kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket, 678 std::memory_order_relaxed); 679 680 if (std::atomic_load_explicit(&lck->lk.now_serving, 681 std::memory_order_relaxed) == my_ticket) { 682 kmp_uint32 next_ticket = my_ticket + 1; 683 if (std::atomic_compare_exchange_strong_explicit( 684 &lck->lk.next_ticket, &my_ticket, next_ticket, 685 std::memory_order_acquire, std::memory_order_acquire)) { 686 return TRUE; 687 } 688 } 689 return FALSE; 690 } 691 692 static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck, 693 kmp_int32 gtid) { 694 char const *const func = "omp_test_lock"; 695 696 if (!std::atomic_load_explicit(&lck->lk.initialized, 697 std::memory_order_relaxed)) { 698 KMP_FATAL(LockIsUninitialized, func); 699 } 700 if (lck->lk.self != lck) { 701 KMP_FATAL(LockIsUninitialized, func); 702 } 703 if (__kmp_is_ticket_lock_nestable(lck)) { 704 KMP_FATAL(LockNestableUsedAsSimple, func); 705 } 706 707 int retval = __kmp_test_ticket_lock(lck, gtid); 708 709 if (retval) { 710 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, 711 std::memory_order_relaxed); 712 } 713 return retval; 714 } 715 716 int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { 717 kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket, 718 std::memory_order_relaxed) - 719 std::atomic_load_explicit(&lck->lk.now_serving, 720 std::memory_order_relaxed); 721 722 ANNOTATE_TICKET_RELEASED(lck); 723 std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U, 724 std::memory_order_release); 725 726 KMP_YIELD(distance > 727 (kmp_uint32)(__kmp_avail_proc ? 
__kmp_avail_proc : __kmp_xproc)); 728 return KMP_LOCK_RELEASED; 729 } 730 731 static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck, 732 kmp_int32 gtid) { 733 char const *const func = "omp_unset_lock"; 734 735 if (!std::atomic_load_explicit(&lck->lk.initialized, 736 std::memory_order_relaxed)) { 737 KMP_FATAL(LockIsUninitialized, func); 738 } 739 if (lck->lk.self != lck) { 740 KMP_FATAL(LockIsUninitialized, func); 741 } 742 if (__kmp_is_ticket_lock_nestable(lck)) { 743 KMP_FATAL(LockNestableUsedAsSimple, func); 744 } 745 if (__kmp_get_ticket_lock_owner(lck) == -1) { 746 KMP_FATAL(LockUnsettingFree, func); 747 } 748 if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) && 749 (__kmp_get_ticket_lock_owner(lck) != gtid)) { 750 KMP_FATAL(LockUnsettingSetByAnother, func); 751 } 752 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed); 753 return __kmp_release_ticket_lock(lck, gtid); 754 } 755 756 void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) { 757 lck->lk.location = NULL; 758 lck->lk.self = lck; 759 std::atomic_store_explicit(&lck->lk.next_ticket, 0U, 760 std::memory_order_relaxed); 761 std::atomic_store_explicit(&lck->lk.now_serving, 0U, 762 std::memory_order_relaxed); 763 std::atomic_store_explicit( 764 &lck->lk.owner_id, 0, 765 std::memory_order_relaxed); // no thread owns the lock. 766 std::atomic_store_explicit( 767 &lck->lk.depth_locked, -1, 768 std::memory_order_relaxed); // -1 => not a nested lock. 769 std::atomic_store_explicit(&lck->lk.initialized, true, 770 std::memory_order_release); 771 } 772 773 void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) { 774 std::atomic_store_explicit(&lck->lk.initialized, false, 775 std::memory_order_release); 776 lck->lk.self = NULL; 777 lck->lk.location = NULL; 778 std::atomic_store_explicit(&lck->lk.next_ticket, 0U, 779 std::memory_order_relaxed); 780 std::atomic_store_explicit(&lck->lk.now_serving, 0U, 781 std::memory_order_relaxed); 782 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed); 783 std::atomic_store_explicit(&lck->lk.depth_locked, -1, 784 std::memory_order_relaxed); 785 } 786 787 static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) { 788 char const *const func = "omp_destroy_lock"; 789 790 if (!std::atomic_load_explicit(&lck->lk.initialized, 791 std::memory_order_relaxed)) { 792 KMP_FATAL(LockIsUninitialized, func); 793 } 794 if (lck->lk.self != lck) { 795 KMP_FATAL(LockIsUninitialized, func); 796 } 797 if (__kmp_is_ticket_lock_nestable(lck)) { 798 KMP_FATAL(LockNestableUsedAsSimple, func); 799 } 800 if (__kmp_get_ticket_lock_owner(lck) != -1) { 801 KMP_FATAL(LockStillOwned, func); 802 } 803 __kmp_destroy_ticket_lock(lck); 804 } 805 806 // nested ticket locks 807 808 int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { 809 KMP_DEBUG_ASSERT(gtid >= 0); 810 811 if (__kmp_get_ticket_lock_owner(lck) == gtid) { 812 std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1, 813 std::memory_order_relaxed); 814 return KMP_LOCK_ACQUIRED_NEXT; 815 } else { 816 __kmp_acquire_ticket_lock_timed_template(lck, gtid); 817 ANNOTATE_TICKET_ACQUIRED(lck); 818 std::atomic_store_explicit(&lck->lk.depth_locked, 1, 819 std::memory_order_relaxed); 820 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, 821 std::memory_order_relaxed); 822 return KMP_LOCK_ACQUIRED_FIRST; 823 } 824 } 825 826 static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck, 827 kmp_int32 gtid) { 828 char const *const func = 
"omp_set_nest_lock"; 829 830 if (!std::atomic_load_explicit(&lck->lk.initialized, 831 std::memory_order_relaxed)) { 832 KMP_FATAL(LockIsUninitialized, func); 833 } 834 if (lck->lk.self != lck) { 835 KMP_FATAL(LockIsUninitialized, func); 836 } 837 if (!__kmp_is_ticket_lock_nestable(lck)) { 838 KMP_FATAL(LockSimpleUsedAsNestable, func); 839 } 840 return __kmp_acquire_nested_ticket_lock(lck, gtid); 841 } 842 843 int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { 844 int retval; 845 846 KMP_DEBUG_ASSERT(gtid >= 0); 847 848 if (__kmp_get_ticket_lock_owner(lck) == gtid) { 849 retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1, 850 std::memory_order_relaxed) + 851 1; 852 } else if (!__kmp_test_ticket_lock(lck, gtid)) { 853 retval = 0; 854 } else { 855 std::atomic_store_explicit(&lck->lk.depth_locked, 1, 856 std::memory_order_relaxed); 857 std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1, 858 std::memory_order_relaxed); 859 retval = 1; 860 } 861 return retval; 862 } 863 864 static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck, 865 kmp_int32 gtid) { 866 char const *const func = "omp_test_nest_lock"; 867 868 if (!std::atomic_load_explicit(&lck->lk.initialized, 869 std::memory_order_relaxed)) { 870 KMP_FATAL(LockIsUninitialized, func); 871 } 872 if (lck->lk.self != lck) { 873 KMP_FATAL(LockIsUninitialized, func); 874 } 875 if (!__kmp_is_ticket_lock_nestable(lck)) { 876 KMP_FATAL(LockSimpleUsedAsNestable, func); 877 } 878 return __kmp_test_nested_ticket_lock(lck, gtid); 879 } 880 881 int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) { 882 KMP_DEBUG_ASSERT(gtid >= 0); 883 884 if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1, 885 std::memory_order_relaxed) - 886 1) == 0) { 887 std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed); 888 __kmp_release_ticket_lock(lck, gtid); 889 return KMP_LOCK_RELEASED; 890 } 891 return KMP_LOCK_STILL_HELD; 892 } 893 894 static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck, 895 kmp_int32 gtid) { 896 char const *const func = "omp_unset_nest_lock"; 897 898 if (!std::atomic_load_explicit(&lck->lk.initialized, 899 std::memory_order_relaxed)) { 900 KMP_FATAL(LockIsUninitialized, func); 901 } 902 if (lck->lk.self != lck) { 903 KMP_FATAL(LockIsUninitialized, func); 904 } 905 if (!__kmp_is_ticket_lock_nestable(lck)) { 906 KMP_FATAL(LockSimpleUsedAsNestable, func); 907 } 908 if (__kmp_get_ticket_lock_owner(lck) == -1) { 909 KMP_FATAL(LockUnsettingFree, func); 910 } 911 if (__kmp_get_ticket_lock_owner(lck) != gtid) { 912 KMP_FATAL(LockUnsettingSetByAnother, func); 913 } 914 return __kmp_release_nested_ticket_lock(lck, gtid); 915 } 916 917 void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) { 918 __kmp_init_ticket_lock(lck); 919 std::atomic_store_explicit(&lck->lk.depth_locked, 0, 920 std::memory_order_relaxed); 921 // >= 0 for nestable locks, -1 for simple locks 922 } 923 924 void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) { 925 __kmp_destroy_ticket_lock(lck); 926 std::atomic_store_explicit(&lck->lk.depth_locked, 0, 927 std::memory_order_relaxed); 928 } 929 930 static void 931 __kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) { 932 char const *const func = "omp_destroy_nest_lock"; 933 934 if (!std::atomic_load_explicit(&lck->lk.initialized, 935 std::memory_order_relaxed)) { 936 KMP_FATAL(LockIsUninitialized, func); 937 } 938 if (lck->lk.self != lck) { 939 KMP_FATAL(LockIsUninitialized, 
func); 940 } 941 if (!__kmp_is_ticket_lock_nestable(lck)) { 942 KMP_FATAL(LockSimpleUsedAsNestable, func); 943 } 944 if (__kmp_get_ticket_lock_owner(lck) != -1) { 945 KMP_FATAL(LockStillOwned, func); 946 } 947 __kmp_destroy_nested_ticket_lock(lck); 948 } 949 950 // access functions to fields which don't exist for all lock kinds. 951 952 static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) { 953 return lck->lk.location; 954 } 955 956 static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck, 957 const ident_t *loc) { 958 lck->lk.location = loc; 959 } 960 961 static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) { 962 return lck->lk.flags; 963 } 964 965 static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck, 966 kmp_lock_flags_t flags) { 967 lck->lk.flags = flags; 968 } 969 970 /* ------------------------------------------------------------------------ */ 971 /* queuing locks */ 972 973 /* First the states 974 (head,tail) = 0, 0 means lock is unheld, nobody on queue 975 UINT_MAX or -1, 0 means lock is held, nobody on queue 976 h, h means lock held or about to transition, 977 1 element on queue 978 h, t h <> t, means lock is held or about to 979 transition, >1 elements on queue 980 981 Now the transitions 982 Acquire(0,0) = -1 ,0 983 Release(0,0) = Error 984 Acquire(-1,0) = h ,h h > 0 985 Release(-1,0) = 0 ,0 986 Acquire(h,h) = h ,t h > 0, t > 0, h <> t 987 Release(h,h) = -1 ,0 h > 0 988 Acquire(h,t) = h ,t' h > 0, t > 0, t' > 0, h <> t, h <> t', t <> t' 989 Release(h,t) = h',t h > 0, t > 0, h <> t, h <> h', h' maybe = t 990 991 And pictorially 992 993 +-----+ 994 | 0, 0|------- release -------> Error 995 +-----+ 996 | ^ 997 acquire| |release 998 | | 999 | | 1000 v | 1001 +-----+ 1002 |-1, 0| 1003 +-----+ 1004 | ^ 1005 acquire| |release 1006 | | 1007 | | 1008 v | 1009 +-----+ 1010 | h, h| 1011 +-----+ 1012 | ^ 1013 acquire| |release 1014 | | 1015 | | 1016 v | 1017 +-----+ 1018 | h, t|----- acquire, release loopback ---+ 1019 +-----+ | 1020 ^ | 1021 | | 1022 +------------------------------------+ 1023 */ 1024 1025 #ifdef DEBUG_QUEUING_LOCKS 1026 1027 /* Stuff for circular trace buffer */ 1028 #define TRACE_BUF_ELE 1024 1029 static char traces[TRACE_BUF_ELE][128] = {0}; 1030 static int tc = 0; 1031 #define TRACE_LOCK(X, Y) \ 1032 KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y); 1033 #define TRACE_LOCK_T(X, Y, Z) \ 1034 KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z); 1035 #define TRACE_LOCK_HT(X, Y, Z, Q) \ 1036 KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y, \ 1037 Z, Q); 1038 1039 static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid, 1040 kmp_queuing_lock_t *lck, kmp_int32 head_id, 1041 kmp_int32 tail_id) { 1042 kmp_int32 t, i; 1043 1044 __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! 
\n"); 1045 1046 i = tc % TRACE_BUF_ELE; 1047 __kmp_printf_no_lock("%s\n", traces[i]); 1048 i = (i + 1) % TRACE_BUF_ELE; 1049 while (i != (tc % TRACE_BUF_ELE)) { 1050 __kmp_printf_no_lock("%s", traces[i]); 1051 i = (i + 1) % TRACE_BUF_ELE; 1052 } 1053 __kmp_printf_no_lock("\n"); 1054 1055 __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, " 1056 "next_wait:%d, head_id:%d, tail_id:%d\n", 1057 gtid + 1, this_thr->th.th_spin_here, 1058 this_thr->th.th_next_waiting, head_id, tail_id); 1059 1060 __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id); 1061 1062 if (lck->lk.head_id >= 1) { 1063 t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting; 1064 while (t > 0) { 1065 __kmp_printf_no_lock("-> %d ", t); 1066 t = __kmp_threads[t - 1]->th.th_next_waiting; 1067 } 1068 } 1069 __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id); 1070 __kmp_printf_no_lock("\n\n"); 1071 } 1072 1073 #endif /* DEBUG_QUEUING_LOCKS */ 1074 1075 static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) { 1076 return TCR_4(lck->lk.owner_id) - 1; 1077 } 1078 1079 static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) { 1080 return lck->lk.depth_locked != -1; 1081 } 1082 1083 /* Acquire a lock using a the queuing lock implementation */ 1084 template <bool takeTime> 1085 /* [TLW] The unused template above is left behind because of what BEB believes 1086 is a potential compiler problem with __forceinline. */ 1087 __forceinline static int 1088 __kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck, 1089 kmp_int32 gtid) { 1090 kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid); 1091 volatile kmp_int32 *head_id_p = &lck->lk.head_id; 1092 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id; 1093 volatile kmp_uint32 *spin_here_p; 1094 kmp_int32 need_mf = 1; 1095 1096 #if OMPT_SUPPORT 1097 ompt_state_t prev_state = ompt_state_undefined; 1098 #endif 1099 1100 KA_TRACE(1000, 1101 ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid)); 1102 1103 KMP_FSYNC_PREPARE(lck); 1104 KMP_DEBUG_ASSERT(this_thr != NULL); 1105 spin_here_p = &this_thr->th.th_spin_here; 1106 1107 #ifdef DEBUG_QUEUING_LOCKS 1108 TRACE_LOCK(gtid + 1, "acq ent"); 1109 if (*spin_here_p) 1110 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p); 1111 if (this_thr->th.th_next_waiting != 0) 1112 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p); 1113 #endif 1114 KMP_DEBUG_ASSERT(!*spin_here_p); 1115 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0); 1116 1117 /* The following st.rel to spin_here_p needs to precede the cmpxchg.acq to 1118 head_id_p that may follow, not just in execution order, but also in 1119 visibility order. This way, when a releasing thread observes the changes to 1120 the queue by this thread, it can rightly assume that spin_here_p has 1121 already been set to TRUE, so that when it sets spin_here_p to FALSE, it is 1122 not premature. If the releasing thread sets spin_here_p to FALSE before 1123 this thread sets it to TRUE, this thread will hang. 
*/ 1124 *spin_here_p = TRUE; /* before enqueuing to prevent race */ 1125 1126 while (1) { 1127 kmp_int32 enqueued; 1128 kmp_int32 head; 1129 kmp_int32 tail; 1130 1131 head = *head_id_p; 1132 1133 switch (head) { 1134 1135 case -1: { 1136 #ifdef DEBUG_QUEUING_LOCKS 1137 tail = *tail_id_p; 1138 TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail); 1139 #endif 1140 tail = 0; /* to make sure next link asynchronously read is not set 1141 accidentally; this assignment prevents us from entering the 1142 if ( t > 0 ) condition in the enqueued case below, which is not 1143 necessary for this state transition */ 1144 1145 need_mf = 0; 1146 /* try (-1,0)->(tid,tid) */ 1147 enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p, 1148 KMP_PACK_64(-1, 0), 1149 KMP_PACK_64(gtid + 1, gtid + 1)); 1150 #ifdef DEBUG_QUEUING_LOCKS 1151 if (enqueued) 1152 TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)"); 1153 #endif 1154 } break; 1155 1156 default: { 1157 tail = *tail_id_p; 1158 KMP_DEBUG_ASSERT(tail != gtid + 1); 1159 1160 #ifdef DEBUG_QUEUING_LOCKS 1161 TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail); 1162 #endif 1163 1164 if (tail == 0) { 1165 enqueued = FALSE; 1166 } else { 1167 need_mf = 0; 1168 /* try (h,t) or (h,h)->(h,tid) */ 1169 enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1); 1170 1171 #ifdef DEBUG_QUEUING_LOCKS 1172 if (enqueued) 1173 TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)"); 1174 #endif 1175 } 1176 } break; 1177 1178 case 0: /* empty queue */ 1179 { 1180 kmp_int32 grabbed_lock; 1181 1182 #ifdef DEBUG_QUEUING_LOCKS 1183 tail = *tail_id_p; 1184 TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail); 1185 #endif 1186 /* try (0,0)->(-1,0) */ 1187 1188 /* only legal transition out of head = 0 is head = -1 with no change to 1189 * tail */ 1190 grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1); 1191 1192 if (grabbed_lock) { 1193 1194 *spin_here_p = FALSE; 1195 1196 KA_TRACE( 1197 1000, 1198 ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n", 1199 lck, gtid)); 1200 #ifdef DEBUG_QUEUING_LOCKS 1201 TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0); 1202 #endif 1203 1204 #if OMPT_SUPPORT 1205 if (ompt_enabled.enabled && prev_state != ompt_state_undefined) { 1206 /* change the state before clearing wait_id */ 1207 this_thr->th.ompt_thread_info.state = prev_state; 1208 this_thr->th.ompt_thread_info.wait_id = 0; 1209 } 1210 #endif 1211 1212 KMP_FSYNC_ACQUIRED(lck); 1213 return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */ 1214 } 1215 enqueued = FALSE; 1216 } break; 1217 } 1218 1219 #if OMPT_SUPPORT 1220 if (ompt_enabled.enabled && prev_state == ompt_state_undefined) { 1221 /* this thread will spin; set wait_id before entering wait state */ 1222 prev_state = this_thr->th.ompt_thread_info.state; 1223 this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck; 1224 this_thr->th.ompt_thread_info.state = ompt_state_wait_lock; 1225 } 1226 #endif 1227 1228 if (enqueued) { 1229 if (tail > 0) { 1230 kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1); 1231 KMP_ASSERT(tail_thr != NULL); 1232 tail_thr->th.th_next_waiting = gtid + 1; 1233 /* corresponding wait for this write in release code */ 1234 } 1235 KA_TRACE(1000, 1236 ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n", 1237 lck, gtid)); 1238 1239 KMP_MB(); 1240 // ToDo: Use __kmp_wait_sleep or similar when blocktime != inf 1241 KMP_WAIT(spin_here_p, FALSE, KMP_EQ, lck); 1242 // Synchronize writes to both runtime thread structures 1243 // and writes in user code. 
1244 KMP_MB(); 1245 1246 #ifdef DEBUG_QUEUING_LOCKS 1247 TRACE_LOCK(gtid + 1, "acq spin"); 1248 1249 if (this_thr->th.th_next_waiting != 0) 1250 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p); 1251 #endif 1252 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0); 1253 KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after " 1254 "waiting on queue\n", 1255 lck, gtid)); 1256 1257 #ifdef DEBUG_QUEUING_LOCKS 1258 TRACE_LOCK(gtid + 1, "acq exit 2"); 1259 #endif 1260 1261 #if OMPT_SUPPORT 1262 /* change the state before clearing wait_id */ 1263 this_thr->th.ompt_thread_info.state = prev_state; 1264 this_thr->th.ompt_thread_info.wait_id = 0; 1265 #endif 1266 1267 /* got lock, we were dequeued by the thread that released lock */ 1268 return KMP_LOCK_ACQUIRED_FIRST; 1269 } 1270 1271 /* Yield if number of threads > number of logical processors */ 1272 /* ToDo: Not sure why this should only be in oversubscription case, 1273 maybe should be traditional YIELD_INIT/YIELD_WHEN loop */ 1274 KMP_YIELD_OVERSUB(); 1275 1276 #ifdef DEBUG_QUEUING_LOCKS 1277 TRACE_LOCK(gtid + 1, "acq retry"); 1278 #endif 1279 } 1280 KMP_ASSERT2(0, "should not get here"); 1281 return KMP_LOCK_ACQUIRED_FIRST; 1282 } 1283 1284 int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { 1285 KMP_DEBUG_ASSERT(gtid >= 0); 1286 1287 int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid); 1288 ANNOTATE_QUEUING_ACQUIRED(lck); 1289 return retval; 1290 } 1291 1292 static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 1293 kmp_int32 gtid) { 1294 char const *const func = "omp_set_lock"; 1295 if (lck->lk.initialized != lck) { 1296 KMP_FATAL(LockIsUninitialized, func); 1297 } 1298 if (__kmp_is_queuing_lock_nestable(lck)) { 1299 KMP_FATAL(LockNestableUsedAsSimple, func); 1300 } 1301 if (__kmp_get_queuing_lock_owner(lck) == gtid) { 1302 KMP_FATAL(LockIsAlreadyOwned, func); 1303 } 1304 1305 __kmp_acquire_queuing_lock(lck, gtid); 1306 1307 lck->lk.owner_id = gtid + 1; 1308 return KMP_LOCK_ACQUIRED_FIRST; 1309 } 1310 1311 int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { 1312 volatile kmp_int32 *head_id_p = &lck->lk.head_id; 1313 kmp_int32 head; 1314 #ifdef KMP_DEBUG 1315 kmp_info_t *this_thr; 1316 #endif 1317 1318 KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid)); 1319 KMP_DEBUG_ASSERT(gtid >= 0); 1320 #ifdef KMP_DEBUG 1321 this_thr = __kmp_thread_from_gtid(gtid); 1322 KMP_DEBUG_ASSERT(this_thr != NULL); 1323 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here); 1324 #endif 1325 1326 head = *head_id_p; 1327 1328 if (head == 0) { /* nobody on queue, nobody holding */ 1329 /* try (0,0)->(-1,0) */ 1330 if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) { 1331 KA_TRACE(1000, 1332 ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid)); 1333 KMP_FSYNC_ACQUIRED(lck); 1334 ANNOTATE_QUEUING_ACQUIRED(lck); 1335 return TRUE; 1336 } 1337 } 1338 1339 KA_TRACE(1000, 1340 ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid)); 1341 return FALSE; 1342 } 1343 1344 static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 1345 kmp_int32 gtid) { 1346 char const *const func = "omp_test_lock"; 1347 if (lck->lk.initialized != lck) { 1348 KMP_FATAL(LockIsUninitialized, func); 1349 } 1350 if (__kmp_is_queuing_lock_nestable(lck)) { 1351 KMP_FATAL(LockNestableUsedAsSimple, func); 1352 } 1353 1354 int retval = __kmp_test_queuing_lock(lck, gtid); 1355 1356 if (retval) { 1357 lck->lk.owner_id = gtid + 1; 1358 } 1359 
return retval; 1360 } 1361 1362 int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { 1363 kmp_info_t *this_thr; 1364 volatile kmp_int32 *head_id_p = &lck->lk.head_id; 1365 volatile kmp_int32 *tail_id_p = &lck->lk.tail_id; 1366 1367 KA_TRACE(1000, 1368 ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid)); 1369 KMP_DEBUG_ASSERT(gtid >= 0); 1370 this_thr = __kmp_thread_from_gtid(gtid); 1371 KMP_DEBUG_ASSERT(this_thr != NULL); 1372 #ifdef DEBUG_QUEUING_LOCKS 1373 TRACE_LOCK(gtid + 1, "rel ent"); 1374 1375 if (this_thr->th.th_spin_here) 1376 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p); 1377 if (this_thr->th.th_next_waiting != 0) 1378 __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p); 1379 #endif 1380 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here); 1381 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0); 1382 1383 KMP_FSYNC_RELEASING(lck); 1384 ANNOTATE_QUEUING_RELEASED(lck); 1385 1386 while (1) { 1387 kmp_int32 dequeued; 1388 kmp_int32 head; 1389 kmp_int32 tail; 1390 1391 head = *head_id_p; 1392 1393 #ifdef DEBUG_QUEUING_LOCKS 1394 tail = *tail_id_p; 1395 TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail); 1396 if (head == 0) 1397 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail); 1398 #endif 1399 KMP_DEBUG_ASSERT(head != 1400 0); /* holding the lock, head must be -1 or queue head */ 1401 1402 if (head == -1) { /* nobody on queue */ 1403 /* try (-1,0)->(0,0) */ 1404 if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) { 1405 KA_TRACE( 1406 1000, 1407 ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n", 1408 lck, gtid)); 1409 #ifdef DEBUG_QUEUING_LOCKS 1410 TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0); 1411 #endif 1412 1413 #if OMPT_SUPPORT 1414 /* nothing to do - no other thread is trying to shift blame */ 1415 #endif 1416 return KMP_LOCK_RELEASED; 1417 } 1418 dequeued = FALSE; 1419 } else { 1420 KMP_MB(); 1421 tail = *tail_id_p; 1422 if (head == tail) { /* only one thread on the queue */ 1423 #ifdef DEBUG_QUEUING_LOCKS 1424 if (head <= 0) 1425 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail); 1426 #endif 1427 KMP_DEBUG_ASSERT(head > 0); 1428 1429 /* try (h,h)->(-1,0) */ 1430 dequeued = KMP_COMPARE_AND_STORE_REL64( 1431 RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head), 1432 KMP_PACK_64(-1, 0)); 1433 #ifdef DEBUG_QUEUING_LOCKS 1434 TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)"); 1435 #endif 1436 1437 } else { 1438 volatile kmp_int32 *waiting_id_p; 1439 kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1); 1440 KMP_DEBUG_ASSERT(head_thr != NULL); 1441 waiting_id_p = &head_thr->th.th_next_waiting; 1442 1443 /* Does this require synchronous reads? */ 1444 #ifdef DEBUG_QUEUING_LOCKS 1445 if (head <= 0 || tail <= 0) 1446 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail); 1447 #endif 1448 KMP_DEBUG_ASSERT(head > 0 && tail > 0); 1449 1450 /* try (h,t)->(h',t) or (t,t) */ 1451 KMP_MB(); 1452 /* make sure enqueuing thread has time to update next waiting thread 1453 * field */ 1454 *head_id_p = 1455 KMP_WAIT((volatile kmp_uint32 *)waiting_id_p, 0, KMP_NEQ, NULL); 1456 #ifdef DEBUG_QUEUING_LOCKS 1457 TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)"); 1458 #endif 1459 dequeued = TRUE; 1460 } 1461 } 1462 1463 if (dequeued) { 1464 kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1); 1465 KMP_DEBUG_ASSERT(head_thr != NULL); 1466 1467 /* Does this require synchronous reads? 
*/ 1468 #ifdef DEBUG_QUEUING_LOCKS 1469 if (head <= 0 || tail <= 0) 1470 __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail); 1471 #endif 1472 KMP_DEBUG_ASSERT(head > 0 && tail > 0); 1473 1474 /* For clean code only. Thread not released until next statement prevents 1475 race with acquire code. */ 1476 head_thr->th.th_next_waiting = 0; 1477 #ifdef DEBUG_QUEUING_LOCKS 1478 TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head); 1479 #endif 1480 1481 KMP_MB(); 1482 /* reset spin value */ 1483 head_thr->th.th_spin_here = FALSE; 1484 1485 KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after " 1486 "dequeuing\n", 1487 lck, gtid)); 1488 #ifdef DEBUG_QUEUING_LOCKS 1489 TRACE_LOCK(gtid + 1, "rel exit 2"); 1490 #endif 1491 return KMP_LOCK_RELEASED; 1492 } 1493 /* KMP_CPU_PAUSE(); don't want to make releasing thread hold up acquiring 1494 threads */ 1495 1496 #ifdef DEBUG_QUEUING_LOCKS 1497 TRACE_LOCK(gtid + 1, "rel retry"); 1498 #endif 1499 1500 } /* while */ 1501 KMP_ASSERT2(0, "should not get here"); 1502 return KMP_LOCK_RELEASED; 1503 } 1504 1505 static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 1506 kmp_int32 gtid) { 1507 char const *const func = "omp_unset_lock"; 1508 KMP_MB(); /* in case another processor initialized lock */ 1509 if (lck->lk.initialized != lck) { 1510 KMP_FATAL(LockIsUninitialized, func); 1511 } 1512 if (__kmp_is_queuing_lock_nestable(lck)) { 1513 KMP_FATAL(LockNestableUsedAsSimple, func); 1514 } 1515 if (__kmp_get_queuing_lock_owner(lck) == -1) { 1516 KMP_FATAL(LockUnsettingFree, func); 1517 } 1518 if (__kmp_get_queuing_lock_owner(lck) != gtid) { 1519 KMP_FATAL(LockUnsettingSetByAnother, func); 1520 } 1521 lck->lk.owner_id = 0; 1522 return __kmp_release_queuing_lock(lck, gtid); 1523 } 1524 1525 void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) { 1526 lck->lk.location = NULL; 1527 lck->lk.head_id = 0; 1528 lck->lk.tail_id = 0; 1529 lck->lk.next_ticket = 0; 1530 lck->lk.now_serving = 0; 1531 lck->lk.owner_id = 0; // no thread owns the lock. 1532 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks. 
1533 lck->lk.initialized = lck; 1534 1535 KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck)); 1536 } 1537 1538 void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) { 1539 lck->lk.initialized = NULL; 1540 lck->lk.location = NULL; 1541 lck->lk.head_id = 0; 1542 lck->lk.tail_id = 0; 1543 lck->lk.next_ticket = 0; 1544 lck->lk.now_serving = 0; 1545 lck->lk.owner_id = 0; 1546 lck->lk.depth_locked = -1; 1547 } 1548 1549 static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { 1550 char const *const func = "omp_destroy_lock"; 1551 if (lck->lk.initialized != lck) { 1552 KMP_FATAL(LockIsUninitialized, func); 1553 } 1554 if (__kmp_is_queuing_lock_nestable(lck)) { 1555 KMP_FATAL(LockNestableUsedAsSimple, func); 1556 } 1557 if (__kmp_get_queuing_lock_owner(lck) != -1) { 1558 KMP_FATAL(LockStillOwned, func); 1559 } 1560 __kmp_destroy_queuing_lock(lck); 1561 } 1562 1563 // nested queuing locks 1564 1565 int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { 1566 KMP_DEBUG_ASSERT(gtid >= 0); 1567 1568 if (__kmp_get_queuing_lock_owner(lck) == gtid) { 1569 lck->lk.depth_locked += 1; 1570 return KMP_LOCK_ACQUIRED_NEXT; 1571 } else { 1572 __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid); 1573 ANNOTATE_QUEUING_ACQUIRED(lck); 1574 KMP_MB(); 1575 lck->lk.depth_locked = 1; 1576 KMP_MB(); 1577 lck->lk.owner_id = gtid + 1; 1578 return KMP_LOCK_ACQUIRED_FIRST; 1579 } 1580 } 1581 1582 static int 1583 __kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 1584 kmp_int32 gtid) { 1585 char const *const func = "omp_set_nest_lock"; 1586 if (lck->lk.initialized != lck) { 1587 KMP_FATAL(LockIsUninitialized, func); 1588 } 1589 if (!__kmp_is_queuing_lock_nestable(lck)) { 1590 KMP_FATAL(LockSimpleUsedAsNestable, func); 1591 } 1592 return __kmp_acquire_nested_queuing_lock(lck, gtid); 1593 } 1594 1595 int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { 1596 int retval; 1597 1598 KMP_DEBUG_ASSERT(gtid >= 0); 1599 1600 if (__kmp_get_queuing_lock_owner(lck) == gtid) { 1601 retval = ++lck->lk.depth_locked; 1602 } else if (!__kmp_test_queuing_lock(lck, gtid)) { 1603 retval = 0; 1604 } else { 1605 KMP_MB(); 1606 retval = lck->lk.depth_locked = 1; 1607 KMP_MB(); 1608 lck->lk.owner_id = gtid + 1; 1609 } 1610 return retval; 1611 } 1612 1613 static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 1614 kmp_int32 gtid) { 1615 char const *const func = "omp_test_nest_lock"; 1616 if (lck->lk.initialized != lck) { 1617 KMP_FATAL(LockIsUninitialized, func); 1618 } 1619 if (!__kmp_is_queuing_lock_nestable(lck)) { 1620 KMP_FATAL(LockSimpleUsedAsNestable, func); 1621 } 1622 return __kmp_test_nested_queuing_lock(lck, gtid); 1623 } 1624 1625 int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) { 1626 KMP_DEBUG_ASSERT(gtid >= 0); 1627 1628 KMP_MB(); 1629 if (--(lck->lk.depth_locked) == 0) { 1630 KMP_MB(); 1631 lck->lk.owner_id = 0; 1632 __kmp_release_queuing_lock(lck, gtid); 1633 return KMP_LOCK_RELEASED; 1634 } 1635 return KMP_LOCK_STILL_HELD; 1636 } 1637 1638 static int 1639 __kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 1640 kmp_int32 gtid) { 1641 char const *const func = "omp_unset_nest_lock"; 1642 KMP_MB(); /* in case another processor initialized lock */ 1643 if (lck->lk.initialized != lck) { 1644 KMP_FATAL(LockIsUninitialized, func); 1645 } 1646 if (!__kmp_is_queuing_lock_nestable(lck)) { 1647 KMP_FATAL(LockSimpleUsedAsNestable, func); 1648 } 1649 if 
(__kmp_get_queuing_lock_owner(lck) == -1) { 1650 KMP_FATAL(LockUnsettingFree, func); 1651 } 1652 if (__kmp_get_queuing_lock_owner(lck) != gtid) { 1653 KMP_FATAL(LockUnsettingSetByAnother, func); 1654 } 1655 return __kmp_release_nested_queuing_lock(lck, gtid); 1656 } 1657 1658 void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) { 1659 __kmp_init_queuing_lock(lck); 1660 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks 1661 } 1662 1663 void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) { 1664 __kmp_destroy_queuing_lock(lck); 1665 lck->lk.depth_locked = 0; 1666 } 1667 1668 static void 1669 __kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { 1670 char const *const func = "omp_destroy_nest_lock"; 1671 if (lck->lk.initialized != lck) { 1672 KMP_FATAL(LockIsUninitialized, func); 1673 } 1674 if (!__kmp_is_queuing_lock_nestable(lck)) { 1675 KMP_FATAL(LockSimpleUsedAsNestable, func); 1676 } 1677 if (__kmp_get_queuing_lock_owner(lck) != -1) { 1678 KMP_FATAL(LockStillOwned, func); 1679 } 1680 __kmp_destroy_nested_queuing_lock(lck); 1681 } 1682 1683 // access functions to fields which don't exist for all lock kinds. 1684 1685 static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) { 1686 return lck->lk.location; 1687 } 1688 1689 static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck, 1690 const ident_t *loc) { 1691 lck->lk.location = loc; 1692 } 1693 1694 static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) { 1695 return lck->lk.flags; 1696 } 1697 1698 static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck, 1699 kmp_lock_flags_t flags) { 1700 lck->lk.flags = flags; 1701 } 1702 1703 #if KMP_USE_ADAPTIVE_LOCKS 1704 1705 /* RTM Adaptive locks */ 1706 1707 #if KMP_HAVE_RTM_INTRINSICS 1708 #include <immintrin.h> 1709 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT) 1710 1711 #else 1712 1713 // Values from the status register after failed speculation. 1714 #define _XBEGIN_STARTED (~0u) 1715 #define _XABORT_EXPLICIT (1 << 0) 1716 #define _XABORT_RETRY (1 << 1) 1717 #define _XABORT_CONFLICT (1 << 2) 1718 #define _XABORT_CAPACITY (1 << 3) 1719 #define _XABORT_DEBUG (1 << 4) 1720 #define _XABORT_NESTED (1 << 5) 1721 #define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF)) 1722 1723 // Aborts for which it's worth trying again immediately 1724 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT) 1725 1726 #define STRINGIZE_INTERNAL(arg) #arg 1727 #define STRINGIZE(arg) STRINGIZE_INTERNAL(arg) 1728 1729 // Access to RTM instructions 1730 /*A version of XBegin which returns -1 on speculation, and the value of EAX on 1731 an abort. This is the same definition as the compiler intrinsic that will be 1732 supported at some point. */ 1733 static __inline int _xbegin() { 1734 int res = -1; 1735 1736 #if KMP_OS_WINDOWS 1737 #if KMP_ARCH_X86_64 1738 _asm { 1739 _emit 0xC7 1740 _emit 0xF8 1741 _emit 2 1742 _emit 0 1743 _emit 0 1744 _emit 0 1745 jmp L2 1746 mov res, eax 1747 L2: 1748 } 1749 #else /* IA32 */ 1750 _asm { 1751 _emit 0xC7 1752 _emit 0xF8 1753 _emit 2 1754 _emit 0 1755 _emit 0 1756 _emit 0 1757 jmp L2 1758 mov res, eax 1759 L2: 1760 } 1761 #endif // KMP_ARCH_X86_64 1762 #else 1763 /* Note that %eax must be noted as killed (clobbered), because the XSR is 1764 returned in %eax(%rax) on abort. Other register values are restored, so 1765 don't need to be killed. 
1766 1767 We must also mark 'res' as an input and an output, since otherwise 1768 'res=-1' may be dropped as being dead, whereas we do need the assignment on 1769 the successful (i.e., non-abort) path. */ 1770 __asm__ volatile("1: .byte 0xC7; .byte 0xF8;\n" 1771 " .long 1f-1b-6\n" 1772 " jmp 2f\n" 1773 "1: movl %%eax,%0\n" 1774 "2:" 1775 : "+r"(res)::"memory", "%eax"); 1776 #endif // KMP_OS_WINDOWS 1777 return res; 1778 } 1779 1780 /* Transaction end */ 1781 static __inline void _xend() { 1782 #if KMP_OS_WINDOWS 1783 __asm { 1784 _emit 0x0f 1785 _emit 0x01 1786 _emit 0xd5 1787 } 1788 #else 1789 __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory"); 1790 #endif 1791 } 1792 1793 /* This is a macro, the argument must be a single byte constant which can be 1794 evaluated by the inline assembler, since it is emitted as a byte into the 1795 assembly code. */ 1796 // clang-format off 1797 #if KMP_OS_WINDOWS 1798 #define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG 1799 #else 1800 #define _xabort(ARG) \ 1801 __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory"); 1802 #endif 1803 // clang-format on 1804 #endif // KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300 1805 1806 // Statistics is collected for testing purpose 1807 #if KMP_DEBUG_ADAPTIVE_LOCKS 1808 1809 // We accumulate speculative lock statistics when the lock is destroyed. We 1810 // keep locks that haven't been destroyed in the liveLocks list so that we can 1811 // grab their statistics too. 1812 static kmp_adaptive_lock_statistics_t destroyedStats; 1813 1814 // To hold the list of live locks. 1815 static kmp_adaptive_lock_info_t liveLocks; 1816 1817 // A lock so we can safely update the list of locks. 1818 static kmp_bootstrap_lock_t chain_lock = 1819 KMP_BOOTSTRAP_LOCK_INITIALIZER(chain_lock); 1820 1821 // Initialize the list of stats. 
1822 void __kmp_init_speculative_stats() { 1823 kmp_adaptive_lock_info_t *lck = &liveLocks; 1824 1825 memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0, 1826 sizeof(lck->stats)); 1827 lck->stats.next = lck; 1828 lck->stats.prev = lck; 1829 1830 KMP_ASSERT(lck->stats.next->stats.prev == lck); 1831 KMP_ASSERT(lck->stats.prev->stats.next == lck); 1832 1833 __kmp_init_bootstrap_lock(&chain_lock); 1834 } 1835 1836 // Insert the lock into the circular list 1837 static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) { 1838 __kmp_acquire_bootstrap_lock(&chain_lock); 1839 1840 lck->stats.next = liveLocks.stats.next; 1841 lck->stats.prev = &liveLocks; 1842 1843 liveLocks.stats.next = lck; 1844 lck->stats.next->stats.prev = lck; 1845 1846 KMP_ASSERT(lck->stats.next->stats.prev == lck); 1847 KMP_ASSERT(lck->stats.prev->stats.next == lck); 1848 1849 __kmp_release_bootstrap_lock(&chain_lock); 1850 } 1851 1852 static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) { 1853 KMP_ASSERT(lck->stats.next->stats.prev == lck); 1854 KMP_ASSERT(lck->stats.prev->stats.next == lck); 1855 1856 kmp_adaptive_lock_info_t *n = lck->stats.next; 1857 kmp_adaptive_lock_info_t *p = lck->stats.prev; 1858 1859 n->stats.prev = p; 1860 p->stats.next = n; 1861 } 1862 1863 static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) { 1864 memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0, 1865 sizeof(lck->stats)); 1866 __kmp_remember_lock(lck); 1867 } 1868 1869 static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t, 1870 kmp_adaptive_lock_info_t *lck) { 1871 kmp_adaptive_lock_statistics_t volatile *s = &lck->stats; 1872 1873 t->nonSpeculativeAcquireAttempts += lck->acquire_attempts; 1874 t->successfulSpeculations += s->successfulSpeculations; 1875 t->hardFailedSpeculations += s->hardFailedSpeculations; 1876 t->softFailedSpeculations += s->softFailedSpeculations; 1877 t->nonSpeculativeAcquires += s->nonSpeculativeAcquires; 1878 t->lemmingYields += s->lemmingYields; 1879 } 1880 1881 static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) { 1882 __kmp_acquire_bootstrap_lock(&chain_lock); 1883 1884 __kmp_add_stats(&destroyedStats, lck); 1885 __kmp_forget_lock(lck); 1886 1887 __kmp_release_bootstrap_lock(&chain_lock); 1888 } 1889 1890 static float percent(kmp_uint32 count, kmp_uint32 total) { 1891 return (total == 0) ? 
0.0 : (100.0 * count) / total; 1892 } 1893 1894 void __kmp_print_speculative_stats() { 1895 kmp_adaptive_lock_statistics_t total = destroyedStats; 1896 kmp_adaptive_lock_info_t *lck; 1897 1898 for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) { 1899 __kmp_add_stats(&total, lck); 1900 } 1901 kmp_adaptive_lock_statistics_t *t = &total; 1902 kmp_uint32 totalSections = 1903 t->nonSpeculativeAcquires + t->successfulSpeculations; 1904 kmp_uint32 totalSpeculations = t->successfulSpeculations + 1905 t->hardFailedSpeculations + 1906 t->softFailedSpeculations; 1907 if (totalSections <= 0) 1908 return; 1909 1910 kmp_safe_raii_file_t statsFile; 1911 if (strcmp(__kmp_speculative_statsfile, "-") == 0) { 1912 statsFile.set_stdout(); 1913 } else { 1914 size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20; 1915 char buffer[buffLen]; 1916 KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile, 1917 (kmp_int32)getpid()); 1918 statsFile.open(buffer, "w"); 1919 } 1920 1921 fprintf(statsFile, "Speculative lock statistics (all approximate!)\n"); 1922 fprintf(statsFile, 1923 " Lock parameters: \n" 1924 " max_soft_retries : %10d\n" 1925 " max_badness : %10d\n", 1926 __kmp_adaptive_backoff_params.max_soft_retries, 1927 __kmp_adaptive_backoff_params.max_badness); 1928 fprintf(statsFile, " Non-speculative acquire attempts : %10d\n", 1929 t->nonSpeculativeAcquireAttempts); 1930 fprintf(statsFile, " Total critical sections : %10d\n", 1931 totalSections); 1932 fprintf(statsFile, " Successful speculations : %10d (%5.1f%%)\n", 1933 t->successfulSpeculations, 1934 percent(t->successfulSpeculations, totalSections)); 1935 fprintf(statsFile, " Non-speculative acquires : %10d (%5.1f%%)\n", 1936 t->nonSpeculativeAcquires, 1937 percent(t->nonSpeculativeAcquires, totalSections)); 1938 fprintf(statsFile, " Lemming yields : %10d\n\n", 1939 t->lemmingYields); 1940 1941 fprintf(statsFile, " Speculative acquire attempts : %10d\n", 1942 totalSpeculations); 1943 fprintf(statsFile, " Successes : %10d (%5.1f%%)\n", 1944 t->successfulSpeculations, 1945 percent(t->successfulSpeculations, totalSpeculations)); 1946 fprintf(statsFile, " Soft failures : %10d (%5.1f%%)\n", 1947 t->softFailedSpeculations, 1948 percent(t->softFailedSpeculations, totalSpeculations)); 1949 fprintf(statsFile, " Hard failures : %10d (%5.1f%%)\n", 1950 t->hardFailedSpeculations, 1951 percent(t->hardFailedSpeculations, totalSpeculations)); 1952 } 1953 1954 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++) 1955 #else 1956 #define KMP_INC_STAT(lck, stat) 1957 1958 #endif // KMP_DEBUG_ADAPTIVE_LOCKS 1959 1960 static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) { 1961 // It is enough to check that the head_id is zero. 1962 // We don't also need to check the tail. 1963 bool res = lck->lk.head_id == 0; 1964 1965 // We need a fence here, since we must ensure that no memory operations 1966 // from later in this thread float above that read. 1967 #if KMP_COMPILER_ICC 1968 _mm_mfence(); 1969 #else 1970 __sync_synchronize(); 1971 #endif 1972 1973 return res; 1974 } 1975 1976 // Functions for manipulating the badness 1977 static __inline void 1978 __kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) { 1979 // Reset the badness to zero so we eagerly try to speculate again 1980 lck->lk.adaptive.badness = 0; 1981 KMP_INC_STAT(lck, successfulSpeculations); 1982 } 1983 1984 // Create a bit mask with one more set bit. 
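// Illustrative walkthrough: badness is a mask of low bits (0, 1, 3, 7, ...).
// With badness == 0 every acquire attempts speculation; each call to
// __kmp_step_badness() (made after a hard failure or after the soft retries
// are exhausted) adds one bit, up to max_badness, so with badness == 2^k - 1
// the (attempts & badness) == 0 test in __kmp_should_speculate() passes only
// for every 2^k-th value of acquire_attempts. A successful speculation resets
// badness to 0 via __kmp_update_badness_after_success().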
1985 static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) { 1986 kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1; 1987 if (newBadness > lck->lk.adaptive.max_badness) { 1988 return; 1989 } else { 1990 lck->lk.adaptive.badness = newBadness; 1991 } 1992 } 1993 1994 // Check whether speculation should be attempted. 1995 KMP_ATTRIBUTE_TARGET_RTM 1996 static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck, 1997 kmp_int32 gtid) { 1998 kmp_uint32 badness = lck->lk.adaptive.badness; 1999 kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts; 2000 int res = (attempts & badness) == 0; 2001 return res; 2002 } 2003 2004 // Attempt to acquire only the speculative lock. 2005 // Does not back off to the non-speculative lock. 2006 KMP_ATTRIBUTE_TARGET_RTM 2007 static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck, 2008 kmp_int32 gtid) { 2009 int retries = lck->lk.adaptive.max_soft_retries; 2010 2011 // We don't explicitly count the start of speculation, rather we record the 2012 // results (success, hard fail, soft fail). The sum of all of those is the 2013 // total number of times we started speculation since all speculations must 2014 // end one of those ways. 2015 do { 2016 kmp_uint32 status = _xbegin(); 2017 // Switch this in to disable actual speculation but exercise at least some 2018 // of the rest of the code. Useful for debugging... 2019 // kmp_uint32 status = _XABORT_NESTED; 2020 2021 if (status == _XBEGIN_STARTED) { 2022 /* We have successfully started speculation. Check that no-one acquired 2023 the lock for real between when we last looked and now. This also gets 2024 the lock cache line into our read-set, which we need so that we'll 2025 abort if anyone later claims it for real. */ 2026 if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) { 2027 // Lock is now visibly acquired, so someone beat us to it. Abort the 2028 // transaction so we'll restart from _xbegin with the failure status. 2029 _xabort(0x01); 2030 KMP_ASSERT2(0, "should not get here"); 2031 } 2032 return 1; // Lock has been acquired (speculatively) 2033 } else { 2034 // We have aborted, update the statistics 2035 if (status & SOFT_ABORT_MASK) { 2036 KMP_INC_STAT(lck, softFailedSpeculations); 2037 // and loop round to retry. 2038 } else { 2039 KMP_INC_STAT(lck, hardFailedSpeculations); 2040 // Give up if we had a hard failure. 2041 break; 2042 } 2043 } 2044 } while (retries--); // Loop while we have retries, and didn't fail hard. 2045 2046 // Either we had a hard failure or we didn't succeed softly after 2047 // the full set of attempts, so back off the badness. 2048 __kmp_step_badness(lck); 2049 return 0; 2050 } 2051 2052 // Attempt to acquire the speculative lock, or back off to the non-speculative 2053 // one if the speculative lock cannot be acquired. 2054 // We can succeed speculatively, non-speculatively, or fail. 2055 static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) { 2056 // First try to acquire the lock speculatively 2057 if (__kmp_should_speculate(lck, gtid) && 2058 __kmp_test_adaptive_lock_only(lck, gtid)) 2059 return 1; 2060 2061 // Speculative acquisition failed, so try to acquire it non-speculatively. 2062 // Count the non-speculative acquire attempt 2063 lck->lk.adaptive.acquire_attempts++; 2064 2065 // Use base, non-speculative lock. 
2066 if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
2067 KMP_INC_STAT(lck, nonSpeculativeAcquires);
2068 return 1; // Lock is acquired (non-speculatively)
2069 } else {
2070 return 0; // Failed to acquire the lock, it's already visibly locked.
2071 }
2072 }
2073
2074 static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2075 kmp_int32 gtid) {
2076 char const *const func = "omp_test_lock";
2077 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2078 KMP_FATAL(LockIsUninitialized, func);
2079 }
2080
2081 int retval = __kmp_test_adaptive_lock(lck, gtid);
2082
2083 if (retval) {
2084 lck->lk.qlk.owner_id = gtid + 1;
2085 }
2086 return retval;
2087 }
2088
2089 // Block until we can acquire a speculative, adaptive lock. We check whether we
2090 // should be trying to speculate. If we should be, we check the real lock to see
2091 // if it is free, and, if not, pause without attempting to acquire it until it
2092 // is. Then we try the speculative acquire. This means that although we suffer
2093 // from lemmings a little (because no thread can acquire the lock speculatively
2094 // until the queue of waiting threads has cleared), we don't get into a state
2095 // where we can never acquire the lock speculatively (because we force the queue
2096 // to clear by preventing new arrivals from entering the queue). This does mean
2097 // that when we're trying to break lemmings, the lock is no longer fair. However,
2098 // OpenMP makes no guarantee that its locks are fair, so this isn't a real
2099 // problem.
2100 static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
2101 kmp_int32 gtid) {
2102 if (__kmp_should_speculate(lck, gtid)) {
2103 if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2104 if (__kmp_test_adaptive_lock_only(lck, gtid))
2105 return;
2106 // We tried speculation and failed, so give up.
2107 } else {
2108 // We can't try speculation until the lock is free, so we pause here
2109 // (without suspending on the queuing lock) to allow it to drain, then
2110 // try again. All other threads will see the same result from
2111 // __kmp_should_speculate(), so they will be doing the same if they try
2112 // to claim the lock from now on.
2113 while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2114 KMP_INC_STAT(lck, lemmingYields);
2115 KMP_YIELD(TRUE);
2116 }
2117
2118 if (__kmp_test_adaptive_lock_only(lck, gtid))
2119 return;
2120 }
2121 }
2122
2123 // Speculative acquisition failed, so acquire it non-speculatively.
2124 // Count the non-speculative acquire attempt.
2125 lck->lk.adaptive.acquire_attempts++;
2126
2127 __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
2128 // We have acquired the base lock, so count that.
2129 KMP_INC_STAT(lck, nonSpeculativeAcquires);
2130 ANNOTATE_QUEUING_ACQUIRED(lck);
2131 }
2132
2133 static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2134 kmp_int32 gtid) {
2135 char const *const func = "omp_set_lock";
2136 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2137 KMP_FATAL(LockIsUninitialized, func);
2138 }
2139 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
2140 KMP_FATAL(LockIsAlreadyOwned, func);
2141 }
2142
2143 __kmp_acquire_adaptive_lock(lck, gtid);
2144
2145 lck->lk.qlk.owner_id = gtid + 1;
2146 }
2147
2148 KMP_ATTRIBUTE_TARGET_RTM
2149 static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
2150 kmp_int32 gtid) {
2151 if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
2152 lck))) { // If the lock doesn't look claimed we must be speculating.
2153 // (Or the user's code is buggy and they're releasing without locking; 2154 // if we had XTEST we'd be able to check that case...) 2155 _xend(); // Exit speculation 2156 __kmp_update_badness_after_success(lck); 2157 } else { // Since the lock *is* visibly locked we're not speculating, 2158 // so should use the underlying lock's release scheme. 2159 __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid); 2160 } 2161 return KMP_LOCK_RELEASED; 2162 } 2163 2164 static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck, 2165 kmp_int32 gtid) { 2166 char const *const func = "omp_unset_lock"; 2167 KMP_MB(); /* in case another processor initialized lock */ 2168 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) { 2169 KMP_FATAL(LockIsUninitialized, func); 2170 } 2171 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) { 2172 KMP_FATAL(LockUnsettingFree, func); 2173 } 2174 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) { 2175 KMP_FATAL(LockUnsettingSetByAnother, func); 2176 } 2177 lck->lk.qlk.owner_id = 0; 2178 __kmp_release_adaptive_lock(lck, gtid); 2179 return KMP_LOCK_RELEASED; 2180 } 2181 2182 static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) { 2183 __kmp_init_queuing_lock(GET_QLK_PTR(lck)); 2184 lck->lk.adaptive.badness = 0; 2185 lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0; 2186 lck->lk.adaptive.max_soft_retries = 2187 __kmp_adaptive_backoff_params.max_soft_retries; 2188 lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness; 2189 #if KMP_DEBUG_ADAPTIVE_LOCKS 2190 __kmp_zero_speculative_stats(&lck->lk.adaptive); 2191 #endif 2192 KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck)); 2193 } 2194 2195 static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) { 2196 #if KMP_DEBUG_ADAPTIVE_LOCKS 2197 __kmp_accumulate_speculative_stats(&lck->lk.adaptive); 2198 #endif 2199 __kmp_destroy_queuing_lock(GET_QLK_PTR(lck)); 2200 // Nothing needed for the speculative part. 2201 } 2202 2203 static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) { 2204 char const *const func = "omp_destroy_lock"; 2205 if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) { 2206 KMP_FATAL(LockIsUninitialized, func); 2207 } 2208 if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) { 2209 KMP_FATAL(LockStillOwned, func); 2210 } 2211 __kmp_destroy_adaptive_lock(lck); 2212 } 2213 2214 #endif // KMP_USE_ADAPTIVE_LOCKS 2215 2216 /* ------------------------------------------------------------------------ */ 2217 /* DRDPA ticket locks */ 2218 /* "DRDPA" means Dynamically Reconfigurable Distributed Polling Area */ 2219 2220 static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) { 2221 return lck->lk.owner_id - 1; 2222 } 2223 2224 static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) { 2225 return lck->lk.depth_locked != -1; 2226 } 2227 2228 __forceinline static int 2229 __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { 2230 kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket); 2231 kmp_uint64 mask = lck->lk.mask; // atomic load 2232 std::atomic<kmp_uint64> *polls = lck->lk.polls; 2233 2234 #ifdef USE_LOCK_PROFILE 2235 if (polls[ticket & mask] != ticket) 2236 __kmp_printf("LOCK CONTENTION: %p\n", lck); 2237 /* else __kmp_printf( "." );*/ 2238 #endif /* USE_LOCK_PROFILE */ 2239 2240 // Now spin-wait, but reload the polls pointer and mask, in case the 2241 // polling area has been reconfigured. 
Unless it is reconfigured, the
2242 // reloads stay in L1 cache and are cheap.
2243 //
2244 // Keep this code in sync with KMP_WAIT, in kmp_dispatch.cpp !!!
2245 // The current implementation of KMP_WAIT doesn't allow for mask
2246 // and poll to be re-read every spin iteration.
2247 kmp_uint32 spins;
2248 KMP_FSYNC_PREPARE(lck);
2249 KMP_INIT_YIELD(spins);
2250 while (polls[ticket & mask] < ticket) { // atomic load
2251 KMP_YIELD_OVERSUB_ELSE_SPIN(spins);
2252 // Re-read the mask and the poll pointer from the lock structure.
2253 //
2254 // Make certain that "mask" is read before "polls" !!!
2255 //
2256 // If another thread reconfigures the polling area and updates its values,
2257 // and we get the new value of mask with the old polls pointer, we could
2258 // access memory beyond the end of the old polling area.
2259 mask = lck->lk.mask; // atomic load
2260 polls = lck->lk.polls; // atomic load
2261 }
2262
2263 // Critical section starts here
2264 KMP_FSYNC_ACQUIRED(lck);
2265 KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
2266 ticket, lck));
2267 lck->lk.now_serving = ticket; // non-volatile store
2268
2269 // Deallocate a garbage polling area if we know that we are the last
2270 // thread that could possibly access it.
2271 //
2272 // The >= check is in case __kmp_test_drdpa_lock() allocated the cleanup
2273 // ticket.
2274 if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
2275 __kmp_free(lck->lk.old_polls);
2276 lck->lk.old_polls = NULL;
2277 lck->lk.cleanup_ticket = 0;
2278 }
2279
2280 // Check to see if we should reconfigure the polling area.
2281 // If there is still a garbage polling area to be deallocated from a
2282 // previous reconfiguration, let a later thread reconfigure it.
2283 if (lck->lk.old_polls == NULL) {
2284 bool reconfigure = false;
2285 std::atomic<kmp_uint64> *old_polls = polls;
2286 kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
2287
2288 if (TCR_4(__kmp_nth) >
2289 (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
2290 // We are in oversubscription mode. Contract the polling area
2291 // down to a single location, if that hasn't been done already.
2292 if (num_polls > 1) {
2293 reconfigure = true;
2294 num_polls = TCR_4(lck->lk.num_polls);
2295 mask = 0;
2296 num_polls = 1;
2297 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2298 sizeof(*polls));
2299 polls[0] = ticket;
2300 }
2301 } else {
2302 // We are in under/fully subscribed mode. Check the number of
2303 // threads waiting on the lock. The size of the polling area
2304 // should be at least the number of threads waiting.
2305 kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
2306 if (num_waiting > num_polls) {
2307 kmp_uint32 old_num_polls = num_polls;
2308 reconfigure = true;
2309 do {
2310 mask = (mask << 1) | 1;
2311 num_polls *= 2;
2312 } while (num_polls <= num_waiting);
2313
2314 // Allocate the new polling area, and copy the relevant portion
2315 // of the old polling area to the new area. __kmp_allocate()
2316 // zeroes the memory it allocates, and most of the old area is
2317 // just zero padding, so we only copy the release counters.
2318 polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2319 sizeof(*polls));
2320 kmp_uint32 i;
2321 for (i = 0; i < old_num_polls; i++) {
2322 polls[i].store(old_polls[i]);
2323 }
2324 }
2325 }
2326
2327 if (reconfigure) {
2328 // Now write the updated fields back to the lock structure.
2329 // 2330 // Make certain that "polls" is written before "mask" !!! 2331 // 2332 // If another thread picks up the new value of mask and the old polls 2333 // pointer , it could access memory beyond the end of the old polling 2334 // area. 2335 // 2336 // On x86, we need memory fences. 2337 KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring " 2338 "lock %p to %d polls\n", 2339 ticket, lck, num_polls)); 2340 2341 lck->lk.old_polls = old_polls; 2342 lck->lk.polls = polls; // atomic store 2343 2344 KMP_MB(); 2345 2346 lck->lk.num_polls = num_polls; 2347 lck->lk.mask = mask; // atomic store 2348 2349 KMP_MB(); 2350 2351 // Only after the new polling area and mask have been flushed 2352 // to main memory can we update the cleanup ticket field. 2353 // 2354 // volatile load / non-volatile store 2355 lck->lk.cleanup_ticket = lck->lk.next_ticket; 2356 } 2357 } 2358 return KMP_LOCK_ACQUIRED_FIRST; 2359 } 2360 2361 int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { 2362 int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid); 2363 ANNOTATE_DRDPA_ACQUIRED(lck); 2364 return retval; 2365 } 2366 2367 static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, 2368 kmp_int32 gtid) { 2369 char const *const func = "omp_set_lock"; 2370 if (lck->lk.initialized != lck) { 2371 KMP_FATAL(LockIsUninitialized, func); 2372 } 2373 if (__kmp_is_drdpa_lock_nestable(lck)) { 2374 KMP_FATAL(LockNestableUsedAsSimple, func); 2375 } 2376 if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) { 2377 KMP_FATAL(LockIsAlreadyOwned, func); 2378 } 2379 2380 __kmp_acquire_drdpa_lock(lck, gtid); 2381 2382 lck->lk.owner_id = gtid + 1; 2383 return KMP_LOCK_ACQUIRED_FIRST; 2384 } 2385 2386 int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { 2387 // First get a ticket, then read the polls pointer and the mask. 2388 // The polls pointer must be read before the mask!!! (See above) 2389 kmp_uint64 ticket = lck->lk.next_ticket; // atomic load 2390 std::atomic<kmp_uint64> *polls = lck->lk.polls; 2391 kmp_uint64 mask = lck->lk.mask; // atomic load 2392 if (polls[ticket & mask] == ticket) { 2393 kmp_uint64 next_ticket = ticket + 1; 2394 if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket, 2395 next_ticket)) { 2396 KMP_FSYNC_ACQUIRED(lck); 2397 KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n", 2398 ticket, lck)); 2399 lck->lk.now_serving = ticket; // non-volatile store 2400 2401 // Since no threads are waiting, there is no possibility that we would 2402 // want to reconfigure the polling area. We might have the cleanup ticket 2403 // value (which says that it is now safe to deallocate old_polls), but 2404 // we'll let a later thread which calls __kmp_acquire_lock do that - this 2405 // routine isn't supposed to block, and we would risk blocks if we called 2406 // __kmp_free() to do the deallocation. 
2407 return TRUE; 2408 } 2409 } 2410 return FALSE; 2411 } 2412 2413 static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, 2414 kmp_int32 gtid) { 2415 char const *const func = "omp_test_lock"; 2416 if (lck->lk.initialized != lck) { 2417 KMP_FATAL(LockIsUninitialized, func); 2418 } 2419 if (__kmp_is_drdpa_lock_nestable(lck)) { 2420 KMP_FATAL(LockNestableUsedAsSimple, func); 2421 } 2422 2423 int retval = __kmp_test_drdpa_lock(lck, gtid); 2424 2425 if (retval) { 2426 lck->lk.owner_id = gtid + 1; 2427 } 2428 return retval; 2429 } 2430 2431 int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { 2432 // Read the ticket value from the lock data struct, then the polls pointer and 2433 // the mask. The polls pointer must be read before the mask!!! (See above) 2434 kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load 2435 std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load 2436 kmp_uint64 mask = lck->lk.mask; // atomic load 2437 KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n", 2438 ticket - 1, lck)); 2439 KMP_FSYNC_RELEASING(lck); 2440 ANNOTATE_DRDPA_RELEASED(lck); 2441 polls[ticket & mask] = ticket; // atomic store 2442 return KMP_LOCK_RELEASED; 2443 } 2444 2445 static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, 2446 kmp_int32 gtid) { 2447 char const *const func = "omp_unset_lock"; 2448 KMP_MB(); /* in case another processor initialized lock */ 2449 if (lck->lk.initialized != lck) { 2450 KMP_FATAL(LockIsUninitialized, func); 2451 } 2452 if (__kmp_is_drdpa_lock_nestable(lck)) { 2453 KMP_FATAL(LockNestableUsedAsSimple, func); 2454 } 2455 if (__kmp_get_drdpa_lock_owner(lck) == -1) { 2456 KMP_FATAL(LockUnsettingFree, func); 2457 } 2458 if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) && 2459 (__kmp_get_drdpa_lock_owner(lck) != gtid)) { 2460 KMP_FATAL(LockUnsettingSetByAnother, func); 2461 } 2462 lck->lk.owner_id = 0; 2463 return __kmp_release_drdpa_lock(lck, gtid); 2464 } 2465 2466 void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) { 2467 lck->lk.location = NULL; 2468 lck->lk.mask = 0; 2469 lck->lk.num_polls = 1; 2470 lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate( 2471 lck->lk.num_polls * sizeof(*(lck->lk.polls))); 2472 lck->lk.cleanup_ticket = 0; 2473 lck->lk.old_polls = NULL; 2474 lck->lk.next_ticket = 0; 2475 lck->lk.now_serving = 0; 2476 lck->lk.owner_id = 0; // no thread owns the lock. 2477 lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks. 
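// Store the lock's own address as the "initialized" marker; the
// *_with_checks routines detect an uninitialized lock by testing
// lk.initialized != lck.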
2478 lck->lk.initialized = lck; 2479 2480 KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck)); 2481 } 2482 2483 void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) { 2484 lck->lk.initialized = NULL; 2485 lck->lk.location = NULL; 2486 if (lck->lk.polls.load() != NULL) { 2487 __kmp_free(lck->lk.polls.load()); 2488 lck->lk.polls = NULL; 2489 } 2490 if (lck->lk.old_polls != NULL) { 2491 __kmp_free(lck->lk.old_polls); 2492 lck->lk.old_polls = NULL; 2493 } 2494 lck->lk.mask = 0; 2495 lck->lk.num_polls = 0; 2496 lck->lk.cleanup_ticket = 0; 2497 lck->lk.next_ticket = 0; 2498 lck->lk.now_serving = 0; 2499 lck->lk.owner_id = 0; 2500 lck->lk.depth_locked = -1; 2501 } 2502 2503 static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) { 2504 char const *const func = "omp_destroy_lock"; 2505 if (lck->lk.initialized != lck) { 2506 KMP_FATAL(LockIsUninitialized, func); 2507 } 2508 if (__kmp_is_drdpa_lock_nestable(lck)) { 2509 KMP_FATAL(LockNestableUsedAsSimple, func); 2510 } 2511 if (__kmp_get_drdpa_lock_owner(lck) != -1) { 2512 KMP_FATAL(LockStillOwned, func); 2513 } 2514 __kmp_destroy_drdpa_lock(lck); 2515 } 2516 2517 // nested drdpa ticket locks 2518 2519 int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { 2520 KMP_DEBUG_ASSERT(gtid >= 0); 2521 2522 if (__kmp_get_drdpa_lock_owner(lck) == gtid) { 2523 lck->lk.depth_locked += 1; 2524 return KMP_LOCK_ACQUIRED_NEXT; 2525 } else { 2526 __kmp_acquire_drdpa_lock_timed_template(lck, gtid); 2527 ANNOTATE_DRDPA_ACQUIRED(lck); 2528 KMP_MB(); 2529 lck->lk.depth_locked = 1; 2530 KMP_MB(); 2531 lck->lk.owner_id = gtid + 1; 2532 return KMP_LOCK_ACQUIRED_FIRST; 2533 } 2534 } 2535 2536 static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, 2537 kmp_int32 gtid) { 2538 char const *const func = "omp_set_nest_lock"; 2539 if (lck->lk.initialized != lck) { 2540 KMP_FATAL(LockIsUninitialized, func); 2541 } 2542 if (!__kmp_is_drdpa_lock_nestable(lck)) { 2543 KMP_FATAL(LockSimpleUsedAsNestable, func); 2544 } 2545 __kmp_acquire_nested_drdpa_lock(lck, gtid); 2546 } 2547 2548 int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { 2549 int retval; 2550 2551 KMP_DEBUG_ASSERT(gtid >= 0); 2552 2553 if (__kmp_get_drdpa_lock_owner(lck) == gtid) { 2554 retval = ++lck->lk.depth_locked; 2555 } else if (!__kmp_test_drdpa_lock(lck, gtid)) { 2556 retval = 0; 2557 } else { 2558 KMP_MB(); 2559 retval = lck->lk.depth_locked = 1; 2560 KMP_MB(); 2561 lck->lk.owner_id = gtid + 1; 2562 } 2563 return retval; 2564 } 2565 2566 static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, 2567 kmp_int32 gtid) { 2568 char const *const func = "omp_test_nest_lock"; 2569 if (lck->lk.initialized != lck) { 2570 KMP_FATAL(LockIsUninitialized, func); 2571 } 2572 if (!__kmp_is_drdpa_lock_nestable(lck)) { 2573 KMP_FATAL(LockSimpleUsedAsNestable, func); 2574 } 2575 return __kmp_test_nested_drdpa_lock(lck, gtid); 2576 } 2577 2578 int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) { 2579 KMP_DEBUG_ASSERT(gtid >= 0); 2580 2581 KMP_MB(); 2582 if (--(lck->lk.depth_locked) == 0) { 2583 KMP_MB(); 2584 lck->lk.owner_id = 0; 2585 __kmp_release_drdpa_lock(lck, gtid); 2586 return KMP_LOCK_RELEASED; 2587 } 2588 return KMP_LOCK_STILL_HELD; 2589 } 2590 2591 static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck, 2592 kmp_int32 gtid) { 2593 char const *const func = "omp_unset_nest_lock"; 2594 KMP_MB(); /* in case another processor initialized lock */ 2595 if 
(lck->lk.initialized != lck) { 2596 KMP_FATAL(LockIsUninitialized, func); 2597 } 2598 if (!__kmp_is_drdpa_lock_nestable(lck)) { 2599 KMP_FATAL(LockSimpleUsedAsNestable, func); 2600 } 2601 if (__kmp_get_drdpa_lock_owner(lck) == -1) { 2602 KMP_FATAL(LockUnsettingFree, func); 2603 } 2604 if (__kmp_get_drdpa_lock_owner(lck) != gtid) { 2605 KMP_FATAL(LockUnsettingSetByAnother, func); 2606 } 2607 return __kmp_release_nested_drdpa_lock(lck, gtid); 2608 } 2609 2610 void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) { 2611 __kmp_init_drdpa_lock(lck); 2612 lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks 2613 } 2614 2615 void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) { 2616 __kmp_destroy_drdpa_lock(lck); 2617 lck->lk.depth_locked = 0; 2618 } 2619 2620 static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) { 2621 char const *const func = "omp_destroy_nest_lock"; 2622 if (lck->lk.initialized != lck) { 2623 KMP_FATAL(LockIsUninitialized, func); 2624 } 2625 if (!__kmp_is_drdpa_lock_nestable(lck)) { 2626 KMP_FATAL(LockSimpleUsedAsNestable, func); 2627 } 2628 if (__kmp_get_drdpa_lock_owner(lck) != -1) { 2629 KMP_FATAL(LockStillOwned, func); 2630 } 2631 __kmp_destroy_nested_drdpa_lock(lck); 2632 } 2633 2634 // access functions to fields which don't exist for all lock kinds. 2635 2636 static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) { 2637 return lck->lk.location; 2638 } 2639 2640 static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck, 2641 const ident_t *loc) { 2642 lck->lk.location = loc; 2643 } 2644 2645 static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) { 2646 return lck->lk.flags; 2647 } 2648 2649 static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck, 2650 kmp_lock_flags_t flags) { 2651 lck->lk.flags = flags; 2652 } 2653 2654 // Time stamp counter 2655 #if KMP_ARCH_X86 || KMP_ARCH_X86_64 2656 #define __kmp_tsc() __kmp_hardware_timestamp() 2657 // Runtime's default backoff parameters 2658 kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100}; 2659 #else 2660 // Use nanoseconds for other platforms 2661 extern kmp_uint64 __kmp_now_nsec(); 2662 kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100}; 2663 #define __kmp_tsc() __kmp_now_nsec() 2664 #endif 2665 2666 // A useful predicate for dealing with timestamps that may wrap. 2667 // Is a before b? Since the timestamps may wrap, this is asking whether it's 2668 // shorter to go clockwise from a to b around the clock-face, or anti-clockwise. 2669 // Times where going clockwise is less distance than going anti-clockwise 2670 // are in the future, others are in the past. e.g. a = MAX-1, b = MAX+1 (=0), 2671 // then a > b (true) does not mean a reached b; whereas signed(a) = -2, 2672 // signed(b) = 0 captures the actual difference 2673 static inline bool before(kmp_uint64 a, kmp_uint64 b) { 2674 return ((kmp_int64)b - (kmp_int64)a) > 0; 2675 } 2676 2677 // Truncated binary exponential backoff function 2678 void __kmp_spin_backoff(kmp_backoff_t *boff) { 2679 // We could flatten this loop, but making it a nested loop gives better result 2680 kmp_uint32 i; 2681 for (i = boff->step; i > 0; i--) { 2682 kmp_uint64 goal = __kmp_tsc() + boff->min_tick; 2683 do { 2684 KMP_CPU_PAUSE(); 2685 } while (before(__kmp_tsc(), goal)); 2686 } 2687 boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1); 2688 } 2689 2690 #if KMP_USE_DYNAMIC_LOCK 2691 2692 // Direct lock initializers. 
It simply writes a tag to the low 8 bits of the 2693 // lock word. 2694 static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck, 2695 kmp_dyna_lockseq_t seq) { 2696 TCW_4(*lck, KMP_GET_D_TAG(seq)); 2697 KA_TRACE( 2698 20, 2699 ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq)); 2700 } 2701 2702 #if KMP_USE_TSX 2703 2704 // HLE lock functions - imported from the testbed runtime. 2705 #define HLE_ACQUIRE ".byte 0xf2;" 2706 #define HLE_RELEASE ".byte 0xf3;" 2707 2708 static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) { 2709 __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory"); 2710 return v; 2711 } 2712 2713 static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); } 2714 2715 static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) { 2716 TCW_4(*lck, 0); 2717 } 2718 2719 static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) { 2720 // Use gtid for KMP_LOCK_BUSY if necessary 2721 if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) { 2722 int delay = 1; 2723 do { 2724 while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) { 2725 for (int i = delay; i != 0; --i) 2726 KMP_CPU_PAUSE(); 2727 delay = ((delay << 1) | 1) & 7; 2728 } 2729 } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)); 2730 } 2731 } 2732 2733 static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck, 2734 kmp_int32 gtid) { 2735 __kmp_acquire_hle_lock(lck, gtid); // TODO: add checks 2736 } 2737 2738 static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) { 2739 __asm__ volatile(HLE_RELEASE "movl %1,%0" 2740 : "=m"(*lck) 2741 : "r"(KMP_LOCK_FREE(hle)) 2742 : "memory"); 2743 return KMP_LOCK_RELEASED; 2744 } 2745 2746 static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck, 2747 kmp_int32 gtid) { 2748 return __kmp_release_hle_lock(lck, gtid); // TODO: add checks 2749 } 2750 2751 static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) { 2752 return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle); 2753 } 2754 2755 static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck, 2756 kmp_int32 gtid) { 2757 return __kmp_test_hle_lock(lck, gtid); // TODO: add checks 2758 } 2759 2760 static void __kmp_init_rtm_queuing_lock(kmp_queuing_lock_t *lck) { 2761 __kmp_init_queuing_lock(lck); 2762 } 2763 2764 static void __kmp_destroy_rtm_queuing_lock(kmp_queuing_lock_t *lck) { 2765 __kmp_destroy_queuing_lock(lck); 2766 } 2767 2768 static void 2769 __kmp_destroy_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { 2770 __kmp_destroy_queuing_lock_with_checks(lck); 2771 } 2772 2773 KMP_ATTRIBUTE_TARGET_RTM 2774 static void __kmp_acquire_rtm_queuing_lock(kmp_queuing_lock_t *lck, 2775 kmp_int32 gtid) { 2776 unsigned retries = 3, status; 2777 do { 2778 status = _xbegin(); 2779 if (status == _XBEGIN_STARTED) { 2780 if (__kmp_is_unlocked_queuing_lock(lck)) 2781 return; 2782 _xabort(0xff); 2783 } 2784 if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) { 2785 // Wait until lock becomes free 2786 while (!__kmp_is_unlocked_queuing_lock(lck)) { 2787 KMP_YIELD(TRUE); 2788 } 2789 } else if (!(status & _XABORT_RETRY)) 2790 break; 2791 } while (retries--); 2792 2793 // Fall-back non-speculative lock (xchg) 2794 __kmp_acquire_queuing_lock(lck, gtid); 2795 } 2796 2797 static void __kmp_acquire_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 2798 kmp_int32 gtid) { 2799 __kmp_acquire_rtm_queuing_lock(lck, gtid); 2800 } 2801 2802 KMP_ATTRIBUTE_TARGET_RTM 
2803 static int __kmp_release_rtm_queuing_lock(kmp_queuing_lock_t *lck, 2804 kmp_int32 gtid) { 2805 if (__kmp_is_unlocked_queuing_lock(lck)) { 2806 // Releasing from speculation 2807 _xend(); 2808 } else { 2809 // Releasing from a real lock 2810 __kmp_release_queuing_lock(lck, gtid); 2811 } 2812 return KMP_LOCK_RELEASED; 2813 } 2814 2815 static int __kmp_release_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 2816 kmp_int32 gtid) { 2817 return __kmp_release_rtm_queuing_lock(lck, gtid); 2818 } 2819 2820 KMP_ATTRIBUTE_TARGET_RTM 2821 static int __kmp_test_rtm_queuing_lock(kmp_queuing_lock_t *lck, 2822 kmp_int32 gtid) { 2823 unsigned retries = 3, status; 2824 do { 2825 status = _xbegin(); 2826 if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) { 2827 return 1; 2828 } 2829 if (!(status & _XABORT_RETRY)) 2830 break; 2831 } while (retries--); 2832 2833 return __kmp_test_queuing_lock(lck, gtid); 2834 } 2835 2836 static int __kmp_test_rtm_queuing_lock_with_checks(kmp_queuing_lock_t *lck, 2837 kmp_int32 gtid) { 2838 return __kmp_test_rtm_queuing_lock(lck, gtid); 2839 } 2840 2841 // Reuse kmp_tas_lock_t for TSX lock which use RTM with fall-back spin lock. 2842 typedef kmp_tas_lock_t kmp_rtm_spin_lock_t; 2843 2844 static void __kmp_destroy_rtm_spin_lock(kmp_rtm_spin_lock_t *lck) { 2845 KMP_ATOMIC_ST_REL(&lck->lk.poll, 0); 2846 } 2847 2848 static void __kmp_destroy_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck) { 2849 __kmp_destroy_rtm_spin_lock(lck); 2850 } 2851 2852 KMP_ATTRIBUTE_TARGET_RTM 2853 static int __kmp_acquire_rtm_spin_lock(kmp_rtm_spin_lock_t *lck, 2854 kmp_int32 gtid) { 2855 unsigned retries = 3, status; 2856 kmp_int32 lock_free = KMP_LOCK_FREE(rtm_spin); 2857 kmp_int32 lock_busy = KMP_LOCK_BUSY(1, rtm_spin); 2858 do { 2859 status = _xbegin(); 2860 if (status == _XBEGIN_STARTED) { 2861 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free) 2862 return KMP_LOCK_ACQUIRED_FIRST; 2863 _xabort(0xff); 2864 } 2865 if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) { 2866 // Wait until lock becomes free 2867 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free) { 2868 KMP_YIELD(TRUE); 2869 } 2870 } else if (!(status & _XABORT_RETRY)) 2871 break; 2872 } while (retries--); 2873 2874 // Fall-back spin lock 2875 KMP_FSYNC_PREPARE(lck); 2876 kmp_backoff_t backoff = __kmp_spin_backoff_params; 2877 while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != lock_free || 2878 !__kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) { 2879 __kmp_spin_backoff(&backoff); 2880 } 2881 KMP_FSYNC_ACQUIRED(lck); 2882 return KMP_LOCK_ACQUIRED_FIRST; 2883 } 2884 2885 static int __kmp_acquire_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck, 2886 kmp_int32 gtid) { 2887 return __kmp_acquire_rtm_spin_lock(lck, gtid); 2888 } 2889 2890 KMP_ATTRIBUTE_TARGET_RTM 2891 static int __kmp_release_rtm_spin_lock(kmp_rtm_spin_lock_t *lck, 2892 kmp_int32 gtid) { 2893 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == KMP_LOCK_FREE(rtm_spin)) { 2894 // Releasing from speculation 2895 _xend(); 2896 } else { 2897 // Releasing from a real lock 2898 KMP_FSYNC_RELEASING(lck); 2899 KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(rtm_spin)); 2900 } 2901 return KMP_LOCK_RELEASED; 2902 } 2903 2904 static int __kmp_release_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck, 2905 kmp_int32 gtid) { 2906 return __kmp_release_rtm_spin_lock(lck, gtid); 2907 } 2908 2909 KMP_ATTRIBUTE_TARGET_RTM 2910 static int __kmp_test_rtm_spin_lock(kmp_rtm_spin_lock_t *lck, kmp_int32 gtid) { 2911 unsigned retries = 3, status; 2912 
kmp_int32 lock_free = KMP_LOCK_FREE(rtm_spin); 2913 kmp_int32 lock_busy = KMP_LOCK_BUSY(1, rtm_spin); 2914 do { 2915 status = _xbegin(); 2916 if (status == _XBEGIN_STARTED && 2917 KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free) { 2918 return TRUE; 2919 } 2920 if (!(status & _XABORT_RETRY)) 2921 break; 2922 } while (retries--); 2923 2924 if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == lock_free && 2925 __kmp_atomic_compare_store_acq(&lck->lk.poll, lock_free, lock_busy)) { 2926 KMP_FSYNC_ACQUIRED(lck); 2927 return TRUE; 2928 } 2929 return FALSE; 2930 } 2931 2932 static int __kmp_test_rtm_spin_lock_with_checks(kmp_rtm_spin_lock_t *lck, 2933 kmp_int32 gtid) { 2934 return __kmp_test_rtm_spin_lock(lck, gtid); 2935 } 2936 2937 #endif // KMP_USE_TSX 2938 2939 // Entry functions for indirect locks (first element of direct lock jump tables) 2940 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l, 2941 kmp_dyna_lockseq_t tag); 2942 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock); 2943 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32); 2944 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32); 2945 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32); 2946 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock, 2947 kmp_int32); 2948 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock, 2949 kmp_int32); 2950 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock, 2951 kmp_int32); 2952 2953 // Lock function definitions for the union parameter type 2954 #define KMP_FOREACH_LOCK_KIND(m, a) m(ticket, a) m(queuing, a) m(drdpa, a) 2955 2956 #define expand1(lk, op) \ 2957 static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) { \ 2958 __kmp_##op##_##lk##_##lock(&lock->lk); \ 2959 } 2960 #define expand2(lk, op) \ 2961 static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock, \ 2962 kmp_int32 gtid) { \ 2963 return __kmp_##op##_##lk##_##lock(&lock->lk, gtid); \ 2964 } 2965 #define expand3(lk, op) \ 2966 static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock, \ 2967 kmp_lock_flags_t flags) { \ 2968 __kmp_set_##lk##_lock_flags(&lock->lk, flags); \ 2969 } 2970 #define expand4(lk, op) \ 2971 static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock, \ 2972 const ident_t *loc) { \ 2973 __kmp_set_##lk##_lock_location(&lock->lk, loc); \ 2974 } 2975 2976 KMP_FOREACH_LOCK_KIND(expand1, init) 2977 KMP_FOREACH_LOCK_KIND(expand1, init_nested) 2978 KMP_FOREACH_LOCK_KIND(expand1, destroy) 2979 KMP_FOREACH_LOCK_KIND(expand1, destroy_nested) 2980 KMP_FOREACH_LOCK_KIND(expand2, acquire) 2981 KMP_FOREACH_LOCK_KIND(expand2, acquire_nested) 2982 KMP_FOREACH_LOCK_KIND(expand2, release) 2983 KMP_FOREACH_LOCK_KIND(expand2, release_nested) 2984 KMP_FOREACH_LOCK_KIND(expand2, test) 2985 KMP_FOREACH_LOCK_KIND(expand2, test_nested) 2986 KMP_FOREACH_LOCK_KIND(expand3, ) 2987 KMP_FOREACH_LOCK_KIND(expand4, ) 2988 2989 #undef expand1 2990 #undef expand2 2991 #undef expand3 2992 #undef expand4 2993 2994 // Jump tables for the indirect lock functions 2995 // Only fill in the odd entries, that avoids the need to shift out the low bit 2996 2997 // init functions 2998 #define expand(l, op) 0, __kmp_init_direct_lock, 2999 void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = { 3000 __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)}; 3001 #undef expand 3002 3003 // destroy functions 3004 #define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock, 3005 static void 
(*direct_destroy[])(kmp_dyna_lock_t *) = { 3006 __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)}; 3007 #undef expand 3008 #define expand(l, op) \ 3009 0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks, 3010 static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = { 3011 __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)}; 3012 #undef expand 3013 3014 // set/acquire functions 3015 #define expand(l, op) \ 3016 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock, 3017 static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = { 3018 __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)}; 3019 #undef expand 3020 #define expand(l, op) \ 3021 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks, 3022 static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = { 3023 __kmp_set_indirect_lock_with_checks, 0, 3024 KMP_FOREACH_D_LOCK(expand, acquire)}; 3025 #undef expand 3026 3027 // unset/release and test functions 3028 #define expand(l, op) \ 3029 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock, 3030 static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = { 3031 __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)}; 3032 static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = { 3033 __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)}; 3034 #undef expand 3035 #define expand(l, op) \ 3036 0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks, 3037 static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = { 3038 __kmp_unset_indirect_lock_with_checks, 0, 3039 KMP_FOREACH_D_LOCK(expand, release)}; 3040 static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = { 3041 __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)}; 3042 #undef expand 3043 3044 // Exposes only one set of jump tables (*lock or *lock_with_checks). 
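// __kmp_init_dynamic_user_locks() points these at either the unchecked tables
// (direct_set, direct_unset, direct_test, direct_destroy) or their *_check
// counterparts, depending on __kmp_env_consistency_check, so dispatch sites
// pay for consistency checking only when it is enabled.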
3045 void (**__kmp_direct_destroy)(kmp_dyna_lock_t *) = 0; 3046 int (**__kmp_direct_set)(kmp_dyna_lock_t *, kmp_int32) = 0; 3047 int (**__kmp_direct_unset)(kmp_dyna_lock_t *, kmp_int32) = 0; 3048 int (**__kmp_direct_test)(kmp_dyna_lock_t *, kmp_int32) = 0; 3049 3050 // Jump tables for the indirect lock functions 3051 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock, 3052 void (*__kmp_indirect_init[])(kmp_user_lock_p) = { 3053 KMP_FOREACH_I_LOCK(expand, init)}; 3054 #undef expand 3055 3056 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock, 3057 static void (*indirect_destroy[])(kmp_user_lock_p) = { 3058 KMP_FOREACH_I_LOCK(expand, destroy)}; 3059 #undef expand 3060 #define expand(l, op) \ 3061 (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks, 3062 static void (*indirect_destroy_check[])(kmp_user_lock_p) = { 3063 KMP_FOREACH_I_LOCK(expand, destroy)}; 3064 #undef expand 3065 3066 // set/acquire functions 3067 #define expand(l, op) \ 3068 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock, 3069 static int (*indirect_set[])(kmp_user_lock_p, 3070 kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)}; 3071 #undef expand 3072 #define expand(l, op) \ 3073 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks, 3074 static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = { 3075 KMP_FOREACH_I_LOCK(expand, acquire)}; 3076 #undef expand 3077 3078 // unset/release and test functions 3079 #define expand(l, op) \ 3080 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock, 3081 static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = { 3082 KMP_FOREACH_I_LOCK(expand, release)}; 3083 static int (*indirect_test[])(kmp_user_lock_p, 3084 kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)}; 3085 #undef expand 3086 #define expand(l, op) \ 3087 (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks, 3088 static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = { 3089 KMP_FOREACH_I_LOCK(expand, release)}; 3090 static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = { 3091 KMP_FOREACH_I_LOCK(expand, test)}; 3092 #undef expand 3093 3094 // Exposes only one jump tables (*lock or *lock_with_checks). 3095 void (**__kmp_indirect_destroy)(kmp_user_lock_p) = 0; 3096 int (**__kmp_indirect_set)(kmp_user_lock_p, kmp_int32) = 0; 3097 int (**__kmp_indirect_unset)(kmp_user_lock_p, kmp_int32) = 0; 3098 int (**__kmp_indirect_test)(kmp_user_lock_p, kmp_int32) = 0; 3099 3100 // Lock index table. 3101 kmp_indirect_lock_table_t __kmp_i_lock_table; 3102 3103 // Size of indirect locks. 3104 static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0}; 3105 3106 // Jump tables for lock accessor/modifier. 3107 void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p, 3108 const ident_t *) = {0}; 3109 void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p, 3110 kmp_lock_flags_t) = {0}; 3111 const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])( 3112 kmp_user_lock_p) = {0}; 3113 kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])( 3114 kmp_user_lock_p) = {0}; 3115 3116 // Use different lock pools for different lock types. 3117 static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0}; 3118 3119 // User lock allocator for dynamically dispatched indirect locks. 
Every entry of 3120 // the indirect lock table holds the address and type of the allocated indirect 3121 // lock (kmp_indirect_lock_t), and the size of the table doubles when it is 3122 // full. A destroyed indirect lock object is returned to the reusable pool of 3123 // locks, unique to each lock type. 3124 kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock, 3125 kmp_int32 gtid, 3126 kmp_indirect_locktag_t tag) { 3127 kmp_indirect_lock_t *lck; 3128 kmp_lock_index_t idx; 3129 3130 __kmp_acquire_lock(&__kmp_global_lock, gtid); 3131 3132 if (__kmp_indirect_lock_pool[tag] != NULL) { 3133 // Reuse the allocated and destroyed lock object 3134 lck = __kmp_indirect_lock_pool[tag]; 3135 if (OMP_LOCK_T_SIZE < sizeof(void *)) 3136 idx = lck->lock->pool.index; 3137 __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next; 3138 KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n", 3139 lck)); 3140 } else { 3141 idx = __kmp_i_lock_table.next; 3142 // Check capacity and double the size if it is full 3143 if (idx == __kmp_i_lock_table.size) { 3144 // Double up the space for block pointers 3145 int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; 3146 kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate( 3147 2 * row * sizeof(kmp_indirect_lock_t *)); 3148 KMP_MEMCPY(new_table, __kmp_i_lock_table.table, 3149 row * sizeof(kmp_indirect_lock_t *)); 3150 kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table; 3151 __kmp_i_lock_table.table = new_table; 3152 __kmp_free(old_table); 3153 // Allocate new objects in the new blocks 3154 for (int i = row; i < 2 * row; ++i) 3155 *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate( 3156 KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t)); 3157 __kmp_i_lock_table.size = 2 * idx; 3158 } 3159 __kmp_i_lock_table.next++; 3160 lck = KMP_GET_I_LOCK(idx); 3161 // Allocate a new base lock object 3162 lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]); 3163 KA_TRACE(20, 3164 ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck)); 3165 } 3166 3167 __kmp_release_lock(&__kmp_global_lock, gtid); 3168 3169 lck->type = tag; 3170 3171 if (OMP_LOCK_T_SIZE < sizeof(void *)) { 3172 *((kmp_lock_index_t *)user_lock) = idx 3173 << 1; // indirect lock word must be even 3174 } else { 3175 *((kmp_indirect_lock_t **)user_lock) = lck; 3176 } 3177 3178 return lck; 3179 } 3180 3181 // User lock lookup for dynamically dispatched locks. 
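// When OMP_LOCK_T_SIZE < sizeof(void *), the user's lock variable holds the
// table index shifted left by one (kept even -- see the "indirect lock word
// must be even" note in __kmp_allocate_indirect_lock()); otherwise it holds
// the kmp_indirect_lock_t pointer directly. The lookup below reverses that
// encoding, with extra validation when consistency checking is enabled.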
3182 static __forceinline kmp_indirect_lock_t * 3183 __kmp_lookup_indirect_lock(void **user_lock, const char *func) { 3184 if (__kmp_env_consistency_check) { 3185 kmp_indirect_lock_t *lck = NULL; 3186 if (user_lock == NULL) { 3187 KMP_FATAL(LockIsUninitialized, func); 3188 } 3189 if (OMP_LOCK_T_SIZE < sizeof(void *)) { 3190 kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock); 3191 if (idx >= __kmp_i_lock_table.size) { 3192 KMP_FATAL(LockIsUninitialized, func); 3193 } 3194 lck = KMP_GET_I_LOCK(idx); 3195 } else { 3196 lck = *((kmp_indirect_lock_t **)user_lock); 3197 } 3198 if (lck == NULL) { 3199 KMP_FATAL(LockIsUninitialized, func); 3200 } 3201 return lck; 3202 } else { 3203 if (OMP_LOCK_T_SIZE < sizeof(void *)) { 3204 return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock)); 3205 } else { 3206 return *((kmp_indirect_lock_t **)user_lock); 3207 } 3208 } 3209 } 3210 3211 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock, 3212 kmp_dyna_lockseq_t seq) { 3213 #if KMP_USE_ADAPTIVE_LOCKS 3214 if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) { 3215 KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive"); 3216 seq = lockseq_queuing; 3217 } 3218 #endif 3219 #if KMP_USE_TSX 3220 if (seq == lockseq_rtm_queuing && !__kmp_cpuinfo.rtm) { 3221 seq = lockseq_queuing; 3222 } 3223 #endif 3224 kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq); 3225 kmp_indirect_lock_t *l = 3226 __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag); 3227 KMP_I_LOCK_FUNC(l, init)(l->lock); 3228 KA_TRACE( 3229 20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n", 3230 seq)); 3231 } 3232 3233 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) { 3234 kmp_uint32 gtid = __kmp_entry_gtid(); 3235 kmp_indirect_lock_t *l = 3236 __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock"); 3237 KMP_I_LOCK_FUNC(l, destroy)(l->lock); 3238 kmp_indirect_locktag_t tag = l->type; 3239 3240 __kmp_acquire_lock(&__kmp_global_lock, gtid); 3241 3242 // Use the base lock's space to keep the pool chain. 
3243 l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag]; 3244 if (OMP_LOCK_T_SIZE < sizeof(void *)) { 3245 l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock); 3246 } 3247 __kmp_indirect_lock_pool[tag] = l; 3248 3249 __kmp_release_lock(&__kmp_global_lock, gtid); 3250 } 3251 3252 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) { 3253 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock); 3254 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid); 3255 } 3256 3257 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) { 3258 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock); 3259 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid); 3260 } 3261 3262 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) { 3263 kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock); 3264 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid); 3265 } 3266 3267 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock, 3268 kmp_int32 gtid) { 3269 kmp_indirect_lock_t *l = 3270 __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock"); 3271 return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid); 3272 } 3273 3274 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock, 3275 kmp_int32 gtid) { 3276 kmp_indirect_lock_t *l = 3277 __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock"); 3278 return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid); 3279 } 3280 3281 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock, 3282 kmp_int32 gtid) { 3283 kmp_indirect_lock_t *l = 3284 __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock"); 3285 return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid); 3286 } 3287 3288 kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing; 3289 3290 // This is used only in kmp_error.cpp when consistency checking is on. 3291 kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) { 3292 switch (seq) { 3293 case lockseq_tas: 3294 case lockseq_nested_tas: 3295 return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck); 3296 #if KMP_USE_FUTEX 3297 case lockseq_futex: 3298 case lockseq_nested_futex: 3299 return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck); 3300 #endif 3301 case lockseq_ticket: 3302 case lockseq_nested_ticket: 3303 return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck); 3304 case lockseq_queuing: 3305 case lockseq_nested_queuing: 3306 #if KMP_USE_ADAPTIVE_LOCKS 3307 case lockseq_adaptive: 3308 #endif 3309 return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck); 3310 case lockseq_drdpa: 3311 case lockseq_nested_drdpa: 3312 return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck); 3313 default: 3314 return 0; 3315 } 3316 } 3317 3318 // Initializes data for dynamic user locks. 
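// Selects the checked or unchecked jump tables according to
// __kmp_env_consistency_check. The indirect-lock table allocation and the
// per-tag lock sizes are set up only on the first call (guarded by
// __kmp_init_user_locks); later calls may still switch between the checked
// and unchecked tables.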
3319 void __kmp_init_dynamic_user_locks() { 3320 // Initialize jump table for the lock functions 3321 if (__kmp_env_consistency_check) { 3322 __kmp_direct_set = direct_set_check; 3323 __kmp_direct_unset = direct_unset_check; 3324 __kmp_direct_test = direct_test_check; 3325 __kmp_direct_destroy = direct_destroy_check; 3326 __kmp_indirect_set = indirect_set_check; 3327 __kmp_indirect_unset = indirect_unset_check; 3328 __kmp_indirect_test = indirect_test_check; 3329 __kmp_indirect_destroy = indirect_destroy_check; 3330 } else { 3331 __kmp_direct_set = direct_set; 3332 __kmp_direct_unset = direct_unset; 3333 __kmp_direct_test = direct_test; 3334 __kmp_direct_destroy = direct_destroy; 3335 __kmp_indirect_set = indirect_set; 3336 __kmp_indirect_unset = indirect_unset; 3337 __kmp_indirect_test = indirect_test; 3338 __kmp_indirect_destroy = indirect_destroy; 3339 } 3340 // If the user locks have already been initialized, then return. Allow the 3341 // switch between different KMP_CONSISTENCY_CHECK values, but do not allocate 3342 // new lock tables if they have already been allocated. 3343 if (__kmp_init_user_locks) 3344 return; 3345 3346 // Initialize lock index table 3347 __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK; 3348 __kmp_i_lock_table.table = 3349 (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *)); 3350 *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate( 3351 KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t)); 3352 __kmp_i_lock_table.next = 0; 3353 3354 // Indirect lock size 3355 __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t); 3356 __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t); 3357 #if KMP_USE_ADAPTIVE_LOCKS 3358 __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t); 3359 #endif 3360 __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t); 3361 #if KMP_USE_TSX 3362 __kmp_indirect_lock_size[locktag_rtm_queuing] = sizeof(kmp_queuing_lock_t); 3363 #endif 3364 __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t); 3365 #if KMP_USE_FUTEX 3366 __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t); 3367 #endif 3368 __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t); 3369 __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t); 3370 __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t); 3371 3372 // Initialize lock accessor/modifier 3373 #define fill_jumps(table, expand, sep) \ 3374 { \ 3375 table[locktag##sep##ticket] = expand(ticket); \ 3376 table[locktag##sep##queuing] = expand(queuing); \ 3377 table[locktag##sep##drdpa] = expand(drdpa); \ 3378 } 3379 3380 #if KMP_USE_ADAPTIVE_LOCKS 3381 #define fill_table(table, expand) \ 3382 { \ 3383 fill_jumps(table, expand, _); \ 3384 table[locktag_adaptive] = expand(queuing); \ 3385 fill_jumps(table, expand, _nested_); \ 3386 } 3387 #else 3388 #define fill_table(table, expand) \ 3389 { \ 3390 fill_jumps(table, expand, _); \ 3391 fill_jumps(table, expand, _nested_); \ 3392 } 3393 #endif // KMP_USE_ADAPTIVE_LOCKS 3394 3395 #define expand(l) \ 3396 (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location 3397 fill_table(__kmp_indirect_set_location, expand); 3398 #undef expand 3399 #define expand(l) \ 3400 (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags 3401 fill_table(__kmp_indirect_set_flags, expand); 3402 #undef expand 3403 #define expand(l) \ 3404 (const ident_t *(*)(kmp_user_lock_p)) 
__kmp_get_##l##_lock_location 3405 fill_table(__kmp_indirect_get_location, expand); 3406 #undef expand 3407 #define expand(l) \ 3408 (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags 3409 fill_table(__kmp_indirect_get_flags, expand); 3410 #undef expand 3411 3412 __kmp_init_user_locks = TRUE; 3413 } 3414 3415 // Clean up the lock table. 3416 void __kmp_cleanup_indirect_user_locks() { 3417 kmp_lock_index_t i; 3418 int k; 3419 3420 // Clean up locks in the pools first (they were already destroyed before going 3421 // into the pools). 3422 for (k = 0; k < KMP_NUM_I_LOCKS; ++k) { 3423 kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k]; 3424 while (l != NULL) { 3425 kmp_indirect_lock_t *ll = l; 3426 l = (kmp_indirect_lock_t *)l->lock->pool.next; 3427 KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n", 3428 ll)); 3429 __kmp_free(ll->lock); 3430 ll->lock = NULL; 3431 } 3432 __kmp_indirect_lock_pool[k] = NULL; 3433 } 3434 // Clean up the remaining undestroyed locks. 3435 for (i = 0; i < __kmp_i_lock_table.next; i++) { 3436 kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i); 3437 if (l->lock != NULL) { 3438 // Locks not destroyed explicitly need to be destroyed here. 3439 KMP_I_LOCK_FUNC(l, destroy)(l->lock); 3440 KA_TRACE( 3441 20, 3442 ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n", 3443 l)); 3444 __kmp_free(l->lock); 3445 } 3446 } 3447 // Free the table 3448 for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++) 3449 __kmp_free(__kmp_i_lock_table.table[i]); 3450 __kmp_free(__kmp_i_lock_table.table); 3451 3452 __kmp_init_user_locks = FALSE; 3453 } 3454 3455 enum kmp_lock_kind __kmp_user_lock_kind = lk_default; 3456 int __kmp_num_locks_in_block = 1; // FIXME - tune this value 3457 3458 #else // KMP_USE_DYNAMIC_LOCK 3459 3460 static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) { 3461 __kmp_init_tas_lock(lck); 3462 } 3463 3464 static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) { 3465 __kmp_init_nested_tas_lock(lck); 3466 } 3467 3468 #if KMP_USE_FUTEX 3469 static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) { 3470 __kmp_init_futex_lock(lck); 3471 } 3472 3473 static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) { 3474 __kmp_init_nested_futex_lock(lck); 3475 } 3476 #endif 3477 3478 static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) { 3479 return lck == lck->lk.self; 3480 } 3481 3482 static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) { 3483 __kmp_init_ticket_lock(lck); 3484 } 3485 3486 static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) { 3487 __kmp_init_nested_ticket_lock(lck); 3488 } 3489 3490 static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) { 3491 return lck == lck->lk.initialized; 3492 } 3493 3494 static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { 3495 __kmp_init_queuing_lock(lck); 3496 } 3497 3498 static void 3499 __kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) { 3500 __kmp_init_nested_queuing_lock(lck); 3501 } 3502 3503 #if KMP_USE_ADAPTIVE_LOCKS 3504 static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) { 3505 __kmp_init_adaptive_lock(lck); 3506 } 3507 #endif 3508 3509 static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) { 3510 return lck == lck->lk.initialized; 3511 } 3512 3513 static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) { 3514 __kmp_init_drdpa_lock(lck); 3515 } 3516 3517 
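// The __kmp_is_*_lock_initialized helpers above all rely on the same
// convention: initializing a lock stores the lock object's own address in its
// first field ("self" for ticket locks, "initialized" for queuing/drdpa
// locks), so a lock counts as initialized exactly when that field points back
// at the lock itself. A minimal sketch of the idea (illustrative only, not
// part of the library; assumes a queuing lock object lck):
//
//   kmp_queuing_lock_t *lck = ...;
//   __kmp_init_queuing_lock(lck);                 // records the self-pointer
//   KMP_DEBUG_ASSERT(__kmp_is_queuing_lock_initialized(lck));  // now true
//
// Pool entries reuse this storage for a free-list "next" pointer, which is
// why the same check is used in __kmp_cleanup_user_locks() below to skip them.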
static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) { 3518 __kmp_init_nested_drdpa_lock(lck); 3519 } 3520 3521 /* user locks 3522 * They are implemented as a table of function pointers which are set to the 3523 * lock functions of the appropriate kind, once that has been determined. */ 3524 3525 enum kmp_lock_kind __kmp_user_lock_kind = lk_default; 3526 3527 size_t __kmp_base_user_lock_size = 0; 3528 size_t __kmp_user_lock_size = 0; 3529 3530 kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL; 3531 int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck, 3532 kmp_int32 gtid) = NULL; 3533 3534 int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck, 3535 kmp_int32 gtid) = NULL; 3536 int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck, 3537 kmp_int32 gtid) = NULL; 3538 void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL; 3539 void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL; 3540 void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL; 3541 int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck, 3542 kmp_int32 gtid) = NULL; 3543 3544 int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck, 3545 kmp_int32 gtid) = NULL; 3546 int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck, 3547 kmp_int32 gtid) = NULL; 3548 void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL; 3549 void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL; 3550 3551 int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL; 3552 const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL; 3553 void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck, 3554 const ident_t *loc) = NULL; 3555 kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL; 3556 void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck, 3557 kmp_lock_flags_t flags) = NULL; 3558 3559 void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) { 3560 switch (user_lock_kind) { 3561 case lk_default: 3562 default: 3563 KMP_ASSERT(0); 3564 3565 case lk_tas: { 3566 __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t); 3567 __kmp_user_lock_size = sizeof(kmp_tas_lock_t); 3568 3569 __kmp_get_user_lock_owner_ = 3570 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner); 3571 3572 if (__kmp_env_consistency_check) { 3573 KMP_BIND_USER_LOCK_WITH_CHECKS(tas); 3574 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas); 3575 } else { 3576 KMP_BIND_USER_LOCK(tas); 3577 KMP_BIND_NESTED_USER_LOCK(tas); 3578 } 3579 3580 __kmp_destroy_user_lock_ = 3581 (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock); 3582 3583 __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL; 3584 3585 __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL; 3586 3587 __kmp_set_user_lock_location_ = 3588 (void (*)(kmp_user_lock_p, const ident_t *))NULL; 3589 3590 __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL; 3591 3592 __kmp_set_user_lock_flags_ = 3593 (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL; 3594 } break; 3595 3596 #if KMP_USE_FUTEX 3597 3598 case lk_futex: { 3599 __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t); 3600 __kmp_user_lock_size = sizeof(kmp_futex_lock_t); 3601 3602 __kmp_get_user_lock_owner_ = 3603 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner); 3604 3605 if (__kmp_env_consistency_check) { 3606 KMP_BIND_USER_LOCK_WITH_CHECKS(futex); 3607 
KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex); 3608 } else { 3609 KMP_BIND_USER_LOCK(futex); 3610 KMP_BIND_NESTED_USER_LOCK(futex); 3611 } 3612 3613 __kmp_destroy_user_lock_ = 3614 (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock); 3615 3616 __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL; 3617 3618 __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL; 3619 3620 __kmp_set_user_lock_location_ = 3621 (void (*)(kmp_user_lock_p, const ident_t *))NULL; 3622 3623 __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL; 3624 3625 __kmp_set_user_lock_flags_ = 3626 (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL; 3627 } break; 3628 3629 #endif // KMP_USE_FUTEX 3630 3631 case lk_ticket: { 3632 __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t); 3633 __kmp_user_lock_size = sizeof(kmp_ticket_lock_t); 3634 3635 __kmp_get_user_lock_owner_ = 3636 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner); 3637 3638 if (__kmp_env_consistency_check) { 3639 KMP_BIND_USER_LOCK_WITH_CHECKS(ticket); 3640 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket); 3641 } else { 3642 KMP_BIND_USER_LOCK(ticket); 3643 KMP_BIND_NESTED_USER_LOCK(ticket); 3644 } 3645 3646 __kmp_destroy_user_lock_ = 3647 (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock); 3648 3649 __kmp_is_user_lock_initialized_ = 3650 (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized); 3651 3652 __kmp_get_user_lock_location_ = 3653 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location); 3654 3655 __kmp_set_user_lock_location_ = (void (*)( 3656 kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location); 3657 3658 __kmp_get_user_lock_flags_ = 3659 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags); 3660 3661 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))( 3662 &__kmp_set_ticket_lock_flags); 3663 } break; 3664 3665 case lk_queuing: { 3666 __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t); 3667 __kmp_user_lock_size = sizeof(kmp_queuing_lock_t); 3668 3669 __kmp_get_user_lock_owner_ = 3670 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner); 3671 3672 if (__kmp_env_consistency_check) { 3673 KMP_BIND_USER_LOCK_WITH_CHECKS(queuing); 3674 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing); 3675 } else { 3676 KMP_BIND_USER_LOCK(queuing); 3677 KMP_BIND_NESTED_USER_LOCK(queuing); 3678 } 3679 3680 __kmp_destroy_user_lock_ = 3681 (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock); 3682 3683 __kmp_is_user_lock_initialized_ = 3684 (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized); 3685 3686 __kmp_get_user_lock_location_ = 3687 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location); 3688 3689 __kmp_set_user_lock_location_ = (void (*)( 3690 kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location); 3691 3692 __kmp_get_user_lock_flags_ = 3693 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags); 3694 3695 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))( 3696 &__kmp_set_queuing_lock_flags); 3697 } break; 3698 3699 #if KMP_USE_ADAPTIVE_LOCKS 3700 case lk_adaptive: { 3701 __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t); 3702 __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t); 3703 3704 __kmp_get_user_lock_owner_ = 3705 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner); 3706 3707 if (__kmp_env_consistency_check) { 3708 KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive); 3709 } else { 3710 
KMP_BIND_USER_LOCK(adaptive); 3711 } 3712 3713 __kmp_destroy_user_lock_ = 3714 (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock); 3715 3716 __kmp_is_user_lock_initialized_ = 3717 (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized); 3718 3719 __kmp_get_user_lock_location_ = 3720 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location); 3721 3722 __kmp_set_user_lock_location_ = (void (*)( 3723 kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location); 3724 3725 __kmp_get_user_lock_flags_ = 3726 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags); 3727 3728 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))( 3729 &__kmp_set_queuing_lock_flags); 3730 3731 } break; 3732 #endif // KMP_USE_ADAPTIVE_LOCKS 3733 3734 case lk_drdpa: { 3735 __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t); 3736 __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t); 3737 3738 __kmp_get_user_lock_owner_ = 3739 (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner); 3740 3741 if (__kmp_env_consistency_check) { 3742 KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa); 3743 KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa); 3744 } else { 3745 KMP_BIND_USER_LOCK(drdpa); 3746 KMP_BIND_NESTED_USER_LOCK(drdpa); 3747 } 3748 3749 __kmp_destroy_user_lock_ = 3750 (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock); 3751 3752 __kmp_is_user_lock_initialized_ = 3753 (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized); 3754 3755 __kmp_get_user_lock_location_ = 3756 (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location); 3757 3758 __kmp_set_user_lock_location_ = (void (*)( 3759 kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location); 3760 3761 __kmp_get_user_lock_flags_ = 3762 (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags); 3763 3764 __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))( 3765 &__kmp_set_drdpa_lock_flags); 3766 } break; 3767 } 3768 } 3769 3770 // ---------------------------------------------------------------------------- 3771 // User lock table & lock allocation 3772 3773 kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL}; 3774 kmp_user_lock_p __kmp_lock_pool = NULL; 3775 3776 // Lock block-allocation support. 3777 kmp_block_of_locks *__kmp_lock_blocks = NULL; 3778 int __kmp_num_locks_in_block = 1; // FIXME - tune this value 3779 3780 static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) { 3781 // Assume that kmp_global_lock is held upon entry/exit. 3782 kmp_lock_index_t index; 3783 if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) { 3784 kmp_lock_index_t size; 3785 kmp_user_lock_p *table; 3786 // Reallocate lock table. 3787 if (__kmp_user_lock_table.allocated == 0) { 3788 size = 1024; 3789 } else { 3790 size = __kmp_user_lock_table.allocated * 2; 3791 } 3792 table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size); 3793 KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1, 3794 sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1)); 3795 table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table; 3796 // We cannot free the previous table now, since it may be in use by other 3797 // threads. So save the pointer to the previous table in the first 3798 // element of the new table. All the tables will be organized into a list, 3799 // and can be freed when the library shuts down.
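// For illustration (not literal code): after two reallocations the tables
// form a singly linked chain through element [0], newest first:
//
//   __kmp_user_lock_table.table (4096 entries)
//     table[0] --> previous table (2048 entries)
//                    [0] --> original table (1024 entries)
//                              [0] --> NULL
//
// __kmp_cleanup_user_locks() follows these element-[0] links at shutdown to
// free every table in the chain.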
3800 __kmp_user_lock_table.table = table; 3801 __kmp_user_lock_table.allocated = size; 3802 } 3803 KMP_DEBUG_ASSERT(__kmp_user_lock_table.used < 3804 __kmp_user_lock_table.allocated); 3805 index = __kmp_user_lock_table.used; 3806 __kmp_user_lock_table.table[index] = lck; 3807 ++__kmp_user_lock_table.used; 3808 return index; 3809 } 3810 3811 static kmp_user_lock_p __kmp_lock_block_allocate() { 3812 // Assume that kmp_global_lock is held upon entry/exit. 3813 static int last_index = 0; 3814 if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) { 3815 // Restart the index. 3816 last_index = 0; 3817 // Need to allocate a new block. 3818 KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0); 3819 size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block; 3820 char *buffer = 3821 (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks)); 3822 // Set up the new block. 3823 kmp_block_of_locks *new_block = 3824 (kmp_block_of_locks *)(&buffer[space_for_locks]); 3825 new_block->next_block = __kmp_lock_blocks; 3826 new_block->locks = (void *)buffer; 3827 // Publish the new block. 3828 KMP_MB(); 3829 __kmp_lock_blocks = new_block; 3830 } 3831 kmp_user_lock_p ret = (kmp_user_lock_p)(&( 3832 ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size])); 3833 last_index++; 3834 return ret; 3835 } 3836 3837 // Get memory for a lock. It may be freshly allocated memory or reused memory 3838 // from the lock pool. 3839 kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid, 3840 kmp_lock_flags_t flags) { 3841 kmp_user_lock_p lck; 3842 kmp_lock_index_t index; 3843 KMP_DEBUG_ASSERT(user_lock); 3844 3845 __kmp_acquire_lock(&__kmp_global_lock, gtid); 3846 3847 if (__kmp_lock_pool == NULL) { 3848 // Lock pool is empty. Allocate new memory. 3849 3850 // ANNOTATION: Found no good way to express the synchronization 3851 // between allocation and usage, so ignore the allocation. 3852 ANNOTATE_IGNORE_WRITES_BEGIN(); 3853 if (__kmp_num_locks_in_block <= 1) { // Tune this cutoff point. 3854 lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size); 3855 } else { 3856 lck = __kmp_lock_block_allocate(); 3857 } 3858 ANNOTATE_IGNORE_WRITES_END(); 3859 3860 // Insert the lock into the table so that it can be freed in __kmp_cleanup, 3861 // and so the debugger has info on all allocated locks. 3862 index = __kmp_lock_table_insert(lck); 3863 } else { 3864 // Pick up a lock from the pool. 3865 lck = __kmp_lock_pool; 3866 index = __kmp_lock_pool->pool.index; 3867 __kmp_lock_pool = __kmp_lock_pool->pool.next; 3868 } 3869 3870 // We could potentially differentiate between nested and regular locks 3871 // here, and do the lock table lookup for regular locks only. 3872 if (OMP_LOCK_T_SIZE < sizeof(void *)) { 3873 *((kmp_lock_index_t *)user_lock) = index; 3874 } else { 3875 *((kmp_user_lock_p *)user_lock) = lck; 3876 } 3877 3878 // Mark the lock if it is a critical section lock. 3879 __kmp_set_user_lock_flags(lck, flags); 3880 3881 __kmp_release_lock(&__kmp_global_lock, gtid); // AC: TODO move this line up 3882 3883 return lck; 3884 } 3885 3886 // Put the lock's memory into the pool for reuse.
3887 void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid, 3888 kmp_user_lock_p lck) { 3889 KMP_DEBUG_ASSERT(user_lock != NULL); 3890 KMP_DEBUG_ASSERT(lck != NULL); 3891 3892 __kmp_acquire_lock(&__kmp_global_lock, gtid); 3893 3894 lck->pool.next = __kmp_lock_pool; 3895 __kmp_lock_pool = lck; 3896 if (OMP_LOCK_T_SIZE < sizeof(void *)) { 3897 kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock); 3898 KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used); 3899 lck->pool.index = index; 3900 } 3901 3902 __kmp_release_lock(&__kmp_global_lock, gtid); 3903 } 3904 3905 kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) { 3906 kmp_user_lock_p lck = NULL; 3907 3908 if (__kmp_env_consistency_check) { 3909 if (user_lock == NULL) { 3910 KMP_FATAL(LockIsUninitialized, func); 3911 } 3912 } 3913 3914 if (OMP_LOCK_T_SIZE < sizeof(void *)) { 3915 kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock); 3916 if (__kmp_env_consistency_check) { 3917 if (!(0 < index && index < __kmp_user_lock_table.used)) { 3918 KMP_FATAL(LockIsUninitialized, func); 3919 } 3920 } 3921 KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used); 3922 KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0); 3923 lck = __kmp_user_lock_table.table[index]; 3924 } else { 3925 lck = *((kmp_user_lock_p *)user_lock); 3926 } 3927 3928 if (__kmp_env_consistency_check) { 3929 if (lck == NULL) { 3930 KMP_FATAL(LockIsUninitialized, func); 3931 } 3932 } 3933 3934 return lck; 3935 } 3936 3937 void __kmp_cleanup_user_locks(void) { 3938 // Reset the lock pool. Don't worry about the locks in the pool--we will free 3939 // them when iterating through the lock table (it includes all the locks, dead or alive). 3940 __kmp_lock_pool = NULL; 3941 3942 #define IS_CRITICAL(lck) \ 3943 ((__kmp_get_user_lock_flags_ != NULL) && \ 3944 ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section)) 3945 3946 // Loop through the lock table, freeing all locks. 3947 // Do not free item [0]; it is reserved for the lock tables list. 3948 // 3949 // FIXME - we are iterating through a list of (pointers to) objects of type 3950 // union kmp_user_lock, but we have no way of knowing whether the base type is 3951 // currently "pool" or whatever the global user lock type is. 3952 // 3953 // We are relying on the fact that for all of the user lock types 3954 // (except "tas"), the first field in the lock struct is the "initialized" 3955 // field, which is set to the address of the lock object itself when 3956 // the lock is initialized. When the union is of type "pool", the 3957 // first field is a pointer to the next object in the free list, which 3958 // will not be the same address as the object itself. 3959 // 3960 // This means that the check (*__kmp_is_user_lock_initialized_)(lck) will fail 3961 // for "pool" objects on the free list. This must happen as the "location" 3962 // field of real user locks overlaps the "index" field of "pool" objects. 3963 // 3964 // It would be better to run through the free list and remove all "pool" 3965 // objects from the lock table before executing this loop. However, 3966 // "pool" objects do not always have their index field set (only on 3967 // lin_32e), and I don't want to search the lock table for the address 3968 // of every "pool" object on the free list.
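// Put differently (illustrative sketch, not part of the algorithm): for a
// pooled entry the first pointer-sized field holds lck->pool.next, i.e. the
// address of some *other* object (or NULL), so
//
//   (*__kmp_is_user_lock_initialized_)(lck)  // compares lck against that field
//
// evaluates to FALSE and the destroy/warning branch below is skipped; the
// memory is still released by the __kmp_free(lck) call at the bottom of the
// loop (unless block allocation is in use).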
3969 while (__kmp_user_lock_table.used > 1) { 3970 const ident *loc; 3971 3972 // reduce __kmp_user_lock_table.used before freeing the lock, 3973 // so that state of locks is consistent 3974 kmp_user_lock_p lck = 3975 __kmp_user_lock_table.table[--__kmp_user_lock_table.used]; 3976 3977 if ((__kmp_is_user_lock_initialized_ != NULL) && 3978 (*__kmp_is_user_lock_initialized_)(lck)) { 3979 // Issue a warning if: KMP_CONSISTENCY_CHECK AND lock is initialized AND 3980 // it is NOT a critical section (user is not responsible for destroying 3981 // criticals) AND we know source location to report. 3982 if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) && 3983 ((loc = __kmp_get_user_lock_location(lck)) != NULL) && 3984 (loc->psource != NULL)) { 3985 kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, false); 3986 KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line); 3987 __kmp_str_loc_free(&str_loc); 3988 } 3989 3990 #ifdef KMP_DEBUG 3991 if (IS_CRITICAL(lck)) { 3992 KA_TRACE( 3993 20, 3994 ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n", 3995 lck, *(void **)lck)); 3996 } else { 3997 KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck, 3998 *(void **)lck)); 3999 } 4000 #endif // KMP_DEBUG 4001 4002 // Cleanup internal lock dynamic resources (for drdpa locks particularly). 4003 __kmp_destroy_user_lock(lck); 4004 } 4005 4006 // Free the lock if block allocation of locks is not used. 4007 if (__kmp_lock_blocks == NULL) { 4008 __kmp_free(lck); 4009 } 4010 } 4011 4012 #undef IS_CRITICAL 4013 4014 // delete lock table(s). 4015 kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table; 4016 __kmp_user_lock_table.table = NULL; 4017 __kmp_user_lock_table.allocated = 0; 4018 4019 while (table_ptr != NULL) { 4020 // In the first element we saved the pointer to the previous 4021 // (smaller) lock table. 4022 kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]); 4023 __kmp_free(table_ptr); 4024 table_ptr = next; 4025 } 4026 4027 // Free buffers allocated for blocks of locks. 4028 kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks; 4029 __kmp_lock_blocks = NULL; 4030 4031 while (block_ptr != NULL) { 4032 kmp_block_of_locks_t *next = block_ptr->next_block; 4033 __kmp_free(block_ptr->locks); 4034 // *block_ptr itself was allocated at the end of the locks vector. 4035 block_ptr = next; 4036 } 4037 4038 TCW_4(__kmp_init_user_locks, FALSE); 4039 } 4040 4041 #endif // KMP_USE_DYNAMIC_LOCK 4042
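// Usage sketch (illustrative only, not part of this file): the routines above
// sit behind the OpenMP user lock API; applications never call them directly.
// A fragment such as
//
//   omp_lock_t l;
//   omp_init_lock(&l);      // eventually allocates/initializes a kmp user lock
//   omp_set_lock(&l);       // acquire via the bound acquire routine
//   omp_unset_lock(&l);     // release
//   omp_destroy_lock(&l);   // destroy and return the memory for reuse
//
// reaches this file through the runtime entry points, which dispatch either
// through the dynamic (direct/indirect) lock tables or, when
// KMP_USE_DYNAMIC_LOCK is off, through the __kmp_*_user_lock_with_checks_
// function pointers set up by __kmp_set_user_lock_vptrs().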