1 /*
2  * kmp_lock.cpp -- lock-related functions
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 //                     The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include <stddef.h>
15 #include <atomic>
16 
17 #include "kmp.h"
18 #include "kmp_i18n.h"
19 #include "kmp_io.h"
20 #include "kmp_itt.h"
21 #include "kmp_lock.h"
22 #include "kmp_wait_release.h"
23 #include "kmp_wrapper_getpid.h"
24 
25 #include "tsan_annotations.h"
26 
27 #if KMP_USE_FUTEX
28 #include <sys/syscall.h>
29 #include <unistd.h>
30 // We should really include <futex.h>, but that causes compatibility problems on
31 // different Linux* OS distributions that either require that you include (or
32 // break when you try to include) <pci/types.h>. Since all we need is the two
33 // macros below (which are part of the kernel ABI, so can't change) we just
34 // define the constants here and don't include <futex.h>
35 #ifndef FUTEX_WAIT
36 #define FUTEX_WAIT 0
37 #endif
38 #ifndef FUTEX_WAKE
39 #define FUTEX_WAKE 1
40 #endif
41 #endif
42 
43 /* Implement spin locks for internal library use.             */
44 /* The algorithm implemented is Lamport's bakery lock [1974]. */
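/* In sketch form, a bakery/ticket acquire-release cycle looks like this
   (illustrative only; the real implementation is the ticket lock code further
   below, which also handles yielding and consistency checks):

     my_ticket = fetch_and_add(&next_ticket, 1);   // take a number
     while (now_serving != my_ticket)              // wait for our turn
       ;                                           // spin (with yields)
     ... critical section ...
     now_serving += 1;                             // admit the next waiter   */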
45 
void __kmp_validate_locks(void) {
47   int i;
48   kmp_uint32 x, y;
49 
  /* Check to make sure unsigned arithmetic wraps properly */
51   x = ~((kmp_uint32)0) - 2;
52   y = x - 2;
53 
54   for (i = 0; i < 8; ++i, ++x, ++y) {
55     kmp_uint32 z = (x - y);
56     KMP_ASSERT(z == 2);
57   }
58 
59   KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0);
60 }
61 
62 /* ------------------------------------------------------------------------ */
63 /* test and set locks */
64 
65 // For the non-nested locks, we can only assume that the first 4 bytes were
66 // allocated, since gcc only allocates 4 bytes for omp_lock_t, and the Intel
67 // compiler only allocates a 4 byte pointer on IA-32 architecture.  On
68 // Windows* OS on Intel(R) 64, we can assume that all 8 bytes were allocated.
69 //
70 // gcc reserves >= 8 bytes for nested locks, so we can assume that the
71 // entire 8 bytes were allocated for nested locks on all 64-bit platforms.
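//
// The test-and-set protocol implemented below, in outline: a thread acquires
// the lock by atomically swinging lk.poll from KMP_LOCK_FREE(tas) to
// KMP_LOCK_BUSY(gtid + 1, tas) with an acquiring compare-and-store, spinning
// (with yields and exponential backoff) while the word is non-free, and
// releases it by storing KMP_LOCK_FREE(tas) back with release semantics.
// Storing gtid + 1 rather than gtid lets 0 mean "unlocked" and lets the
// owner be recovered as poll - 1 (see __kmp_get_tas_lock_owner).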
72 
static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) {
74   return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1;
75 }
76 
static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) {
78   return lck->lk.depth_locked != -1;
79 }
80 
81 __forceinline static int
__kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
83   KMP_MB();
84 
85 #ifdef USE_LOCK_PROFILE
86   kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll);
87   if ((curr != 0) && (curr != gtid + 1))
88     __kmp_printf("LOCK CONTENTION: %p\n", lck);
89 /* else __kmp_printf( "." );*/
90 #endif /* USE_LOCK_PROFILE */
91 
92   kmp_int32 tas_free = KMP_LOCK_FREE(tas);
93   kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
94 
95   if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
96       __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
97     KMP_FSYNC_ACQUIRED(lck);
98     return KMP_LOCK_ACQUIRED_FIRST;
99   }
100 
101   kmp_uint32 spins;
102   KMP_FSYNC_PREPARE(lck);
103   KMP_INIT_YIELD(spins);
104   if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
105     KMP_YIELD(TRUE);
106   } else {
107     KMP_YIELD_SPIN(spins);
108   }
109 
110   kmp_backoff_t backoff = __kmp_spin_backoff_params;
111   while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
112          !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
113     __kmp_spin_backoff(&backoff);
114     if (TCR_4(__kmp_nth) >
115         (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
116       KMP_YIELD(TRUE);
117     } else {
118       KMP_YIELD_SPIN(spins);
119     }
120   }
121   KMP_FSYNC_ACQUIRED(lck);
122   return KMP_LOCK_ACQUIRED_FIRST;
123 }
124 
int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
126   int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
127   ANNOTATE_TAS_ACQUIRED(lck);
128   return retval;
129 }
130 
static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
132                                               kmp_int32 gtid) {
133   char const *const func = "omp_set_lock";
134   if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
135       __kmp_is_tas_lock_nestable(lck)) {
136     KMP_FATAL(LockNestableUsedAsSimple, func);
137   }
138   if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) {
139     KMP_FATAL(LockIsAlreadyOwned, func);
140   }
141   return __kmp_acquire_tas_lock(lck, gtid);
142 }
143 
int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
145   kmp_int32 tas_free = KMP_LOCK_FREE(tas);
146   kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
147   if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
148       __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
149     KMP_FSYNC_ACQUIRED(lck);
150     return TRUE;
151   }
152   return FALSE;
153 }
154 
static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
156                                            kmp_int32 gtid) {
157   char const *const func = "omp_test_lock";
158   if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
159       __kmp_is_tas_lock_nestable(lck)) {
160     KMP_FATAL(LockNestableUsedAsSimple, func);
161   }
162   return __kmp_test_tas_lock(lck, gtid);
163 }
164 
int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
166   KMP_MB(); /* Flush all pending memory write invalidates.  */
167 
168   KMP_FSYNC_RELEASING(lck);
169   ANNOTATE_TAS_RELEASED(lck);
170   KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));
171   KMP_MB(); /* Flush all pending memory write invalidates.  */
172 
173   KMP_YIELD(TCR_4(__kmp_nth) >
174             (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
175   return KMP_LOCK_RELEASED;
176 }
177 
static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
179                                               kmp_int32 gtid) {
180   char const *const func = "omp_unset_lock";
181   KMP_MB(); /* in case another processor initialized lock */
182   if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
183       __kmp_is_tas_lock_nestable(lck)) {
184     KMP_FATAL(LockNestableUsedAsSimple, func);
185   }
186   if (__kmp_get_tas_lock_owner(lck) == -1) {
187     KMP_FATAL(LockUnsettingFree, func);
188   }
189   if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) &&
190       (__kmp_get_tas_lock_owner(lck) != gtid)) {
191     KMP_FATAL(LockUnsettingSetByAnother, func);
192   }
193   return __kmp_release_tas_lock(lck, gtid);
194 }
195 
void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
197   lck->lk.poll = KMP_LOCK_FREE(tas);
198 }
199 
void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }
201 
static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
203   char const *const func = "omp_destroy_lock";
204   if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
205       __kmp_is_tas_lock_nestable(lck)) {
206     KMP_FATAL(LockNestableUsedAsSimple, func);
207   }
208   if (__kmp_get_tas_lock_owner(lck) != -1) {
209     KMP_FATAL(LockStillOwned, func);
210   }
211   __kmp_destroy_tas_lock(lck);
212 }
213 
214 // nested test and set locks
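//
// For the nested variants, lk.depth_locked distinguishes the two flavors:
// it stays -1 for simple locks and counts the recursion depth (>= 0) for
// nestable ones.  A re-acquire by the current owner just bumps the depth;
// the underlying test-and-set lock is taken only on the first acquire and
// released only when the depth drops back to zero.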
215 
int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
217   KMP_DEBUG_ASSERT(gtid >= 0);
218 
219   if (__kmp_get_tas_lock_owner(lck) == gtid) {
220     lck->lk.depth_locked += 1;
221     return KMP_LOCK_ACQUIRED_NEXT;
222   } else {
223     __kmp_acquire_tas_lock_timed_template(lck, gtid);
224     ANNOTATE_TAS_ACQUIRED(lck);
225     lck->lk.depth_locked = 1;
226     return KMP_LOCK_ACQUIRED_FIRST;
227   }
228 }
229 
static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
231                                                      kmp_int32 gtid) {
232   char const *const func = "omp_set_nest_lock";
233   if (!__kmp_is_tas_lock_nestable(lck)) {
234     KMP_FATAL(LockSimpleUsedAsNestable, func);
235   }
236   return __kmp_acquire_nested_tas_lock(lck, gtid);
237 }
238 
int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
240   int retval;
241 
242   KMP_DEBUG_ASSERT(gtid >= 0);
243 
244   if (__kmp_get_tas_lock_owner(lck) == gtid) {
245     retval = ++lck->lk.depth_locked;
246   } else if (!__kmp_test_tas_lock(lck, gtid)) {
247     retval = 0;
248   } else {
249     KMP_MB();
250     retval = lck->lk.depth_locked = 1;
251   }
252   return retval;
253 }
254 
static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
256                                                   kmp_int32 gtid) {
257   char const *const func = "omp_test_nest_lock";
258   if (!__kmp_is_tas_lock_nestable(lck)) {
259     KMP_FATAL(LockSimpleUsedAsNestable, func);
260   }
261   return __kmp_test_nested_tas_lock(lck, gtid);
262 }
263 
int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
265   KMP_DEBUG_ASSERT(gtid >= 0);
266 
267   KMP_MB();
268   if (--(lck->lk.depth_locked) == 0) {
269     __kmp_release_tas_lock(lck, gtid);
270     return KMP_LOCK_RELEASED;
271   }
272   return KMP_LOCK_STILL_HELD;
273 }
274 
static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
276                                                      kmp_int32 gtid) {
277   char const *const func = "omp_unset_nest_lock";
278   KMP_MB(); /* in case another processor initialized lock */
279   if (!__kmp_is_tas_lock_nestable(lck)) {
280     KMP_FATAL(LockSimpleUsedAsNestable, func);
281   }
282   if (__kmp_get_tas_lock_owner(lck) == -1) {
283     KMP_FATAL(LockUnsettingFree, func);
284   }
285   if (__kmp_get_tas_lock_owner(lck) != gtid) {
286     KMP_FATAL(LockUnsettingSetByAnother, func);
287   }
288   return __kmp_release_nested_tas_lock(lck, gtid);
289 }
290 
void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
292   __kmp_init_tas_lock(lck);
293   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
294 }
295 
void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
297   __kmp_destroy_tas_lock(lck);
298   lck->lk.depth_locked = 0;
299 }
300 
static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
302   char const *const func = "omp_destroy_nest_lock";
303   if (!__kmp_is_tas_lock_nestable(lck)) {
304     KMP_FATAL(LockSimpleUsedAsNestable, func);
305   }
306   if (__kmp_get_tas_lock_owner(lck) != -1) {
307     KMP_FATAL(LockStillOwned, func);
308   }
309   __kmp_destroy_nested_tas_lock(lck);
310 }
311 
312 #if KMP_USE_FUTEX
313 
314 /* ------------------------------------------------------------------------ */
315 /* futex locks */
316 
317 // futex locks are really just test and set locks, with a different method
318 // of handling contention.  They take the same amount of space as test and
319 // set locks, and are allocated the same way (i.e. use the area allocated by
320 // the compiler for non-nested locks / allocate nested locks on the heap).
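//
// Sketch of the protocol used below (illustrative; details are in the code):
// lk.poll encodes (gtid + 1) << 1 for the owner, with bit 0 set when at
// least one waiter may be blocked in the kernel.  Acquire tries to CAS the
// word from free to its own gtid code; on failure it sets bit 0 and calls
// futex_wait on the word.  Release swaps the word back to free and, if bit 0
// was set, calls futex_wake so one blocked thread can retry.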
321 
static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
323   return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
324 }
325 
static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
327   return lck->lk.depth_locked != -1;
328 }
329 
330 __forceinline static int
__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck, kmp_int32 gtid) {
332   kmp_int32 gtid_code = (gtid + 1) << 1;
333 
334   KMP_MB();
335 
336 #ifdef USE_LOCK_PROFILE
337   kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
338   if ((curr != 0) && (curr != gtid_code))
339     __kmp_printf("LOCK CONTENTION: %p\n", lck);
340 /* else __kmp_printf( "." );*/
341 #endif /* USE_LOCK_PROFILE */
342 
343   KMP_FSYNC_PREPARE(lck);
344   KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
345                   lck, lck->lk.poll, gtid));
346 
347   kmp_int32 poll_val;
348 
349   while ((poll_val = KMP_COMPARE_AND_STORE_RET32(
350               &(lck->lk.poll), KMP_LOCK_FREE(futex),
351               KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {
352 
353     kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
354     KA_TRACE(
355         1000,
356         ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
357          lck, gtid, poll_val, cond));
358 
359     // NOTE: if you try to use the following condition for this branch
360     //
361     // if ( poll_val & 1 == 0 )
362     //
    // then the 12.0 compiler has a bug where the following block will
364     // always be skipped, regardless of the value of the LSB of poll_val.
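    //
    // (Note: by C precedence "poll_val & 1 == 0" parses as
    // "poll_val & (1 == 0)", which is always 0, so that form of the test
    // would be skipped on any compiler; cond is computed separately above
    // to keep the intent explicit.)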
365     if (!cond) {
366       // Try to set the lsb in the poll to indicate to the owner
367       // thread that they need to wake this thread up.
368       if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
369                                        poll_val | KMP_LOCK_BUSY(1, futex))) {
370         KA_TRACE(
371             1000,
372             ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
373              lck, lck->lk.poll, gtid));
374         continue;
375       }
376       poll_val |= KMP_LOCK_BUSY(1, futex);
377 
378       KA_TRACE(1000,
379                ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n", lck,
380                 lck->lk.poll, gtid));
381     }
382 
383     KA_TRACE(
384         1000,
385         ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
386          lck, gtid, poll_val));
387 
388     kmp_int32 rc;
389     if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
390                       NULL, 0)) != 0) {
391       KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
392                       "failed (rc=%d errno=%d)\n",
393                       lck, gtid, poll_val, rc, errno));
394       continue;
395     }
396 
397     KA_TRACE(1000,
398              ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
399               lck, gtid, poll_val));
400     // This thread has now done a successful futex wait call and was entered on
401     // the OS futex queue.  We must now perform a futex wake call when releasing
402     // the lock, as we have no idea how many other threads are in the queue.
403     gtid_code |= 1;
404   }
405 
406   KMP_FSYNC_ACQUIRED(lck);
407   KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
408                   lck->lk.poll, gtid));
409   return KMP_LOCK_ACQUIRED_FIRST;
410 }
411 
int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
413   int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
414   ANNOTATE_FUTEX_ACQUIRED(lck);
415   return retval;
416 }
417 
static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
419                                                 kmp_int32 gtid) {
420   char const *const func = "omp_set_lock";
421   if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
422       __kmp_is_futex_lock_nestable(lck)) {
423     KMP_FATAL(LockNestableUsedAsSimple, func);
424   }
425   if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
426     KMP_FATAL(LockIsAlreadyOwned, func);
427   }
428   return __kmp_acquire_futex_lock(lck, gtid);
429 }
430 
int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
432   if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
433                                   KMP_LOCK_BUSY((gtid + 1) << 1, futex))) {
434     KMP_FSYNC_ACQUIRED(lck);
435     return TRUE;
436   }
437   return FALSE;
438 }
439 
static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
441                                              kmp_int32 gtid) {
442   char const *const func = "omp_test_lock";
443   if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
444       __kmp_is_futex_lock_nestable(lck)) {
445     KMP_FATAL(LockNestableUsedAsSimple, func);
446   }
447   return __kmp_test_futex_lock(lck, gtid);
448 }
449 
int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
451   KMP_MB(); /* Flush all pending memory write invalidates.  */
452 
453   KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
454                   lck, lck->lk.poll, gtid));
455 
456   KMP_FSYNC_RELEASING(lck);
457   ANNOTATE_FUTEX_RELEASED(lck);
458 
459   kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));
460 
461   KA_TRACE(1000,
462            ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
463             lck, gtid, poll_val));
464 
465   if (KMP_LOCK_STRIP(poll_val) & 1) {
466     KA_TRACE(1000,
467              ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
468               lck, gtid));
469     syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
470             NULL, NULL, 0);
471   }
472 
473   KMP_MB(); /* Flush all pending memory write invalidates.  */
474 
475   KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
476                   lck->lk.poll, gtid));
477 
478   KMP_YIELD(TCR_4(__kmp_nth) >
479             (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
480   return KMP_LOCK_RELEASED;
481 }
482 
static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
484                                                 kmp_int32 gtid) {
485   char const *const func = "omp_unset_lock";
486   KMP_MB(); /* in case another processor initialized lock */
487   if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
488       __kmp_is_futex_lock_nestable(lck)) {
489     KMP_FATAL(LockNestableUsedAsSimple, func);
490   }
491   if (__kmp_get_futex_lock_owner(lck) == -1) {
492     KMP_FATAL(LockUnsettingFree, func);
493   }
494   if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
495       (__kmp_get_futex_lock_owner(lck) != gtid)) {
496     KMP_FATAL(LockUnsettingSetByAnother, func);
497   }
498   return __kmp_release_futex_lock(lck, gtid);
499 }
500 
void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
502   TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
503 }
504 
void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }
506 
static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
508   char const *const func = "omp_destroy_lock";
509   if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
510       __kmp_is_futex_lock_nestable(lck)) {
511     KMP_FATAL(LockNestableUsedAsSimple, func);
512   }
513   if (__kmp_get_futex_lock_owner(lck) != -1) {
514     KMP_FATAL(LockStillOwned, func);
515   }
516   __kmp_destroy_futex_lock(lck);
517 }
518 
519 // nested futex locks
520 
int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
522   KMP_DEBUG_ASSERT(gtid >= 0);
523 
524   if (__kmp_get_futex_lock_owner(lck) == gtid) {
525     lck->lk.depth_locked += 1;
526     return KMP_LOCK_ACQUIRED_NEXT;
527   } else {
528     __kmp_acquire_futex_lock_timed_template(lck, gtid);
529     ANNOTATE_FUTEX_ACQUIRED(lck);
530     lck->lk.depth_locked = 1;
531     return KMP_LOCK_ACQUIRED_FIRST;
532   }
533 }
534 
static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
536                                                        kmp_int32 gtid) {
537   char const *const func = "omp_set_nest_lock";
538   if (!__kmp_is_futex_lock_nestable(lck)) {
539     KMP_FATAL(LockSimpleUsedAsNestable, func);
540   }
541   return __kmp_acquire_nested_futex_lock(lck, gtid);
542 }
543 
int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
545   int retval;
546 
547   KMP_DEBUG_ASSERT(gtid >= 0);
548 
549   if (__kmp_get_futex_lock_owner(lck) == gtid) {
550     retval = ++lck->lk.depth_locked;
551   } else if (!__kmp_test_futex_lock(lck, gtid)) {
552     retval = 0;
553   } else {
554     KMP_MB();
555     retval = lck->lk.depth_locked = 1;
556   }
557   return retval;
558 }
559 
static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
561                                                     kmp_int32 gtid) {
562   char const *const func = "omp_test_nest_lock";
563   if (!__kmp_is_futex_lock_nestable(lck)) {
564     KMP_FATAL(LockSimpleUsedAsNestable, func);
565   }
566   return __kmp_test_nested_futex_lock(lck, gtid);
567 }
568 
int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
570   KMP_DEBUG_ASSERT(gtid >= 0);
571 
572   KMP_MB();
573   if (--(lck->lk.depth_locked) == 0) {
574     __kmp_release_futex_lock(lck, gtid);
575     return KMP_LOCK_RELEASED;
576   }
577   return KMP_LOCK_STILL_HELD;
578 }
579 
static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
581                                                        kmp_int32 gtid) {
582   char const *const func = "omp_unset_nest_lock";
583   KMP_MB(); /* in case another processor initialized lock */
584   if (!__kmp_is_futex_lock_nestable(lck)) {
585     KMP_FATAL(LockSimpleUsedAsNestable, func);
586   }
587   if (__kmp_get_futex_lock_owner(lck) == -1) {
588     KMP_FATAL(LockUnsettingFree, func);
589   }
590   if (__kmp_get_futex_lock_owner(lck) != gtid) {
591     KMP_FATAL(LockUnsettingSetByAnother, func);
592   }
593   return __kmp_release_nested_futex_lock(lck, gtid);
594 }
595 
void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
597   __kmp_init_futex_lock(lck);
598   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
599 }
600 
void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
602   __kmp_destroy_futex_lock(lck);
603   lck->lk.depth_locked = 0;
604 }
605 
static void __kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
607   char const *const func = "omp_destroy_nest_lock";
608   if (!__kmp_is_futex_lock_nestable(lck)) {
609     KMP_FATAL(LockSimpleUsedAsNestable, func);
610   }
611   if (__kmp_get_futex_lock_owner(lck) != -1) {
612     KMP_FATAL(LockStillOwned, func);
613   }
614   __kmp_destroy_nested_futex_lock(lck);
615 }
616 
617 #endif // KMP_USE_FUTEX
618 
619 /* ------------------------------------------------------------------------ */
620 /* ticket (bakery) locks */
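// In outline, the implementation below keeps two std::atomic<unsigned>
// counters, next_ticket and now_serving.  Acquire is a relaxed fetch_add on
// next_ticket followed by an acquire-ordered wait until now_serving equals
// the ticket obtained; release is a release-ordered increment of now_serving.
// owner_id and depth_locked are maintained for the error-checking and nested
// variants.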
621 
static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) {
623   return std::atomic_load_explicit(&lck->lk.owner_id,
624                                    std::memory_order_relaxed) -
625          1;
626 }
627 
static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) {
629   return std::atomic_load_explicit(&lck->lk.depth_locked,
630                                    std::memory_order_relaxed) != -1;
631 }
632 
static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) {
634   return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
635                                    std::memory_order_acquire) == my_ticket;
636 }
637 
638 __forceinline static int
__kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
640                                          kmp_int32 gtid) {
641   kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
642       &lck->lk.next_ticket, 1U, std::memory_order_relaxed);
643 
644 #ifdef USE_LOCK_PROFILE
645   if (std::atomic_load_explicit(&lck->lk.now_serving,
646                                 std::memory_order_relaxed) != my_ticket)
647     __kmp_printf("LOCK CONTENTION: %p\n", lck);
648 /* else __kmp_printf( "." );*/
649 #endif /* USE_LOCK_PROFILE */
650 
651   if (std::atomic_load_explicit(&lck->lk.now_serving,
652                                 std::memory_order_acquire) == my_ticket) {
653     return KMP_LOCK_ACQUIRED_FIRST;
654   }
655   KMP_WAIT_YIELD_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
656   return KMP_LOCK_ACQUIRED_FIRST;
657 }
658 
int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
660   int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
661   ANNOTATE_TICKET_ACQUIRED(lck);
662   return retval;
663 }
664 
static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
666                                                  kmp_int32 gtid) {
667   char const *const func = "omp_set_lock";
668 
669   if (!std::atomic_load_explicit(&lck->lk.initialized,
670                                  std::memory_order_relaxed)) {
671     KMP_FATAL(LockIsUninitialized, func);
672   }
673   if (lck->lk.self != lck) {
674     KMP_FATAL(LockIsUninitialized, func);
675   }
676   if (__kmp_is_ticket_lock_nestable(lck)) {
677     KMP_FATAL(LockNestableUsedAsSimple, func);
678   }
679   if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) {
680     KMP_FATAL(LockIsAlreadyOwned, func);
681   }
682 
683   __kmp_acquire_ticket_lock(lck, gtid);
684 
685   std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
686                              std::memory_order_relaxed);
687   return KMP_LOCK_ACQUIRED_FIRST;
688 }
689 
int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
691   kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
692                                                    std::memory_order_relaxed);
693 
694   if (std::atomic_load_explicit(&lck->lk.now_serving,
695                                 std::memory_order_relaxed) == my_ticket) {
696     kmp_uint32 next_ticket = my_ticket + 1;
697     if (std::atomic_compare_exchange_strong_explicit(
698             &lck->lk.next_ticket, &my_ticket, next_ticket,
699             std::memory_order_acquire, std::memory_order_acquire)) {
700       return TRUE;
701     }
702   }
703   return FALSE;
704 }
705 
static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
707                                               kmp_int32 gtid) {
708   char const *const func = "omp_test_lock";
709 
710   if (!std::atomic_load_explicit(&lck->lk.initialized,
711                                  std::memory_order_relaxed)) {
712     KMP_FATAL(LockIsUninitialized, func);
713   }
714   if (lck->lk.self != lck) {
715     KMP_FATAL(LockIsUninitialized, func);
716   }
717   if (__kmp_is_ticket_lock_nestable(lck)) {
718     KMP_FATAL(LockNestableUsedAsSimple, func);
719   }
720 
721   int retval = __kmp_test_ticket_lock(lck, gtid);
722 
723   if (retval) {
724     std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
725                                std::memory_order_relaxed);
726   }
727   return retval;
728 }
729 
int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
731   kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
732                                                   std::memory_order_relaxed) -
733                         std::atomic_load_explicit(&lck->lk.now_serving,
734                                                   std::memory_order_relaxed);
735 
736   ANNOTATE_TICKET_RELEASED(lck);
737   std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
738                                  std::memory_order_release);
739 
740   KMP_YIELD(distance >
741             (kmp_uint32)(__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
742   return KMP_LOCK_RELEASED;
743 }
744 
static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
746                                                  kmp_int32 gtid) {
747   char const *const func = "omp_unset_lock";
748 
749   if (!std::atomic_load_explicit(&lck->lk.initialized,
750                                  std::memory_order_relaxed)) {
751     KMP_FATAL(LockIsUninitialized, func);
752   }
753   if (lck->lk.self != lck) {
754     KMP_FATAL(LockIsUninitialized, func);
755   }
756   if (__kmp_is_ticket_lock_nestable(lck)) {
757     KMP_FATAL(LockNestableUsedAsSimple, func);
758   }
759   if (__kmp_get_ticket_lock_owner(lck) == -1) {
760     KMP_FATAL(LockUnsettingFree, func);
761   }
762   if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) &&
763       (__kmp_get_ticket_lock_owner(lck) != gtid)) {
764     KMP_FATAL(LockUnsettingSetByAnother, func);
765   }
766   std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
767   return __kmp_release_ticket_lock(lck, gtid);
768 }
769 
void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
771   lck->lk.location = NULL;
772   lck->lk.self = lck;
773   std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
774                              std::memory_order_relaxed);
775   std::atomic_store_explicit(&lck->lk.now_serving, 0U,
776                              std::memory_order_relaxed);
777   std::atomic_store_explicit(
778       &lck->lk.owner_id, 0,
779       std::memory_order_relaxed); // no thread owns the lock.
780   std::atomic_store_explicit(
781       &lck->lk.depth_locked, -1,
782       std::memory_order_relaxed); // -1 => not a nested lock.
783   std::atomic_store_explicit(&lck->lk.initialized, true,
784                              std::memory_order_release);
785 }
786 
void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
788   std::atomic_store_explicit(&lck->lk.initialized, false,
789                              std::memory_order_release);
790   lck->lk.self = NULL;
791   lck->lk.location = NULL;
792   std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
793                              std::memory_order_relaxed);
794   std::atomic_store_explicit(&lck->lk.now_serving, 0U,
795                              std::memory_order_relaxed);
796   std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
797   std::atomic_store_explicit(&lck->lk.depth_locked, -1,
798                              std::memory_order_relaxed);
799 }
800 
static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
802   char const *const func = "omp_destroy_lock";
803 
804   if (!std::atomic_load_explicit(&lck->lk.initialized,
805                                  std::memory_order_relaxed)) {
806     KMP_FATAL(LockIsUninitialized, func);
807   }
808   if (lck->lk.self != lck) {
809     KMP_FATAL(LockIsUninitialized, func);
810   }
811   if (__kmp_is_ticket_lock_nestable(lck)) {
812     KMP_FATAL(LockNestableUsedAsSimple, func);
813   }
814   if (__kmp_get_ticket_lock_owner(lck) != -1) {
815     KMP_FATAL(LockStillOwned, func);
816   }
817   __kmp_destroy_ticket_lock(lck);
818 }
819 
820 // nested ticket locks
821 
int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
823   KMP_DEBUG_ASSERT(gtid >= 0);
824 
825   if (__kmp_get_ticket_lock_owner(lck) == gtid) {
826     std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
827                                    std::memory_order_relaxed);
828     return KMP_LOCK_ACQUIRED_NEXT;
829   } else {
830     __kmp_acquire_ticket_lock_timed_template(lck, gtid);
831     ANNOTATE_TICKET_ACQUIRED(lck);
832     std::atomic_store_explicit(&lck->lk.depth_locked, 1,
833                                std::memory_order_relaxed);
834     std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
835                                std::memory_order_relaxed);
836     return KMP_LOCK_ACQUIRED_FIRST;
837   }
838 }
839 
static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
841                                                         kmp_int32 gtid) {
842   char const *const func = "omp_set_nest_lock";
843 
844   if (!std::atomic_load_explicit(&lck->lk.initialized,
845                                  std::memory_order_relaxed)) {
846     KMP_FATAL(LockIsUninitialized, func);
847   }
848   if (lck->lk.self != lck) {
849     KMP_FATAL(LockIsUninitialized, func);
850   }
851   if (!__kmp_is_ticket_lock_nestable(lck)) {
852     KMP_FATAL(LockSimpleUsedAsNestable, func);
853   }
854   return __kmp_acquire_nested_ticket_lock(lck, gtid);
855 }
856 
int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
858   int retval;
859 
860   KMP_DEBUG_ASSERT(gtid >= 0);
861 
862   if (__kmp_get_ticket_lock_owner(lck) == gtid) {
863     retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
864                                             std::memory_order_relaxed) +
865              1;
866   } else if (!__kmp_test_ticket_lock(lck, gtid)) {
867     retval = 0;
868   } else {
869     std::atomic_store_explicit(&lck->lk.depth_locked, 1,
870                                std::memory_order_relaxed);
871     std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
872                                std::memory_order_relaxed);
873     retval = 1;
874   }
875   return retval;
876 }
877 
static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
879                                                      kmp_int32 gtid) {
880   char const *const func = "omp_test_nest_lock";
881 
882   if (!std::atomic_load_explicit(&lck->lk.initialized,
883                                  std::memory_order_relaxed)) {
884     KMP_FATAL(LockIsUninitialized, func);
885   }
886   if (lck->lk.self != lck) {
887     KMP_FATAL(LockIsUninitialized, func);
888   }
889   if (!__kmp_is_ticket_lock_nestable(lck)) {
890     KMP_FATAL(LockSimpleUsedAsNestable, func);
891   }
892   return __kmp_test_nested_ticket_lock(lck, gtid);
893 }
894 
int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
896   KMP_DEBUG_ASSERT(gtid >= 0);
897 
898   if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
899                                       std::memory_order_relaxed) -
900        1) == 0) {
901     std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
902     __kmp_release_ticket_lock(lck, gtid);
903     return KMP_LOCK_RELEASED;
904   }
905   return KMP_LOCK_STILL_HELD;
906 }
907 
static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
909                                                         kmp_int32 gtid) {
910   char const *const func = "omp_unset_nest_lock";
911 
912   if (!std::atomic_load_explicit(&lck->lk.initialized,
913                                  std::memory_order_relaxed)) {
914     KMP_FATAL(LockIsUninitialized, func);
915   }
916   if (lck->lk.self != lck) {
917     KMP_FATAL(LockIsUninitialized, func);
918   }
919   if (!__kmp_is_ticket_lock_nestable(lck)) {
920     KMP_FATAL(LockSimpleUsedAsNestable, func);
921   }
922   if (__kmp_get_ticket_lock_owner(lck) == -1) {
923     KMP_FATAL(LockUnsettingFree, func);
924   }
925   if (__kmp_get_ticket_lock_owner(lck) != gtid) {
926     KMP_FATAL(LockUnsettingSetByAnother, func);
927   }
928   return __kmp_release_nested_ticket_lock(lck, gtid);
929 }
930 
void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
932   __kmp_init_ticket_lock(lck);
933   std::atomic_store_explicit(&lck->lk.depth_locked, 0,
934                              std::memory_order_relaxed);
935   // >= 0 for nestable locks, -1 for simple locks
936 }
937 
void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
939   __kmp_destroy_ticket_lock(lck);
940   std::atomic_store_explicit(&lck->lk.depth_locked, 0,
941                              std::memory_order_relaxed);
942 }
943 
944 static void
__kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
946   char const *const func = "omp_destroy_nest_lock";
947 
948   if (!std::atomic_load_explicit(&lck->lk.initialized,
949                                  std::memory_order_relaxed)) {
950     KMP_FATAL(LockIsUninitialized, func);
951   }
952   if (lck->lk.self != lck) {
953     KMP_FATAL(LockIsUninitialized, func);
954   }
955   if (!__kmp_is_ticket_lock_nestable(lck)) {
956     KMP_FATAL(LockSimpleUsedAsNestable, func);
957   }
958   if (__kmp_get_ticket_lock_owner(lck) != -1) {
959     KMP_FATAL(LockStillOwned, func);
960   }
961   __kmp_destroy_nested_ticket_lock(lck);
962 }
963 
964 // access functions to fields which don't exist for all lock kinds.
965 
static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
967   return lck->lk.location;
968 }
969 
static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
971                                            const ident_t *loc) {
972   lck->lk.location = loc;
973 }
974 
static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) {
976   return lck->lk.flags;
977 }
978 
static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
980                                         kmp_lock_flags_t flags) {
981   lck->lk.flags = flags;
982 }
983 
984 /* ------------------------------------------------------------------------ */
985 /* queuing locks */
986 
987 /* First the states
988    (head,tail) =              0, 0  means lock is unheld, nobody on queue
989                  UINT_MAX or -1, 0  means lock is held, nobody on queue
990                               h, h  means lock held or about to transition,
991                                     1 element on queue
992                               h, t  h <> t, means lock is held or about to
993                                     transition, >1 elements on queue
994 
995    Now the transitions
996       Acquire(0,0)  = -1 ,0
997       Release(0,0)  = Error
998       Acquire(-1,0) =  h ,h    h > 0
999       Release(-1,0) =  0 ,0
1000       Acquire(h,h)  =  h ,t    h > 0, t > 0, h <> t
1001       Release(h,h)  = -1 ,0    h > 0
1002       Acquire(h,t)  =  h ,t'   h > 0, t > 0, t' > 0, h <> t, h <> t', t <> t'
1003       Release(h,t)  =  h',t    h > 0, t > 0, h <> t, h <> h', h' maybe = t
1004 
1005    And pictorially
1006 
1007            +-----+
1008            | 0, 0|------- release -------> Error
1009            +-----+
1010              |  ^
1011       acquire|  |release
1012              |  |
1013              |  |
1014              v  |
1015            +-----+
1016            |-1, 0|
1017            +-----+
1018              |  ^
1019       acquire|  |release
1020              |  |
1021              |  |
1022              v  |
1023            +-----+
1024            | h, h|
1025            +-----+
1026              |  ^
1027       acquire|  |release
1028              |  |
1029              |  |
1030              v  |
1031            +-----+
1032            | h, t|----- acquire, release loopback ---+
1033            +-----+                                   |
1034                 ^                                    |
1035                 |                                    |
1036                 +------------------------------------+
1037  */
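
/* A concrete trace of those transitions (ids are gtid + 1, illustrative):
     (0,0)                     lock free, queue empty
     T#4 acquires -> (-1,0)    uncontended grab
     T#6 acquires -> ( 7,7)    T#6 enqueues and spins
     T#8 acquires -> ( 7,9)    T#8 appended; T#6's th_next_waiting = 9
     T#4 releases -> ( 9,9)    T#6 dequeued, now owns the lock
     T#6 releases -> (-1,0)    T#8 dequeued, now owns the lock
     T#8 releases -> ( 0,0)    back to free                                   */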
1038 
1039 #ifdef DEBUG_QUEUING_LOCKS
1040 
1041 /* Stuff for circular trace buffer */
1042 #define TRACE_BUF_ELE 1024
1043 static char traces[TRACE_BUF_ELE][128] = {0};
1044 static int tc = 0;
1045 #define TRACE_LOCK(X, Y)                                                       \
1046   KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
1047 #define TRACE_LOCK_T(X, Y, Z)                                                  \
1048   KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
1049 #define TRACE_LOCK_HT(X, Y, Z, Q)                                              \
1050   KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y,   \
1051                Z, Q);
1052 
static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
1054                                     kmp_queuing_lock_t *lck, kmp_int32 head_id,
1055                                     kmp_int32 tail_id) {
1056   kmp_int32 t, i;
1057 
1058   __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");
1059 
1060   i = tc % TRACE_BUF_ELE;
1061   __kmp_printf_no_lock("%s\n", traces[i]);
1062   i = (i + 1) % TRACE_BUF_ELE;
1063   while (i != (tc % TRACE_BUF_ELE)) {
1064     __kmp_printf_no_lock("%s", traces[i]);
1065     i = (i + 1) % TRACE_BUF_ELE;
1066   }
1067   __kmp_printf_no_lock("\n");
1068 
1069   __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
1070                        "next_wait:%d, head_id:%d, tail_id:%d\n",
1071                        gtid + 1, this_thr->th.th_spin_here,
1072                        this_thr->th.th_next_waiting, head_id, tail_id);
1073 
1074   __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);
1075 
1076   if (lck->lk.head_id >= 1) {
1077     t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
1078     while (t > 0) {
1079       __kmp_printf_no_lock("-> %d ", t);
1080       t = __kmp_threads[t - 1]->th.th_next_waiting;
1081     }
1082   }
1083   __kmp_printf_no_lock(";  tail: %d ", lck->lk.tail_id);
1084   __kmp_printf_no_lock("\n\n");
1085 }
1086 
1087 #endif /* DEBUG_QUEUING_LOCKS */
1088 
static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) {
1090   return TCR_4(lck->lk.owner_id) - 1;
1091 }
1092 
static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) {
1094   return lck->lk.depth_locked != -1;
1095 }
1096 
/* Acquire a lock using the queuing lock implementation */
1098 template <bool takeTime>
1099 /* [TLW] The unused template above is left behind because of what BEB believes
1100    is a potential compiler problem with __forceinline. */
1101 __forceinline static int
__kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
1103                                           kmp_int32 gtid) {
1104   kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
1105   volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1106   volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1107   volatile kmp_uint32 *spin_here_p;
1108   kmp_int32 need_mf = 1;
1109 
1110 #if OMPT_SUPPORT
1111   ompt_state_t prev_state = ompt_state_undefined;
1112 #endif
1113 
1114   KA_TRACE(1000,
1115            ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
1116 
1117   KMP_FSYNC_PREPARE(lck);
1118   KMP_DEBUG_ASSERT(this_thr != NULL);
1119   spin_here_p = &this_thr->th.th_spin_here;
1120 
1121 #ifdef DEBUG_QUEUING_LOCKS
1122   TRACE_LOCK(gtid + 1, "acq ent");
1123   if (*spin_here_p)
1124     __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1125   if (this_thr->th.th_next_waiting != 0)
1126     __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1127 #endif
1128   KMP_DEBUG_ASSERT(!*spin_here_p);
1129   KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1130 
1131   /* The following st.rel to spin_here_p needs to precede the cmpxchg.acq to
1132      head_id_p that may follow, not just in execution order, but also in
1133      visibility order. This way, when a releasing thread observes the changes to
1134      the queue by this thread, it can rightly assume that spin_here_p has
1135      already been set to TRUE, so that when it sets spin_here_p to FALSE, it is
1136      not premature.  If the releasing thread sets spin_here_p to FALSE before
1137      this thread sets it to TRUE, this thread will hang. */
1138   *spin_here_p = TRUE; /* before enqueuing to prevent race */
1139 
1140   while (1) {
1141     kmp_int32 enqueued;
1142     kmp_int32 head;
1143     kmp_int32 tail;
1144 
1145     head = *head_id_p;
1146 
1147     switch (head) {
1148 
1149     case -1: {
1150 #ifdef DEBUG_QUEUING_LOCKS
1151       tail = *tail_id_p;
1152       TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1153 #endif
1154       tail = 0; /* to make sure next link asynchronously read is not set
1155                 accidentally; this assignment prevents us from entering the
1156                 if ( t > 0 ) condition in the enqueued case below, which is not
1157                 necessary for this state transition */
1158 
1159       need_mf = 0;
1160       /* try (-1,0)->(tid,tid) */
1161       enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
1162                                              KMP_PACK_64(-1, 0),
1163                                              KMP_PACK_64(gtid + 1, gtid + 1));
1164 #ifdef DEBUG_QUEUING_LOCKS
1165       if (enqueued)
1166         TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");
1167 #endif
1168     } break;
1169 
1170     default: {
1171       tail = *tail_id_p;
1172       KMP_DEBUG_ASSERT(tail != gtid + 1);
1173 
1174 #ifdef DEBUG_QUEUING_LOCKS
1175       TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1176 #endif
1177 
1178       if (tail == 0) {
1179         enqueued = FALSE;
1180       } else {
1181         need_mf = 0;
1182         /* try (h,t) or (h,h)->(h,tid) */
1183         enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);
1184 
1185 #ifdef DEBUG_QUEUING_LOCKS
1186         if (enqueued)
1187           TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");
1188 #endif
1189       }
1190     } break;
1191 
1192     case 0: /* empty queue */
1193     {
1194       kmp_int32 grabbed_lock;
1195 
1196 #ifdef DEBUG_QUEUING_LOCKS
1197       tail = *tail_id_p;
1198       TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
1199 #endif
1200       /* try (0,0)->(-1,0) */
1201 
1202       /* only legal transition out of head = 0 is head = -1 with no change to
1203        * tail */
1204       grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1);
1205 
1206       if (grabbed_lock) {
1207 
1208         *spin_here_p = FALSE;
1209 
1210         KA_TRACE(
1211             1000,
1212             ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
1213              lck, gtid));
1214 #ifdef DEBUG_QUEUING_LOCKS
1215         TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);
1216 #endif
1217 
1218 #if OMPT_SUPPORT
1219         if (ompt_enabled.enabled && prev_state != ompt_state_undefined) {
1220           /* change the state before clearing wait_id */
1221           this_thr->th.ompt_thread_info.state = prev_state;
1222           this_thr->th.ompt_thread_info.wait_id = 0;
1223         }
1224 #endif
1225 
1226         KMP_FSYNC_ACQUIRED(lck);
1227         return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
1228       }
1229       enqueued = FALSE;
1230     } break;
1231     }
1232 
1233 #if OMPT_SUPPORT
1234     if (ompt_enabled.enabled && prev_state == ompt_state_undefined) {
1235       /* this thread will spin; set wait_id before entering wait state */
1236       prev_state = this_thr->th.ompt_thread_info.state;
1237       this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
1238       this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
1239     }
1240 #endif
1241 
1242     if (enqueued) {
1243       if (tail > 0) {
1244         kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1);
1245         KMP_ASSERT(tail_thr != NULL);
1246         tail_thr->th.th_next_waiting = gtid + 1;
1247         /* corresponding wait for this write in release code */
1248       }
1249       KA_TRACE(1000,
1250                ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
1251                 lck, gtid));
1252 
1253       /* ToDo: May want to consider using __kmp_wait_sleep  or something that
1254          sleeps for throughput only here. */
1255       KMP_MB();
1256       KMP_WAIT_YIELD(spin_here_p, FALSE, KMP_EQ, lck);
1257 
1258 #ifdef DEBUG_QUEUING_LOCKS
1259       TRACE_LOCK(gtid + 1, "acq spin");
1260 
1261       if (this_thr->th.th_next_waiting != 0)
1262         __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1263 #endif
1264       KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1265       KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
1266                       "waiting on queue\n",
1267                       lck, gtid));
1268 
1269 #ifdef DEBUG_QUEUING_LOCKS
1270       TRACE_LOCK(gtid + 1, "acq exit 2");
1271 #endif
1272 
1273 #if OMPT_SUPPORT
1274       /* change the state before clearing wait_id */
1275       this_thr->th.ompt_thread_info.state = prev_state;
1276       this_thr->th.ompt_thread_info.wait_id = 0;
1277 #endif
1278 
1279       /* got lock, we were dequeued by the thread that released lock */
1280       return KMP_LOCK_ACQUIRED_FIRST;
1281     }
1282 
1283     /* Yield if number of threads > number of logical processors */
1284     /* ToDo: Not sure why this should only be in oversubscription case,
1285        maybe should be traditional YIELD_INIT/YIELD_WHEN loop */
1286     KMP_YIELD(TCR_4(__kmp_nth) >
1287               (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
1288 #ifdef DEBUG_QUEUING_LOCKS
1289     TRACE_LOCK(gtid + 1, "acq retry");
1290 #endif
1291   }
1292   KMP_ASSERT2(0, "should not get here");
1293   return KMP_LOCK_ACQUIRED_FIRST;
1294 }
1295 
int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1297   KMP_DEBUG_ASSERT(gtid >= 0);
1298 
1299   int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1300   ANNOTATE_QUEUING_ACQUIRED(lck);
1301   return retval;
1302 }
1303 
static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1305                                                   kmp_int32 gtid) {
1306   char const *const func = "omp_set_lock";
1307   if (lck->lk.initialized != lck) {
1308     KMP_FATAL(LockIsUninitialized, func);
1309   }
1310   if (__kmp_is_queuing_lock_nestable(lck)) {
1311     KMP_FATAL(LockNestableUsedAsSimple, func);
1312   }
1313   if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1314     KMP_FATAL(LockIsAlreadyOwned, func);
1315   }
1316 
1317   __kmp_acquire_queuing_lock(lck, gtid);
1318 
1319   lck->lk.owner_id = gtid + 1;
1320   return KMP_LOCK_ACQUIRED_FIRST;
1321 }
1322 
int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1324   volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1325   kmp_int32 head;
1326 #ifdef KMP_DEBUG
1327   kmp_info_t *this_thr;
1328 #endif
1329 
1330   KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid));
1331   KMP_DEBUG_ASSERT(gtid >= 0);
1332 #ifdef KMP_DEBUG
1333   this_thr = __kmp_thread_from_gtid(gtid);
1334   KMP_DEBUG_ASSERT(this_thr != NULL);
1335   KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
1336 #endif
1337 
1338   head = *head_id_p;
1339 
1340   if (head == 0) { /* nobody on queue, nobody holding */
1341     /* try (0,0)->(-1,0) */
1342     if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) {
1343       KA_TRACE(1000,
1344                ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
1345       KMP_FSYNC_ACQUIRED(lck);
1346       ANNOTATE_QUEUING_ACQUIRED(lck);
1347       return TRUE;
1348     }
1349   }
1350 
1351   KA_TRACE(1000,
1352            ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
1353   return FALSE;
1354 }
1355 
static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1357                                                kmp_int32 gtid) {
1358   char const *const func = "omp_test_lock";
1359   if (lck->lk.initialized != lck) {
1360     KMP_FATAL(LockIsUninitialized, func);
1361   }
1362   if (__kmp_is_queuing_lock_nestable(lck)) {
1363     KMP_FATAL(LockNestableUsedAsSimple, func);
1364   }
1365 
1366   int retval = __kmp_test_queuing_lock(lck, gtid);
1367 
1368   if (retval) {
1369     lck->lk.owner_id = gtid + 1;
1370   }
1371   return retval;
1372 }
1373 
int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1375   kmp_info_t *this_thr;
1376   volatile kmp_int32 *head_id_p = &lck->lk.head_id;
1377   volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
1378 
1379   KA_TRACE(1000,
1380            ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
1381   KMP_DEBUG_ASSERT(gtid >= 0);
1382   this_thr = __kmp_thread_from_gtid(gtid);
1383   KMP_DEBUG_ASSERT(this_thr != NULL);
1384 #ifdef DEBUG_QUEUING_LOCKS
1385   TRACE_LOCK(gtid + 1, "rel ent");
1386 
1387   if (this_thr->th.th_spin_here)
1388     __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1389   if (this_thr->th.th_next_waiting != 0)
1390     __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
1391 #endif
1392   KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
1393   KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
1394 
1395   KMP_FSYNC_RELEASING(lck);
1396   ANNOTATE_QUEUING_RELEASED(lck);
1397 
1398   while (1) {
1399     kmp_int32 dequeued;
1400     kmp_int32 head;
1401     kmp_int32 tail;
1402 
1403     head = *head_id_p;
1404 
1405 #ifdef DEBUG_QUEUING_LOCKS
1406     tail = *tail_id_p;
1407     TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
1408     if (head == 0)
1409       __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1410 #endif
1411     KMP_DEBUG_ASSERT(head !=
1412                      0); /* holding the lock, head must be -1 or queue head */
1413 
1414     if (head == -1) { /* nobody on queue */
1415       /* try (-1,0)->(0,0) */
1416       if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) {
1417         KA_TRACE(
1418             1000,
1419             ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
1420              lck, gtid));
1421 #ifdef DEBUG_QUEUING_LOCKS
1422         TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);
1423 #endif
1424 
1425 #if OMPT_SUPPORT
1426 /* nothing to do - no other thread is trying to shift blame */
1427 #endif
1428         return KMP_LOCK_RELEASED;
1429       }
1430       dequeued = FALSE;
1431     } else {
1432       KMP_MB();
1433       tail = *tail_id_p;
1434       if (head == tail) { /* only one thread on the queue */
1435 #ifdef DEBUG_QUEUING_LOCKS
1436         if (head <= 0)
1437           __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1438 #endif
1439         KMP_DEBUG_ASSERT(head > 0);
1440 
1441         /* try (h,h)->(-1,0) */
1442         dequeued = KMP_COMPARE_AND_STORE_REL64(
1443             RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head),
1444             KMP_PACK_64(-1, 0));
1445 #ifdef DEBUG_QUEUING_LOCKS
1446         TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");
1447 #endif
1448 
1449       } else {
1450         volatile kmp_int32 *waiting_id_p;
1451         kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
1452         KMP_DEBUG_ASSERT(head_thr != NULL);
1453         waiting_id_p = &head_thr->th.th_next_waiting;
1454 
1455 /* Does this require synchronous reads? */
1456 #ifdef DEBUG_QUEUING_LOCKS
1457         if (head <= 0 || tail <= 0)
1458           __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1459 #endif
1460         KMP_DEBUG_ASSERT(head > 0 && tail > 0);
1461 
1462         /* try (h,t)->(h',t) or (t,t) */
1463         KMP_MB();
1464         /* make sure enqueuing thread has time to update next waiting thread
1465          * field */
1466         *head_id_p = KMP_WAIT_YIELD((volatile kmp_uint32 *)waiting_id_p, 0,
1467                                     KMP_NEQ, NULL);
1468 #ifdef DEBUG_QUEUING_LOCKS
1469         TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");
1470 #endif
1471         dequeued = TRUE;
1472       }
1473     }
1474 
1475     if (dequeued) {
1476       kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
1477       KMP_DEBUG_ASSERT(head_thr != NULL);
1478 
1479 /* Does this require synchronous reads? */
1480 #ifdef DEBUG_QUEUING_LOCKS
1481       if (head <= 0 || tail <= 0)
1482         __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
1483 #endif
1484       KMP_DEBUG_ASSERT(head > 0 && tail > 0);
1485 
1486       /* For clean code only. The thread is not released until the next
1487          statement, which prevents a race with the acquire code. */
1488       head_thr->th.th_next_waiting = 0;
1489 #ifdef DEBUG_QUEUING_LOCKS
1490       TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);
1491 #endif
1492 
1493       KMP_MB();
1494       /* reset spin value */
1495       head_thr->th.th_spin_here = FALSE;
1496 
1497       KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
1498                       "dequeuing\n",
1499                       lck, gtid));
1500 #ifdef DEBUG_QUEUING_LOCKS
1501       TRACE_LOCK(gtid + 1, "rel exit 2");
1502 #endif
1503       return KMP_LOCK_RELEASED;
1504     }
1505 /* KMP_CPU_PAUSE(); don't want to make releasing thread hold up acquiring
1506    threads */
1507 
1508 #ifdef DEBUG_QUEUING_LOCKS
1509     TRACE_LOCK(gtid + 1, "rel retry");
1510 #endif
1511 
1512   } /* while */
1513   KMP_ASSERT2(0, "should not get here");
1514   return KMP_LOCK_RELEASED;
1515 }
1516 
1517 static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1518                                                   kmp_int32 gtid) {
1519   char const *const func = "omp_unset_lock";
1520   KMP_MB(); /* in case another processor initialized lock */
1521   if (lck->lk.initialized != lck) {
1522     KMP_FATAL(LockIsUninitialized, func);
1523   }
1524   if (__kmp_is_queuing_lock_nestable(lck)) {
1525     KMP_FATAL(LockNestableUsedAsSimple, func);
1526   }
1527   if (__kmp_get_queuing_lock_owner(lck) == -1) {
1528     KMP_FATAL(LockUnsettingFree, func);
1529   }
1530   if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1531     KMP_FATAL(LockUnsettingSetByAnother, func);
1532   }
1533   lck->lk.owner_id = 0;
1534   return __kmp_release_queuing_lock(lck, gtid);
1535 }
1536 
1537 void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
1538   lck->lk.location = NULL;
1539   lck->lk.head_id = 0;
1540   lck->lk.tail_id = 0;
1541   lck->lk.next_ticket = 0;
1542   lck->lk.now_serving = 0;
1543   lck->lk.owner_id = 0; // no thread owns the lock.
1544   lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
1545   lck->lk.initialized = lck;
1546 
1547   KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
1548 }
1549 
1550 void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
1551   lck->lk.initialized = NULL;
1552   lck->lk.location = NULL;
1553   lck->lk.head_id = 0;
1554   lck->lk.tail_id = 0;
1555   lck->lk.next_ticket = 0;
1556   lck->lk.now_serving = 0;
1557   lck->lk.owner_id = 0;
1558   lck->lk.depth_locked = -1;
1559 }
1560 
1561 static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1562   char const *const func = "omp_destroy_lock";
1563   if (lck->lk.initialized != lck) {
1564     KMP_FATAL(LockIsUninitialized, func);
1565   }
1566   if (__kmp_is_queuing_lock_nestable(lck)) {
1567     KMP_FATAL(LockNestableUsedAsSimple, func);
1568   }
1569   if (__kmp_get_queuing_lock_owner(lck) != -1) {
1570     KMP_FATAL(LockStillOwned, func);
1571   }
1572   __kmp_destroy_queuing_lock(lck);
1573 }
1574 
1575 // nested queuing locks
1576 
1577 int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1578   KMP_DEBUG_ASSERT(gtid >= 0);
1579 
1580   if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1581     lck->lk.depth_locked += 1;
1582     return KMP_LOCK_ACQUIRED_NEXT;
1583   } else {
1584     __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
1585     ANNOTATE_QUEUING_ACQUIRED(lck);
1586     KMP_MB();
1587     lck->lk.depth_locked = 1;
1588     KMP_MB();
1589     lck->lk.owner_id = gtid + 1;
1590     return KMP_LOCK_ACQUIRED_FIRST;
1591   }
1592 }
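// [Illustrative sketch, not part of the runtime] How depth_locked evolves when
// a single thread sets a nest lock recursively; the gtid value (0) and the lock
// variable are hypothetical.
#if 0
kmp_queuing_lock_t lk;
__kmp_init_nested_queuing_lock(&lk);       // depth_locked == 0, lock free
__kmp_acquire_nested_queuing_lock(&lk, 0); // first acquire:   depth_locked == 1
__kmp_acquire_nested_queuing_lock(&lk, 0); // owner re-enters: depth_locked == 2
__kmp_release_nested_queuing_lock(&lk, 0); // KMP_LOCK_STILL_HELD, depth == 1
__kmp_release_nested_queuing_lock(&lk, 0); // KMP_LOCK_RELEASED, lock free again
__kmp_destroy_nested_queuing_lock(&lk);
#endif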
1593 
1594 static int
1595 __kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1596                                               kmp_int32 gtid) {
1597   char const *const func = "omp_set_nest_lock";
1598   if (lck->lk.initialized != lck) {
1599     KMP_FATAL(LockIsUninitialized, func);
1600   }
1601   if (!__kmp_is_queuing_lock_nestable(lck)) {
1602     KMP_FATAL(LockSimpleUsedAsNestable, func);
1603   }
1604   return __kmp_acquire_nested_queuing_lock(lck, gtid);
1605 }
1606 
1607 int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1608   int retval;
1609 
1610   KMP_DEBUG_ASSERT(gtid >= 0);
1611 
1612   if (__kmp_get_queuing_lock_owner(lck) == gtid) {
1613     retval = ++lck->lk.depth_locked;
1614   } else if (!__kmp_test_queuing_lock(lck, gtid)) {
1615     retval = 0;
1616   } else {
1617     KMP_MB();
1618     retval = lck->lk.depth_locked = 1;
1619     KMP_MB();
1620     lck->lk.owner_id = gtid + 1;
1621   }
1622   return retval;
1623 }
1624 
1625 static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1626                                                       kmp_int32 gtid) {
1627   char const *const func = "omp_test_nest_lock";
1628   if (lck->lk.initialized != lck) {
1629     KMP_FATAL(LockIsUninitialized, func);
1630   }
1631   if (!__kmp_is_queuing_lock_nestable(lck)) {
1632     KMP_FATAL(LockSimpleUsedAsNestable, func);
1633   }
1634   return __kmp_test_nested_queuing_lock(lck, gtid);
1635 }
1636 
1637 int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
1638   KMP_DEBUG_ASSERT(gtid >= 0);
1639 
1640   KMP_MB();
1641   if (--(lck->lk.depth_locked) == 0) {
1642     KMP_MB();
1643     lck->lk.owner_id = 0;
1644     __kmp_release_queuing_lock(lck, gtid);
1645     return KMP_LOCK_RELEASED;
1646   }
1647   return KMP_LOCK_STILL_HELD;
1648 }
1649 
1650 static int
1651 __kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
1652                                               kmp_int32 gtid) {
1653   char const *const func = "omp_unset_nest_lock";
1654   KMP_MB(); /* in case another processor initialized lock */
1655   if (lck->lk.initialized != lck) {
1656     KMP_FATAL(LockIsUninitialized, func);
1657   }
1658   if (!__kmp_is_queuing_lock_nestable(lck)) {
1659     KMP_FATAL(LockSimpleUsedAsNestable, func);
1660   }
1661   if (__kmp_get_queuing_lock_owner(lck) == -1) {
1662     KMP_FATAL(LockUnsettingFree, func);
1663   }
1664   if (__kmp_get_queuing_lock_owner(lck) != gtid) {
1665     KMP_FATAL(LockUnsettingSetByAnother, func);
1666   }
1667   return __kmp_release_nested_queuing_lock(lck, gtid);
1668 }
1669 
1670 void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1671   __kmp_init_queuing_lock(lck);
1672   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
1673 }
1674 
1675 void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
1676   __kmp_destroy_queuing_lock(lck);
1677   lck->lk.depth_locked = 0;
1678 }
1679 
1680 static void
1681 __kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
1682   char const *const func = "omp_destroy_nest_lock";
1683   if (lck->lk.initialized != lck) {
1684     KMP_FATAL(LockIsUninitialized, func);
1685   }
1686   if (!__kmp_is_queuing_lock_nestable(lck)) {
1687     KMP_FATAL(LockSimpleUsedAsNestable, func);
1688   }
1689   if (__kmp_get_queuing_lock_owner(lck) != -1) {
1690     KMP_FATAL(LockStillOwned, func);
1691   }
1692   __kmp_destroy_nested_queuing_lock(lck);
1693 }
1694 
1695 // access functions to fields which don't exist for all lock kinds.
1696 
1697 static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
1698   return lck->lk.location;
1699 }
1700 
1701 static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
1702                                             const ident_t *loc) {
1703   lck->lk.location = loc;
1704 }
1705 
1706 static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) {
1707   return lck->lk.flags;
1708 }
1709 
1710 static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
1711                                          kmp_lock_flags_t flags) {
1712   lck->lk.flags = flags;
1713 }
1714 
1715 #if KMP_USE_ADAPTIVE_LOCKS
1716 
1717 /* RTM Adaptive locks */
1718 
1719 #if (KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300) ||                          \
1720     (KMP_COMPILER_MSVC && _MSC_VER >= 1700) ||                                 \
1721     (KMP_COMPILER_CLANG && KMP_MSVC_COMPAT)
1722 
1723 #include <immintrin.h>
1724 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
1725 
1726 #else
1727 
1728 // Values from the status register after failed speculation.
1729 #define _XBEGIN_STARTED (~0u)
1730 #define _XABORT_EXPLICIT (1 << 0)
1731 #define _XABORT_RETRY (1 << 1)
1732 #define _XABORT_CONFLICT (1 << 2)
1733 #define _XABORT_CAPACITY (1 << 3)
1734 #define _XABORT_DEBUG (1 << 4)
1735 #define _XABORT_NESTED (1 << 5)
1736 #define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))
1737 
1738 // Aborts for which it's worth trying again immediately
1739 #define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)
1740 
1741 #define STRINGIZE_INTERNAL(arg) #arg
1742 #define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)
1743 
1744 // Access to RTM instructions
1745 /* A version of XBegin which returns -1 on speculation, and the value of EAX on
1746   an abort. This is the same definition as the compiler intrinsic that will be
1747   supported at some point. */
1748 static __inline int _xbegin() {
1749   int res = -1;
1750 
1751 #if KMP_OS_WINDOWS
1752 #if KMP_ARCH_X86_64
1753   _asm {
1754         _emit 0xC7
1755         _emit 0xF8
1756         _emit 2
1757         _emit 0
1758         _emit 0
1759         _emit 0
1760         jmp   L2
1761         mov   res, eax
1762     L2:
1763   }
1764 #else /* IA32 */
1765   _asm {
1766         _emit 0xC7
1767         _emit 0xF8
1768         _emit 2
1769         _emit 0
1770         _emit 0
1771         _emit 0
1772         jmp   L2
1773         mov   res, eax
1774     L2:
1775   }
1776 #endif // KMP_ARCH_X86_64
1777 #else
1778   /* Note that %eax must be noted as killed (clobbered), because the XSR is
1779      returned in %eax(%rax) on abort.  Other register values are restored, so
1780      don't need to be killed.
1781 
1782      We must also mark 'res' as an input and an output, since otherwise
1783      'res=-1' may be dropped as being dead, whereas we do need the assignment on
1784      the successful (i.e., non-abort) path. */
1785   __asm__ volatile("1: .byte  0xC7; .byte 0xF8;\n"
1786                    "   .long  1f-1b-6\n"
1787                    "    jmp   2f\n"
1788                    "1:  movl  %%eax,%0\n"
1789                    "2:"
1790                    : "+r"(res)::"memory", "%eax");
1791 #endif // KMP_OS_WINDOWS
1792   return res;
1793 }
1794 
1795 /* Transaction end */
1796 static __inline void _xend() {
1797 #if KMP_OS_WINDOWS
1798   __asm {
1799         _emit 0x0f
1800         _emit 0x01
1801         _emit 0xd5
1802   }
1803 #else
1804   __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
1805 #endif
1806 }
1807 
1808 /* This is a macro; the argument must be a single-byte constant which can be
1809    evaluated by the inline assembler, since it is emitted as a byte into the
1810    assembly code. */
1811 // clang-format off
1812 #if KMP_OS_WINDOWS
1813 #define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG
1814 #else
1815 #define _xabort(ARG)                                                           \
1816   __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
1817 #endif
1818 // clang-format on
1819 #endif // KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300
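// [Illustrative sketch, not part of the runtime] The canonical shape in which
// these primitives compose; the adaptive lock code below follows this pattern.
// _xbegin() returns _XBEGIN_STARTED while the transaction is running, or the
// abort status after a rollback; _xend() commits; _xabort(imm) rolls back with
// an explicit code.
#if 0
static int try_transactionally(void) {
  kmp_uint32 status = _xbegin();
  if (status == _XBEGIN_STARTED) {
    // ... transactional work; all memory effects are buffered ...
    _xend(); // commit the transaction
    return 1;
  }
  // Aborted: 'status' holds the reason, e.g. bits covered by SOFT_ABORT_MASK.
  return 0;
}
#endif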
1820 
1821 // Statistics are collected for testing purposes.
1822 #if KMP_DEBUG_ADAPTIVE_LOCKS
1823 
1824 // We accumulate speculative lock statistics when the lock is destroyed. We
1825 // keep locks that haven't been destroyed in the liveLocks list so that we can
1826 // grab their statistics too.
1827 static kmp_adaptive_lock_statistics_t destroyedStats;
1828 
1829 // To hold the list of live locks.
1830 static kmp_adaptive_lock_info_t liveLocks;
1831 
1832 // A lock so we can safely update the list of locks.
1833 static kmp_bootstrap_lock_t chain_lock =
1834     KMP_BOOTSTRAP_LOCK_INITIALIZER(chain_lock);
1835 
1836 // Initialize the list of stats.
1837 void __kmp_init_speculative_stats() {
1838   kmp_adaptive_lock_info_t *lck = &liveLocks;
1839 
1840   memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0,
1841          sizeof(lck->stats));
1842   lck->stats.next = lck;
1843   lck->stats.prev = lck;
1844 
1845   KMP_ASSERT(lck->stats.next->stats.prev == lck);
1846   KMP_ASSERT(lck->stats.prev->stats.next == lck);
1847 
1848   __kmp_init_bootstrap_lock(&chain_lock);
1849 }
1850 
1851 // Insert the lock into the circular list
1852 static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
1853   __kmp_acquire_bootstrap_lock(&chain_lock);
1854 
1855   lck->stats.next = liveLocks.stats.next;
1856   lck->stats.prev = &liveLocks;
1857 
1858   liveLocks.stats.next = lck;
1859   lck->stats.next->stats.prev = lck;
1860 
1861   KMP_ASSERT(lck->stats.next->stats.prev == lck);
1862   KMP_ASSERT(lck->stats.prev->stats.next == lck);
1863 
1864   __kmp_release_bootstrap_lock(&chain_lock);
1865 }
1866 
1867 static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
1868   KMP_ASSERT(lck->stats.next->stats.prev == lck);
1869   KMP_ASSERT(lck->stats.prev->stats.next == lck);
1870 
1871   kmp_adaptive_lock_info_t *n = lck->stats.next;
1872   kmp_adaptive_lock_info_t *p = lck->stats.prev;
1873 
1874   n->stats.prev = p;
1875   p->stats.next = n;
1876 }
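// Note on the two helpers above: the live-lock list is a circular,
// doubly-linked list with 'liveLocks' acting as a sentinel node. An empty list
// has liveLocks.stats.next == liveLocks.stats.prev == &liveLocks, so insertion
// and removal never need to special-case an empty or single-element list. All
// mutation happens under chain_lock; __kmp_forget_lock() relies on its caller
// (see __kmp_accumulate_speculative_stats) to hold that lock.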
1877 
1878 static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1879   memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0,
1880          sizeof(lck->stats));
1881   __kmp_remember_lock(lck);
1882 }
1883 
1884 static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
1885                             kmp_adaptive_lock_info_t *lck) {
1886   kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;
1887 
1888   t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
1889   t->successfulSpeculations += s->successfulSpeculations;
1890   t->hardFailedSpeculations += s->hardFailedSpeculations;
1891   t->softFailedSpeculations += s->softFailedSpeculations;
1892   t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
1893   t->lemmingYields += s->lemmingYields;
1894 }
1895 
1896 static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
1897   __kmp_acquire_bootstrap_lock(&chain_lock);
1898 
1899   __kmp_add_stats(&destroyedStats, lck);
1900   __kmp_forget_lock(lck);
1901 
1902   __kmp_release_bootstrap_lock(&chain_lock);
1903 }
1904 
1905 static float percent(kmp_uint32 count, kmp_uint32 total) {
1906   return (total == 0) ? 0.0 : (100.0 * count) / total;
1907 }
1908 
1909 static FILE *__kmp_open_stats_file() {
1910   if (strcmp(__kmp_speculative_statsfile, "-") == 0)
1911     return stdout;
1912 
1913   size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
1914   char buffer[buffLen];
1915   KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
1916                (kmp_int32)getpid());
1917   FILE *result = fopen(&buffer[0], "w");
1918 
1919   // Maybe we should issue a warning here...
1920   return result ? result : stdout;
1921 }
1922 
1923 void __kmp_print_speculative_stats() {
1924   kmp_adaptive_lock_statistics_t total = destroyedStats;
1925   kmp_adaptive_lock_info_t *lck;
1926 
1927   for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
1928     __kmp_add_stats(&total, lck);
1929   }
1930   kmp_adaptive_lock_statistics_t *t = &total;
1931   kmp_uint32 totalSections =
1932       t->nonSpeculativeAcquires + t->successfulSpeculations;
1933   kmp_uint32 totalSpeculations = t->successfulSpeculations +
1934                                  t->hardFailedSpeculations +
1935                                  t->softFailedSpeculations;
1936   if (totalSections <= 0)
1937     return;
1938 
1939   FILE *statsFile = __kmp_open_stats_file();
1940 
1941   fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
1942   fprintf(statsFile, " Lock parameters: \n"
1943                      "   max_soft_retries               : %10d\n"
1944                      "   max_badness                    : %10d\n",
1945           __kmp_adaptive_backoff_params.max_soft_retries,
1946           __kmp_adaptive_backoff_params.max_badness);
1947   fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
1948           t->nonSpeculativeAcquireAttempts);
1949   fprintf(statsFile, " Total critical sections          : %10d\n",
1950           totalSections);
1951   fprintf(statsFile, " Successful speculations          : %10d (%5.1f%%)\n",
1952           t->successfulSpeculations,
1953           percent(t->successfulSpeculations, totalSections));
1954   fprintf(statsFile, " Non-speculative acquires         : %10d (%5.1f%%)\n",
1955           t->nonSpeculativeAcquires,
1956           percent(t->nonSpeculativeAcquires, totalSections));
1957   fprintf(statsFile, " Lemming yields                   : %10d\n\n",
1958           t->lemmingYields);
1959 
1960   fprintf(statsFile, " Speculative acquire attempts     : %10d\n",
1961           totalSpeculations);
1962   fprintf(statsFile, " Successes                        : %10d (%5.1f%%)\n",
1963           t->successfulSpeculations,
1964           percent(t->successfulSpeculations, totalSpeculations));
1965   fprintf(statsFile, " Soft failures                    : %10d (%5.1f%%)\n",
1966           t->softFailedSpeculations,
1967           percent(t->softFailedSpeculations, totalSpeculations));
1968   fprintf(statsFile, " Hard failures                    : %10d (%5.1f%%)\n",
1969           t->hardFailedSpeculations,
1970           percent(t->hardFailedSpeculations, totalSpeculations));
1971 
1972   if (statsFile != stdout)
1973     fclose(statsFile);
1974 }
1975 
1976 #define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)
1977 #else
1978 #define KMP_INC_STAT(lck, stat)
1979 
1980 #endif // KMP_DEBUG_ADAPTIVE_LOCKS
1981 
1982 static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) {
1983   // It is enough to check that the head_id is zero;
1984   // we don't need to check the tail as well.
1985   bool res = lck->lk.head_id == 0;
1986 
1987 // We need a fence here, since we must ensure that no memory operations
1988 // from later in this thread float above that read.
1989 #if KMP_COMPILER_ICC
1990   _mm_mfence();
1991 #else
1992   __sync_synchronize();
1993 #endif
1994 
1995   return res;
1996 }
1997 
1998 // Functions for manipulating the badness
1999 static __inline void
2000 __kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
2001   // Reset the badness to zero so we eagerly try to speculate again
2002   lck->lk.adaptive.badness = 0;
2003   KMP_INC_STAT(lck, successfulSpeculations);
2004 }
2005 
2006 // Create a bit mask with one more set bit.
2007 static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
2008   kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
2009   if (newBadness > lck->lk.adaptive.max_badness) {
2010     return;
2011   } else {
2012     lck->lk.adaptive.badness = newBadness;
2013   }
2014 }
2015 
2016 // Check whether speculation should be attempted.
2017 static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
2018                                            kmp_int32 gtid) {
2019   kmp_uint32 badness = lck->lk.adaptive.badness;
2020   kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
2021   int res = (attempts & badness) == 0;
2022   return res;
2023 }
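// A worked example of the gating above: with badness == 0 every attempt may
// speculate; after one failed round of speculation badness == 0x1, so only
// every 2nd attempt (acquire_attempts with bit 0 clear) speculates; after
// another failure badness == 0x3 and only every 4th attempt speculates, and so
// on, capped at max_badness. A successful speculation resets badness to 0
// (see __kmp_update_badness_after_success above).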
2024 
2025 // Attempt to acquire only the speculative lock.
2026 // Does not back off to the non-speculative lock.
2027 static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
2028                                          kmp_int32 gtid) {
2029   int retries = lck->lk.adaptive.max_soft_retries;
2030 
2031   // We don't explicitly count the start of speculation, rather we record the
2032   // results (success, hard fail, soft fail). The sum of all of those is the
2033   // total number of times we started speculation, since every speculation must
2034   // end in one of those ways.
2035   do {
2036     kmp_uint32 status = _xbegin();
2037     // Switch this in to disable actual speculation but exercise at least some
2038     // of the rest of the code. Useful for debugging...
2039     // kmp_uint32 status = _XABORT_NESTED;
2040 
2041     if (status == _XBEGIN_STARTED) {
2042       /* We have successfully started speculation. Check that no-one acquired
2043          the lock for real between when we last looked and now. This also gets
2044          the lock cache line into our read-set, which we need so that we'll
2045          abort if anyone later claims it for real. */
2046       if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2047         // Lock is now visibly acquired, so someone beat us to it. Abort the
2048         // transaction so we'll restart from _xbegin with the failure status.
2049         _xabort(0x01);
2050         KMP_ASSERT2(0, "should not get here");
2051       }
2052       return 1; // Lock has been acquired (speculatively)
2053     } else {
2054       // We have aborted, update the statistics
2055       if (status & SOFT_ABORT_MASK) {
2056         KMP_INC_STAT(lck, softFailedSpeculations);
2057         // and loop round to retry.
2058       } else {
2059         KMP_INC_STAT(lck, hardFailedSpeculations);
2060         // Give up if we had a hard failure.
2061         break;
2062       }
2063     }
2064   } while (retries--); // Loop while we have retries, and didn't fail hard.
2065 
2066   // Either we had a hard failure, or we didn't succeed softly after the full
2067   // set of retries, so increase the badness to back off from speculating.
2068   __kmp_step_badness(lck);
2069   return 0;
2070 }
2071 
2072 // Attempt to acquire the speculative lock, or back off to the non-speculative
2073 // one if the speculative lock cannot be acquired.
2074 // We can succeed speculatively, non-speculatively, or fail.
2075 static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
2076   // First try to acquire the lock speculatively
2077   if (__kmp_should_speculate(lck, gtid) &&
2078       __kmp_test_adaptive_lock_only(lck, gtid))
2079     return 1;
2080 
2081   // Speculative acquisition failed, so try to acquire it non-speculatively.
2082   // Count the non-speculative acquire attempt
2083   lck->lk.adaptive.acquire_attempts++;
2084 
2085   // Use base, non-speculative lock.
2086   if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
2087     KMP_INC_STAT(lck, nonSpeculativeAcquires);
2088     return 1; // Lock is acquired (non-speculatively)
2089   } else {
2090     return 0; // Failed to acquire the lock, it's already visibly locked.
2091   }
2092 }
2093 
2094 static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2095                                                 kmp_int32 gtid) {
2096   char const *const func = "omp_test_lock";
2097   if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2098     KMP_FATAL(LockIsUninitialized, func);
2099   }
2100 
2101   int retval = __kmp_test_adaptive_lock(lck, gtid);
2102 
2103   if (retval) {
2104     lck->lk.qlk.owner_id = gtid + 1;
2105   }
2106   return retval;
2107 }
2108 
2109 // Block until we can acquire a speculative, adaptive lock. We check whether we
2110 // should be trying to speculate. If we should be, we check the real lock to see
2111 // if it is free, and, if not, pause without attempting to acquire it until it
2112 // is. Then we try the speculative acquire. This means that although we suffer
2113 // from lemmings a little (because we can't acquire the lock speculatively
2114 // until the queue of waiting threads has cleared), we don't get into a state
2115 // where we can never acquire the lock speculatively (because we force the queue
2116 // to clear by preventing new arrivals from entering the queue). This does mean
2117 // that when we're trying to break lemmings, the lock is no longer fair. However,
2118 // OpenMP makes no guarantee that its locks are fair, so this isn't a real
2119 // problem.
2120 static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
2121                                         kmp_int32 gtid) {
2122   if (__kmp_should_speculate(lck, gtid)) {
2123     if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2124       if (__kmp_test_adaptive_lock_only(lck, gtid))
2125         return;
2126       // We tried speculation and failed, so give up.
2127     } else {
2128       // We can't try speculation until the lock is free, so we pause here
2129       // (without suspending on the queuing lock) to allow it to drain, then
2130       // try again. All other threads will also see the same result for
2131       // shouldSpeculate, so will be doing the same if they try to claim the
2132       // lock from now on.
2133       while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
2134         KMP_INC_STAT(lck, lemmingYields);
2135         __kmp_yield(TRUE);
2136       }
2137 
2138       if (__kmp_test_adaptive_lock_only(lck, gtid))
2139         return;
2140     }
2141   }
2142 
2143   // Speculative acquisition failed, so acquire it non-speculatively.
2144   // Count the non-speculative acquire attempt
2145   lck->lk.adaptive.acquire_attempts++;
2146 
2147   __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
2148   // We have acquired the base lock, so count that.
2149   KMP_INC_STAT(lck, nonSpeculativeAcquires);
2150   ANNOTATE_QUEUING_ACQUIRED(lck);
2151 }
2152 
2153 static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2154                                                     kmp_int32 gtid) {
2155   char const *const func = "omp_set_lock";
2156   if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2157     KMP_FATAL(LockIsUninitialized, func);
2158   }
2159   if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
2160     KMP_FATAL(LockIsAlreadyOwned, func);
2161   }
2162 
2163   __kmp_acquire_adaptive_lock(lck, gtid);
2164 
2165   lck->lk.qlk.owner_id = gtid + 1;
2166 }
2167 
2168 static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
2169                                        kmp_int32 gtid) {
2170   if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
2171           lck))) { // If the lock doesn't look claimed we must be speculating.
2172     // (Or the user's code is buggy and they're releasing without locking;
2173     // if we had XTEST we'd be able to check that case...)
2174     _xend(); // Exit speculation
2175     __kmp_update_badness_after_success(lck);
2176   } else { // Since the lock *is* visibly locked we're not speculating,
2177     // so should use the underlying lock's release scheme.
2178     __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid);
2179   }
2180   return KMP_LOCK_RELEASED;
2181 }
2182 
2183 static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
2184                                                    kmp_int32 gtid) {
2185   char const *const func = "omp_unset_lock";
2186   KMP_MB(); /* in case another processor initialized lock */
2187   if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2188     KMP_FATAL(LockIsUninitialized, func);
2189   }
2190   if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) {
2191     KMP_FATAL(LockUnsettingFree, func);
2192   }
2193   if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) {
2194     KMP_FATAL(LockUnsettingSetByAnother, func);
2195   }
2196   lck->lk.qlk.owner_id = 0;
2197   __kmp_release_adaptive_lock(lck, gtid);
2198   return KMP_LOCK_RELEASED;
2199 }
2200 
2201 static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
2202   __kmp_init_queuing_lock(GET_QLK_PTR(lck));
2203   lck->lk.adaptive.badness = 0;
2204   lck->lk.adaptive.acquire_attempts = 0; // nonSpeculativeAcquireAttempts = 0;
2205   lck->lk.adaptive.max_soft_retries =
2206       __kmp_adaptive_backoff_params.max_soft_retries;
2207   lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
2208 #if KMP_DEBUG_ADAPTIVE_LOCKS
2209   __kmp_zero_speculative_stats(&lck->lk.adaptive);
2210 #endif
2211   KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
2212 }
2213 
2214 static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
2215 #if KMP_DEBUG_ADAPTIVE_LOCKS
2216   __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
2217 #endif
2218   __kmp_destroy_queuing_lock(GET_QLK_PTR(lck));
2219   // Nothing needed for the speculative part.
2220 }
2221 
2222 static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
2223   char const *const func = "omp_destroy_lock";
2224   if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
2225     KMP_FATAL(LockIsUninitialized, func);
2226   }
2227   if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) {
2228     KMP_FATAL(LockStillOwned, func);
2229   }
2230   __kmp_destroy_adaptive_lock(lck);
2231 }
2232 
2233 #endif // KMP_USE_ADAPTIVE_LOCKS
2234 
2235 /* ------------------------------------------------------------------------ */
2236 /* DRDPA ticket locks                                                */
2237 /* "DRDPA" means Dynamically Reconfigurable Distributed Polling Area */
2238 
2239 static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) {
2240   return lck->lk.owner_id - 1;
2241 }
2242 
2243 static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) {
2244   return lck->lk.depth_locked != -1;
2245 }
2246 
2247 __forceinline static int
2248 __kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2249   kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
2250   kmp_uint64 mask = lck->lk.mask; // atomic load
2251   std::atomic<kmp_uint64> *polls = lck->lk.polls;
2252 
2253 #ifdef USE_LOCK_PROFILE
2254   if (polls[ticket & mask] != ticket)
2255     __kmp_printf("LOCK CONTENTION: %p\n", lck);
2256 /* else __kmp_printf( "." );*/
2257 #endif /* USE_LOCK_PROFILE */
2258 
2259   // Now spin-wait, but reload the polls pointer and mask, in case the
2260   // polling area has been reconfigured.  Unless it is reconfigured, the
2261   // reloads stay in L1 cache and are cheap.
2262   //
2263   // Keep this code in sync with KMP_WAIT_YIELD, in kmp_dispatch.cpp !!!
2264   //
2265   // The current implementation of KMP_WAIT_YIELD doesn't allow for mask
2266   // and poll to be re-read every spin iteration.
2267   kmp_uint32 spins;
2268 
2269   KMP_FSYNC_PREPARE(lck);
2270   KMP_INIT_YIELD(spins);
2271   while (polls[ticket & mask] < ticket) { // atomic load
2272     // If we are oversubscribed,
2273     // or have waited a bit (and KMP_LIBRARY=turnaround), then yield.
2274     // CPU Pause is in the macros for yield.
2275     //
2276     KMP_YIELD(TCR_4(__kmp_nth) >
2277               (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
2278     KMP_YIELD_SPIN(spins);
2279 
2280     // Re-read the mask and the poll pointer from the lock structure.
2281     //
2282     // Make certain that "mask" is read before "polls" !!!
2283     //
2284     // If another thread reconfigures the polling area and updates these
2285     // values, and we get the new value of mask and the old polls pointer, we
2286     // could access memory beyond the end of the old polling area.
2287     mask = lck->lk.mask; // atomic load
2288     polls = lck->lk.polls; // atomic load
2289   }
2290 
2291   // Critical section starts here
2292   KMP_FSYNC_ACQUIRED(lck);
2293   KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
2294                   ticket, lck));
2295   lck->lk.now_serving = ticket; // non-volatile store
2296 
2297   // Deallocate a garbage polling area if we know that we are the last
2298   // thread that could possibly access it.
2299   //
2300   // The >= check is in case __kmp_test_drdpa_lock() allocated the cleanup
2301   // ticket.
2302   if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
2303     __kmp_free(lck->lk.old_polls);
2304     lck->lk.old_polls = NULL;
2305     lck->lk.cleanup_ticket = 0;
2306   }
2307 
2308   // Check to see if we should reconfigure the polling area.
2309   // If there is still a garbage polling area to be deallocated from a
2310   // previous reconfiguration, let a later thread reconfigure it.
2311   if (lck->lk.old_polls == NULL) {
2312     bool reconfigure = false;
2313     std::atomic<kmp_uint64> *old_polls = polls;
2314     kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);
2315 
2316     if (TCR_4(__kmp_nth) >
2317         (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
2318       // We are in oversubscription mode.  Contract the polling area
2319       // down to a single location, if that hasn't been done already.
2320       if (num_polls > 1) {
2321         reconfigure = true;
2322         num_polls = TCR_4(lck->lk.num_polls);
2323         mask = 0;
2324         num_polls = 1;
2325         polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2326                                                           sizeof(*polls));
2327         polls[0] = ticket;
2328       }
2329     } else {
2330       // We are in under/fully subscribed mode.  Check the number of
2331       // threads waiting on the lock.  The size of the polling area
2332       // should be at least the number of threads waiting.
2333       kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
2334       if (num_waiting > num_polls) {
2335         kmp_uint32 old_num_polls = num_polls;
2336         reconfigure = true;
2337         do {
2338           mask = (mask << 1) | 1;
2339           num_polls *= 2;
2340         } while (num_polls <= num_waiting);
2341 
2342         // Allocate the new polling area, and copy the relevant portion
2343         // of the old polling area to the new area.  __kmp_allocate()
2344         // zeroes the memory it allocates, and most of the old area is
2345         // just zero padding, so we only copy the release counters.
2346         polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
2347                                                           sizeof(*polls));
2348         kmp_uint32 i;
2349         for (i = 0; i < old_num_polls; i++) {
2350           polls[i].store(old_polls[i]);
2351         }
2352       }
2353     }
2354 
2355     if (reconfigure) {
2356       // Now write the updated fields back to the lock structure.
2357       //
2358       // Make certain that "polls" is written before "mask" !!!
2359       //
2360       // If another thread picks up the new value of mask and the old polls
2361       // pointer, it could access memory beyond the end of the old polling
2362       // area.
2363       //
2364       // On x86, we need memory fences.
2365       KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
2366                       "lock %p to %d polls\n",
2367                       ticket, lck, num_polls));
2368 
2369       lck->lk.old_polls = old_polls;
2370       lck->lk.polls = polls; // atomic store
2371 
2372       KMP_MB();
2373 
2374       lck->lk.num_polls = num_polls;
2375       lck->lk.mask = mask; // atomic store
2376 
2377       KMP_MB();
2378 
2379       // Only after the new polling area and mask have been flushed
2380       // to main memory can we update the cleanup ticket field.
2381       //
2382       // volatile load / non-volatile store
2383       lck->lk.cleanup_ticket = lck->lk.next_ticket;
2384     }
2385   }
2386   return KMP_LOCK_ACQUIRED_FIRST;
2387 }
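// A worked example of the reconfiguration above: initially num_polls == 1 and
// mask == 0, so every waiter spins on polls[0]. If, say, 5 threads are found
// waiting, the doubling loop produces num_polls == 8 and mask == 0x7, so the
// holder of ticket t spins on polls[t & 7] and waiters are spread across
// separate words instead of all polling the same location. Under
// oversubscription the area is contracted back to a single poll entry. The old
// area is parked in old_polls and only freed once cleanup_ticket guarantees
// that no thread can still be reading it.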
2388 
2389 int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2390   int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2391   ANNOTATE_DRDPA_ACQUIRED(lck);
2392   return retval;
2393 }
2394 
2395 static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2396                                                 kmp_int32 gtid) {
2397   char const *const func = "omp_set_lock";
2398   if (lck->lk.initialized != lck) {
2399     KMP_FATAL(LockIsUninitialized, func);
2400   }
2401   if (__kmp_is_drdpa_lock_nestable(lck)) {
2402     KMP_FATAL(LockNestableUsedAsSimple, func);
2403   }
2404   if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) {
2405     KMP_FATAL(LockIsAlreadyOwned, func);
2406   }
2407 
2408   __kmp_acquire_drdpa_lock(lck, gtid);
2409 
2410   lck->lk.owner_id = gtid + 1;
2411   return KMP_LOCK_ACQUIRED_FIRST;
2412 }
2413 
2414 int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2415   // First get a ticket, then read the polls pointer and the mask.
2416   // The polls pointer must be read before the mask!!! (See above)
2417   kmp_uint64 ticket = lck->lk.next_ticket; // atomic load
2418   std::atomic<kmp_uint64> *polls = lck->lk.polls;
2419   kmp_uint64 mask = lck->lk.mask; // atomic load
2420   if (polls[ticket & mask] == ticket) {
2421     kmp_uint64 next_ticket = ticket + 1;
2422     if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
2423                                        next_ticket)) {
2424       KMP_FSYNC_ACQUIRED(lck);
2425       KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
2426                       ticket, lck));
2427       lck->lk.now_serving = ticket; // non-volatile store
2428 
2429       // Since no threads are waiting, there is no possibility that we would
2430       // want to reconfigure the polling area.  We might have the cleanup ticket
2431       // value (which says that it is now safe to deallocate old_polls), but
2432       // we'll let a later thread which calls __kmp_acquire_lock do that - this
2433       // routine isn't supposed to block, and we would risk blocks if we called
2434       // __kmp_free() to do the deallocation.
2435       return TRUE;
2436     }
2437   }
2438   return FALSE;
2439 }
2440 
2441 static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2442                                              kmp_int32 gtid) {
2443   char const *const func = "omp_test_lock";
2444   if (lck->lk.initialized != lck) {
2445     KMP_FATAL(LockIsUninitialized, func);
2446   }
2447   if (__kmp_is_drdpa_lock_nestable(lck)) {
2448     KMP_FATAL(LockNestableUsedAsSimple, func);
2449   }
2450 
2451   int retval = __kmp_test_drdpa_lock(lck, gtid);
2452 
2453   if (retval) {
2454     lck->lk.owner_id = gtid + 1;
2455   }
2456   return retval;
2457 }
2458 
2459 int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2460   // Read the ticket value from the lock data struct, then the polls pointer and
2461   // the mask.  The polls pointer must be read before the mask!!! (See above)
2462   kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load
2463   std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load
2464   kmp_uint64 mask = lck->lk.mask; // atomic load
2465   KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
2466                   ticket - 1, lck));
2467   KMP_FSYNC_RELEASING(lck);
2468   ANNOTATE_DRDPA_RELEASED(lck);
2469   polls[ticket & mask] = ticket; // atomic store
2470   return KMP_LOCK_RELEASED;
2471 }
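// The ticket hand-off implied by the routines above (ignoring reconfiguration):
// a thread that drew ticket n spins until polls[n & mask] catches up to n; on
// release it computes ticket = now_serving + 1 == n + 1 and stores that value
// into polls[(n + 1) & mask], which is exactly the value the next waiter,
// holding ticket n + 1, is spinning for.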
2472 
2473 static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2474                                                 kmp_int32 gtid) {
2475   char const *const func = "omp_unset_lock";
2476   KMP_MB(); /* in case another processor initialized lock */
2477   if (lck->lk.initialized != lck) {
2478     KMP_FATAL(LockIsUninitialized, func);
2479   }
2480   if (__kmp_is_drdpa_lock_nestable(lck)) {
2481     KMP_FATAL(LockNestableUsedAsSimple, func);
2482   }
2483   if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2484     KMP_FATAL(LockUnsettingFree, func);
2485   }
2486   if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) &&
2487       (__kmp_get_drdpa_lock_owner(lck) != gtid)) {
2488     KMP_FATAL(LockUnsettingSetByAnother, func);
2489   }
2490   lck->lk.owner_id = 0;
2491   return __kmp_release_drdpa_lock(lck, gtid);
2492 }
2493 
2494 void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
2495   lck->lk.location = NULL;
2496   lck->lk.mask = 0;
2497   lck->lk.num_polls = 1;
2498   lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
2499       lck->lk.num_polls * sizeof(*(lck->lk.polls)));
2500   lck->lk.cleanup_ticket = 0;
2501   lck->lk.old_polls = NULL;
2502   lck->lk.next_ticket = 0;
2503   lck->lk.now_serving = 0;
2504   lck->lk.owner_id = 0; // no thread owns the lock.
2505   lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
2506   lck->lk.initialized = lck;
2507 
2508   KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
2509 }
2510 
2511 void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
2512   lck->lk.initialized = NULL;
2513   lck->lk.location = NULL;
2514   if (lck->lk.polls.load() != NULL) {
2515     __kmp_free(lck->lk.polls.load());
2516     lck->lk.polls = NULL;
2517   }
2518   if (lck->lk.old_polls != NULL) {
2519     __kmp_free(lck->lk.old_polls);
2520     lck->lk.old_polls = NULL;
2521   }
2522   lck->lk.mask = 0;
2523   lck->lk.num_polls = 0;
2524   lck->lk.cleanup_ticket = 0;
2525   lck->lk.next_ticket = 0;
2526   lck->lk.now_serving = 0;
2527   lck->lk.owner_id = 0;
2528   lck->lk.depth_locked = -1;
2529 }
2530 
2531 static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2532   char const *const func = "omp_destroy_lock";
2533   if (lck->lk.initialized != lck) {
2534     KMP_FATAL(LockIsUninitialized, func);
2535   }
2536   if (__kmp_is_drdpa_lock_nestable(lck)) {
2537     KMP_FATAL(LockNestableUsedAsSimple, func);
2538   }
2539   if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2540     KMP_FATAL(LockStillOwned, func);
2541   }
2542   __kmp_destroy_drdpa_lock(lck);
2543 }
2544 
2545 // nested drdpa ticket locks
2546 
2547 int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2548   KMP_DEBUG_ASSERT(gtid >= 0);
2549 
2550   if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2551     lck->lk.depth_locked += 1;
2552     return KMP_LOCK_ACQUIRED_NEXT;
2553   } else {
2554     __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
2555     ANNOTATE_DRDPA_ACQUIRED(lck);
2556     KMP_MB();
2557     lck->lk.depth_locked = 1;
2558     KMP_MB();
2559     lck->lk.owner_id = gtid + 1;
2560     return KMP_LOCK_ACQUIRED_FIRST;
2561   }
2562 }
2563 
2564 static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2565                                                         kmp_int32 gtid) {
2566   char const *const func = "omp_set_nest_lock";
2567   if (lck->lk.initialized != lck) {
2568     KMP_FATAL(LockIsUninitialized, func);
2569   }
2570   if (!__kmp_is_drdpa_lock_nestable(lck)) {
2571     KMP_FATAL(LockSimpleUsedAsNestable, func);
2572   }
2573   __kmp_acquire_nested_drdpa_lock(lck, gtid);
2574 }
2575 
2576 int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2577   int retval;
2578 
2579   KMP_DEBUG_ASSERT(gtid >= 0);
2580 
2581   if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
2582     retval = ++lck->lk.depth_locked;
2583   } else if (!__kmp_test_drdpa_lock(lck, gtid)) {
2584     retval = 0;
2585   } else {
2586     KMP_MB();
2587     retval = lck->lk.depth_locked = 1;
2588     KMP_MB();
2589     lck->lk.owner_id = gtid + 1;
2590   }
2591   return retval;
2592 }
2593 
2594 static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2595                                                     kmp_int32 gtid) {
2596   char const *const func = "omp_test_nest_lock";
2597   if (lck->lk.initialized != lck) {
2598     KMP_FATAL(LockIsUninitialized, func);
2599   }
2600   if (!__kmp_is_drdpa_lock_nestable(lck)) {
2601     KMP_FATAL(LockSimpleUsedAsNestable, func);
2602   }
2603   return __kmp_test_nested_drdpa_lock(lck, gtid);
2604 }
2605 
2606 int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
2607   KMP_DEBUG_ASSERT(gtid >= 0);
2608 
2609   KMP_MB();
2610   if (--(lck->lk.depth_locked) == 0) {
2611     KMP_MB();
2612     lck->lk.owner_id = 0;
2613     __kmp_release_drdpa_lock(lck, gtid);
2614     return KMP_LOCK_RELEASED;
2615   }
2616   return KMP_LOCK_STILL_HELD;
2617 }
2618 
2619 static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2620                                                        kmp_int32 gtid) {
2621   char const *const func = "omp_unset_nest_lock";
2622   KMP_MB(); /* in case another processor initialized lock */
2623   if (lck->lk.initialized != lck) {
2624     KMP_FATAL(LockIsUninitialized, func);
2625   }
2626   if (!__kmp_is_drdpa_lock_nestable(lck)) {
2627     KMP_FATAL(LockSimpleUsedAsNestable, func);
2628   }
2629   if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2630     KMP_FATAL(LockUnsettingFree, func);
2631   }
2632   if (__kmp_get_drdpa_lock_owner(lck) != gtid) {
2633     KMP_FATAL(LockUnsettingSetByAnother, func);
2634   }
2635   return __kmp_release_nested_drdpa_lock(lck, gtid);
2636 }
2637 
2638 void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2639   __kmp_init_drdpa_lock(lck);
2640   lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
2641 }
2642 
2643 void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
2644   __kmp_destroy_drdpa_lock(lck);
2645   lck->lk.depth_locked = 0;
2646 }
2647 
2648 static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
2649   char const *const func = "omp_destroy_nest_lock";
2650   if (lck->lk.initialized != lck) {
2651     KMP_FATAL(LockIsUninitialized, func);
2652   }
2653   if (!__kmp_is_drdpa_lock_nestable(lck)) {
2654     KMP_FATAL(LockSimpleUsedAsNestable, func);
2655   }
2656   if (__kmp_get_drdpa_lock_owner(lck) != -1) {
2657     KMP_FATAL(LockStillOwned, func);
2658   }
2659   __kmp_destroy_nested_drdpa_lock(lck);
2660 }
2661 
2662 // access functions to fields which don't exist for all lock kinds.
2663 
2664 static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
2665   return lck->lk.location;
2666 }
2667 
2668 static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
2669                                           const ident_t *loc) {
2670   lck->lk.location = loc;
2671 }
2672 
2673 static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) {
2674   return lck->lk.flags;
2675 }
2676 
2677 static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
2678                                        kmp_lock_flags_t flags) {
2679   lck->lk.flags = flags;
2680 }
2681 
2682 // Time stamp counter
2683 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
2684 #define __kmp_tsc() __kmp_hardware_timestamp()
2685 // Runtime's default backoff parameters
2686 kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100};
2687 #else
2688 // Use nanoseconds for other platforms
2689 extern kmp_uint64 __kmp_now_nsec();
2690 kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
2691 #define __kmp_tsc() __kmp_now_nsec()
2692 #endif
2693 
2694 // A useful predicate for dealing with timestamps that may wrap.
2695 // Is a before b? Since the timestamps may wrap, this is asking whether it's
2696 // shorter to go clockwise from a to b around the clock-face, or anti-clockwise.
2697 // Times where going clockwise is less distance than going anti-clockwise
2698 // are in the future, others are in the past. E.g. with a = MAX-1 and b = MAX+1
2699 // (= 0), a > b is true but does not mean a has reached b; whereas signed(a) = -2
2700 // and signed(b) = 0 capture the actual difference.
2701 static inline bool before(kmp_uint64 a, kmp_uint64 b) {
2702   return ((kmp_int64)b - (kmp_int64)a) > 0;
2703 }
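// [Illustrative sketch, not part of the runtime] before() at the wrap point:
#if 0
KMP_DEBUG_ASSERT(before(0xFFFFFFFFFFFFFFFEull, 1ull));  // a = MAX-1, b = 1: a is before b
KMP_DEBUG_ASSERT(!before(1ull, 0xFFFFFFFFFFFFFFFEull)); // ...and not the other way round
#endif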
2704 
2705 // Truncated binary exponential backoff function
2706 void __kmp_spin_backoff(kmp_backoff_t *boff) {
2707   // We could flatten this loop, but making it a nested loop gives better results.
2708   kmp_uint32 i;
2709   for (i = boff->step; i > 0; i--) {
2710     kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
2711     do {
2712       KMP_CPU_PAUSE();
2713     } while (before(__kmp_tsc(), goal));
2714   }
2715   boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
2716 }
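// A worked example of the backoff above, assuming the initializer order
// {step, max_backoff, min_tick}: the inner loop burns roughly step * min_tick
// time units per call, and the update step = (step << 1 | 1) & (max_backoff - 1)
// grows the step as 1, 3, 7, 15, ... until it saturates at max_backoff - 1
// (4095 with the x86 defaults), so the pause grows exponentially but stays
// bounded -- a truncated binary exponential backoff.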
2717 
2718 #if KMP_USE_DYNAMIC_LOCK
2719 
2720 // Direct lock initializer. It simply writes a tag to the low 8 bits of the
2721 // lock word.
2722 static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
2723                                    kmp_dyna_lockseq_t seq) {
2724   TCW_4(*lck, KMP_GET_D_TAG(seq));
2725   KA_TRACE(
2726       20,
2727       ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
2728 }
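// [Illustrative sketch, not part of the runtime] Given the layout described
// above (the lock-kind tag in the low 8 bits of the lock word), a hypothetical
// helper could recover the tag like this; extract_direct_tag is not a real
// runtime function.
#if 0
static inline kmp_uint32 extract_direct_tag(kmp_dyna_lock_t *lck) {
  return TCR_4(*lck) & 0xFF; // low 8 bits carry the tag written by the init above
}
#endif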
2729 
2730 #if KMP_USE_TSX
2731 
2732 // HLE lock functions - imported from the testbed runtime.
2733 #define HLE_ACQUIRE ".byte 0xf2;"
2734 #define HLE_RELEASE ".byte 0xf3;"
2735 
2736 static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
2737   __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
2738   return v;
2739 }
2740 
2741 static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
2742 
2743 static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) {
2744   TCW_4(*lck, 0);
2745 }
2746 
2747 static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2748   // Use gtid for KMP_LOCK_BUSY if necessary
2749   if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
2750     int delay = 1;
2751     do {
2752       while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
2753         for (int i = delay; i != 0; --i)
2754           KMP_CPU_PAUSE();
2755         delay = ((delay << 1) | 1) & 7;
2756       }
2757     } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
2758   }
2759 }
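// The acquire path above is a test-and-test-and-set loop: after a failed
// (elided) exchange it spins on plain reads with a small bounded backoff
// (at most 7 pauses) until the lock word looks free, then retries the
// exchange.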
2760 
2761 static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2762                                                kmp_int32 gtid) {
2763   __kmp_acquire_hle_lock(lck, gtid); // TODO: add checks
2764 }
2765 
2766 static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2767   __asm__ volatile(HLE_RELEASE "movl %1,%0"
2768                    : "=m"(*lck)
2769                    : "r"(KMP_LOCK_FREE(hle))
2770                    : "memory");
2771   return KMP_LOCK_RELEASED;
2772 }
2773 
2774 static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2775                                               kmp_int32 gtid) {
2776   return __kmp_release_hle_lock(lck, gtid); // TODO: add checks
2777 }
2778 
2779 static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
2780   return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
2781 }
2782 
2783 static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
2784                                            kmp_int32 gtid) {
2785   return __kmp_test_hle_lock(lck, gtid); // TODO: add checks
2786 }
2787 
2788 static void __kmp_init_rtm_lock(kmp_queuing_lock_t *lck) {
2789   __kmp_init_queuing_lock(lck);
2790 }
2791 
2792 static void __kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck) {
2793   __kmp_destroy_queuing_lock(lck);
2794 }
2795 
2796 static void __kmp_destroy_rtm_lock_with_checks(kmp_queuing_lock_t *lck) {
2797   __kmp_destroy_queuing_lock_with_checks(lck);
2798 }
2799 
2800 static void __kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2801   unsigned retries = 3, status;
2802   do {
2803     status = _xbegin();
2804     if (status == _XBEGIN_STARTED) {
2805       if (__kmp_is_unlocked_queuing_lock(lck))
2806         return;
2807       _xabort(0xff);
2808     }
2809     if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
2810       // Wait until lock becomes free
2811       while (!__kmp_is_unlocked_queuing_lock(lck))
2812         __kmp_yield(TRUE);
2813     } else if (!(status & _XABORT_RETRY))
2814       break;
2815   } while (retries--);
2816 
2817   // Fall-back non-speculative lock (xchg)
2818   __kmp_acquire_queuing_lock(lck, gtid);
2819 }
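// Speculation strategy: up to four _xbegin attempts. Inside a transaction the
// lock word is only read; if the lock is held we abort with code 0xff so that
// the abort handler can distinguish "lock was busy" (wait for it to become
// free, then retry speculation) from other aborts, which drop straight to the
// real queuing-lock acquire.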
2820 
2821 static void __kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2822                                                kmp_int32 gtid) {
2823   __kmp_acquire_rtm_lock(lck, gtid);
2824 }
2825 
2826 static int __kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2827   if (__kmp_is_unlocked_queuing_lock(lck)) {
2828     // Releasing from speculation
2829     _xend();
2830   } else {
2831     // Releasing from a real lock
2832     __kmp_release_queuing_lock(lck, gtid);
2833   }
2834   return KMP_LOCK_RELEASED;
2835 }
2836 
2837 static int __kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2838                                               kmp_int32 gtid) {
2839   return __kmp_release_rtm_lock(lck, gtid);
2840 }
2841 
2842 static int __kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
2843   unsigned retries = 3, status;
2844   do {
2845     status = _xbegin();
2846     if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
2847       return 1;
2848     }
2849     if (!(status & _XABORT_RETRY))
2850       break;
2851   } while (retries--);
2852 
2853   return (__kmp_is_unlocked_queuing_lock(lck)) ? 1 : 0;
2854 }
2855 
2856 static int __kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
2857                                            kmp_int32 gtid) {
2858   return __kmp_test_rtm_lock(lck, gtid);
2859 }
2860 
2861 #endif // KMP_USE_TSX
2862 
2863 // Entry functions for indirect locks (first element of direct lock jump tables)
2864 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
2865                                      kmp_dyna_lockseq_t tag);
2866 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
2867 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2868 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2869 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
2870 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2871                                                kmp_int32);
2872 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2873                                                  kmp_int32);
2874 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
2875                                                 kmp_int32);
2876 
2877 // Lock function definitions for the union parameter type
2878 #define KMP_FOREACH_LOCK_KIND(m, a) m(ticket, a) m(queuing, a) m(drdpa, a)
2879 
2880 #define expand1(lk, op)                                                        \
2881   static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) {               \
2882     __kmp_##op##_##lk##_##lock(&lock->lk);                                     \
2883   }
2884 #define expand2(lk, op)                                                        \
2885   static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock,                  \
2886                                         kmp_int32 gtid) {                      \
2887     return __kmp_##op##_##lk##_##lock(&lock->lk, gtid);                        \
2888   }
2889 #define expand3(lk, op)                                                        \
2890   static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock,              \
2891                                             kmp_lock_flags_t flags) {          \
2892     __kmp_set_##lk##_lock_flags(&lock->lk, flags);                             \
2893   }
2894 #define expand4(lk, op)                                                        \
2895   static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock,           \
2896                                                const ident_t *loc) {           \
2897     __kmp_set_##lk##_lock_location(&lock->lk, loc);                            \
2898   }
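// Each expand* macro defines a forwarding wrapper that takes the generic
// kmp_user_lock_p and calls the typed implementation through &lock->lk. As an
// illustration (literal expansion of the macro text), expand2(ticket, acquire)
// produces roughly:
//   static int __kmp_acquire_ticket_lock(kmp_user_lock_p lock,
//                                        kmp_int32 gtid) {
//     return __kmp_acquire_ticket_lock(&lock->lk, gtid);
//   }
// i.e. an overload of the existing kmp_ticket_lock_t* routine.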
2899 
2900 KMP_FOREACH_LOCK_KIND(expand1, init)
2901 KMP_FOREACH_LOCK_KIND(expand1, init_nested)
2902 KMP_FOREACH_LOCK_KIND(expand1, destroy)
2903 KMP_FOREACH_LOCK_KIND(expand1, destroy_nested)
2904 KMP_FOREACH_LOCK_KIND(expand2, acquire)
2905 KMP_FOREACH_LOCK_KIND(expand2, acquire_nested)
2906 KMP_FOREACH_LOCK_KIND(expand2, release)
2907 KMP_FOREACH_LOCK_KIND(expand2, release_nested)
2908 KMP_FOREACH_LOCK_KIND(expand2, test)
2909 KMP_FOREACH_LOCK_KIND(expand2, test_nested)
2910 KMP_FOREACH_LOCK_KIND(expand3, )
2911 KMP_FOREACH_LOCK_KIND(expand4, )
2912 
2913 #undef expand1
2914 #undef expand2
2915 #undef expand3
2916 #undef expand4
2917 
2918 // Jump tables for the indirect lock functions
2919 // Only fill in the odd entries; that avoids the need to shift out the low bit
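// A dynamic lock word with its low bit set is a direct lock whose odd tag
// indexes these tables directly. An indirect lock word is even, and the tag
// extraction used by the dispatch macros maps any even word to 0, so indirect
// locks dispatch through slot 0 (the *_indirect_lock entry); the interleaved
// zero entries at even indices are never called.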
2920 
2921 // init functions
2922 #define expand(l, op) 0, __kmp_init_direct_lock,
2923 void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
2924     __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
2925 #undef expand
2926 
2927 // destroy functions
2928 #define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
2929 static void (*direct_destroy[])(kmp_dyna_lock_t *) = {
2930     __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
2931 #undef expand
2932 #define expand(l, op)                                                          \
2933   0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks,
2934 static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = {
2935     __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
2936 #undef expand
2937 
2938 // set/acquire functions
2939 #define expand(l, op)                                                          \
2940   0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
2941 static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
2942     __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
2943 #undef expand
2944 #define expand(l, op)                                                          \
2945   0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
2946 static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2947     __kmp_set_indirect_lock_with_checks, 0,
2948     KMP_FOREACH_D_LOCK(expand, acquire)};
2949 #undef expand
2950 
2951 // unset/release and test functions
2952 #define expand(l, op)                                                          \
2953   0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
2954 static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
2955     __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
2956 static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
2957     __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
2958 #undef expand
2959 #define expand(l, op)                                                          \
2960   0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
2961 static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2962     __kmp_unset_indirect_lock_with_checks, 0,
2963     KMP_FOREACH_D_LOCK(expand, release)};
2964 static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
2965     __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
2966 #undef expand
2967 
2968 // Exposes only one set of jump tables (*lock or *lock_with_checks).
2969 void (*(*__kmp_direct_destroy))(kmp_dyna_lock_t *) = 0;
2970 int (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32) = 0;
2971 int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32) = 0;
2972 int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;
2973 
2974 // Jump tables for the indirect lock functions
2975 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
2976 void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
2977     KMP_FOREACH_I_LOCK(expand, init)};
2978 #undef expand
2979 
2980 #define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
2981 static void (*indirect_destroy[])(kmp_user_lock_p) = {
2982     KMP_FOREACH_I_LOCK(expand, destroy)};
2983 #undef expand
2984 #define expand(l, op)                                                          \
2985   (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks,
2986 static void (*indirect_destroy_check[])(kmp_user_lock_p) = {
2987     KMP_FOREACH_I_LOCK(expand, destroy)};
2988 #undef expand
2989 
2990 // set/acquire functions
2991 #define expand(l, op)                                                          \
2992   (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
2993 static int (*indirect_set[])(kmp_user_lock_p,
2994                              kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)};
2995 #undef expand
2996 #define expand(l, op)                                                          \
2997   (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
2998 static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
2999     KMP_FOREACH_I_LOCK(expand, acquire)};
3000 #undef expand
3001 
3002 // unset/release and test functions
3003 #define expand(l, op)                                                          \
3004   (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
3005 static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = {
3006     KMP_FOREACH_I_LOCK(expand, release)};
3007 static int (*indirect_test[])(kmp_user_lock_p,
3008                               kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)};
3009 #undef expand
3010 #define expand(l, op)                                                          \
3011   (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
3012 static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = {
3013     KMP_FOREACH_I_LOCK(expand, release)};
3014 static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
3015     KMP_FOREACH_I_LOCK(expand, test)};
3016 #undef expand
3017 
3018 // Exposes only one set of jump tables (*lock or *lock_with_checks).
3019 void (*(*__kmp_indirect_destroy))(kmp_user_lock_p) = 0;
3020 int (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32) = 0;
3021 int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32) = 0;
3022 int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32) = 0;
3023 
3024 // Lock index table.
3025 kmp_indirect_lock_table_t __kmp_i_lock_table;
3026 
3027 // Size of indirect locks.
3028 static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};
3029 
3030 // Jump tables for lock accessor/modifier.
3031 void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3032                                                      const ident_t *) = {0};
3033 void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
3034                                                   kmp_lock_flags_t) = {0};
3035 const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
3036     kmp_user_lock_p) = {0};
3037 kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
3038     kmp_user_lock_p) = {0};
3039 
3040 // Use different lock pools for different lock types.
3041 static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
3042 
3043 // User lock allocator for dynamically dispatched indirect locks. Every entry of
3044 // the indirect lock table holds the address and type of the allocated indirect
3045 // lock (kmp_indirect_lock_t), and the size of the table doubles when it is
3046 // full. A destroyed indirect lock object is returned to the reusable pool of
3047 // locks, unique to each lock type.
3048 kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
3049                                                   kmp_int32 gtid,
3050                                                   kmp_indirect_locktag_t tag) {
3051   kmp_indirect_lock_t *lck;
3052   kmp_lock_index_t idx;
3053 
3054   __kmp_acquire_lock(&__kmp_global_lock, gtid);
3055 
3056   if (__kmp_indirect_lock_pool[tag] != NULL) {
3057     // Reuse the allocated and destroyed lock object
3058     lck = __kmp_indirect_lock_pool[tag];
3059     if (OMP_LOCK_T_SIZE < sizeof(void *))
3060       idx = lck->lock->pool.index;
3061     __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
3062     KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n",
3063                   lck));
3064   } else {
3065     idx = __kmp_i_lock_table.next;
3066     // Check capacity and double the size if it is full
3067     if (idx == __kmp_i_lock_table.size) {
3068       // Double up the space for block pointers
3069       int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
3070       kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
3071           2 * row * sizeof(kmp_indirect_lock_t *));
3072       KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
3073                  row * sizeof(kmp_indirect_lock_t *));
3074       kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
3075       __kmp_i_lock_table.table = new_table;
3076       __kmp_free(old_table);
3077       // Allocate new objects in the new blocks
3078       for (int i = row; i < 2 * row; ++i)
3079         *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate(
3080             KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3081       __kmp_i_lock_table.size = 2 * idx;
3082     }
3083     __kmp_i_lock_table.next++;
3084     lck = KMP_GET_I_LOCK(idx);
3085     // Allocate a new base lock object
3086     lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
3087     KA_TRACE(20,
3088              ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
3089   }
3090 
3091   __kmp_release_lock(&__kmp_global_lock, gtid);
3092 
3093   lck->type = tag;
3094 
3095   if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3096     *((kmp_lock_index_t *)user_lock) = idx
3097                                        << 1; // indirect lock word must be even
3098   } else {
3099     *((kmp_indirect_lock_t **)user_lock) = lck;
3100   }
3101 
3102   return lck;
3103 }
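// Note on the stored lock word: when omp_lock_t cannot hold a pointer, the
// user's lock variable stores (index << 1), an even value, which keeps it
// distinguishable from a direct lock word (odd tag); otherwise the
// kmp_indirect_lock_t pointer is stored in place.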
3104 
3105 // User lock lookup for dynamically dispatched locks.
3106 static __forceinline kmp_indirect_lock_t *
3107 __kmp_lookup_indirect_lock(void **user_lock, const char *func) {
3108   if (__kmp_env_consistency_check) {
3109     kmp_indirect_lock_t *lck = NULL;
3110     if (user_lock == NULL) {
3111       KMP_FATAL(LockIsUninitialized, func);
3112     }
3113     if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3114       kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
3115       if (idx >= __kmp_i_lock_table.size) {
3116         KMP_FATAL(LockIsUninitialized, func);
3117       }
3118       lck = KMP_GET_I_LOCK(idx);
3119     } else {
3120       lck = *((kmp_indirect_lock_t **)user_lock);
3121     }
3122     if (lck == NULL) {
3123       KMP_FATAL(LockIsUninitialized, func);
3124     }
3125     return lck;
3126   } else {
3127     if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3128       return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
3129     } else {
3130       return *((kmp_indirect_lock_t **)user_lock);
3131     }
3132   }
3133 }
3134 
3135 static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
3136                                      kmp_dyna_lockseq_t seq) {
3137 #if KMP_USE_ADAPTIVE_LOCKS
3138   if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
3139     KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
3140     seq = lockseq_queuing;
3141   }
3142 #endif
3143 #if KMP_USE_TSX
3144   if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
3145     seq = lockseq_queuing;
3146   }
3147 #endif
3148   kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
3149   kmp_indirect_lock_t *l =
3150       __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
3151   KMP_I_LOCK_FUNC(l, init)(l->lock);
3152   KA_TRACE(
3153       20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n",
3154            seq));
3155 }
3156 
3157 static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
3158   kmp_uint32 gtid = __kmp_entry_gtid();
3159   kmp_indirect_lock_t *l =
3160       __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
3161   KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3162   kmp_indirect_locktag_t tag = l->type;
3163 
3164   __kmp_acquire_lock(&__kmp_global_lock, gtid);
3165 
3166   // Use the base lock's space to keep the pool chain.
3167   l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
3168   if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3169     l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
3170   }
3171   __kmp_indirect_lock_pool[tag] = l;
3172 
3173   __kmp_release_lock(&__kmp_global_lock, gtid);
3174 }
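// A destroyed indirect lock is not freed: the kmp_indirect_lock_t entry,
// together with its base lock allocation, is pushed onto the per-type pool
// (the base lock's own storage holds the pool chain) and is handed back out
// by __kmp_allocate_indirect_lock.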
3175 
3176 static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3177   kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3178   return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3179 }
3180 
3181 static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3182   kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3183   return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3184 }
3185 
3186 static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
3187   kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
3188   return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3189 }
3190 
3191 static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3192                                                kmp_int32 gtid) {
3193   kmp_indirect_lock_t *l =
3194       __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
3195   return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
3196 }
3197 
3198 static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3199                                                  kmp_int32 gtid) {
3200   kmp_indirect_lock_t *l =
3201       __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
3202   return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
3203 }
3204 
3205 static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
3206                                                 kmp_int32 gtid) {
3207   kmp_indirect_lock_t *l =
3208       __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
3209   return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
3210 }
3211 
3212 kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;
3213 
3214 // This is used only in kmp_error.cpp when consistency checking is on.
3215 kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) {
3216   switch (seq) {
3217   case lockseq_tas:
3218   case lockseq_nested_tas:
3219     return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
3220 #if KMP_USE_FUTEX
3221   case lockseq_futex:
3222   case lockseq_nested_futex:
3223     return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
3224 #endif
3225   case lockseq_ticket:
3226   case lockseq_nested_ticket:
3227     return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
3228   case lockseq_queuing:
3229   case lockseq_nested_queuing:
3230 #if KMP_USE_ADAPTIVE_LOCKS
3231   case lockseq_adaptive:
3232 #endif
3233     return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
3234   case lockseq_drdpa:
3235   case lockseq_nested_drdpa:
3236     return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
3237   default:
3238     return 0;
3239   }
3240 }
3241 
3242 // Initializes data for dynamic user locks.
3243 void __kmp_init_dynamic_user_locks() {
3244   // Initialize jump table for the lock functions
3245   if (__kmp_env_consistency_check) {
3246     __kmp_direct_set = direct_set_check;
3247     __kmp_direct_unset = direct_unset_check;
3248     __kmp_direct_test = direct_test_check;
3249     __kmp_direct_destroy = direct_destroy_check;
3250     __kmp_indirect_set = indirect_set_check;
3251     __kmp_indirect_unset = indirect_unset_check;
3252     __kmp_indirect_test = indirect_test_check;
3253     __kmp_indirect_destroy = indirect_destroy_check;
3254   } else {
3255     __kmp_direct_set = direct_set;
3256     __kmp_direct_unset = direct_unset;
3257     __kmp_direct_test = direct_test;
3258     __kmp_direct_destroy = direct_destroy;
3259     __kmp_indirect_set = indirect_set;
3260     __kmp_indirect_unset = indirect_unset;
3261     __kmp_indirect_test = indirect_test;
3262     __kmp_indirect_destroy = indirect_destroy;
3263   }
3264   // If the user locks have already been initialized, then return. Allow the
3265   // switch between different KMP_CONSISTENCY_CHECK values, but do not allocate
3266   // new lock tables if they have already been allocated.
3267   if (__kmp_init_user_locks)
3268     return;
3269 
3270   // Initialize lock index table
3271   __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
3272   __kmp_i_lock_table.table =
3273       (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
3274   *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
3275       KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
3276   __kmp_i_lock_table.next = 0;
3277 
3278   // Indirect lock size
3279   __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
3280   __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
3281 #if KMP_USE_ADAPTIVE_LOCKS
3282   __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
3283 #endif
3284   __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
3285 #if KMP_USE_TSX
3286   __kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
3287 #endif
3288   __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
3289 #if KMP_USE_FUTEX
3290   __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
3291 #endif
3292   __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
3293   __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
3294   __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
3295 
3296 // Initialize lock accessor/modifier
3297 #define fill_jumps(table, expand, sep)                                         \
3298   {                                                                            \
3299     table[locktag##sep##ticket] = expand(ticket);                              \
3300     table[locktag##sep##queuing] = expand(queuing);                            \
3301     table[locktag##sep##drdpa] = expand(drdpa);                                \
3302   }
3303 
3304 #if KMP_USE_ADAPTIVE_LOCKS
3305 #define fill_table(table, expand)                                              \
3306   {                                                                            \
3307     fill_jumps(table, expand, _);                                              \
3308     table[locktag_adaptive] = expand(queuing);                                 \
3309     fill_jumps(table, expand, _nested_);                                       \
3310   }
3311 #else
3312 #define fill_table(table, expand)                                              \
3313   {                                                                            \
3314     fill_jumps(table, expand, _);                                              \
3315     fill_jumps(table, expand, _nested_);                                       \
3316   }
3317 #endif // KMP_USE_ADAPTIVE_LOCKS
3318 
3319 #define expand(l)                                                              \
3320   (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
3321   fill_table(__kmp_indirect_set_location, expand);
3322 #undef expand
3323 #define expand(l)                                                              \
3324   (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
3325   fill_table(__kmp_indirect_set_flags, expand);
3326 #undef expand
3327 #define expand(l)                                                              \
3328   (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
3329   fill_table(__kmp_indirect_get_location, expand);
3330 #undef expand
3331 #define expand(l)                                                              \
3332   (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
3333   fill_table(__kmp_indirect_get_flags, expand);
3334 #undef expand
3335 
3336   __kmp_init_user_locks = TRUE;
3337 }
3338 
3339 // Clean up the lock table.
3340 void __kmp_cleanup_indirect_user_locks() {
3341   kmp_lock_index_t i;
3342   int k;
3343 
3344   // Clean up locks in the pools first (they were already destroyed before going
3345   // into the pools).
3346   for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
3347     kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
3348     while (l != NULL) {
3349       kmp_indirect_lock_t *ll = l;
3350       l = (kmp_indirect_lock_t *)l->lock->pool.next;
3351       KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n",
3352                     ll));
3353       __kmp_free(ll->lock);
3354       ll->lock = NULL;
3355     }
3356     __kmp_indirect_lock_pool[k] = NULL;
3357   }
3358   // Clean up the remaining undestroyed locks.
3359   for (i = 0; i < __kmp_i_lock_table.next; i++) {
3360     kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
3361     if (l->lock != NULL) {
3362       // Locks not destroyed explicitly need to be destroyed here.
3363       KMP_I_LOCK_FUNC(l, destroy)(l->lock);
3364       KA_TRACE(
3365           20,
3366           ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n",
3367            l));
3368       __kmp_free(l->lock);
3369     }
3370   }
3371   // Free the table
3372   for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
3373     __kmp_free(__kmp_i_lock_table.table[i]);
3374   __kmp_free(__kmp_i_lock_table.table);
3375 
3376   __kmp_init_user_locks = FALSE;
3377 }
3378 
3379 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3380 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3381 
3382 #else // KMP_USE_DYNAMIC_LOCK
3383 
3384 static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
3385   __kmp_init_tas_lock(lck);
3386 }
3387 
3388 static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
3389   __kmp_init_nested_tas_lock(lck);
3390 }
3391 
3392 #if KMP_USE_FUTEX
3393 static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
3394   __kmp_init_futex_lock(lck);
3395 }
3396 
3397 static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
3398   __kmp_init_nested_futex_lock(lck);
3399 }
3400 #endif
3401 
3402 static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
3403   return lck == lck->lk.self;
3404 }
3405 
3406 static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
3407   __kmp_init_ticket_lock(lck);
3408 }
3409 
3410 static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
3411   __kmp_init_nested_ticket_lock(lck);
3412 }
3413 
3414 static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
3415   return lck == lck->lk.initialized;
3416 }
3417 
3418 static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
3419   __kmp_init_queuing_lock(lck);
3420 }
3421 
3422 static void
3423 __kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
3424   __kmp_init_nested_queuing_lock(lck);
3425 }
3426 
3427 #if KMP_USE_ADAPTIVE_LOCKS
3428 static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
3429   __kmp_init_adaptive_lock(lck);
3430 }
3431 #endif
3432 
3433 static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
3434   return lck == lck->lk.initialized;
3435 }
3436 
3437 static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
3438   __kmp_init_drdpa_lock(lck);
3439 }
3440 
3441 static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
3442   __kmp_init_nested_drdpa_lock(lck);
3443 }
3444 
3445 /* user locks
3446  * They are implemented as a table of function pointers which are set to the
3447  * lock functions of the appropriate kind, once that has been determined. */
3448 
3449 enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
3450 
3451 size_t __kmp_base_user_lock_size = 0;
3452 size_t __kmp_user_lock_size = 0;
3453 
3454 kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
3455 int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
3456                                             kmp_int32 gtid) = NULL;
3457 
3458 int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
3459                                          kmp_int32 gtid) = NULL;
3460 int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
3461                                             kmp_int32 gtid) = NULL;
3462 void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3463 void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
3464 void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3465 int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3466                                                    kmp_int32 gtid) = NULL;
3467 
3468 int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3469                                                 kmp_int32 gtid) = NULL;
3470 int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
3471                                                    kmp_int32 gtid) = NULL;
3472 void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3473 void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
3474 
3475 int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
3476 const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
3477 void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
3478                                       const ident_t *loc) = NULL;
3479 kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
3480 void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
3481                                    kmp_lock_flags_t flags) = NULL;
3482 
3483 void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
3484   switch (user_lock_kind) {
3485   case lk_default:
3486   default:
3487     KMP_ASSERT(0);
3488 
3489   case lk_tas: {
3490     __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
3491     __kmp_user_lock_size = sizeof(kmp_tas_lock_t);
3492 
3493     __kmp_get_user_lock_owner_ =
3494         (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);
3495 
3496     if (__kmp_env_consistency_check) {
3497       KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
3498       KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
3499     } else {
3500       KMP_BIND_USER_LOCK(tas);
3501       KMP_BIND_NESTED_USER_LOCK(tas);
3502     }
3503 
3504     __kmp_destroy_user_lock_ =
3505         (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);
3506 
3507     __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3508 
3509     __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3510 
3511     __kmp_set_user_lock_location_ =
3512         (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3513 
3514     __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3515 
3516     __kmp_set_user_lock_flags_ =
3517         (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3518   } break;
3519 
3520 #if KMP_USE_FUTEX
3521 
3522   case lk_futex: {
3523     __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
3524     __kmp_user_lock_size = sizeof(kmp_futex_lock_t);
3525 
3526     __kmp_get_user_lock_owner_ =
3527         (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);
3528 
3529     if (__kmp_env_consistency_check) {
3530       KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
3531       KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
3532     } else {
3533       KMP_BIND_USER_LOCK(futex);
3534       KMP_BIND_NESTED_USER_LOCK(futex);
3535     }
3536 
3537     __kmp_destroy_user_lock_ =
3538         (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);
3539 
3540     __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;
3541 
3542     __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;
3543 
3544     __kmp_set_user_lock_location_ =
3545         (void (*)(kmp_user_lock_p, const ident_t *))NULL;
3546 
3547     __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;
3548 
3549     __kmp_set_user_lock_flags_ =
3550         (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
3551   } break;
3552 
3553 #endif // KMP_USE_FUTEX
3554 
3555   case lk_ticket: {
3556     __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
3557     __kmp_user_lock_size = sizeof(kmp_ticket_lock_t);
3558 
3559     __kmp_get_user_lock_owner_ =
3560         (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);
3561 
3562     if (__kmp_env_consistency_check) {
3563       KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
3564       KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
3565     } else {
3566       KMP_BIND_USER_LOCK(ticket);
3567       KMP_BIND_NESTED_USER_LOCK(ticket);
3568     }
3569 
3570     __kmp_destroy_user_lock_ =
3571         (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);
3572 
3573     __kmp_is_user_lock_initialized_ =
3574         (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);
3575 
3576     __kmp_get_user_lock_location_ =
3577         (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);
3578 
3579     __kmp_set_user_lock_location_ = (void (*)(
3580         kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);
3581 
3582     __kmp_get_user_lock_flags_ =
3583         (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);
3584 
3585     __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3586         &__kmp_set_ticket_lock_flags);
3587   } break;
3588 
3589   case lk_queuing: {
3590     __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
3591     __kmp_user_lock_size = sizeof(kmp_queuing_lock_t);
3592 
3593     __kmp_get_user_lock_owner_ =
3594         (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3595 
3596     if (__kmp_env_consistency_check) {
3597       KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
3598       KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
3599     } else {
3600       KMP_BIND_USER_LOCK(queuing);
3601       KMP_BIND_NESTED_USER_LOCK(queuing);
3602     }
3603 
3604     __kmp_destroy_user_lock_ =
3605         (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);
3606 
3607     __kmp_is_user_lock_initialized_ =
3608         (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3609 
3610     __kmp_get_user_lock_location_ =
3611         (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3612 
3613     __kmp_set_user_lock_location_ = (void (*)(
3614         kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3615 
3616     __kmp_get_user_lock_flags_ =
3617         (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3618 
3619     __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3620         &__kmp_set_queuing_lock_flags);
3621   } break;
3622 
3623 #if KMP_USE_ADAPTIVE_LOCKS
3624   case lk_adaptive: {
3625     __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
3626     __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);
3627 
3628     __kmp_get_user_lock_owner_ =
3629         (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);
3630 
3631     if (__kmp_env_consistency_check) {
3632       KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
3633     } else {
3634       KMP_BIND_USER_LOCK(adaptive);
3635     }
3636 
3637     __kmp_destroy_user_lock_ =
3638         (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);
3639 
3640     __kmp_is_user_lock_initialized_ =
3641         (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);
3642 
3643     __kmp_get_user_lock_location_ =
3644         (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);
3645 
3646     __kmp_set_user_lock_location_ = (void (*)(
3647         kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);
3648 
3649     __kmp_get_user_lock_flags_ =
3650         (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);
3651 
3652     __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3653         &__kmp_set_queuing_lock_flags);
3654 
3655   } break;
3656 #endif // KMP_USE_ADAPTIVE_LOCKS
3657 
3658   case lk_drdpa: {
3659     __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
3660     __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);
3661 
3662     __kmp_get_user_lock_owner_ =
3663         (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);
3664 
3665     if (__kmp_env_consistency_check) {
3666       KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
3667       KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
3668     } else {
3669       KMP_BIND_USER_LOCK(drdpa);
3670       KMP_BIND_NESTED_USER_LOCK(drdpa);
3671     }
3672 
3673     __kmp_destroy_user_lock_ =
3674         (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);
3675 
3676     __kmp_is_user_lock_initialized_ =
3677         (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);
3678 
3679     __kmp_get_user_lock_location_ =
3680         (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);
3681 
3682     __kmp_set_user_lock_location_ = (void (*)(
3683         kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);
3684 
3685     __kmp_get_user_lock_flags_ =
3686         (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);
3687 
3688     __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
3689         &__kmp_set_drdpa_lock_flags);
3690   } break;
3691   }
3692 }
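// These function pointers are the static-dispatch counterpart of the dynamic
// jump tables: the OpenMP user-lock entry points call through them, and
// __kmp_set_user_lock_vptrs() binds them once the global lock kind has been
// chosen. The KMP_BIND_USER_LOCK* macros (assumed here to be defined in
// kmp_lock.h) fill in the acquire/release/test/init/destroy pointers for the
// named lock suffix.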
3693 
3694 // ----------------------------------------------------------------------------
3695 // User lock table & lock allocation
3696 
3697 kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
3698 kmp_user_lock_p __kmp_lock_pool = NULL;
3699 
3700 // Lock block-allocation support.
3701 kmp_block_of_locks *__kmp_lock_blocks = NULL;
3702 int __kmp_num_locks_in_block = 1; // FIXME - tune this value
3703 
3704 static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
3705   // Assume that kmp_global_lock is held upon entry/exit.
3706   kmp_lock_index_t index;
3707   if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
3708     kmp_lock_index_t size;
3709     kmp_user_lock_p *table;
3710     // Reallocate lock table.
3711     if (__kmp_user_lock_table.allocated == 0) {
3712       size = 1024;
3713     } else {
3714       size = __kmp_user_lock_table.allocated * 2;
3715     }
3716     table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
3717     KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
3718                sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
3719     table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
3720     // We cannot free the previous table now, since it may be in use by other
3721     // threads. So save the pointer to the previous table in the first
3722     // element of the new table. All the tables will be organized into a list,
3723     // and can be freed when the library is shutting down.
3724     __kmp_user_lock_table.table = table;
3725     __kmp_user_lock_table.allocated = size;
3726   }
3727   KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
3728                    __kmp_user_lock_table.allocated);
3729   index = __kmp_user_lock_table.used;
3730   __kmp_user_lock_table.table[index] = lck;
3731   ++__kmp_user_lock_table.used;
3732   return index;
3733 }
3734 
3735 static kmp_user_lock_p __kmp_lock_block_allocate() {
3736   // Assume that kmp_global_lock is held upon entry/exit.
3737   static int last_index = 0;
3738   if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
3739     // Restart the index.
3740     last_index = 0;
3741     // Need to allocate a new block.
3742     KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3743     size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
3744     char *buffer =
3745         (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
3746     // Set up the new block.
3747     kmp_block_of_locks *new_block =
3748         (kmp_block_of_locks *)(&buffer[space_for_locks]);
3749     new_block->next_block = __kmp_lock_blocks;
3750     new_block->locks = (void *)buffer;
3751     // Publish the new block.
3752     KMP_MB();
3753     __kmp_lock_blocks = new_block;
3754   }
3755   kmp_user_lock_p ret = (kmp_user_lock_p)(&(
3756       ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
3757   last_index++;
3758   return ret;
3759 }
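// Layout note: each block is a single allocation holding
// __kmp_num_locks_in_block lock-sized slots followed by the kmp_block_of_locks
// header, so cleanup only frees block->locks; the header lives inside that
// same buffer (see __kmp_cleanup_user_locks).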
3760 
3761 // Get memory for a lock. It may be freshly allocated memory or reused memory
3762 // from the lock pool.
3763 kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
3764                                          kmp_lock_flags_t flags) {
3765   kmp_user_lock_p lck;
3766   kmp_lock_index_t index;
3767   KMP_DEBUG_ASSERT(user_lock);
3768 
3769   __kmp_acquire_lock(&__kmp_global_lock, gtid);
3770 
3771   if (__kmp_lock_pool == NULL) {
3772     // Lock pool is empty. Allocate new memory.
3773 
3774     // ANNOTATION: Found no good way to express the synchronisation
3775     // between allocation and usage, so ignore the allocation
3776     ANNOTATE_IGNORE_WRITES_BEGIN();
3777     if (__kmp_num_locks_in_block <= 1) { // Tune this cutoff point.
3778       lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
3779     } else {
3780       lck = __kmp_lock_block_allocate();
3781     }
3782     ANNOTATE_IGNORE_WRITES_END();
3783 
3784     // Insert lock in the table so that it can be freed in __kmp_cleanup,
3785     // and debugger has info on all allocated locks.
3786     index = __kmp_lock_table_insert(lck);
3787   } else {
3788     // Pick up lock from pool.
3789     lck = __kmp_lock_pool;
3790     index = __kmp_lock_pool->pool.index;
3791     __kmp_lock_pool = __kmp_lock_pool->pool.next;
3792   }
3793 
3794   // We could potentially differentiate between nested and regular locks
3795   // here, and do the lock table lookup for regular locks only.
3796   if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3797     *((kmp_lock_index_t *)user_lock) = index;
3798   } else {
3799     *((kmp_user_lock_p *)user_lock) = lck;
3800   }
3801 
3802   // Mark the lock if it is a critical section lock.
3803   __kmp_set_user_lock_flags(lck, flags);
3804 
3805   __kmp_release_lock(&__kmp_global_lock, gtid); // AC: TODO move this line upper
3806 
3807   return lck;
3808 }
3809 
3810 // Put the lock's memory into the pool for reuse.
3811 void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
3812                           kmp_user_lock_p lck) {
3813   KMP_DEBUG_ASSERT(user_lock != NULL);
3814   KMP_DEBUG_ASSERT(lck != NULL);
3815 
3816   __kmp_acquire_lock(&__kmp_global_lock, gtid);
3817 
3818   lck->pool.next = __kmp_lock_pool;
3819   __kmp_lock_pool = lck;
3820   if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3821     kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3822     KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
3823     lck->pool.index = index;
3824   }
3825 
3826   __kmp_release_lock(&__kmp_global_lock, gtid);
3827 }
3828 
3829 kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
3830   kmp_user_lock_p lck = NULL;
3831 
3832   if (__kmp_env_consistency_check) {
3833     if (user_lock == NULL) {
3834       KMP_FATAL(LockIsUninitialized, func);
3835     }
3836   }
3837 
3838   if (OMP_LOCK_T_SIZE < sizeof(void *)) {
3839     kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
3840     if (__kmp_env_consistency_check) {
3841       if (!(0 < index && index < __kmp_user_lock_table.used)) {
3842         KMP_FATAL(LockIsUninitialized, func);
3843       }
3844     }
3845     KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
3846     KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
3847     lck = __kmp_user_lock_table.table[index];
3848   } else {
3849     lck = *((kmp_user_lock_p *)user_lock);
3850   }
3851 
3852   if (__kmp_env_consistency_check) {
3853     if (lck == NULL) {
3854       KMP_FATAL(LockIsUninitialized, func);
3855     }
3856   }
3857 
3858   return lck;
3859 }
3860 
3861 void __kmp_cleanup_user_locks(void) {
3862   // Reset lock pool. Don't worry about locks in the pool--we will free them when
3863   // iterating through the lock table (it includes all the locks, dead or alive).
3864   __kmp_lock_pool = NULL;
3865 
3866 #define IS_CRITICAL(lck)                                                       \
3867   ((__kmp_get_user_lock_flags_ != NULL) &&                                     \
3868    ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))
3869 
3870   // Loop through lock table, free all locks.
3871   // Do not free item [0]; it is reserved for the lock tables list.
3872   //
3873   // FIXME - we are iterating through a list of (pointers to) objects of type
3874   // union kmp_user_lock, but we have no way of knowing whether the base type is
3875   // currently "pool" or whatever the global user lock type is.
3876   //
3877   // We are relying on the fact that for all of the user lock types
3878   // (except "tas"), the first field in the lock struct is the "initialized"
3879   // field, which is set to the address of the lock object itself when
3880   // the lock is initialized.  When the union is of type "pool", the
3881   // first field is a pointer to the next object in the free list, which
3882   // will not be the same address as the object itself.
3883   //
3884   // This means that the check (*__kmp_is_user_lock_initialized_)(lck) will fail
3885   // for "pool" objects on the free list.  This must happen as the "location"
3886   // field of real user locks overlaps the "index" field of "pool" objects.
3887   //
3888   // It would be better to run through the free list, and remove all "pool"
3889   // objects from the lock table before executing this loop.  However,
3890   // "pool" objects do not always have their index field set (only on
3891   // lin_32e), and I don't want to search the lock table for the address
3892   // of every "pool" object on the free list.
3893   while (__kmp_user_lock_table.used > 1) {
3894     const ident *loc;
3895 
3896     // reduce __kmp_user_lock_table.used before freeing the lock,
3897     // so that state of locks is consistent
3898     kmp_user_lock_p lck =
3899         __kmp_user_lock_table.table[--__kmp_user_lock_table.used];
3900 
3901     if ((__kmp_is_user_lock_initialized_ != NULL) &&
3902         (*__kmp_is_user_lock_initialized_)(lck)) {
3903       // Issue a warning if: KMP_CONSISTENCY_CHECK AND lock is initialized AND
3904       // it is NOT a critical section (user is not responsible for destroying
3905       // criticals) AND we know source location to report.
3906       if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
3907           ((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
3908           (loc->psource != NULL)) {
3909         kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, 0);
3910         KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
3911         __kmp_str_loc_free(&str_loc);
3912       }
3913 
3914 #ifdef KMP_DEBUG
3915       if (IS_CRITICAL(lck)) {
3916         KA_TRACE(
3917             20,
3918             ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
3919              lck, *(void **)lck));
3920       } else {
3921         KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
3922                       *(void **)lck));
3923       }
3924 #endif // KMP_DEBUG
3925 
3926       // Cleanup internal lock dynamic resources (for drdpa locks particularly).
3927       __kmp_destroy_user_lock(lck);
3928     }
3929 
3930     // Free the lock if block allocation of locks is not used.
3931     if (__kmp_lock_blocks == NULL) {
3932       __kmp_free(lck);
3933     }
3934   }
3935 
3936 #undef IS_CRITICAL
3937 
3938   // delete lock table(s).
3939   kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
3940   __kmp_user_lock_table.table = NULL;
3941   __kmp_user_lock_table.allocated = 0;
3942 
3943   while (table_ptr != NULL) {
3944     // In the first element we saved the pointer to the previous
3945     // (smaller) lock table.
3946     kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
3947     __kmp_free(table_ptr);
3948     table_ptr = next;
3949   }
3950 
3951   // Free buffers allocated for blocks of locks.
3952   kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
3953   __kmp_lock_blocks = NULL;
3954 
3955   while (block_ptr != NULL) {
3956     kmp_block_of_locks_t *next = block_ptr->next_block;
3957     __kmp_free(block_ptr->locks);
3958     // *block_ptr itself was allocated at the end of the locks vector.
3959     block_ptr = next;
3960   }
3961 
3962   TCW_4(__kmp_init_user_locks, FALSE);
3963 }
3964 
3965 #endif // KMP_USE_DYNAMIC_LOCK
3966