//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C1x standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up to date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

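// The func_* helpers below perform the actual hardware-level atomic operation
// (via the __sync_* builtins or, for 128-bit types, a mutex-based emulation);
// the memory-model bookkeeping TSan needs (vector clocks, shadow accesses)
// is layered on top by the Atomic* wrappers further down.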
template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set is only an acquire barrier, not a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// The 128-bit atomic ops below are emulated under a tsan-internal mutex;
// this assumes that the atomic variables are not concurrently accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template<typename T>
static int SizeLog() {
  if (sizeof(T) <= 1)
    return kSizeLog1;
  else if (sizeof(T) <= 2)
    return kSizeLog2;
  else if (sizeof(T) <= 4)
    return kSizeLog4;
  else
    return kSizeLog8;
  // For 16-byte atomics we also use 8-byte memory accesses;
  // this leads to false negatives only in very obscure cases.
}

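// The to_atomic helpers reinterpret the raw __tsan_atomicN pointers as the
// sanitizer_common atomic types, so the NoTsan* fast paths below can be
// implemented with the regular atomic_load/atomic_store primitives.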
#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  CHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

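// Roughly how these templates are reached: the compiler's TSan instrumentation
// rewrites, e.g., a 4-byte x.load(std::memory_order_acquire) into a call to
// __tsan_atomic32_load(&x, mo_acquire) (see the interface functions below),
// which dispatches here through the SCOPED_ATOMIC machinery.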
template<typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  CHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock((uptr)a, false);
  if (s) {
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
    s->mtx.ReadUnlock();
  }
  MemoryReadAtomic(thr, pc, (uptr)a, SizeLog<T>());
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

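// A release (or seq_cst) store publishes the current thread's vector clock
// into the SyncVar associated with the address, so that a later acquire load
// of the same address (handled in AtomicLoad above) establishes the
// happens-before edge. Non-release stores take the NoTsan fast path.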
template<typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  CHECK(IsStoreOrder(mo));
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
  s->mtx.Unlock();
}

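// Read-modify-write operations combine a shadow write with the acquire and/or
// release clock updates dictated by mo; the actual update F is performed while
// holding the SyncVar mutex (except for mo_relaxed, which needs no clock work).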
template<typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  if (mo != mo_relaxed) {
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, true);
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    if (IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);
  }
  v = F(a, v);
  if (s)
    s->mtx.Unlock();
  return v;
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

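// Compare-and-swap: on success the chosen order mo applies; on failure the
// comparand *c is updated with the observed value and only the failure order
// fmo (restricted to load orders) governs the acquire side.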
template<typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T *c, T v, morder mo, morder fmo) {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
  // (mo_relaxed) when those are used.
  CHECK(IsLoadOrder(fmo));

  MemoryWriteAtomic(thr, pc, (uptr)a, SizeLog<T>());
  SyncVar *s = 0;
  bool write_lock = IsReleaseOrder(mo);

  if (mo != mo_relaxed || fmo != mo_relaxed)
    s = ctx->metamap.GetOrCreateAndLock(thr, pc, (uptr)a, write_lock);

  T cc = *c;
  T pr = func_cas(a, cc, v);
  bool success = pr == cc;
  if (!success) {
    *c = pr;
    mo = fmo;
  }

  if (s) {
    thr->fast_state.IncrementEpoch();
    // Can't increment epoch w/o writing to the trace as well.
    TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

    if (success && IsAcqRelOrder(mo))
      AcquireReleaseImpl(thr, pc, &s->clock);
    else if (success && IsReleaseOrder(mo))
      ReleaseImpl(thr, pc, &s->clock);
    else if (IsAcquireOrder(mo))
      AcquireImpl(thr, pc, &s->clock);

    if (write_lock)
      s->mtx.Unlock();
    else
      s->mtx.ReadUnlock();
  }

  return success;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It also subtly affects semantics, but we don't model the
  // difference.
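  // For example (assuming morder follows the standard __ATOMIC_* numbering,
  // so mo_acquire == 2): __ATOMIC_HLE_ACQUIRE | __ATOMIC_ACQUIRE == 0x10002,
  // and masking with 0x7fff yields plain mo_acquire.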
  return (morder)(mo & 0x7fff);
}

#define SCOPED_ATOMIC(func, ...) \
    ThreadState *const thr = cur_thread(); \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) { \
      ProcessPendingSignals(thr); \
      return NoTsanAtomic##func(__VA_ARGS__); \
    } \
    const uptr callpc = (uptr)__builtin_return_address(0); \
    uptr pc = StackTrace::GetCurrentPc(); \
    mo = convert_morder(mo); \
    AtomicStatInc(thr, sizeof(*a), mo, StatAtomic##func); \
    ScopedAtomic sa(thr, callpc, a, mo, __func__); \
    return Atomic##func(thr, pc, __VA_ARGS__); \
/**/
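
// Note: ScopedAtomic records callpc (the caller's return address) as the
// function-entry frame, while the shadow accesses performed by Atomic##func
// are attributed to pc, the PC inside the runtime itself.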

class ScopedAtomic {
 public:
  ScopedAtomic(ThreadState *thr, uptr pc, const volatile void *a,
               morder mo, const char *func)
      : thr_(thr) {
    FuncEntry(thr_, pc);
    DPrintf("#%d: %s(%p, %d)\n", thr_->tid, func, a, mo);
  }
  ~ScopedAtomic() {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
  }
 private:
  ThreadState *thr_;
};

static void AtomicStatInc(ThreadState *thr, uptr size, morder mo, StatType t) {
  StatInc(thr, StatAtomic);
  StatInc(thr, t);
  StatInc(thr, size == 1 ? StatAtomic1
             : size == 2 ? StatAtomic2
             : size == 4 ? StatAtomic4
             : size == 8 ? StatAtomic8
             :             StatAtomic16);
  StatInc(thr, mo == mo_relaxed ? StatAtomicRelaxed
             : mo == mo_consume ? StatAtomicConsume
             : mo == mo_acquire ? StatAtomicAcquire
             : mo == mo_release ? StatAtomicRelease
             : mo == mo_acq_rel ? StatAtomicAcq_Rel
             :                    StatAtomicSeq_Cst);
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  SCOPED_ATOMIC(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  SCOPED_ATOMIC(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  SCOPED_ATOMIC(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) {
  char* a = 0;
  SCOPED_ATOMIC(Fence, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

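// The Go runtime calls these entry points with a single buffer `a` that packs
// the operands and result slots; the fixed offsets used below (a+8, a+16, ...)
// reflect that layout. cpc is the caller PC used for the function-entry event,
// pc is the PC attributed to the atomic operation itself.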
#define ATOMIC(func, ...) \
    if (thr->ignore_sync) { \
      NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

#define ATOMIC_RET(func, ret, ...) \
    if (thr->ignore_sync) { \
      (ret) = NoTsanAtomic##func(__VA_ARGS__); \
    } else { \
      FuncEntry(thr, cpc); \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr); \
    } \
/**/

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO
967