15a3bb1a4SNico Weber //===-- tsan_interface_atomic.cpp -----------------------------------------===//
25a3bb1a4SNico Weber //
35a3bb1a4SNico Weber // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
45a3bb1a4SNico Weber // See https://llvm.org/LICENSE.txt for license information.
55a3bb1a4SNico Weber // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
65a3bb1a4SNico Weber //
75a3bb1a4SNico Weber //===----------------------------------------------------------------------===//
85a3bb1a4SNico Weber //
95a3bb1a4SNico Weber // This file is a part of ThreadSanitizer (TSan), a race detector.
105a3bb1a4SNico Weber //
115a3bb1a4SNico Weber //===----------------------------------------------------------------------===//
125a3bb1a4SNico Weber
135a3bb1a4SNico Weber // ThreadSanitizer atomic operations are based on C++11/C1x standards.
145a3bb1a4SNico Weber // For background see C++11 standard. A slightly older, publicly
155a3bb1a4SNico Weber // available draft of the standard (not entirely up-to-date, but close enough
165a3bb1a4SNico Weber // for casual browsing) is available here:
175a3bb1a4SNico Weber // http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
185a3bb1a4SNico Weber // The following page contains more background information:
195a3bb1a4SNico Weber // http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
205a3bb1a4SNico Weber
215a3bb1a4SNico Weber #include "sanitizer_common/sanitizer_placement_new.h"
225a3bb1a4SNico Weber #include "sanitizer_common/sanitizer_stacktrace.h"
235a3bb1a4SNico Weber #include "sanitizer_common/sanitizer_mutex.h"
245a3bb1a4SNico Weber #include "tsan_flags.h"
255a3bb1a4SNico Weber #include "tsan_interface.h"
265a3bb1a4SNico Weber #include "tsan_rtl.h"
275a3bb1a4SNico Weber
28c0fa6322SVitaly Buka using namespace __tsan;
295a3bb1a4SNico Weber
305a3bb1a4SNico Weber #if !SANITIZER_GO && __TSAN_HAS_INT128
315a3bb1a4SNico Weber // Protects emulation of 128-bit atomic operations.
325a3bb1a4SNico Weber static StaticSpinMutex mutex128;
335a3bb1a4SNico Weber #endif
345a3bb1a4SNico Weber
3514e306faSDmitry Vyukov #if SANITIZER_DEBUG
IsLoadOrder(morder mo)365a3bb1a4SNico Weber static bool IsLoadOrder(morder mo) {
375a3bb1a4SNico Weber return mo == mo_relaxed || mo == mo_consume
385a3bb1a4SNico Weber || mo == mo_acquire || mo == mo_seq_cst;
395a3bb1a4SNico Weber }
405a3bb1a4SNico Weber
IsStoreOrder(morder mo)415a3bb1a4SNico Weber static bool IsStoreOrder(morder mo) {
425a3bb1a4SNico Weber return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
435a3bb1a4SNico Weber }
4414e306faSDmitry Vyukov #endif
455a3bb1a4SNico Weber
IsReleaseOrder(morder mo)465a3bb1a4SNico Weber static bool IsReleaseOrder(morder mo) {
475a3bb1a4SNico Weber return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
485a3bb1a4SNico Weber }
495a3bb1a4SNico Weber
IsAcquireOrder(morder mo)505a3bb1a4SNico Weber static bool IsAcquireOrder(morder mo) {
515a3bb1a4SNico Weber return mo == mo_consume || mo == mo_acquire
525a3bb1a4SNico Weber || mo == mo_acq_rel || mo == mo_seq_cst;
535a3bb1a4SNico Weber }
545a3bb1a4SNico Weber
IsAcqRelOrder(morder mo)555a3bb1a4SNico Weber static bool IsAcqRelOrder(morder mo) {
565a3bb1a4SNico Weber return mo == mo_acq_rel || mo == mo_seq_cst;
575a3bb1a4SNico Weber }
585a3bb1a4SNico Weber
// Atomically replaces *v with op; returns the previous value.
template <typename T>
T func_xchg(volatile T *v, T op) {
  T prev = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set is only an acquire barrier; issue a full one.
  __sync_synchronize();
  return prev;
}
655a3bb1a4SNico Weber
// Atomic fetch-add; returns the value held before the addition.
template <typename T>
T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}
695a3bb1a4SNico Weber
// Atomic fetch-sub; returns the value held before the subtraction.
template <typename T>
T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}
735a3bb1a4SNico Weber
// Atomic fetch-and; returns the value held before the operation.
template <typename T>
T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}
775a3bb1a4SNico Weber
// Atomic fetch-or; returns the value held before the operation.
template <typename T>
T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}
815a3bb1a4SNico Weber
// Atomic fetch-xor; returns the value held before the operation.
template <typename T>
T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}
855a3bb1a4SNico Weber
// Atomic fetch-nand emulated with a CAS loop, because clang does not
// provide __sync_fetch_and_nand. Returns the value observed before the
// successful update.
template <typename T>
T func_nand(volatile T *v, T op) {
  T expected = *v;
  while (true) {
    T desired = ~(expected & op);
    T observed = __sync_val_compare_and_swap(v, expected, desired);
    if (observed == expected)
      return expected;
    // Lost the race: retry with the freshly observed value.
    expected = observed;
  }
}
975a3bb1a4SNico Weber
// Atomic compare-and-swap; returns the value *v held before the operation.
template <typename T>
T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}
1015a3bb1a4SNico Weber
1025a3bb1a4SNico Weber // clang does not support 128-bit atomic ops.
1035a3bb1a4SNico Weber // Atomic ops are executed under tsan internal mutex,
1045a3bb1a4SNico Weber // here we assume that the atomic variables are not accessed
1055a3bb1a4SNico Weber // from non-instrumented code.
1065a3bb1a4SNico Weber #if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
1075a3bb1a4SNico Weber && __TSAN_HAS_INT128
// 128-bit exchange emulated under the global 128-bit emulation mutex.
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  *v = op;
  return prev;
}
1145a3bb1a4SNico Weber
// 128-bit fetch-add emulated under the emulation mutex.
a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  *v = prev + op;
  return prev;
}
1215a3bb1a4SNico Weber
// 128-bit fetch-sub emulated under the emulation mutex.
a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  *v = prev - op;
  return prev;
}
1285a3bb1a4SNico Weber
// 128-bit fetch-and emulated under the emulation mutex.
a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  *v = prev & op;
  return prev;
}
1355a3bb1a4SNico Weber
// 128-bit fetch-or emulated under the emulation mutex.
a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  *v = prev | op;
  return prev;
}
1425a3bb1a4SNico Weber
// 128-bit fetch-xor emulated under the emulation mutex.
a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  *v = prev ^ op;
  return prev;
}
1495a3bb1a4SNico Weber
// 128-bit fetch-nand emulated under the emulation mutex.
a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  *v = ~(prev & op);
  return prev;
}
1565a3bb1a4SNico Weber
// 128-bit compare-and-swap emulated under the emulation mutex.
// Returns the value observed before the (possible) update.
a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  const a128 prev = *v;
  if (prev == cmp)
    *v = xch;
  return prev;
}
1645a3bb1a4SNico Weber #endif
1655a3bb1a4SNico Weber
// Maps sizeof(T) to the access size recorded in shadow memory.
// For 16-byte atomics an 8-byte memory access is used as well; this leads
// to false negatives only in very obscure cases.
template <typename T>
static int AccessSize() {
  return sizeof(T) <= 1   ? 1
         : sizeof(T) <= 2 ? 2
         : sizeof(T) <= 4 ? 4
                          : 8;
}
1795a3bb1a4SNico Weber
1805a3bb1a4SNico Weber #if !SANITIZER_GO
to_atomic(const volatile a8 * a)1815a3bb1a4SNico Weber static atomic_uint8_t *to_atomic(const volatile a8 *a) {
1825a3bb1a4SNico Weber return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
1835a3bb1a4SNico Weber }
1845a3bb1a4SNico Weber
to_atomic(const volatile a16 * a)1855a3bb1a4SNico Weber static atomic_uint16_t *to_atomic(const volatile a16 *a) {
1865a3bb1a4SNico Weber return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
1875a3bb1a4SNico Weber }
1885a3bb1a4SNico Weber #endif
1895a3bb1a4SNico Weber
to_atomic(const volatile a32 * a)1905a3bb1a4SNico Weber static atomic_uint32_t *to_atomic(const volatile a32 *a) {
1915a3bb1a4SNico Weber return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
1925a3bb1a4SNico Weber }
1935a3bb1a4SNico Weber
to_atomic(const volatile a64 * a)1945a3bb1a4SNico Weber static atomic_uint64_t *to_atomic(const volatile a64 *a) {
1955a3bb1a4SNico Weber return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
1965a3bb1a4SNico Weber }
1975a3bb1a4SNico Weber
to_mo(morder mo)1985a3bb1a4SNico Weber static memory_order to_mo(morder mo) {
1995a3bb1a4SNico Weber switch (mo) {
2005a3bb1a4SNico Weber case mo_relaxed: return memory_order_relaxed;
2015a3bb1a4SNico Weber case mo_consume: return memory_order_consume;
2025a3bb1a4SNico Weber case mo_acquire: return memory_order_acquire;
2035a3bb1a4SNico Weber case mo_release: return memory_order_release;
2045a3bb1a4SNico Weber case mo_acq_rel: return memory_order_acq_rel;
2055a3bb1a4SNico Weber case mo_seq_cst: return memory_order_seq_cst;
2065a3bb1a4SNico Weber }
20714e306faSDmitry Vyukov DCHECK(0);
2085a3bb1a4SNico Weber return memory_order_seq_cst;
2095a3bb1a4SNico Weber }
2105a3bb1a4SNico Weber
2115a3bb1a4SNico Weber template<typename T>
NoTsanAtomicLoad(const volatile T * a,morder mo)2125a3bb1a4SNico Weber static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
2135a3bb1a4SNico Weber return atomic_load(to_atomic(a), to_mo(mo));
2145a3bb1a4SNico Weber }
2155a3bb1a4SNico Weber
2165a3bb1a4SNico Weber #if __TSAN_HAS_INT128 && !SANITIZER_GO
// 128-bit uninstrumented load; there are no native 128-bit atomics here,
// so the read happens under the emulation mutex (mo is ignored).
static a128 NoTsanAtomicLoad(const volatile a128 *addr, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *addr;
}
2215a3bb1a4SNico Weber #endif
2225a3bb1a4SNico Weber
// Instrumented atomic load.
// Non-acquire loads only record the memory access and read the value.
// Acquire loads additionally acquire the clock of the sync object for this
// address, if one already exists (the load side never creates one).
template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  DCHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
  if (s) {
    SlotLocker locker(thr);
    ReadLock lock(&s->mtx);
    thr->clock.Acquire(s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
  }
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
  return v;
}
2485a3bb1a4SNico Weber
2495a3bb1a4SNico Weber template<typename T>
NoTsanAtomicStore(volatile T * a,T v,morder mo)2505a3bb1a4SNico Weber static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
2515a3bb1a4SNico Weber atomic_store(to_atomic(a), v, to_mo(mo));
2525a3bb1a4SNico Weber }
2535a3bb1a4SNico Weber
2545a3bb1a4SNico Weber #if __TSAN_HAS_INT128 && !SANITIZER_GO
NoTsanAtomicStore(volatile a128 * a,a128 v,morder mo)2555a3bb1a4SNico Weber static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
2565a3bb1a4SNico Weber SpinMutexLock lock(&mutex128);
2575a3bb1a4SNico Weber *a = v;
2585a3bb1a4SNico Weber }
2595a3bb1a4SNico Weber #endif
2605a3bb1a4SNico Weber
// Instrumented atomic store.
// Non-release stores take the fast path; release stores release-store the
// thread's clock into the sync object (creating it on demand) and then
// increment the thread's epoch.
template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  DCHECK(IsStoreOrder(mo));
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly saying even relaxed store cuts off release sequence,
  // so must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
    // The store itself is performed while holding the sync mutex, keeping
    // the stored value consistent with the released clock.
    NoTsanAtomicStore(a, v, mo);
  }
  IncrementEpoch(thr);
}
2835a3bb1a4SNico Weber
// Generic instrumented read-modify-write; F is the uninstrumented atomic
// primitive (func_add, func_xchg, ...) that performs the actual update.
template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // Fast path: relaxed RMW needs no clock manipulation.
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    // Release orders modify s->clock below; presumably the second RWLock
    // argument selects exclusive locking for them — confirm against RWLock.
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    // The atomic operation itself happens under the sync mutex.
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}
3055a3bb1a4SNico Weber
3065a3bb1a4SNico Weber template<typename T>
NoTsanAtomicExchange(volatile T * a,T v,morder mo)3075a3bb1a4SNico Weber static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
3085a3bb1a4SNico Weber return func_xchg(a, v);
3095a3bb1a4SNico Weber }
3105a3bb1a4SNico Weber
3115a3bb1a4SNico Weber template<typename T>
NoTsanAtomicFetchAdd(volatile T * a,T v,morder mo)3125a3bb1a4SNico Weber static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
3135a3bb1a4SNico Weber return func_add(a, v);
3145a3bb1a4SNico Weber }
3155a3bb1a4SNico Weber
3165a3bb1a4SNico Weber template<typename T>
NoTsanAtomicFetchSub(volatile T * a,T v,morder mo)3175a3bb1a4SNico Weber static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
3185a3bb1a4SNico Weber return func_sub(a, v);
3195a3bb1a4SNico Weber }
3205a3bb1a4SNico Weber
3215a3bb1a4SNico Weber template<typename T>
NoTsanAtomicFetchAnd(volatile T * a,T v,morder mo)3225a3bb1a4SNico Weber static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
3235a3bb1a4SNico Weber return func_and(a, v);
3245a3bb1a4SNico Weber }
3255a3bb1a4SNico Weber
3265a3bb1a4SNico Weber template<typename T>
NoTsanAtomicFetchOr(volatile T * a,T v,morder mo)3275a3bb1a4SNico Weber static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
3285a3bb1a4SNico Weber return func_or(a, v);
3295a3bb1a4SNico Weber }
3305a3bb1a4SNico Weber
3315a3bb1a4SNico Weber template<typename T>
NoTsanAtomicFetchXor(volatile T * a,T v,morder mo)3325a3bb1a4SNico Weber static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
3335a3bb1a4SNico Weber return func_xor(a, v);
3345a3bb1a4SNico Weber }
3355a3bb1a4SNico Weber
3365a3bb1a4SNico Weber template<typename T>
NoTsanAtomicFetchNand(volatile T * a,T v,morder mo)3375a3bb1a4SNico Weber static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
3385a3bb1a4SNico Weber return func_nand(a, v);
3395a3bb1a4SNico Weber }
3405a3bb1a4SNico Weber
3415a3bb1a4SNico Weber template<typename T>
AtomicExchange(ThreadState * thr,uptr pc,volatile T * a,T v,morder mo)3425a3bb1a4SNico Weber static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
3435a3bb1a4SNico Weber morder mo) {
3445a3bb1a4SNico Weber return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
3455a3bb1a4SNico Weber }
3465a3bb1a4SNico Weber
3475a3bb1a4SNico Weber template<typename T>
AtomicFetchAdd(ThreadState * thr,uptr pc,volatile T * a,T v,morder mo)3485a3bb1a4SNico Weber static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
3495a3bb1a4SNico Weber morder mo) {
3505a3bb1a4SNico Weber return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
3515a3bb1a4SNico Weber }
3525a3bb1a4SNico Weber
3535a3bb1a4SNico Weber template<typename T>
AtomicFetchSub(ThreadState * thr,uptr pc,volatile T * a,T v,morder mo)3545a3bb1a4SNico Weber static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
3555a3bb1a4SNico Weber morder mo) {
3565a3bb1a4SNico Weber return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
3575a3bb1a4SNico Weber }
3585a3bb1a4SNico Weber
3595a3bb1a4SNico Weber template<typename T>
AtomicFetchAnd(ThreadState * thr,uptr pc,volatile T * a,T v,morder mo)3605a3bb1a4SNico Weber static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
3615a3bb1a4SNico Weber morder mo) {
3625a3bb1a4SNico Weber return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
3635a3bb1a4SNico Weber }
3645a3bb1a4SNico Weber
3655a3bb1a4SNico Weber template<typename T>
AtomicFetchOr(ThreadState * thr,uptr pc,volatile T * a,T v,morder mo)3665a3bb1a4SNico Weber static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
3675a3bb1a4SNico Weber morder mo) {
3685a3bb1a4SNico Weber return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
3695a3bb1a4SNico Weber }
3705a3bb1a4SNico Weber
3715a3bb1a4SNico Weber template<typename T>
AtomicFetchXor(ThreadState * thr,uptr pc,volatile T * a,T v,morder mo)3725a3bb1a4SNico Weber static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
3735a3bb1a4SNico Weber morder mo) {
3745a3bb1a4SNico Weber return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
3755a3bb1a4SNico Weber }
3765a3bb1a4SNico Weber
3775a3bb1a4SNico Weber template<typename T>
AtomicFetchNand(ThreadState * thr,uptr pc,volatile T * a,T v,morder mo)3785a3bb1a4SNico Weber static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
3795a3bb1a4SNico Weber morder mo) {
3805a3bb1a4SNico Weber return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
3815a3bb1a4SNico Weber }
3825a3bb1a4SNico Weber
3835a3bb1a4SNico Weber template<typename T>
NoTsanAtomicCAS(volatile T * a,T * c,T v,morder mo,morder fmo)3845a3bb1a4SNico Weber static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
3855a3bb1a4SNico Weber return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
3865a3bb1a4SNico Weber }
3875a3bb1a4SNico Weber
3885a3bb1a4SNico Weber #if __TSAN_HAS_INT128
NoTsanAtomicCAS(volatile a128 * a,a128 * c,a128 v,morder mo,morder fmo)3895a3bb1a4SNico Weber static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
3905a3bb1a4SNico Weber morder mo, morder fmo) {
3915a3bb1a4SNico Weber a128 old = *c;
3925a3bb1a4SNico Weber a128 cur = func_cas(a, old, v);
3935a3bb1a4SNico Weber if (cur == old)
3945a3bb1a4SNico Weber return true;
3955a3bb1a4SNico Weber *c = cur;
3965a3bb1a4SNico Weber return false;
3975a3bb1a4SNico Weber }
3985a3bb1a4SNico Weber #endif
3995a3bb1a4SNico Weber
4005a3bb1a4SNico Weber template<typename T>
NoTsanAtomicCAS(volatile T * a,T c,T v,morder mo,morder fmo)4015a3bb1a4SNico Weber static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
4025a3bb1a4SNico Weber NoTsanAtomicCAS(a, &c, v, mo, fmo);
4035a3bb1a4SNico Weber return c;
4045a3bb1a4SNico Weber }
4055a3bb1a4SNico Weber
// Instrumented compare-exchange.
// On success returns true and leaves *c unchanged; on failure returns false
// and stores the observed value into *c. On failure clock operations use
// the failure order fmo instead of mo.
template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) fallbacks to Monotonic
  // (mo_relaxed) when those are used.
  DCHECK(IsLoadOrder(fmo));

  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // Fast path: fully relaxed CAS needs no clock manipulation.
  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
    T cc = *c;
    T pr = func_cas(a, cc, v);
    if (pr == cc)
      return true;
    *c = pr;
    return false;
  }
  SlotLocker locker(thr);
  bool release = IsReleaseOrder(mo);
  bool success;
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, release);
    T cc = *c;
    T pr = func_cas(a, cc, v);
    success = pr == cc;
    if (!success) {
      *c = pr;
      // Failed CAS: use the failure memory order for the clock ops below.
      mo = fmo;
    }
    if (success && IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (success && IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
  }
  // Epoch is incremented only when a release actually happened.
  if (success && release)
    IncrementEpoch(thr);
  return success;
}
4475a3bb1a4SNico Weber
4485a3bb1a4SNico Weber template<typename T>
AtomicCAS(ThreadState * thr,uptr pc,volatile T * a,T c,T v,morder mo,morder fmo)4495a3bb1a4SNico Weber static T AtomicCAS(ThreadState *thr, uptr pc,
4505a3bb1a4SNico Weber volatile T *a, T c, T v, morder mo, morder fmo) {
4515a3bb1a4SNico Weber AtomicCAS(thr, pc, a, &c, v, mo, fmo);
4525a3bb1a4SNico Weber return c;
4535a3bb1a4SNico Weber }
4545a3bb1a4SNico Weber
4555a3bb1a4SNico Weber #if !SANITIZER_GO
// Uninstrumented fence: a full hardware barrier regardless of mo.
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}
4595a3bb1a4SNico Weber
// Instrumented fence. Currently identical to the uninstrumented one: only
// the hardware barrier is issued, no happens-before modeling is done.
static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
4645a3bb1a4SNico Weber #endif
4655a3bb1a4SNico Weber
4665a3bb1a4SNico Weber // Interface functions follow.
4675a3bb1a4SNico Weber #if !SANITIZER_GO
4685a3bb1a4SNico Weber
4695a3bb1a4SNico Weber // C/C++
4705a3bb1a4SNico Weber
convert_morder(morder mo)4715a3bb1a4SNico Weber static morder convert_morder(morder mo) {
4725a3bb1a4SNico Weber if (flags()->force_seq_cst_atomics)
4735a3bb1a4SNico Weber return (morder)mo_seq_cst;
4745a3bb1a4SNico Weber
4755a3bb1a4SNico Weber // Filter out additional memory order flags:
4765a3bb1a4SNico Weber // MEMMODEL_SYNC = 1 << 15
4775a3bb1a4SNico Weber // __ATOMIC_HLE_ACQUIRE = 1 << 16
4785a3bb1a4SNico Weber // __ATOMIC_HLE_RELEASE = 1 << 17
4795a3bb1a4SNico Weber //
4805a3bb1a4SNico Weber // HLE is an optimization, and we pretend that elision always fails.
4815a3bb1a4SNico Weber // MEMMODEL_SYNC is used when lowering __sync_ atomics,
4825a3bb1a4SNico Weber // since we use __sync_ atomics for actual atomic operations,
4835a3bb1a4SNico Weber // we can safely ignore it as well. It also subtly affects semantics,
4845a3bb1a4SNico Weber // but we don't model the difference.
4855a3bb1a4SNico Weber return (morder)(mo & 0x7fff);
4865a3bb1a4SNico Weber }
4875a3bb1a4SNico Weber
// Common body for every __tsan_atomic* entry point: fetches the current
// thread, processes pending signals, bypasses instrumentation when sync or
// interceptors are ignored, normalizes the order, and dispatches to the
// instrumented implementation with the caller's PC.
#  define ATOMIC_IMPL(func, ...) \
    ThreadState *const thr = cur_thread(); \
    ProcessPendingSignals(thr); \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
      return NoTsanAtomic##func(__VA_ARGS__); \
    mo = convert_morder(mo); \
    return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
4955a3bb1a4SNico Weber
4965a3bb1a4SNico Weber extern "C" {
// C/C++ atomic load entry points, one per operand size.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}
#endif
5235a3bb1a4SNico Weber
// C/C++ atomic store entry points, one per operand size.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}
#endif
5505a3bb1a4SNico Weber
// C/C++ atomic exchange entry points, one per operand size.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif
5775a3bb1a4SNico Weber
// Atomic fetch-and-add entry points (8/16/32/64-bit). All forward to
// ATOMIC_IMPL (defined earlier in this file) with the FetchAdd operation.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
5975a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit fetch-and-add variant, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif
6045a3bb1a4SNico Weber
// Atomic fetch-and-subtract entry points (8/16/32/64-bit); forwarded to
// ATOMIC_IMPL with the FetchSub operation.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
6245a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit fetch-and-subtract variant, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif
6315a3bb1a4SNico Weber
// Atomic fetch-and-AND entry points (8/16/32/64-bit); forwarded to
// ATOMIC_IMPL with the FetchAnd operation.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
6515a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit fetch-and-AND variant, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif
6585a3bb1a4SNico Weber
// Atomic fetch-and-OR entry points (8/16/32/64-bit); forwarded to
// ATOMIC_IMPL with the FetchOr operation.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
6785a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit fetch-and-OR variant, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif
6855a3bb1a4SNico Weber
// Atomic fetch-and-XOR entry points (8/16/32/64-bit); forwarded to
// ATOMIC_IMPL with the FetchXor operation.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
7055a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit fetch-and-XOR variant, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif
7125a3bb1a4SNico Weber
// Atomic fetch-and-NAND entry points (8/16/32/64-bit), matching GCC's
// __atomic_fetch_nand builtins; forwarded to ATOMIC_IMPL with FetchNand.
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
7325a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit fetch-and-NAND variant, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif
7395a3bb1a4SNico Weber
// Strong compare-and-swap entry points (8/16/32/64-bit). `mo` is the
// success order and `fmo` the failure order, mirroring the
// __atomic_compare_exchange builtin ABI. The int result and the handling
// of *c on failure are determined by the shared CAS implementation invoked
// through ATOMIC_IMPL (defined earlier in this file — presumably the
// builtin's success flag; confirm against that implementation).
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
                                            morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
7635a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit strong compare-and-swap, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
                                             morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
7715a3bb1a4SNico Weber
// Weak compare-and-swap entry points (8/16/32/64-bit). Note they forward
// to the same CAS implementation as the *_strong variants — the runtime
// does not exploit the weak form's permission to fail spuriously.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
                                          morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
7955a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit weak compare-and-swap, available only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
8035a3bb1a4SNico Weber
// Value-returning compare-and-swap entry points (__sync_val_compare_and_swap
// style): the expected value `c` is taken by value and the function returns
// the value type instead of a success flag. Behavior is selected by the
// shared CAS implementation via ATOMIC_IMPL (defined earlier in this file).
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
                                       morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
                                         morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
8275a3bb1a4SNico Weber
#if __TSAN_HAS_INT128
// 128-bit value-returning compare-and-swap, only with native __int128.
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
                                           morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif
8355a3bb1a4SNico Weber
// Thread fence: instrumented via the shared Fence implementation.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }

// Signal fence: intentionally empty. A signal fence only constrains
// compiler reordering within the current thread (C++11 atomic_signal_fence),
// so there is no cross-thread synchronization for TSan to model.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
8425a3bb1a4SNico Weber } // extern "C"
8435a3bb1a4SNico Weber
8445a3bb1a4SNico Weber #else // #if !SANITIZER_GO
8455a3bb1a4SNico Weber
8465a3bb1a4SNico Weber // Go
8475a3bb1a4SNico Weber
// Go-runtime dispatch helper for void atomic operations. Expects `thr`,
// `cpc` and `pc` to be in scope at the expansion site. When the thread is
// ignoring synchronization, the uninstrumented NoTsanAtomic##func is called
// directly; otherwise the instrumented Atomic##func is bracketed by
// FuncEntry(thr, cpc)/FuncExit(thr).
# define ATOMIC(func, ...) \
  if (thr->ignore_sync) { \
    NoTsanAtomic##func(__VA_ARGS__); \
  } else { \
    FuncEntry(thr, cpc); \
    Atomic##func(thr, pc, __VA_ARGS__); \
    FuncExit(thr); \
  }
8565a3bb1a4SNico Weber
// Same dispatch as ATOMIC, but for operations with a result: the chosen
// implementation's return value is assigned to the caller-supplied lvalue
// `ret`.
# define ATOMIC_RET(func, ret, ...) \
  if (thr->ignore_sync) { \
    (ret) = NoTsanAtomic##func(__VA_ARGS__); \
  } else { \
    FuncEntry(thr, cpc); \
    (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
    FuncExit(thr); \
  }
8655a3bb1a4SNico Weber
8665a3bb1a4SNico Weber extern "C" {
// Go entry points receive all arguments in one packed buffer `a`:
// the first word (*(aNN**)a) is the target address, and operands/results
// live at fixed byte offsets after it (8, 16, ...).
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  // Result is written to a+8; loads use acquire ordering.
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  // Value to store is read from a+8; stores use release ordering.
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}
8865a3bb1a4SNico Weber
// Go read-modify-write entry points: operand at a+8, previous value written
// back at a+16; both use acq_rel ordering.
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}
9065a3bb1a4SNico Weber
9075a3bb1a4SNico Weber SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic32_compare_exchange(ThreadState * thr,uptr cpc,uptr pc,u8 * a)9085a3bb1a4SNico Weber void __tsan_go_atomic32_compare_exchange(
9095a3bb1a4SNico Weber ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
9105a3bb1a4SNico Weber a32 cur = 0;
9115a3bb1a4SNico Weber a32 cmp = *(a32*)(a+8);
9125a3bb1a4SNico Weber ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
9135a3bb1a4SNico Weber *(bool*)(a+16) = (cur == cmp);
9145a3bb1a4SNico Weber }
9155a3bb1a4SNico Weber
9165a3bb1a4SNico Weber SANITIZER_INTERFACE_ATTRIBUTE
__tsan_go_atomic64_compare_exchange(ThreadState * thr,uptr cpc,uptr pc,u8 * a)9175a3bb1a4SNico Weber void __tsan_go_atomic64_compare_exchange(
9185a3bb1a4SNico Weber ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
9195a3bb1a4SNico Weber a64 cur = 0;
9205a3bb1a4SNico Weber a64 cmp = *(a64*)(a+8);
9215a3bb1a4SNico Weber ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
9225a3bb1a4SNico Weber *(bool*)(a+24) = (cur == cmp);
9235a3bb1a4SNico Weber }
9245a3bb1a4SNico Weber } // extern "C"
9255a3bb1a4SNico Weber #endif // #if !SANITIZER_GO
926