//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
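//
// When a program is built with -fsanitize=thread, the compiler lowers atomic
// operations to calls into the __tsan_atomic* entry points defined below.
// Illustrative mapping only (the exact lowering is up to the compiler):
//   std::atomic<int> x;
//   x.store(1, std::memory_order_release);
//     // -> __tsan_atomic32_store(..., mo_release)
//   int v = x.load(std::memory_order_acquire);
//     // -> __tsan_atomic32_load(..., mo_acquire)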

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
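  // Emulate it with a compare-and-swap retry loop; like the other fetch
  // helpers, this returns the value held before the nand is applied.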
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under a tsan-internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}
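
// Note: for example, AccessSize<a128>() returns 8, so a 16-byte atomic is
// reported to the race detector as an 8-byte access at the start of the
// object, as the comment above describes.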

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  DCHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
  if (s) {
    SlotLocker locker(thr);
    ReadLock lock(&s->mtx);
    thr->clock.Acquire(s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
  }
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
  return v;
}
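
// Sketch of the synchronization this models (explanatory note): if one thread
// performs AtomicStore(&x, 1, mo_release) and another thread observes that
// value via AtomicLoad(&x, mo_acquire), the load's clock.Acquire(s->clock)
// imports the storing thread's vector clock, so the writer's preceding plain
// memory accesses are ordered before the reader's subsequent ones.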

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  DCHECK(IsStoreOrder(mo));
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    Lock lock(&s->mtx);
    thr->clock.ReleaseStore(&s->clock);
    NoTsanAtomicStore(a, v, mo);
  }
  IncrementEpoch(thr);
}
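
// Note (explanatory): the release path above publishes the current thread's
// clock into the SyncVar for this address (ReleaseStore) before performing the
// store itself, so a later acquire-load that sees the stored value picks the
// clock up; the non-release fast path skips the SyncVar entirely.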

template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SlotLocker locker(thr);
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, IsReleaseOrder(mo));
    if (IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
    v = F(a, v);
  }
  if (IsReleaseOrder(mo))
    IncrementEpoch(thr);
  return v;
}
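
// Note (explanatory): the AtomicExchange/AtomicFetch* wrappers below all route
// through AtomicRMW with the matching func_* helper, so the memory-order
// handling above (Release/Acquire/ReleaseAcquire on the SyncVar clock) is
// shared by every read-modify-write operation.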

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
  // (mo_relaxed) when those are used.
  DCHECK(IsLoadOrder(fmo));

  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
    T cc = *c;
    T pr = func_cas(a, cc, v);
    if (pr == cc)
      return true;
    *c = pr;
    return false;
  }
  SlotLocker locker(thr);
  bool release = IsReleaseOrder(mo);
  bool success;
  {
    auto s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
    RWLock lock(&s->mtx, release);
    T cc = *c;
    T pr = func_cas(a, cc, v);
    success = pr == cc;
    if (!success) {
      *c = pr;
      mo = fmo;
    }
    if (success && IsAcqRelOrder(mo))
      thr->clock.ReleaseAcquire(&s->clock);
    else if (success && IsReleaseOrder(mo))
      thr->clock.Release(&s->clock);
    else if (IsAcquireOrder(mo))
      thr->clock.Acquire(s->clock);
  }
  if (success && release)
    IncrementEpoch(thr);
  return success;
}
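
// Note (explanatory): on failure the comparison order fmo takes over (mo = fmo
// above), and since fmo is restricted to load orders, a failed CAS can at most
// acquire; the release branches and the epoch increment require success.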

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore it
  // as well. It also subtly affects semantics, but we don't model the
  // difference.
  return (morder)(mo & 0x7fff);
}
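
// Illustrative example (flag values taken from the comment above): an HLE call
// such as __atomic_exchange_n(p, 1, __ATOMIC_ACQUIRE | __ATOMIC_HLE_ACQUIRE)
// arrives here with bit 16 set; masking with 0x7fff drops the HLE bit and
// leaves plain mo_acquire.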

#  define ATOMIC_IMPL(func, ...)                                \
    ThreadState *const thr = cur_thread();                      \
    ProcessPendingSignals(thr);                                 \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
      return NoTsanAtomic##func(__VA_ARGS__);                   \
    mo = convert_morder(mo);                                    \
    return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
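
// For reference, ATOMIC_IMPL(Load, a, mo) in __tsan_atomic32_load below
// expands roughly to:
//   ThreadState *const thr = cur_thread();
//   ProcessPendingSignals(thr);
//   if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors))
//     return NoTsanAtomicLoad(a, mo);
//   mo = convert_morder(mo);
//   return AtomicLoad(thr, GET_CALLER_PC(), a, mo);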

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

#  define ATOMIC(func, ...)               \
    if (thr->ignore_sync) {               \
      NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                              \
      FuncEntry(thr, cpc);                \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                      \
    }

#  define ATOMIC_RET(func, ret, ...)              \
    if (thr->ignore_sync) {                       \
      (ret) = NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                                      \
      FuncEntry(thr, cpc);                        \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                              \
    }
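
// Note (explanatory): the Go runtime passes arguments and results packed into
// the single buffer `a`; each wrapper below decodes the offsets it expects.
// For example, __tsan_go_atomic32_fetch_add reads the target address at a+0
// and the addend at a+8, and stores the returned old value at a+16.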

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO