//===-- tsan_interface_atomic.cpp -----------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

// ThreadSanitizer atomic operations are based on the C++11/C11 standards.
// For background see the C++11 standard.  A slightly older, publicly
// available draft of the standard (not entirely up-to-date, but close enough
// for casual browsing) is available here:
// http://www.open-std.org/jtc1/sc22/wg21/docs/papers/2011/n3242.pdf
// The following page contains more background information:
// http://www.hpl.hp.com/personal/Hans_Boehm/c++mm/
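//
// The compiler instruments each atomic operation with a call into this
// runtime. As a sketch (with a hypothetical atomic variable x), a load such
// as
//   int v = atomic_load_explicit(&x, memory_order_acquire);
// is lowered to a call to the corresponding interface function defined
// below:
//   a32 v = __tsan_atomic32_load(&x, mo_acquire);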

#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "tsan_flags.h"
#include "tsan_interface.h"
#include "tsan_rtl.h"

using namespace __tsan;

#if !SANITIZER_GO && __TSAN_HAS_INT128
// Protects emulation of 128-bit atomic operations.
static StaticSpinMutex mutex128;
#endif

#if SANITIZER_DEBUG
static bool IsLoadOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_consume
      || mo == mo_acquire || mo == mo_seq_cst;
}

static bool IsStoreOrder(morder mo) {
  return mo == mo_relaxed || mo == mo_release || mo == mo_seq_cst;
}
#endif

static bool IsReleaseOrder(morder mo) {
  return mo == mo_release || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcquireOrder(morder mo) {
  return mo == mo_consume || mo == mo_acquire
      || mo == mo_acq_rel || mo == mo_seq_cst;
}

static bool IsAcqRelOrder(morder mo) {
  return mo == mo_acq_rel || mo == mo_seq_cst;
}

template<typename T> T func_xchg(volatile T *v, T op) {
  T res = __sync_lock_test_and_set(v, op);
  // __sync_lock_test_and_set does not contain a full barrier.
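  // Per the GCC builtin documentation it is only an acquire barrier, so the
  // explicit full barrier below is needed to give the exchange seq_cst
  // semantics.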
  __sync_synchronize();
  return res;
}

template<typename T> T func_add(volatile T *v, T op) {
  return __sync_fetch_and_add(v, op);
}

template<typename T> T func_sub(volatile T *v, T op) {
  return __sync_fetch_and_sub(v, op);
}

template<typename T> T func_and(volatile T *v, T op) {
  return __sync_fetch_and_and(v, op);
}

template<typename T> T func_or(volatile T *v, T op) {
  return __sync_fetch_and_or(v, op);
}

template<typename T> T func_xor(volatile T *v, T op) {
  return __sync_fetch_and_xor(v, op);
}

template<typename T> T func_nand(volatile T *v, T op) {
  // clang does not support __sync_fetch_and_nand.
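  // Emulate it with a CAS loop: recompute ~(cmp & op) from the most recently
  // observed value until the compare-and-swap succeeds.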
  T cmp = *v;
  for (;;) {
    T newv = ~(cmp & op);
    T cur = __sync_val_compare_and_swap(v, cmp, newv);
    if (cmp == cur)
      return cmp;
    cmp = cur;
  }
}

template<typename T> T func_cas(volatile T *v, T cmp, T xch) {
  return __sync_val_compare_and_swap(v, cmp, xch);
}

// clang does not support 128-bit atomic ops.
// Atomic ops are executed under a tsan-internal mutex;
// here we assume that the atomic variables are not accessed
// from non-instrumented code.
#if !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_16) && !SANITIZER_GO \
    && __TSAN_HAS_INT128
a128 func_xchg(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = op;
  return cmp;
}

a128 func_add(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp + op;
  return cmp;
}

a128 func_sub(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp - op;
  return cmp;
}

a128 func_and(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp & op;
  return cmp;
}

a128 func_or(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp | op;
  return cmp;
}

a128 func_xor(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = cmp ^ op;
  return cmp;
}

a128 func_nand(volatile a128 *v, a128 op) {
  SpinMutexLock lock(&mutex128);
  a128 cmp = *v;
  *v = ~(cmp & op);
  return cmp;
}

a128 func_cas(volatile a128 *v, a128 cmp, a128 xch) {
  SpinMutexLock lock(&mutex128);
  a128 cur = *v;
  if (cur == cmp)
    *v = xch;
  return cur;
}
#endif

template <typename T>
static int AccessSize() {
  if (sizeof(T) <= 1)
    return 1;
  else if (sizeof(T) <= 2)
    return 2;
  else if (sizeof(T) <= 4)
    return 4;
  else
    return 8;
  // For 16-byte atomics we also use 8-byte memory access,
  // which leads to false negatives only in very obscure cases.
}

#if !SANITIZER_GO
static atomic_uint8_t *to_atomic(const volatile a8 *a) {
  return reinterpret_cast<atomic_uint8_t *>(const_cast<a8 *>(a));
}

static atomic_uint16_t *to_atomic(const volatile a16 *a) {
  return reinterpret_cast<atomic_uint16_t *>(const_cast<a16 *>(a));
}
#endif

static atomic_uint32_t *to_atomic(const volatile a32 *a) {
  return reinterpret_cast<atomic_uint32_t *>(const_cast<a32 *>(a));
}

static atomic_uint64_t *to_atomic(const volatile a64 *a) {
  return reinterpret_cast<atomic_uint64_t *>(const_cast<a64 *>(a));
}

static memory_order to_mo(morder mo) {
  switch (mo) {
  case mo_relaxed: return memory_order_relaxed;
  case mo_consume: return memory_order_consume;
  case mo_acquire: return memory_order_acquire;
  case mo_release: return memory_order_release;
  case mo_acq_rel: return memory_order_acq_rel;
  case mo_seq_cst: return memory_order_seq_cst;
  }
  DCHECK(0);
  return memory_order_seq_cst;
}

template<typename T>
static T NoTsanAtomicLoad(const volatile T *a, morder mo) {
  return atomic_load(to_atomic(a), to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static a128 NoTsanAtomicLoad(const volatile a128 *a, morder mo) {
  SpinMutexLock lock(&mutex128);
  return *a;
}
#endif

template <typename T>
static T AtomicLoad(ThreadState *thr, uptr pc, const volatile T *a, morder mo) {
  DCHECK(IsLoadOrder(mo));
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  if (!IsAcquireOrder(mo)) {
    MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(),
                 kAccessRead | kAccessAtomic);
    return NoTsanAtomicLoad(a, mo);
  }
  // Don't create sync object if it does not exist yet. For example, an atomic
  // pointer is initialized to nullptr and then periodically acquire-loaded.
  T v = NoTsanAtomicLoad(a, mo);
  SyncVar *s = ctx->metamap.GetSyncIfExists((uptr)a);
  if (s) {
    ReadLock l(&s->mtx);
    AcquireImpl(thr, pc, &s->clock);
    // Re-read under sync mutex because we need a consistent snapshot
    // of the value and the clock we acquire.
    v = NoTsanAtomicLoad(a, mo);
  }
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessRead | kAccessAtomic);
  return v;
}

template<typename T>
static void NoTsanAtomicStore(volatile T *a, T v, morder mo) {
  atomic_store(to_atomic(a), v, to_mo(mo));
}

#if __TSAN_HAS_INT128 && !SANITIZER_GO
static void NoTsanAtomicStore(volatile a128 *a, a128 v, morder mo) {
  SpinMutexLock lock(&mutex128);
  *a = v;
}
#endif

template <typename T>
static void AtomicStore(ThreadState *thr, uptr pc, volatile T *a, T v,
                        morder mo) {
  DCHECK(IsStoreOrder(mo));
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  // This fast-path is critical for performance.
  // Assume the access is atomic.
  // Strictly speaking, even a relaxed store cuts off the release sequence,
  // so we must reset the clock.
  if (!IsReleaseOrder(mo)) {
    NoTsanAtomicStore(a, v, mo);
    return;
  }
  __sync_synchronize();
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
  Lock l(&s->mtx);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  NoTsanAtomicStore(a, v, mo);
}

template <typename T, T (*F)(volatile T *v, T op)>
static T AtomicRMW(ThreadState *thr, uptr pc, volatile T *a, T v, morder mo) {
  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
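  // Fast path: a relaxed RMW synchronizes with nothing, so no clock work is
  // needed beyond the memory access recorded above.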
  if (LIKELY(mo == mo_relaxed))
    return F(a, v);
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
  Lock l(&s->mtx);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  if (IsAcqRelOrder(mo))
    AcquireReleaseImpl(thr, pc, &s->clock);
  else if (IsReleaseOrder(mo))
    ReleaseImpl(thr, pc, &s->clock);
  else if (IsAcquireOrder(mo))
    AcquireImpl(thr, pc, &s->clock);
  return F(a, v);
}

template<typename T>
static T NoTsanAtomicExchange(volatile T *a, T v, morder mo) {
  return func_xchg(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAdd(volatile T *a, T v, morder mo) {
  return func_add(a, v);
}

template<typename T>
static T NoTsanAtomicFetchSub(volatile T *a, T v, morder mo) {
  return func_sub(a, v);
}

template<typename T>
static T NoTsanAtomicFetchAnd(volatile T *a, T v, morder mo) {
  return func_and(a, v);
}

template<typename T>
static T NoTsanAtomicFetchOr(volatile T *a, T v, morder mo) {
  return func_or(a, v);
}

template<typename T>
static T NoTsanAtomicFetchXor(volatile T *a, T v, morder mo) {
  return func_xor(a, v);
}

template<typename T>
static T NoTsanAtomicFetchNand(volatile T *a, T v, morder mo) {
  return func_nand(a, v);
}

template<typename T>
static T AtomicExchange(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xchg>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAdd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_add>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchSub(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_sub>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchAnd(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_and>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchOr(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_or>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchXor(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_xor>(thr, pc, a, v, mo);
}

template<typename T>
static T AtomicFetchNand(ThreadState *thr, uptr pc, volatile T *a, T v,
    morder mo) {
  return AtomicRMW<T, func_nand>(thr, pc, a, v, mo);
}

template<typename T>
static bool NoTsanAtomicCAS(volatile T *a, T *c, T v, morder mo, morder fmo) {
  return atomic_compare_exchange_strong(to_atomic(a), c, v, to_mo(mo));
}

#if __TSAN_HAS_INT128
static bool NoTsanAtomicCAS(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  a128 old = *c;
  a128 cur = func_cas(a, old, v);
  if (cur == old)
    return true;
  *c = cur;
  return false;
}
#endif

template<typename T>
static T NoTsanAtomicCAS(volatile T *a, T c, T v, morder mo, morder fmo) {
  NoTsanAtomicCAS(a, &c, v, mo, fmo);
  return c;
}

template <typename T>
static bool AtomicCAS(ThreadState *thr, uptr pc, volatile T *a, T *c, T v,
                      morder mo, morder fmo) {
  // 31.7.2.18: "The failure argument shall not be memory_order_release
  // nor memory_order_acq_rel". LLVM (2021-05) falls back to Monotonic
  // (mo_relaxed) when those are used.
  DCHECK(IsLoadOrder(fmo));

  MemoryAccess(thr, pc, (uptr)a, AccessSize<T>(), kAccessWrite | kAccessAtomic);
  if (LIKELY(mo == mo_relaxed && fmo == mo_relaxed)) {
    T cc = *c;
    T pr = func_cas(a, cc, v);
    if (pr == cc)
      return true;
    *c = pr;
    return false;
  }

  bool release = IsReleaseOrder(mo);
  SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, (uptr)a, false);
  RWLock l(&s->mtx, release);
  T cc = *c;
  T pr = func_cas(a, cc, v);
  bool success = pr == cc;
  if (!success) {
    *c = pr;
    mo = fmo;
  }
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);

  if (success && IsAcqRelOrder(mo))
    AcquireReleaseImpl(thr, pc, &s->clock);
  else if (success && IsReleaseOrder(mo))
    ReleaseImpl(thr, pc, &s->clock);
  else if (IsAcquireOrder(mo))
    AcquireImpl(thr, pc, &s->clock);
  return success;
}

template<typename T>
static T AtomicCAS(ThreadState *thr, uptr pc,
    volatile T *a, T c, T v, morder mo, morder fmo) {
  AtomicCAS(thr, pc, a, &c, v, mo, fmo);
  return c;
}

#if !SANITIZER_GO
static void NoTsanAtomicFence(morder mo) {
  __sync_synchronize();
}

static void AtomicFence(ThreadState *thr, uptr pc, morder mo) {
  // FIXME(dvyukov): not implemented.
  __sync_synchronize();
}
#endif

// Interface functions follow.
#if !SANITIZER_GO

// C/C++

static morder convert_morder(morder mo) {
  if (flags()->force_seq_cst_atomics)
    return (morder)mo_seq_cst;

  // Filter out additional memory order flags:
  // MEMMODEL_SYNC        = 1 << 15
  // __ATOMIC_HLE_ACQUIRE = 1 << 16
  // __ATOMIC_HLE_RELEASE = 1 << 17
  //
  // HLE is an optimization, and we pretend that elision always fails.
  // MEMMODEL_SYNC is used when lowering __sync_ atomics; since we use
  // __sync_ atomics for the actual atomic operations, we can safely ignore
  // it as well. It subtly affects semantics, but we don't model the
  // difference.
  return (morder)(mo & 0x7fff);
}
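
// As a sketch of the masking above: a caller that passes
// __ATOMIC_HLE_ACQUIRE | __ATOMIC_ACQUIRE, i.e. (1 << 16) | 2,
// is mapped by the mask to plain mo_acquire (2).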

#  define ATOMIC_IMPL(func, ...)                                \
    ThreadState *const thr = cur_thread();                      \
    ProcessPendingSignals(thr);                                 \
    if (UNLIKELY(thr->ignore_sync || thr->ignore_interceptors)) \
      return NoTsanAtomic##func(__VA_ARGS__);                   \
    mo = convert_morder(mo);                                    \
    return Atomic##func(thr, GET_CALLER_PC(), __VA_ARGS__);
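
// A rough sketch of what the macro does inside, e.g.,
// __tsan_atomic32_load(a, mo): fetch the current ThreadState, handle any
// pending signals, take the uninstrumented NoTsanAtomicLoad(a, mo) path when
// synchronization is ignored, and otherwise normalize mo and dispatch to
// AtomicLoad(thr, GET_CALLER_PC(), a, mo).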

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_load(const volatile a8 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_load(const volatile a16 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_load(const volatile a32 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_load(const volatile a64 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_load(const volatile a128 *a, morder mo) {
  ATOMIC_IMPL(Load, a, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic8_store(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic16_store(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic32_store(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic64_store(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic128_store(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Store, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_exchange(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_exchange(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_exchange(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_exchange(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_exchange(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(Exchange, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_add(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_add(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_add(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_add(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_add(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAdd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_sub(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_sub(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_sub(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_sub(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_sub(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchSub, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_and(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_and(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_and(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_and(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_and(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchAnd, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_or(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_or(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_or(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_or(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_or(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchOr, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_xor(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_xor(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_xor(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_xor(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_xor(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchXor, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_fetch_nand(volatile a8 *a, a8 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_fetch_nand(volatile a16 *a, a16 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_fetch_nand(volatile a32 *a, a32 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_fetch_nand(volatile a64 *a, a64 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_fetch_nand(volatile a128 *a, a128 v, morder mo) {
  ATOMIC_IMPL(FetchNand, a, v, mo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_strong(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_strong(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_strong(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_strong(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_strong(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic8_compare_exchange_weak(volatile a8 *a, a8 *c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic16_compare_exchange_weak(volatile a16 *a, a16 *c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic32_compare_exchange_weak(volatile a32 *a, a32 *c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic64_compare_exchange_weak(volatile a64 *a, a64 *c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
int __tsan_atomic128_compare_exchange_weak(volatile a128 *a, a128 *c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
a8 __tsan_atomic8_compare_exchange_val(volatile a8 *a, a8 c, a8 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a16 __tsan_atomic16_compare_exchange_val(volatile a16 *a, a16 c, a16 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a32 __tsan_atomic32_compare_exchange_val(volatile a32 *a, a32 c, a32 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

SANITIZER_INTERFACE_ATTRIBUTE
a64 __tsan_atomic64_compare_exchange_val(volatile a64 *a, a64 c, a64 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}

#if __TSAN_HAS_INT128
SANITIZER_INTERFACE_ATTRIBUTE
a128 __tsan_atomic128_compare_exchange_val(volatile a128 *a, a128 c, a128 v,
    morder mo, morder fmo) {
  ATOMIC_IMPL(CAS, a, c, v, mo, fmo);
}
#endif

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_thread_fence(morder mo) { ATOMIC_IMPL(Fence, mo); }

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_atomic_signal_fence(morder mo) {
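  // A signal fence constrains only the compiler, not the hardware, and the
  // call into the runtime itself already acts as a compiler barrier, so
  // (under that assumption) there is nothing to do here.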
}
}  // extern "C"

#else  // #if !SANITIZER_GO

// Go

#  define ATOMIC(func, ...)               \
    if (thr->ignore_sync) {               \
      NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                              \
      FuncEntry(thr, cpc);                \
      Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                      \
    }

#  define ATOMIC_RET(func, ret, ...)              \
    if (thr->ignore_sync) {                       \
      (ret) = NoTsanAtomic##func(__VA_ARGS__);    \
    } else {                                      \
      FuncEntry(thr, cpc);                        \
      (ret) = Atomic##func(thr, pc, __VA_ARGS__); \
      FuncExit(thr);                              \
    }

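// The Go runtime marshals arguments and results through a single byte
// buffer: a+0 holds the address of the atomic variable, and the following
// slots hold the operands and result (see the casts in the accessors below).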
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a32*)(a+8), *(a32**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_load(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Load, *(a64*)(a+8), *(a64**)a, mo_acquire);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a32**)a, *(a32*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_store(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC(Store, *(a64**)a, *(a64*)(a+8), mo_release);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_fetch_add(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(FetchAdd, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a32*)(a+16), *(a32**)a, *(a32*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_exchange(ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  ATOMIC_RET(Exchange, *(a64*)(a+16), *(a64**)a, *(a64*)(a+8), mo_acq_rel);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic32_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a32 cur = 0;
  a32 cmp = *(a32*)(a+8);
  ATOMIC_RET(CAS, cur, *(a32**)a, cmp, *(a32*)(a+12), mo_acq_rel, mo_acquire);
  *(bool*)(a+16) = (cur == cmp);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __tsan_go_atomic64_compare_exchange(
    ThreadState *thr, uptr cpc, uptr pc, u8 *a) {
  a64 cur = 0;
  a64 cmp = *(a64*)(a+8);
  ATOMIC_RET(CAS, cur, *(a64**)a, cmp, *(a64*)(a+16), mo_acq_rel, mo_acquire);
  *(bool*)(a+24) = (cur == cmp);
}
}  // extern "C"
#endif  // #if !SANITIZER_GO