Home | History | Annotate | Line # | Download | only in sanitizer_common
      1  1.1       mrg //===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
      2  1.1       mrg //
      3  1.4       mrg // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
      4  1.4       mrg // See https://llvm.org/LICENSE.txt for license information.
      5  1.4       mrg // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      6  1.1       mrg //
      7  1.1       mrg //===----------------------------------------------------------------------===//
      8  1.1       mrg //
      9  1.1       mrg // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
     10  1.1       mrg //
     11  1.1       mrg //===----------------------------------------------------------------------===//
     12  1.1       mrg 
     13  1.1       mrg #ifndef SANITIZER_MUTEX_H
     14  1.1       mrg #define SANITIZER_MUTEX_H
     15  1.1       mrg 
     16  1.1       mrg #include "sanitizer_atomic.h"
     17  1.1       mrg #include "sanitizer_internal_defs.h"
     18  1.1       mrg #include "sanitizer_libc.h"
     19  1.4       mrg #include "sanitizer_thread_safety.h"
     20  1.1       mrg 
     21  1.1       mrg namespace __sanitizer {
     22  1.1       mrg 
     23  1.6       mrg class SANITIZER_MUTEX StaticSpinMutex {
     24  1.1       mrg  public:
     25  1.1       mrg   void Init() {
     26  1.1       mrg     atomic_store(&state_, 0, memory_order_relaxed);
     27  1.1       mrg   }
     28  1.1       mrg 
     29  1.6       mrg   void Lock() SANITIZER_ACQUIRE() {
     30  1.4       mrg     if (LIKELY(TryLock()))
     31  1.1       mrg       return;
     32  1.1       mrg     LockSlow();
     33  1.1       mrg   }
     34  1.1       mrg 
     35  1.6       mrg   bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
     36  1.1       mrg     return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
     37  1.1       mrg   }
     38  1.1       mrg 
     39  1.6       mrg   void Unlock() SANITIZER_RELEASE() {
     40  1.6       mrg     atomic_store(&state_, 0, memory_order_release);
     41  1.6       mrg   }
     42  1.1       mrg 
     43  1.6       mrg   void CheckLocked() const SANITIZER_CHECK_LOCKED() {
     44  1.5  riastrad     // __sync_lock_test_and_set (as used under the hood of the bespoke
     45  1.5  riastrad     // atomic_exchange here) does not always store the value we asked
     46  1.5  riastrad     // to store -- it just stores some nonzero value.  On sparc, this
     47  1.5  riastrad     // is 0xff.  On vax, this is whatever was there before but with the
     48  1.5  riastrad     // low-order bit set.  So test for a nonzero value, rather than for
     49  1.5  riastrad     // the specific value 1.
     50  1.5  riastrad     CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
     51  1.2       mrg   }
     52  1.2       mrg 
     53  1.1       mrg  private:
     54  1.1       mrg   atomic_uint8_t state_;
     55  1.1       mrg 
     56  1.4       mrg   void LockSlow();
     57  1.1       mrg };
     58  1.1       mrg 
     59  1.6       mrg class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
     60  1.1       mrg  public:
     61  1.1       mrg   SpinMutex() {
     62  1.1       mrg     Init();
     63  1.1       mrg   }
     64  1.1       mrg 
     65  1.4       mrg   SpinMutex(const SpinMutex &) = delete;
     66  1.4       mrg   void operator=(const SpinMutex &) = delete;
     67  1.1       mrg };
     68  1.1       mrg 
// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  // Parks the calling thread until a matching Post.
  // NOTE(review): implementations are per-platform and not visible in this
  // header — confirm exact counting semantics against them.
  void Wait();
  // Unparks waiters; `count` presumably bounds how many are woken — confirm
  // against the per-platform implementation.
  void Post(u32 count = 1);

 private:
  // Counter manipulated by the platform-specific Wait/Post implementations.
  atomic_uint32_t state_ = {0};
};
     84  1.4       mrg 
// Identifies a mutex for deadlock-order checking (see MutexMeta below).
typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutex types must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
    103  1.4       mrg 
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables they are not usable early on during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)

#if SANITIZER_CHECK_DEADLOCKS
// Per-mutex-type metadata describing the permitted lock acquisition order.
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table fixes what mutexes can be locked under what mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
    123  1.1       mrg 
// Deadlock-checking layer for mutexes: records lock/unlock events when
// SANITIZER_CHECK_DEADLOCKS is enabled (presumably validating order against
// MutexMeta::can_lock — confirm in the out-of-line implementation), and
// compiles away to nothing otherwise.
class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  // Records acquisition of this mutex; no-op unless checking is enabled.
  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  // Records release of this mutex; no-op unless checking is enabled.
  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  // Out-of-line implementations; defined elsewhere in the runtime.
  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
    162  1.4       mrg 
// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  // Acquires the mutex in exclusive (writer) mode. Spins for up to
  // kMaxSpinIters CAS attempts, then registers as a waiting writer and
  // parks on writers_ until woken by an unlocking thread.
  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  // Single attempt to acquire in exclusive mode; never blocks.
  // Returns true (and records the acquisition for deadlock checking)
  // on success; the CAS loop only retries on spurious CAS failure.
  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    u64 state = atomic_load_relaxed(&state_);
    for (;;) {
      if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
        return false;
      // The mutex is not read-/write-locked, try to lock.
      if (LIKELY(atomic_compare_exchange_weak(
              &state_, &state, state | kWriterLock, memory_order_acquire))) {
        CheckedMutex::Lock();
        return true;
      }
    }
  }

  // Releases exclusive ownership. Wakes at most one blocked writer, or —
  // if no writer should be woken — all currently blocked readers.
  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      // Only wake a waiting writer if nobody is already awake and
      // spin-waiting: that thread will take the lock by itself.
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      // Number of readers to wake (0 if a writer was woken or is spinning).
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  // Acquires the mutex in shared (reader) mode. Mirrors Lock(): spin for up
  // to kMaxSpinIters CAS attempts, then register as a waiting reader and
  // park on readers_. Note that readers always join other active readers
  // immediately — only a held writer lock forces waiting.
  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      // Reset kReaderSpinWait on the next successful state transition
      // (same protocol as kWriterSpinWait in Lock() above).
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  // Releases shared ownership. The last reader out wakes one waiting
  // writer, provided no other thread is already awake and spin-waiting.
  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned.
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  // Debug check that at least one reader currently holds the mutex.
  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if not zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non zero, the mutex is read- or write-locked
  // And 2 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake up both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in presence
  // of other blocked threads.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};
    381  1.2       mrg 
// Low-level wait/wake primitives, presumably futex-style (Wait blocks only
// while *p == cmp; Wake unblocks up to `count` waiters) — confirm against
// the per-platform implementations, which are not visible in this header.
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
    384  1.2       mrg 
    385  1.4       mrg template <typename MutexType>
    386  1.6       mrg class SANITIZER_SCOPED_LOCK GenericScopedLock {
    387  1.4       mrg  public:
    388  1.6       mrg   explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
    389  1.4       mrg     mu_->Lock();
    390  1.2       mrg   }
    391  1.2       mrg 
    392  1.6       mrg   ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
    393  1.4       mrg 
    394  1.4       mrg  private:
    395  1.4       mrg   MutexType *mu_;
    396  1.2       mrg 
    397  1.4       mrg   GenericScopedLock(const GenericScopedLock &) = delete;
    398  1.4       mrg   void operator=(const GenericScopedLock &) = delete;
    399  1.2       mrg };
    400  1.2       mrg 
    401  1.4       mrg template <typename MutexType>
    402  1.6       mrg class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
    403  1.1       mrg  public:
    404  1.6       mrg   explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
    405  1.6       mrg       : mu_(mu) {
    406  1.4       mrg     mu_->ReadLock();
    407  1.1       mrg   }
    408  1.1       mrg 
    409  1.6       mrg   ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
    410  1.1       mrg 
    411  1.1       mrg  private:
    412  1.1       mrg   MutexType *mu_;
    413  1.1       mrg 
    414  1.4       mrg   GenericScopedReadLock(const GenericScopedReadLock &) = delete;
    415  1.4       mrg   void operator=(const GenericScopedReadLock &) = delete;
    416  1.1       mrg };
    417  1.1       mrg 
    418  1.4       mrg template <typename MutexType>
    419  1.6       mrg class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
    420  1.1       mrg  public:
    421  1.4       mrg   ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
    422  1.6       mrg       SANITIZER_ACQUIRE(mu)
    423  1.4       mrg       : mu_(mu), write_(write) {
    424  1.4       mrg     if (write_)
    425  1.4       mrg       mu_->Lock();
    426  1.4       mrg     else
    427  1.4       mrg       mu_->ReadLock();
    428  1.1       mrg   }
    429  1.1       mrg 
    430  1.6       mrg   ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
    431  1.4       mrg     if (write_)
    432  1.4       mrg       mu_->Unlock();
    433  1.4       mrg     else
    434  1.4       mrg       mu_->ReadUnlock();
    435  1.1       mrg   }
    436  1.1       mrg 
    437  1.1       mrg  private:
    438  1.1       mrg   MutexType *mu_;
    439  1.4       mrg   bool write_;
    440  1.1       mrg 
    441  1.4       mrg   GenericScopedRWLock(const GenericScopedRWLock &) = delete;
    442  1.4       mrg   void operator=(const GenericScopedRWLock &) = delete;
    443  1.1       mrg };
    444  1.1       mrg 
// Convenience aliases for the common guard/mutex combinations.
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
    449  1.1       mrg 
    450  1.1       mrg }  // namespace __sanitizer
    451  1.1       mrg 
    452  1.1       mrg #endif  // SANITIZER_MUTEX_H
    453