//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {

class MUTEX StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() ACQUIRE() {
    if (LIKELY(TryLock()))
      return;
    LockSlow();
  }

  bool TryLock() TRY_ACQUIRE(true) {
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }

  void CheckLocked() const CHECK_LOCKED() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;

  void LockSlow();
};
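
// Illustrative sketch (not part of the original header): because
// StaticSpinMutex has no constructor, a zero-initialized global instance is
// usable before static constructors run. The guarded counter and function
// below are hypothetical.
//
//   static StaticSpinMutex g_init_mu;  // zero-initialized, safe pre-main
//   static int g_init_count;
//
//   void RegisterInit() {
//     g_init_mu.Lock();    // falls back to LockSlow() if contended
//     g_init_count++;
//     g_init_mu.Unlock();  // plain release store
//   }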

class MUTEX SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

  SpinMutex(const SpinMutex &) = delete;
  void operator=(const SpinMutex &) = delete;
};

// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  void Wait();
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
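
// Illustrative sketch (not part of the original header): Semaphore behaves as
// a counting semaphore; Wait() blocks until a count is available and Post(n)
// makes n counts available, waking up to n parked threads.
//
//   Semaphore sem;
//   // Waiter threads:               // Waker thread:
//   //   sem.Wait();  // may park    //   sem.Post(2);  // release two waiters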

typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
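
// Illustrative sketch (not part of the original header): a tool would define
// its own mutex types starting at MutexLastCommon; the enumerator names below
// are hypothetical.
//
//   enum {
//     MutexTypeTrace = MutexLastCommon,
//     MutexTypeReport,
//     MutexTypeCount,  // number of mutex types the tool defines
//   };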

// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables, they are not usable early during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_MAC)

#if SANITIZER_CHECK_DEADLOCKS
struct MutexMeta {
  MutexType type;
  const char *name;
  // The table specifies which mutexes can be locked under which mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
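
// Illustrative sketch (not part of the original header): a tool's
// deadlock-detection table would contain one MutexMeta row per mutex type;
// the concrete rows below are hypothetical.
//
//   static const MutexMeta mutex_meta[] = {
//       {MutexInvalid, "Invalid", {}},
//       {MutexThreadRegistry, "ThreadRegistry", {MutexLeaf}},
//   };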

class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
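
// Illustrative sketch (not part of the original header): a runtime would call
// CheckNoLocks at the boundary back to user code; the function below is
// hypothetical.
//
//   void SomeInterceptorEpilogue() {
//     // ... runtime work that takes and releases internal mutexes ...
//     CheckedMutex::CheckNoLocks();  // no-op unless SANITIZER_CHECK_DEADLOCKS
//   }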

// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  void Lock() ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  void Unlock() RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  void ReadLock() ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  void ReadUnlock() RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not verify that the calling thread actually owns the
  // mutex. Checking ownership, while more strictly correct, causes problems
  // in cases like StopTheWorld, where a parent thread owns the mutex but a
  // child checks that it is locked. Rather than maintaining complex state to
  // work around those situations, we only check that the mutex is held by
  // some thread.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  //  - number of readers holding the lock,
  //    if non-zero, the mutex is read-locked
  //  - number of waiting readers,
  //    if non-zero, the mutex is write-locked
  //  - number of waiting writers,
  //    if non-zero, the mutex is read- or write-locked
  // And 3 flags:
  //  - writer lock
  //    if set, the mutex is write-locked
  //  - a writer is awake and spin-waiting
  //    the flag is used to prevent the thundering herd problem
  //    (new writers are not woken if this flag is set)
  //  - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake-up, both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in the presence
  // of other blocked threads.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);
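
  // Illustrative layout note (not in the original header): with
  // kCounterWidth == 20, the constants above pack state_ into one u64 as:
  //
  //   bits  0..19  reader lock count   (kReaderLockMask)
  //   bits 20..39  waiting readers     (kWaitingReaderMask)
  //   bits 40..59  waiting writers     (kWaitingWriterMask)
  //   bit  60      kWriterLock
  //   bit  61      kWriterSpinWait
  //   bit  62      kReaderSpinWait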

  static constexpr uptr kMaxSpinIters = 1500;

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};

void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
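
// Illustrative sketch (not in this header; the real implementation lives in
// sanitizer_mutex.cpp and may differ): Semaphore::Wait can be built on the
// futex primitives above by sleeping while the count is zero and then
// claiming a count with a CAS.
//
//   void Semaphore::Wait() {
//     u32 count = atomic_load(&state_, memory_order_relaxed);
//     for (;;) {
//       if (count == 0) {
//         FutexWait(&state_, 0);  // sleep until state_ != 0
//         count = atomic_load(&state_, memory_order_relaxed);
//         continue;
//       }
//       // Claim one count; on failure the CAS reloads `count` for us.
//       if (atomic_compare_exchange_weak(&state_, &count, count - 1,
//                                        memory_order_acquire))
//         break;
//     }
//   }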

template <typename MutexType>
class SCOPED_LOCK GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() RELEASE() { mu_->Unlock(); }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock &) = delete;
  void operator=(const GenericScopedLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock &) = delete;
  void operator=(const GenericScopedReadLock &) = delete;
};

template <typename MutexType>
class SCOPED_LOCK GenericScopedRWLock {
 public:
  ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
      ACQUIRE(mu)
      : mu_(mu), write_(write) {
    if (write_)
      mu_->Lock();
    else
      mu_->ReadLock();
  }

  ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
    if (write_)
      mu_->Unlock();
    else
      mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;
  bool write_;

  GenericScopedRWLock(const GenericScopedRWLock &) = delete;
  void operator=(const GenericScopedRWLock &) = delete;
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
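
// Illustrative sketch (not part of the original header): typical RAII usage
// of the typedefs above; the mutex and function names are hypothetical.
//
//   Mutex registry_mu(MutexThreadRegistry);
//
//   void AddThread() {
//     Lock l(&registry_mu);      // write lock, released at end of scope
//     // ... mutate the registry ...
//   }
//
//   uptr CountThreads() {
//     ReadLock l(&registry_mu);  // shared lock for read-only access
//     // ... read the registry ...
//     return 0;
//   }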

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H