//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"

namespace __sanitizer {

// Spin mutex suited to objects with static storage duration: it declares no
// constructor, so all-zero static storage is already a valid unlocked mutex.
class StaticSpinMutex {
 public:
  void Init() {
    atomic_store(&state_, 0, memory_order_relaxed);
  }

  void Lock() {
    if (TryLock())
      return;
    LockSlow();
  }

  bool TryLock() {
    // The exchange returns the previous value: 0 means we took the lock.
    return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
  }

  void Unlock() {
    atomic_store(&state_, 0, memory_order_release);
  }

  void CheckLocked() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), 1);
  }

 private:
  atomic_uint8_t state_;  // 0 - unlocked, 1 - locked.

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      // Back off: spin with processor yields first, then fall back to
      // OS-level rescheduling.
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      // Test with a cheap relaxed load before attempting the exchange, to
      // avoid pulling the cache line into exclusive mode on every iteration.
      if (atomic_load(&state_, memory_order_relaxed) == 0
          && atomic_exchange(&state_, 1, memory_order_acquire) == 0)
        return;
    }
  }
};
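
// Usage sketch (names here are illustrative, not part of the API): because
// StaticSpinMutex declares no constructor, a global instance needs no runtime
// initialization; zero-initialized static storage already means "unlocked".
//
//   static StaticSpinMutex init_mu;  // in BSS: zero-initialized => unlocked
//
//   void LazyInit() {
//     init_mu.Lock();
//     // ... one-time setup ...
//     init_mu.Unlock();
//   }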

class SpinMutex : public StaticSpinMutex {
 public:
  SpinMutex() {
    Init();
  }

 private:
  // Declared but not defined: disallows copying (pre-C++11 idiom).
  SpinMutex(const SpinMutex&);
  void operator=(const SpinMutex&);
};
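
// Usage sketch (names are illustrative); SpinMutexLock is the scoped wrapper
// typedef'd at the end of this file, and accepts a SpinMutex* because
// SpinMutex derives from StaticSpinMutex:
//
//   SpinMutex stats_mu;
//   u64 stats_count;
//
//   void BumpCount() {
//     SpinMutexLock l(&stats_mu);  // locks here, unlocks at scope exit
//     stats_count++;
//   }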

// Mutex that blocks the calling thread on contention rather than spinning;
// the implementation of Lock/Unlock is platform-specific.
class BlockingMutex {
 public:
  explicit constexpr BlockingMutex(LinkerInitialized)
      : opaque_storage_ {0, }, owner_ {0} {}
  BlockingMutex();
  void Lock();
  void Unlock();

  // This function does not check that the calling thread is the one that
  // owns the mutex. An owner check, while strictly more correct, causes
  // problems in cases like StopTheWorld, where a parent thread owns the
  // mutex but a child thread checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check
  // only verifies that the mutex is held by someone, and assumes callers
  // to be generally well-behaved.
  void CheckLocked();

 private:
  // Solaris mutex_t has a member that requires 64-bit alignment.
  ALIGNED(8) uptr opaque_storage_[10];
  uptr owner_;  // for debugging
};
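
// Usage sketch (names are illustrative): the constexpr constructor lets a
// global BlockingMutex be constant-initialized, so it is usable before any
// static constructors run:
//
//   static BlockingMutex reports_mu(LINKER_INITIALIZED);
//
//   void ReportSomething() {
//     BlockingMutexLock l(&reports_mu);  // typedef'd at the end of this file
//     // ... produce the report ...
//   }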

// Reader-writer spin mutex.
class RWMutex {
 public:
  RWMutex() {
    atomic_store(&state_, kUnlocked, memory_order_relaxed);
  }

  ~RWMutex() {
    CHECK_EQ(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

  void Lock() {
    u32 cmp = kUnlocked;
    if (atomic_compare_exchange_strong(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
      return;
    LockSlow();
  }

  void Unlock() {
    u32 prev = atomic_fetch_sub(&state_, kWriteLock, memory_order_release);
    DCHECK_NE(prev & kWriteLock, 0);
    (void)prev;
  }

  void ReadLock() {
    // Register the reader unconditionally; if a writer holds the lock,
    // the slow path below waits for it to release.
    u32 prev = atomic_fetch_add(&state_, kReadLock, memory_order_acquire);
    if ((prev & kWriteLock) == 0)
      return;
    ReadLockSlow();
  }

  void ReadUnlock() {
    u32 prev = atomic_fetch_sub(&state_, kReadLock, memory_order_release);
    DCHECK_EQ(prev & kWriteLock, 0);
    DCHECK_GT(prev & ~kWriteLock, 0);
    (void)prev;
  }

  void CheckLocked() {
    CHECK_NE(atomic_load(&state_, memory_order_relaxed), kUnlocked);
  }

 private:
  atomic_uint32_t state_;

  // State encoding: bit 0 is the writer flag; each reader adds kReadLock,
  // so the reader count occupies the remaining bits.
  enum {
    kUnlocked = 0,
    kWriteLock = 1,
    kReadLock = 2
  };

  void NOINLINE LockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      u32 cmp = atomic_load(&state_, memory_order_relaxed);
      if (cmp == kUnlocked &&
          atomic_compare_exchange_weak(&state_, &cmp, kWriteLock,
                                       memory_order_acquire))
        return;
    }
  }

  void NOINLINE ReadLockSlow() {
    for (int i = 0;; i++) {
      if (i < 10)
        proc_yield(10);
      else
        internal_sched_yield();
      // The reader was already counted by ReadLock's fetch_add; just wait
      // for the writer bit to clear.
      u32 prev = atomic_load(&state_, memory_order_acquire);
      if ((prev & kWriteLock) == 0)
        return;
    }
  }

  RWMutex(const RWMutex&);
  void operator=(const RWMutex&);
};
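
// Worked example of the state arithmetic, for illustration: one writer makes
// state_ == 1 (kWriteLock); three concurrent readers make it 6
// (3 * kReadLock); a reader arriving while a writer holds the lock makes it
// 3 (kWriteLock | kReadLock), and that reader proceeds in ReadLockSlow once
// the writer bit clears.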

// Scoped (RAII) lock: acquires the mutex in the constructor and releases it
// in the destructor.
template<typename MutexType>
class GenericScopedLock {
 public:
  explicit GenericScopedLock(MutexType *mu)
      : mu_(mu) {
    mu_->Lock();
  }

  ~GenericScopedLock() {
    mu_->Unlock();
  }

 private:
  MutexType *mu_;

  GenericScopedLock(const GenericScopedLock&);
  void operator=(const GenericScopedLock&);
};

// Scoped (RAII) read lock: the shared-access counterpart of
// GenericScopedLock.
template<typename MutexType>
class GenericScopedReadLock {
 public:
  explicit GenericScopedReadLock(MutexType *mu)
      : mu_(mu) {
    mu_->ReadLock();
  }

  ~GenericScopedReadLock() {
    mu_->ReadUnlock();
  }

 private:
  MutexType *mu_;

  GenericScopedReadLock(const GenericScopedReadLock&);
  void operator=(const GenericScopedReadLock&);
};

typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<BlockingMutex> BlockingMutexLock;
typedef GenericScopedLock<RWMutex> RWMutexLock;
typedef GenericScopedReadLock<RWMutex> RWMutexReadLock;
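
// Usage sketch (names are illustrative): readers of a shared structure pair
// with the read lock, writers with the exclusive lock:
//
//   RWMutex map_mu;
//
//   uptr LookUp() {
//     RWMutexReadLock l(&map_mu);   // shared: many readers may be inside
//     // ... read the map ...
//     return 0;
//   }
//
//   void Update() {
//     RWMutexLock l(&map_mu);       // exclusive: writer waits for readers
//     // ... modify the map ...
//   }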

}  // namespace __sanitizer

#endif  // SANITIZER_MUTEX_H