//===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
#ifndef TSAN_CLOCK_H
#define TSAN_CLOCK_H

#include "tsan_defs.h"
#include "tsan_dense_alloc.h"

namespace __tsan {

typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
typedef DenseSlabAllocCache ClockCache;

// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
 public:
  SyncClock();
  ~SyncClock();

  uptr size() const;

  // These are used only in tests.
  u64 get(unsigned tid) const;
  u64 get_clean(unsigned tid) const;

  void Resize(ClockCache *c, uptr nclk);
  void Reset(ClockCache *c);

  void DebugDump(int(*printf)(const char *s, ...));

  // Clock element iterator.
  // Note: it iterates only over the table without regard to dirty entries.
  class Iter {
   public:
    explicit Iter(SyncClock* parent);
    Iter& operator++();
    bool operator!=(const Iter& other);
    ClockElem &operator*();

   private:
    SyncClock *parent_;
    // [pos_, end_) is the current continuous range of clock elements.
    ClockElem *pos_;
    ClockElem *end_;
    int block_;  // Index of the current second-level block.

    NOINLINE void Next();
  };

  Iter begin();
  Iter end();

 private:
  friend class ThreadClock;
  friend class Iter;
  static const uptr kDirtyTids = 2;

  struct Dirty {
    u64 epoch : kClkBits;
    u64 tid : 64 - kClkBits;  // kInvalidId if not active
  };

  unsigned release_store_tid_;
  unsigned release_store_reused_;
  Dirty dirty_[kDirtyTids];
  // If size_ is 0, tab_ is nullptr.
  // If size_ <= 64 (kClockCount), tab_ contains pointer to an array with
  // 64 ClockElem's (ClockBlock::clock).
  // Otherwise, tab_ points to an array with up to 127 u32 elements,
  // each pointing to a second-level 512-byte block with 64 ClockElem's.
  // Unused space in the first level ClockBlock is used to store additional
  // clock elements.
  // The last u32 element in the first level ClockBlock is always used as
  // reference counter.
  //
  // See the following scheme for details.
  // All memory blocks are 512 bytes (allocated from ClockAlloc).
  // Clock (clk) elements are 64 bits.
  // Idx and ref are 32 bits.
  //
  // tab_
  //  |
  //  \/
  //  +----------------------------------------------------+
  //  | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
  //  +----------------------------------------------------+
  //                                      |      |
  //                                      |      \/
  //                                      |      +----------------+
  //                                      |      | clk0 ... clk63 |
  //                                      |      +----------------+
  //                                      \/
  //                                      +------------------+
  //                                      | clk64 ... clk127 |
  //                                      +------------------+
  //
  // Note: dirty entries, if active, always override what's stored in the clock.
  ClockBlock *tab_;
  u32 tab_idx_;
  u16 size_;
  u16 blocks_;  // Number of second level blocks.
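  // For example, with blocks_ == 2 and size_ == 130 the scheme above stores
  // clk0..clk63 in the second-level block referenced by idx0, clk64..clk127
  // in the second-level block referenced by idx1, and clk128..clk129 inline
  // in the otherwise unused space of the first-level block.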

  void Unshare(ClockCache *c);
  bool IsShared() const;
  bool Cachable() const;
  void ResetImpl();
  void FlushDirty();
  uptr capacity() const;
  u32 get_block(uptr bi) const;
  void append_block(u32 idx);
  ClockElem &elem(unsigned tid) const;
};

// The clock that lives in threads.
class ThreadClock {
 public:
  typedef DenseSlabAllocCache Cache;

  explicit ThreadClock(unsigned tid, unsigned reused = 0);

  u64 get(unsigned tid) const;
  void set(ClockCache *c, unsigned tid, u64 v);
  void set(u64 v);
  void tick();
  uptr size() const;

  void acquire(ClockCache *c, SyncClock *src);
  void release(ClockCache *c, SyncClock *dst);
  void acq_rel(ClockCache *c, SyncClock *dst);
  void ReleaseStore(ClockCache *c, SyncClock *dst);
  void ResetCached(ClockCache *c);

  void DebugReset();
  void DebugDump(int(*printf)(const char *s, ...));

 private:
  static const uptr kDirtyTids = SyncClock::kDirtyTids;
  // Index of the thread associated with the clock ("current thread").
  const unsigned tid_;
  const unsigned reused_;  // tid_ reuse count.
  // Current thread time when it acquired something from other threads.
  u64 last_acquire_;

  // Cached SyncClock (without dirty entries and release_store_tid_).
  // We reuse it for subsequent store-release operations without intervening
  // acquire operations. Since it is shared (and thus constant), clock value
  // for the current thread is then stored in dirty entries in the SyncClock.
  // We hold a reference to the table while it is cached here.
  u32 cached_idx_;
  u16 cached_size_;
  u16 cached_blocks_;

  // Number of active elements in the clk_ table (the rest is zeros).
  uptr nclk_;
  u64 clk_[kMaxTidInClock];  // Fixed size vector clock.

  bool IsAlreadyAcquired(const SyncClock *src) const;
  void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};

ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
  DCHECK_LT(tid, kMaxTidInClock);
  return clk_[tid];
}

ALWAYS_INLINE void ThreadClock::set(u64 v) {
  DCHECK_GE(v, clk_[tid_]);
  clk_[tid_] = v;
}

ALWAYS_INLINE void ThreadClock::tick() {
  clk_[tid_]++;
}

ALWAYS_INLINE uptr ThreadClock::size() const {
  return nclk_;
}

ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
  return Iter(this);
}

ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
  return Iter(nullptr);
}

ALWAYS_INLINE uptr SyncClock::size() const {
  return size_;
}

ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
    : parent_(parent)
    , pos_(nullptr)
    , end_(nullptr)
    , block_(-1) {
  if (parent)
    Next();
}

ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
  pos_++;
  if (UNLIKELY(pos_ >= end_))
    Next();
  return *this;
}

ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
  return parent_ != other.parent_;
}

ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
  return *pos_;
}
}  // namespace __tsan

#endif  // TSAN_CLOCK_H
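
// Conceptual model of the ThreadClock/SyncClock operations declared above,
// ignoring tid reuse, dirty entries and the caching machinery. This is a
// rough sketch of the intended semantics, not the actual implementation
// (which lives in tsan_clock.cc); max() is taken per vector-clock element:
//
//   acquire(src):       for each tid: clk_[tid] = max(clk_[tid], src[tid])
//   release(dst):       for each tid: dst[tid] = max(dst[tid], clk_[tid])
//   ReleaseStore(dst):  for each tid: dst[tid] = clk_[tid]
//   acq_rel(dst):       acquire(dst) followed by release(dst)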