//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_MUTEX_H
#define SANITIZER_MUTEX_H

#include "sanitizer_atomic.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_thread_safety.h"

namespace __sanitizer {
22
23 class MUTEX StaticSpinMutex {
24 public:
25 void Init() {
26 atomic_store(&state_, 0, memory_order_relaxed);
27 }
28
29 void Lock() ACQUIRE() {
30 if (LIKELY(TryLock()))
31 return;
32 LockSlow();
33 }
34
35 bool TryLock() TRY_ACQUIRE(true) {
36 return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
37 }
38
39 void Unlock() RELEASE() { atomic_store(&state_, 0, memory_order_release); }
40
41 void CheckLocked() const CHECK_LOCKED() {
42 // __sync_lock_test_and_set (as used under the hood of the bespoke
43 // atomic_exchange here) does not always store the value we asked
44 // to store -- it just stores some nonzero value. On sparc, this
45 // is 0xff. On vax, this is whatever was there before but with the
46 // low-order bit set. So test for a nonzero value, rather than for
47 // the specific value 1.
48 CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
49 }
50
51 private:
52 atomic_uint8_t state_;
53
54 void LockSlow();
55 };
56
57 class MUTEX SpinMutex : public StaticSpinMutex {
58 public:
59 SpinMutex() {
60 Init();
61 }
62
63 SpinMutex(const SpinMutex &) = delete;
64 void operator=(const SpinMutex &) = delete;
65 };
66
// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
// Wait/Post are implemented per-platform (not in this header);
// state_ holds the pending-post count manipulated by them.
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  // Blocks the calling thread until a matching Post.
  void Wait();
  // Unblocks up to `count` waiting threads.
  void Post(u32 count = 1);

 private:
  atomic_uint32_t state_ = {0};
};
82
// Identifier of a mutex type for runtime deadlock checking
// (see CheckedMutex/MutexMeta below).
typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool own mutexes must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  // NOTE(review): MutexLeaf shares the value -1 with MutexUnchecked;
  // the two appear to be interpreted in disjoint contexts (mutex type
  // vs can_lock table entry) — confirm against the deadlock checker.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
101
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables they are not usable early on during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_MAC)

#if SANITIZER_CHECK_DEADLOCKS
// Static description of one mutex type for the deadlock checker.
struct MutexMeta {
  MutexType type;
  // Human-readable mutex name for diagnostics.
  const char *name;
  // The table fixes what mutexes can be locked under what mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
121
// Tracks mutex acquisition per thread to detect lock-order violations
// (deadlock potential). All members compile to no-ops unless
// SANITIZER_CHECK_DEADLOCKS is enabled, so the class is free in
// release builds.
class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    // Record the caller PC so violation reports point at user code.
    LockImpl(GET_CALLER_PC());
#endif
  }

  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
160
// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  // Acquires the mutex exclusively. Spins up to kMaxSpinIters before
  // blocking on writers_; all state transitions go through a single
  // CAS on state_.
  void Lock() ACQUIRE() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  // Releases the exclusive lock. Wakes at most one blocked writer, or
  // all blocked readers, but only when no thread is already
  // spin-waiting (a spin-waiting thread will grab the mutex itself,
  // avoiding a thundering herd).
  void Unlock() RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      // Readers are not woken if a writer was woken or is spin-waiting:
      // the pending writer gets the first chance at the mutex.
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  // Acquires the mutex in shared (reader) mode. Mirrors Lock():
  // spin up to kMaxSpinIters, then block on readers_.
  void ReadLock() ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        // Readers always join other readers immediately; only a held
        // writer lock forces us to wait.
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      // Reset kReaderSpinWait on the next lock/block attempt
      // (same protocol as kWriterSpinWait in Lock()).
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  // Releases a shared lock. The last reader out wakes one blocked
  // writer, unless some thread is already spin-waiting.
  void ReadUnlock() RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned.
  void CheckWriteLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  void CheckLocked() const CHECK_LOCKED() { CheckWriteLocked(); }

  // Like CheckWriteLocked, but for shared (reader) ownership; checks
  // only that *some* thread holds a read lock.
  void CheckReadLocked() const CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  // - number of readers holding the lock,
  //   if non zero, the mutex is read-locked
  // - number of waiting readers,
  //   if not zero, the mutex is write-locked
  // - number of waiting writers,
  //   if non zero, the mutex is read- or write-locked
  // And 2 flags:
  // - writer lock
  //   if set, the mutex is write-locked
  // - a writer is awake and spin-waiting
  //   the flag is used to prevent thundering herd problem
  //   (new writers are not woken if this flag is set)
  // - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake up both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in presence
  // of other blocked threads.
  //
  // Bit layout of state_ (derived from the constants below):
  //   bits [0, 20)  - reader lock count
  //   bits [20, 40) - waiting readers count
  //   bits [40, 60) - waiting writers count
  //   bit 60        - kWriterLock
  //   bit 61        - kWriterSpinWait
  //   bit 62        - kReaderSpinWait
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};
365
// OS-specific park/unpark primitives (implemented per-platform).
// NOTE(review): presumably these follow the usual futex contract —
// FutexWait blocks while *p == cmp, FutexWake wakes up to `count`
// waiters — confirm against the platform implementations.
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
368
369 template <typename MutexType>
370 class SCOPED_LOCK GenericScopedLock {
371 public:
372 explicit GenericScopedLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
373 mu_->Lock();
374 }
375
376 ~GenericScopedLock() RELEASE() { mu_->Unlock(); }
377
378 private:
379 MutexType *mu_;
380
381 GenericScopedLock(const GenericScopedLock &) = delete;
382 void operator=(const GenericScopedLock &) = delete;
383 };
384
385 template <typename MutexType>
386 class SCOPED_LOCK GenericScopedReadLock {
387 public:
388 explicit GenericScopedReadLock(MutexType *mu) ACQUIRE(mu) : mu_(mu) {
389 mu_->ReadLock();
390 }
391
392 ~GenericScopedReadLock() RELEASE() { mu_->ReadUnlock(); }
393
394 private:
395 MutexType *mu_;
396
397 GenericScopedReadLock(const GenericScopedReadLock &) = delete;
398 void operator=(const GenericScopedReadLock &) = delete;
399 };
400
401 template <typename MutexType>
402 class SCOPED_LOCK GenericScopedRWLock {
403 public:
404 ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
405 ACQUIRE(mu)
406 : mu_(mu), write_(write) {
407 if (write_)
408 mu_->Lock();
409 else
410 mu_->ReadLock();
411 }
412
413 ALWAYS_INLINE ~GenericScopedRWLock() RELEASE() {
414 if (write_)
415 mu_->Unlock();
416 else
417 mu_->ReadUnlock();
418 }
419
420 private:
421 MutexType *mu_;
422 bool write_;
423
424 GenericScopedRWLock(const GenericScopedRWLock &) = delete;
425 void operator=(const GenericScopedRWLock &) = delete;
426 };
427
428 typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
429 typedef GenericScopedLock<Mutex> Lock;
430 typedef GenericScopedReadLock<Mutex> ReadLock;
431 typedef GenericScopedRWLock<Mutex> RWLock;
432
} // namespace __sanitizer

#endif // SANITIZER_MUTEX_H