//===-- sanitizer_mutex.h ---------------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file is a part of ThreadSanitizer/AddressSanitizer runtime.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #ifndef SANITIZER_MUTEX_H
14 #define SANITIZER_MUTEX_H
15
16 #include "sanitizer_atomic.h"
17 #include "sanitizer_internal_defs.h"
18 #include "sanitizer_libc.h"
19 #include "sanitizer_thread_safety.h"
20
21 namespace __sanitizer {
22
23 class SANITIZER_MUTEX StaticSpinMutex {
24 public:
25 void Init() {
26 atomic_store(&state_, 0, memory_order_relaxed);
27 }
28
29 void Lock() SANITIZER_ACQUIRE() {
30 if (LIKELY(TryLock()))
31 return;
32 LockSlow();
33 }
34
35 bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
36 return atomic_exchange(&state_, 1, memory_order_acquire) == 0;
37 }
38
39 void Unlock() SANITIZER_RELEASE() {
40 atomic_store(&state_, 0, memory_order_release);
41 }
42
43 void CheckLocked() const SANITIZER_CHECK_LOCKED() {
44 // __sync_lock_test_and_set (as used under the hood of the bespoke
45 // atomic_exchange here) does not always store the value we asked
46 // to store -- it just stores some nonzero value. On sparc, this
47 // is 0xff. On vax, this is whatever was there before but with the
48 // low-order bit set. So test for a nonzero value, rather than for
49 // the specific value 1.
50 CHECK_NE(atomic_load(&state_, memory_order_relaxed), 0);
51 }
52
53 private:
54 atomic_uint8_t state_;
55
56 void LockSlow();
57 };
58
59 class SANITIZER_MUTEX SpinMutex : public StaticSpinMutex {
60 public:
61 SpinMutex() {
62 Init();
63 }
64
65 SpinMutex(const SpinMutex &) = delete;
66 void operator=(const SpinMutex &) = delete;
67 };
68
// Semaphore provides an OS-dependent way to park/unpark threads.
// The last thread returned from Wait can destroy the object
// (destruction-safety).
class Semaphore {
 public:
  constexpr Semaphore() {}
  Semaphore(const Semaphore &) = delete;
  void operator=(const Semaphore &) = delete;

  // Blocks the calling thread until a token is available
  // (implementation is platform-specific and lives out-of-line).
  void Wait();
  // Makes count tokens available, presumably unparking up to count
  // waiting threads (platform-specific; see the per-OS implementation).
  void Post(u32 count = 1);

 private:
  // All-zero is the valid initial state (no tokens, no waiters).
  atomic_uint32_t state_ = {0};
};
84
// Numeric identifier of a mutex type, consumed by the deadlock-checking
// machinery (see MutexMeta / CheckedMutex below).
typedef int MutexType;

enum {
  // Used as sentinel and to catch unassigned types
  // (should not be used as real Mutex type).
  MutexInvalid = 0,
  MutexThreadRegistry,
  // Each tool's own mutex types must start at this number.
  MutexLastCommon,
  // Type for legacy mutexes that are not checked for deadlocks.
  MutexUnchecked = -1,
  // Special marks that can be used in MutexMeta::can_lock table.
  // The leaf mutexes can be locked under any other non-leaf mutex,
  // but no other mutex can be locked while under a leaf mutex.
  MutexLeaf = -1,
  // Multiple mutexes of this type can be locked at the same time.
  MutexMulti = -3,
};
103
// Go linker does not support THREADLOCAL variables,
// so we can't use per-thread state.
// Disable checked locks on Darwin. Although Darwin platforms support
// THREADLOCAL variables they are not usable early on during process init when
// `__sanitizer::Mutex` is used.
#define SANITIZER_CHECK_DEADLOCKS \
  (SANITIZER_DEBUG && !SANITIZER_GO && SANITIZER_SUPPORTS_THREADLOCAL && !SANITIZER_APPLE)

#if SANITIZER_CHECK_DEADLOCKS
// Static description of one mutex type for the deadlock detector.
struct MutexMeta {
  MutexType type;
  // Human-readable mutex name (for diagnostics).
  const char *name;
  // The table fixes what mutexes can be locked under what mutexes.
  // If the entry for MutexTypeFoo contains MutexTypeBar,
  // then Bar mutex can be locked while under Foo mutex.
  // Can also contain the special MutexLeaf/MutexMulti marks.
  MutexType can_lock[10];
};
#endif
123
// Mixin that records lock/unlock events for run-time lock-order checking
// when SANITIZER_CHECK_DEADLOCKS is enabled. In other configurations every
// member compiles down to nothing, so deriving from it is free.
class CheckedMutex {
 public:
  explicit constexpr CheckedMutex(MutexType type)
#if SANITIZER_CHECK_DEADLOCKS
      : type_(type)
#endif
  {
  }

  // Notes acquisition of this mutex by the current thread
  // (checking logic lives out-of-line in LockImpl).
  ALWAYS_INLINE void Lock() {
#if SANITIZER_CHECK_DEADLOCKS
    LockImpl(GET_CALLER_PC());
#endif
  }

  // Notes release of this mutex by the current thread.
  ALWAYS_INLINE void Unlock() {
#if SANITIZER_CHECK_DEADLOCKS
    UnlockImpl();
#endif
  }

  // Checks that the current thread does not hold any mutexes
  // (e.g. when returning from a runtime function to user code).
  static void CheckNoLocks() {
#if SANITIZER_CHECK_DEADLOCKS
    CheckNoLocksImpl();
#endif
  }

 private:
#if SANITIZER_CHECK_DEADLOCKS
  const MutexType type_;

  void LockImpl(uptr pc);
  void UnlockImpl();
  static void CheckNoLocksImpl();
#endif
};
162
// Reader-writer mutex.
// Derive from CheckedMutex for the purposes of EBO.
// We could make it a field marked with [[no_unique_address]],
// but this attribute is not supported by some older compilers.
class SANITIZER_MUTEX Mutex : CheckedMutex {
 public:
  explicit constexpr Mutex(MutexType type = MutexUnchecked)
      : CheckedMutex(type) {}

  // Acquires the mutex exclusively. Spins up to kMaxSpinIters CAS attempts;
  // after that registers itself as a waiting writer and blocks on writers_.
  void Lock() SANITIZER_ACQUIRE() {
    CheckedMutex::Lock();
    // Mask applied to the next successful state transition; used to clear
    // kWriterSpinWait once we stop actively spinning (see below).
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      u64 new_state;
      bool locked = (state & (kWriterLock | kReaderLockMask)) != 0;
      if (LIKELY(!locked)) {
        // The mutex is not read-/write-locked, try to lock.
        new_state = (state | kWriterLock) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        // We've spun enough, increment waiting writers count and block.
        // The counter will be decremented by whoever wakes us.
        new_state = (state + kWaitingWriterInc) & reset_mask;
      } else if ((state & kWriterSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kWriterSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting writers, so now block.
        writers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kWriterSpinWait, but we are still in active spinning.
      }
      // We either blocked and were unblocked,
      // or we just spun but set kWriterSpinWait.
      // Either way we need to reset kWriterSpinWait
      // next time we take the lock or block again.
      reset_mask = ~kWriterSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
      DCHECK_NE(state & kWriterSpinWait, 0);
    }
  }

  // Single attempt to acquire the mutex exclusively; never blocks or spins.
  // Returns true iff the lock was taken.
  bool TryLock() SANITIZER_TRY_ACQUIRE(true) {
    u64 state = atomic_load_relaxed(&state_);
    for (;;) {
      if (UNLIKELY(state & (kWriterLock | kReaderLockMask)))
        return false;
      // The mutex is not read-/write-locked, try to lock.
      if (LIKELY(atomic_compare_exchange_weak(
              &state_, &state, state | kWriterLock, memory_order_acquire))) {
        CheckedMutex::Lock();
        return true;
      }
    }
  }

  // Releases exclusive ownership. Wakes at most one blocked writer, or
  // all blocked readers -- never both -- and suppresses wakeups entirely
  // while some thread is already awake and spin-waiting.
  void Unlock() SANITIZER_RELEASE() {
    CheckedMutex::Unlock();
    bool wake_writer;
    u64 wake_readers;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kWriterLock, 0);
      DCHECK_EQ(state & kReaderLockMask, 0);
      new_state = state & ~kWriterLock;
      // Prefer a waiting writer, but only if nobody is spin-waiting already.
      wake_writer = (state & (kWriterSpinWait | kReaderSpinWait)) == 0 &&
                    (state & kWaitingWriterMask) != 0;
      if (wake_writer)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
      // Otherwise wake all waiting readers at once (unless a writer spins).
      wake_readers =
          wake_writer || (state & kWriterSpinWait) != 0
              ? 0
              : ((state & kWaitingReaderMask) >> kWaitingReaderShift);
      if (wake_readers)
        new_state = (new_state & ~kWaitingReaderMask) | kReaderSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake_writer))
      writers_.Post();
    else if (UNLIKELY(wake_readers))
      readers_.Post(wake_readers);
  }

  // Acquires the mutex in shared (reader) mode. Mirrors Lock(): spins up to
  // kMaxSpinIters, then registers as a waiting reader and blocks on readers_.
  void ReadLock() SANITIZER_ACQUIRE_SHARED() {
    CheckedMutex::Lock();
    u64 reset_mask = ~0ull;
    u64 state = atomic_load_relaxed(&state_);
    for (uptr spin_iters = 0;; spin_iters++) {
      bool locked = (state & kWriterLock) != 0;
      u64 new_state;
      if (LIKELY(!locked)) {
        // No writer: join the reader count (readers never wait for readers).
        new_state = (state + kReaderLockInc) & reset_mask;
      } else if (spin_iters > kMaxSpinIters) {
        new_state = (state + kWaitingReaderInc) & reset_mask;
      } else if ((state & kReaderSpinWait) == 0) {
        // Active spinning, but denote our presence so that unlocking
        // thread does not wake up other threads.
        new_state = state | kReaderSpinWait;
      } else {
        // Active spinning.
        state = atomic_load(&state_, memory_order_relaxed);
        continue;
      }
      if (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                 memory_order_acquire)))
        continue;
      if (LIKELY(!locked))
        return;  // We've locked the mutex.
      if (spin_iters > kMaxSpinIters) {
        // We've incremented waiting readers, so now block.
        readers_.Wait();
        spin_iters = 0;
      } else {
        // We've set kReaderSpinWait, but we are still in active spinning.
      }
      reset_mask = ~kReaderSpinWait;
      state = atomic_load(&state_, memory_order_relaxed);
    }
  }

  // Releases a shared hold. The last reader out wakes one waiting writer
  // (if any, and if nobody is already spin-waiting).
  void ReadUnlock() SANITIZER_RELEASE_SHARED() {
    CheckedMutex::Unlock();
    bool wake;
    u64 new_state;
    u64 state = atomic_load_relaxed(&state_);
    do {
      DCHECK_NE(state & kReaderLockMask, 0);
      DCHECK_EQ(state & kWriterLock, 0);
      new_state = state - kReaderLockInc;
      wake = (new_state &
              (kReaderLockMask | kWriterSpinWait | kReaderSpinWait)) == 0 &&
             (new_state & kWaitingWriterMask) != 0;
      if (wake)
        new_state = (new_state - kWaitingWriterInc) | kWriterSpinWait;
    } while (UNLIKELY(!atomic_compare_exchange_weak(&state_, &state, new_state,
                                                    memory_order_release)));
    if (UNLIKELY(wake))
      writers_.Post();
  }

  // This function does not guarantee an explicit check that the calling thread
  // is the thread which owns the mutex. This behavior, while more strictly
  // correct, causes problems in cases like StopTheWorld, where a parent thread
  // owns the mutex but a child checks that it is locked. Rather than
  // maintaining complex state to work around those situations, the check only
  // checks that the mutex is owned.
  void CheckWriteLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kWriterLock);
  }

  // Alias for CheckWriteLocked (exclusive ownership check).
  void CheckLocked() const SANITIZER_CHECK_LOCKED() { CheckWriteLocked(); }

  // Checks that at least one reader holds the mutex (any thread).
  void CheckReadLocked() const SANITIZER_CHECK_LOCKED() {
    CHECK(atomic_load(&state_, memory_order_relaxed) & kReaderLockMask);
  }

 private:
  atomic_uint64_t state_ = {0};
  Semaphore writers_;
  Semaphore readers_;

  // The state has 3 counters:
  // - number of readers holding the lock,
  //   if non zero, the mutex is read-locked
  // - number of waiting readers,
  //   if not zero, the mutex is write-locked
  // - number of waiting writers,
  //   if non zero, the mutex is read- or write-locked
  // And 2 flags:
  // - writer lock
  //   if set, the mutex is write-locked
  // - a writer is awake and spin-waiting
  //   the flag is used to prevent thundering herd problem
  //   (new writers are not woken if this flag is set)
  // - a reader is awake and spin-waiting
  //
  // Both writers and readers use active spinning before blocking.
  // But readers are more aggressive and always take the mutex
  // if there are any other readers.
  // After wake up both writers and readers compete to lock the
  // mutex again. This is needed to allow repeated locks even in presence
  // of other blocked threads.
  static constexpr u64 kCounterWidth = 20;
  static constexpr u64 kReaderLockShift = 0;
  static constexpr u64 kReaderLockInc = 1ull << kReaderLockShift;
  static constexpr u64 kReaderLockMask = ((1ull << kCounterWidth) - 1)
                                         << kReaderLockShift;
  static constexpr u64 kWaitingReaderShift = kCounterWidth;
  static constexpr u64 kWaitingReaderInc = 1ull << kWaitingReaderShift;
  static constexpr u64 kWaitingReaderMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingReaderShift;
  static constexpr u64 kWaitingWriterShift = 2 * kCounterWidth;
  static constexpr u64 kWaitingWriterInc = 1ull << kWaitingWriterShift;
  static constexpr u64 kWaitingWriterMask = ((1ull << kCounterWidth) - 1)
                                            << kWaitingWriterShift;
  static constexpr u64 kWriterLock = 1ull << (3 * kCounterWidth);
  static constexpr u64 kWriterSpinWait = 1ull << (3 * kCounterWidth + 1);
  static constexpr u64 kReaderSpinWait = 1ull << (3 * kCounterWidth + 2);

  static constexpr uptr kMaxSpinIters = 1500;

  Mutex(LinkerInitialized) = delete;
  Mutex(const Mutex &) = delete;
  void operator=(const Mutex &) = delete;
};
381
// Platform-specific futex-style wait/wake primitives
// (implemented per-OS in the corresponding .cpp files).
void FutexWait(atomic_uint32_t *p, u32 cmp);
void FutexWake(atomic_uint32_t *p, u32 count);
384
385 template <typename MutexType>
386 class SANITIZER_SCOPED_LOCK GenericScopedLock {
387 public:
388 explicit GenericScopedLock(MutexType *mu) SANITIZER_ACQUIRE(mu) : mu_(mu) {
389 mu_->Lock();
390 }
391
392 ~GenericScopedLock() SANITIZER_RELEASE() { mu_->Unlock(); }
393
394 private:
395 MutexType *mu_;
396
397 GenericScopedLock(const GenericScopedLock &) = delete;
398 void operator=(const GenericScopedLock &) = delete;
399 };
400
401 template <typename MutexType>
402 class SANITIZER_SCOPED_LOCK GenericScopedReadLock {
403 public:
404 explicit GenericScopedReadLock(MutexType *mu) SANITIZER_ACQUIRE(mu)
405 : mu_(mu) {
406 mu_->ReadLock();
407 }
408
409 ~GenericScopedReadLock() SANITIZER_RELEASE() { mu_->ReadUnlock(); }
410
411 private:
412 MutexType *mu_;
413
414 GenericScopedReadLock(const GenericScopedReadLock &) = delete;
415 void operator=(const GenericScopedReadLock &) = delete;
416 };
417
418 template <typename MutexType>
419 class SANITIZER_SCOPED_LOCK GenericScopedRWLock {
420 public:
421 ALWAYS_INLINE explicit GenericScopedRWLock(MutexType *mu, bool write)
422 SANITIZER_ACQUIRE(mu)
423 : mu_(mu), write_(write) {
424 if (write_)
425 mu_->Lock();
426 else
427 mu_->ReadLock();
428 }
429
430 ALWAYS_INLINE ~GenericScopedRWLock() SANITIZER_RELEASE() {
431 if (write_)
432 mu_->Unlock();
433 else
434 mu_->ReadUnlock();
435 }
436
437 private:
438 MutexType *mu_;
439 bool write_;
440
441 GenericScopedRWLock(const GenericScopedRWLock &) = delete;
442 void operator=(const GenericScopedRWLock &) = delete;
443 };
444
// Convenience scoped-lock aliases for the common mutex types.
typedef GenericScopedLock<StaticSpinMutex> SpinMutexLock;
typedef GenericScopedLock<Mutex> Lock;
typedef GenericScopedReadLock<Mutex> ReadLock;
typedef GenericScopedRWLock<Mutex> RWLock;
449
450 } // namespace __sanitizer
451
452 #endif // SANITIZER_MUTEX_H
453