//===-- tsan_interceptors_mac.cc ------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// Mac-specific interceptors.
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#if SANITIZER_MAC

#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_interface_ann.h"
#include "sanitizer_common/sanitizer_addrhashmap.h"

#include <libkern/OSAtomic.h>
#include <objc/objc-sync.h>

#if defined(__has_include) && __has_include(<xpc/xpc.h>)
#include <xpc/xpc.h>
#endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)

typedef long long_t;  // NOLINT

namespace __tsan {

// The non-barrier versions of OSAtomic* functions are semantically mo_relaxed,
// but the two variants (e.g. OSAtomicAdd32 and OSAtomicAdd32Barrier) are
// aliases of the same function, so we cannot register different interceptors
// for them. Thus, we have to stay conservative and treat the non-barrier
// versions as mo_acq_rel.
static const morder kMacOrderBarrier = mo_acq_rel;
static const morder kMacOrderNonBarrier = mo_acq_rel;

#define OSATOMIC_INTERCEPTOR(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo);               \
  }

#define OSATOMIC_INTERCEPTOR_PLUS_X(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, t x, volatile t *ptr) {                       \
    SCOPED_TSAN_INTERCEPTOR(f, x, ptr);                                       \
    return tsan_atomic_f((volatile tsan_t *)ptr, x, mo) + x;                  \
  }

#define OSATOMIC_INTERCEPTOR_PLUS_1(return_t, t, tsan_t, f, tsan_atomic_f, mo) \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                            \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                          \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) + 1;                  \
  }

#define OSATOMIC_INTERCEPTOR_MINUS_1(return_t, t, tsan_t, f, tsan_atomic_f, \
                                     mo)                                    \
  TSAN_INTERCEPTOR(return_t, f, volatile t *ptr) {                          \
    SCOPED_TSAN_INTERCEPTOR(f, ptr);                                        \
    return tsan_atomic_f((volatile tsan_t *)ptr, 1, mo) - 1;                \
  }

#define OSATOMIC_INTERCEPTORS_ARITHMETIC(f, tsan_atomic_f, m)                  \
  m(int32_t, int32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,             \
    kMacOrderNonBarrier)                                                       \
  m(int32_t, int32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,    \
    kMacOrderBarrier)                                                          \
  m(int64_t, int64_t, a64, f##64, __tsan_atomic64_##tsan_atomic_f,             \
    kMacOrderNonBarrier)                                                       \
  m(int64_t, int64_t, a64, f##64##Barrier, __tsan_atomic64_##tsan_atomic_f,    \
    kMacOrderBarrier)

#define OSATOMIC_INTERCEPTORS_BITWISE(f, tsan_atomic_f, m, m_orig)             \
  m(int32_t, uint32_t, a32, f##32, __tsan_atomic32_##tsan_atomic_f,            \
    kMacOrderNonBarrier)                                                       \
  m(int32_t, uint32_t, a32, f##32##Barrier, __tsan_atomic32_##tsan_atomic_f,   \
    kMacOrderBarrier)                                                          \
  m_orig(int32_t, uint32_t, a32, f##32##Orig, __tsan_atomic32_##tsan_atomic_f, \
         kMacOrderNonBarrier)                                                  \
  m_orig(int32_t, uint32_t, a32, f##32##OrigBarrier,                           \
         __tsan_atomic32_##tsan_atomic_f, kMacOrderBarrier)
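
// For illustration, the OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, ...)
// instantiation below expands roughly to:
//   TSAN_INTERCEPTOR(int32_t, OSAtomicAdd32, int32_t x, volatile int32_t *ptr) {
//     SCOPED_TSAN_INTERCEPTOR(OSAtomicAdd32, x, ptr);
//     return __tsan_atomic32_fetch_add((volatile a32 *)ptr, x,
//                                      kMacOrderNonBarrier) + x;
//   }
// The trailing "+ x" bridges the return-value convention: OSAtomicAdd32
// returns the new value, whereas __tsan_atomic32_fetch_add returns the old
// one. The PLUS_1/MINUS_1 variants do the same for Increment/Decrement.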

OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicAdd, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_X)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicIncrement, fetch_add,
                                 OSATOMIC_INTERCEPTOR_PLUS_1)
OSATOMIC_INTERCEPTORS_ARITHMETIC(OSAtomicDecrement, fetch_sub,
                                 OSATOMIC_INTERCEPTOR_MINUS_1)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicOr, fetch_or, OSATOMIC_INTERCEPTOR_PLUS_X,
                              OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicAnd, fetch_and,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)
OSATOMIC_INTERCEPTORS_BITWISE(OSAtomicXor, fetch_xor,
                              OSATOMIC_INTERCEPTOR_PLUS_X, OSATOMIC_INTERCEPTOR)

#define OSATOMIC_INTERCEPTORS_CAS(f, tsan_atomic_f, tsan_t, t)            \
  TSAN_INTERCEPTOR(bool, f, t old_value, t new_value, t volatile *ptr) {  \
    SCOPED_TSAN_INTERCEPTOR(f, old_value, new_value, ptr);                \
    return tsan_atomic_f##_compare_exchange_strong(                       \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,  \
        kMacOrderNonBarrier, kMacOrderNonBarrier);                        \
  }                                                                       \
                                                                          \
  TSAN_INTERCEPTOR(bool, f##Barrier, t old_value, t new_value,            \
                   t volatile *ptr) {                                     \
    SCOPED_TSAN_INTERCEPTOR(f##Barrier, old_value, new_value, ptr);       \
    return tsan_atomic_f##_compare_exchange_strong(                       \
        (volatile tsan_t *)ptr, (tsan_t *)&old_value, (tsan_t)new_value,  \
        kMacOrderBarrier, kMacOrderNonBarrier);                           \
  }

OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapInt, __tsan_atomic32, a32, int)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapLong, __tsan_atomic64, a64,
                          long_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwapPtr, __tsan_atomic64, a64,
                          void *)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap32, __tsan_atomic32, a32,
                          int32_t)
OSATOMIC_INTERCEPTORS_CAS(OSAtomicCompareAndSwap64, __tsan_atomic64, a64,
                          int64_t)

#define OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, mo)             \
  TSAN_INTERCEPTOR(bool, f, uint32_t n, volatile void *ptr) {    \
    SCOPED_TSAN_INTERCEPTOR(f, n, ptr);                          \
    volatile char *byte_ptr = ((volatile char *)ptr) + (n >> 3); \
    char bit = 0x80u >> (n & 7);                                 \
    char mask = clear ? ~bit : bit;                              \
    char orig_byte = op((volatile a8 *)byte_ptr, mask, mo);      \
    return orig_byte & bit;                                      \
  }

#define OSATOMIC_INTERCEPTORS_BITOP(f, op, clear)               \
  OSATOMIC_INTERCEPTOR_BITOP(f, op, clear, kMacOrderNonBarrier) \
  OSATOMIC_INTERCEPTOR_BITOP(f##Barrier, op, clear, kMacOrderBarrier)

OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndSet, __tsan_atomic8_fetch_or, false)
OSATOMIC_INTERCEPTORS_BITOP(OSAtomicTestAndClear, __tsan_atomic8_fetch_and,
                            true)

TSAN_INTERCEPTOR(void, OSAtomicEnqueue, OSQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicDequeue, OSQueueHead *list, size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicDequeue, list, offset);
  void *item = REAL(OSAtomicDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}

// OSAtomicFifoEnqueue and OSAtomicFifoDequeue are only on OS X.
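// Like the LIFO variants above, they pair a __tsan_release on the item before
// it is enqueued with a __tsan_acquire after it is dequeued, so writes made
// to the item by the producer thread are ordered before reads by the
// consumer thread.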
#if !SANITIZER_IOS

TSAN_INTERCEPTOR(void, OSAtomicFifoEnqueue, OSFifoQueueHead *list, void *item,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoEnqueue, list, item, offset);
  __tsan_release(item);
  REAL(OSAtomicFifoEnqueue)(list, item, offset);
}

TSAN_INTERCEPTOR(void *, OSAtomicFifoDequeue, OSFifoQueueHead *list,
                 size_t offset) {
  SCOPED_TSAN_INTERCEPTOR(OSAtomicFifoDequeue, list, offset);
  void *item = REAL(OSAtomicFifoDequeue)(list, offset);
  if (item) __tsan_acquire(item);
  return item;
}

#endif

TSAN_INTERCEPTOR(void, OSSpinLockLock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockLock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockLock, lock);
  REAL(OSSpinLockLock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, OSSpinLockTry, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockTry)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockTry, lock);
  bool result = REAL(OSSpinLockTry)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, OSSpinLockUnlock, volatile OSSpinLock *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(OSSpinLockUnlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(OSSpinLockUnlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(OSSpinLockUnlock)(lock);
}

TSAN_INTERCEPTOR(void, os_lock_lock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_lock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_lock, lock);
  REAL(os_lock_lock)(lock);
  Acquire(thr, pc, (uptr)lock);
}

TSAN_INTERCEPTOR(bool, os_lock_trylock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_trylock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_trylock, lock);
  bool result = REAL(os_lock_trylock)(lock);
  if (result)
    Acquire(thr, pc, (uptr)lock);
  return result;
}

TSAN_INTERCEPTOR(void, os_lock_unlock, void *lock) {
  CHECK(!cur_thread()->is_dead);
  if (!cur_thread()->is_inited) {
    return REAL(os_lock_unlock)(lock);
  }
  SCOPED_TSAN_INTERCEPTOR(os_lock_unlock, lock);
  Release(thr, pc, (uptr)lock);
  REAL(os_lock_unlock)(lock);
}

#if defined(__has_include) && __has_include(<xpc/xpc.h>)

TSAN_INTERCEPTOR(void, xpc_connection_set_event_handler,
                 xpc_connection_t connection, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_set_event_handler, connection,
                          handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_set_event_handler);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_set_event_handler)(connection, new_handler);
}

TSAN_INTERCEPTOR(void, xpc_connection_send_barrier, xpc_connection_t connection,
                 dispatch_block_t barrier) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_barrier, connection, barrier);
  Release(thr, pc, (uptr)connection);
  dispatch_block_t new_barrier = ^() {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_barrier);
      Acquire(thr, pc, (uptr)connection);
    }
    barrier();
  };
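  // Hand the wrapped block to the real function; the block performs the
  // Acquire (pairing with the Release above) before running the original
  // barrier.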
  REAL(xpc_connection_send_barrier)(connection, new_barrier);
}

TSAN_INTERCEPTOR(void, xpc_connection_send_message_with_reply,
                 xpc_connection_t connection, xpc_object_t message,
                 dispatch_queue_t replyq, xpc_handler_t handler) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_send_message_with_reply, connection,
                          message, replyq, handler);
  Release(thr, pc, (uptr)connection);
  xpc_handler_t new_handler = ^(xpc_object_t object) {
    {
      SCOPED_INTERCEPTOR_RAW(xpc_connection_send_message_with_reply);
      Acquire(thr, pc, (uptr)connection);
    }
    handler(object);
  };
  REAL(xpc_connection_send_message_with_reply)
  (connection, message, replyq, new_handler);
}

TSAN_INTERCEPTOR(void, xpc_connection_cancel, xpc_connection_t connection) {
  SCOPED_TSAN_INTERCEPTOR(xpc_connection_cancel, connection);
  Release(thr, pc, (uptr)connection);
  REAL(xpc_connection_cancel)(connection);
}

#endif  // #if defined(__has_include) && __has_include(<xpc/xpc.h>)

// Determines whether the Obj-C object pointer is a tagged pointer. Tagged
// pointers encode the object data directly in their pointer bits and do not
// have an associated memory allocation. The Obj-C runtime uses tagged pointers
// to transparently optimize small objects.
static bool IsTaggedObjCPointer(id obj) {
  const uptr kPossibleTaggedBits = 0x8000000000000001ull;
  return ((uptr)obj & kPossibleTaggedBits) != 0;
}

// Returns an address which can be used to inform TSan about synchronization
// points (MutexLock/Unlock). The TSan infrastructure expects this to be a
// valid address in the process space. We do a small allocation here to obtain
// a stable address (the array backing the hash map can change). The memory is
// never freed (leaked), and allocation and locking are slow, but this code
// only runs for @synchronized with tagged pointers, which is very rare.
static uptr GetOrCreateSyncAddress(uptr addr, ThreadState *thr, uptr pc) {
  typedef AddrHashMap<uptr, 5> Map;
  static Map Addresses;
  Map::Handle h(&Addresses, addr);
  if (h.created()) {
    ThreadIgnoreBegin(thr, pc);
    *h = (uptr) user_alloc(thr, pc, /*size=*/1);
    ThreadIgnoreEnd(thr, pc);
  }
  return *h;
}

// Returns an address on which we can synchronize given an Obj-C object
// pointer. For normal object pointers, this is just the address of the object
// in memory. Tagged pointers are not backed by an actual memory allocation,
// so we need to synthesize a valid address.
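// (For example, small NSNumber instances are typically tagged pointers on
// 64-bit platforms, so @synchronized on such an object would otherwise hand
// a non-heap "address" to the mutex machinery.)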
static uptr SyncAddressForObjCObject(id obj, ThreadState *thr, uptr pc) {
  if (IsTaggedObjCPointer(obj))
    return GetOrCreateSyncAddress((uptr)obj, thr, pc);
  return (uptr)obj;
}

TSAN_INTERCEPTOR(int, objc_sync_enter, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_enter, obj);
  if (!obj) return REAL(objc_sync_enter)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexPreLock(thr, pc, addr, MutexFlagWriteReentrant);
  int result = REAL(objc_sync_enter)(obj);
  CHECK_EQ(result, OBJC_SYNC_SUCCESS);
  MutexPostLock(thr, pc, addr, MutexFlagWriteReentrant);
  return result;
}

TSAN_INTERCEPTOR(int, objc_sync_exit, id obj) {
  SCOPED_TSAN_INTERCEPTOR(objc_sync_exit, obj);
  if (!obj) return REAL(objc_sync_exit)(obj);
  uptr addr = SyncAddressForObjCObject(obj, thr, pc);
  MutexUnlock(thr, pc, addr);
  int result = REAL(objc_sync_exit)(obj);
  if (result != OBJC_SYNC_SUCCESS) MutexInvalidAccess(thr, pc, addr);
  return result;
}

// On macOS, libc++ is always linked dynamically, so intercepting works the
// usual way.
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR

namespace {
struct fake_shared_weak_count {
  volatile a64 shared_owners;
  volatile a64 shared_weak_owners;
  virtual void _unused_0x0() = 0;
  virtual void _unused_0x8() = 0;
  virtual void on_zero_shared() = 0;
  virtual void _unused_0x18() = 0;
  virtual void on_zero_shared_weak() = 0;
};
}  // namespace

// The following code adds libc++ interceptors for:
//     void __shared_weak_count::__release_shared() _NOEXCEPT;
//     bool __shared_count::__release_shared() _NOEXCEPT;
// Shared and weak pointers in C++ maintain reference counts via atomics in
// libc++.dylib, which are TSan-invisible, and this leads to false positives in
// destructor code. These interceptors re-implement the whole functions so that
// the mo_acq_rel semantics of the atomic decrements are visible to TSan.
//
// Unfortunately, the interceptors cannot simply Acquire/Release some sync
// object and call the original function, because it would have a race between
// the sync and the destruction of the object. Calling both under a lock will
// not work because the destructor can invoke this interceptor again (and even
// in a different thread, so recursive locks don't help).
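//
// A note on the decrement logic below: libc++ stores each owner count minus
// one, and __tsan_atomic64_fetch_add returns the *old* value, so a result of
// 0 means this call just dropped the last reference. The explicit Acquire
// then pairs with the mo_release decrements performed by other threads,
// giving TSan a happens-before edge into the destructor calls.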

STDCXX_INTERCEPTOR(void, _ZNSt3__119__shared_weak_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__119__shared_weak_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__119__shared_weak_count16__release_sharedEv,
                          o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    if (__tsan_atomic64_fetch_add(&o->shared_weak_owners, -1, mo_release) ==
        0) {
      Acquire(thr, pc, (uptr)&o->shared_weak_owners);
      o->on_zero_shared_weak();
    }
  }
}

STDCXX_INTERCEPTOR(bool, _ZNSt3__114__shared_count16__release_sharedEv,
                   fake_shared_weak_count *o) {
  if (!flags()->shared_ptr_interceptor)
    return REAL(_ZNSt3__114__shared_count16__release_sharedEv)(o);

  SCOPED_TSAN_INTERCEPTOR(_ZNSt3__114__shared_count16__release_sharedEv, o);
  if (__tsan_atomic64_fetch_add(&o->shared_owners, -1, mo_release) == 0) {
    Acquire(thr, pc, (uptr)&o->shared_owners);
    o->on_zero_shared();
    return true;
  }
  return false;
}

namespace {
struct call_once_callback_args {
  void (*orig_func)(void *arg);
  void *orig_arg;
  void *flag;
};

void call_once_callback_wrapper(void *arg) {
  call_once_callback_args *new_args = (call_once_callback_args *)arg;
  new_args->orig_func(new_args->orig_arg);
  __tsan_release(new_args->flag);
}
}  // namespace

// This adds a libc++ interceptor for:
//     void __call_once(volatile unsigned long&, void*, void(*)(void*));
// C++11 call_once is implemented via an internal function __call_once which is
// inside libc++.dylib, and the atomic release store inside it is thus
// TSan-invisible. To avoid false positives, this interceptor wraps the callback
// function and performs an explicit Release after the user code has run.
STDCXX_INTERCEPTOR(void, _ZNSt3__111__call_onceERVmPvPFvS2_E, void *flag,
                   void *arg, void (*func)(void *arg)) {
  call_once_callback_args new_args = {func, arg, flag};
  REAL(_ZNSt3__111__call_onceERVmPvPFvS2_E)(flag, &new_args,
                                            call_once_callback_wrapper);
}

}  // namespace __tsan

#endif  // SANITIZER_MAC