//===-- tsan_interceptors.cc ----------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// FIXME: move as many interceptors as possible into
// sanitizer_common/sanitizer_common_interceptors.inc
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_linux.h"
#include "sanitizer_common/sanitizer_platform_limits_netbsd.h"
#include "sanitizer_common/sanitizer_platform_limits_posix.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_posix.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "interception/interception.h"
#include "tsan_interceptors.h"
#include "tsan_interface.h"
#include "tsan_platform.h"
#include "tsan_suppressions.h"
#include "tsan_rtl.h"
#include "tsan_mman.h"
#include "tsan_fd.h"


using namespace __tsan;  // NOLINT

#if SANITIZER_FREEBSD || SANITIZER_MAC
#define stdout __stdoutp
#define stderr __stderrp
#endif

#if SANITIZER_NETBSD
#define dirfd(dirp) (*(int *)(dirp))
#define fileno_unlocked fileno

#if _LP64
#define __sF_size 152
#else
#define __sF_size 88
#endif

#define stdout ((char*)&__sF + (__sF_size * 1))
#define stderr ((char*)&__sF + (__sF_size * 2))

#define nanosleep __nanosleep50
#define vfork __vfork14
#endif

#if SANITIZER_ANDROID
#define mallopt(a, b)
#endif

#ifdef __mips__
const int kSigCount = 129;
#else
const int kSigCount = 65;
#endif

#ifdef __mips__
struct ucontext_t {
  u64 opaque[768 / sizeof(u64) + 1];
};
#else
struct ucontext_t {
  // The size is determined by looking at sizeof of real ucontext_t on linux.
  u64 opaque[936 / sizeof(u64) + 1];
};
#endif

#if defined(__x86_64__) || defined(__mips__) || SANITIZER_PPC64V1
#define PTHREAD_ABI_BASE  "GLIBC_2.3.2"
#elif defined(__aarch64__) || SANITIZER_PPC64V2
#define PTHREAD_ABI_BASE  "GLIBC_2.17"
#endif

extern "C" int pthread_attr_init(void *attr);
extern "C" int pthread_attr_destroy(void *attr);
DECLARE_REAL(int, pthread_attr_getdetachstate, void *, void *)
extern "C" int pthread_attr_setstacksize(void *attr, uptr stacksize);
extern "C" int pthread_key_create(unsigned *key, void (*destructor)(void* v));
extern "C" int pthread_setspecific(unsigned key, const void *v);
DECLARE_REAL(int, pthread_mutexattr_gettype, void *, void *)
DECLARE_REAL(int, fflush, __sanitizer_FILE *fp)
DECLARE_REAL_AND_INTERCEPTOR(void *, malloc, uptr size)
DECLARE_REAL_AND_INTERCEPTOR(void, free, void *ptr)
extern "C" void *pthread_self();
extern "C" void _exit(int status);
extern "C" int fileno_unlocked(void *stream);
#if !SANITIZER_NETBSD
extern "C" int dirfd(void *dirp);
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_ANDROID && !SANITIZER_NETBSD
extern "C" int mallopt(int param, int value);
#endif
#if SANITIZER_NETBSD
extern __sanitizer_FILE __sF[];
#else
extern __sanitizer_FILE *stdout, *stderr;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int PTHREAD_MUTEX_RECURSIVE = 1;
const int PTHREAD_MUTEX_RECURSIVE_NP = 1;
#else
const int PTHREAD_MUTEX_RECURSIVE = 2;
const int PTHREAD_MUTEX_RECURSIVE_NP = 2;
#endif
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
const int EPOLL_CTL_ADD = 1;
#endif
const int SIGILL = 4;
const int SIGABRT = 6;
const int SIGFPE = 8;
const int SIGSEGV = 11;
const int SIGPIPE = 13;
const int SIGTERM = 15;
#if defined(__mips__) || SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SIGBUS = 10;
const int SIGSYS = 12;
#else
const int SIGBUS = 7;
const int SIGSYS = 31;
#endif
void *const MAP_FAILED = (void*)-1;
#if SANITIZER_NETBSD
const int PTHREAD_BARRIER_SERIAL_THREAD = 1234567;
#elif !SANITIZER_MAC
const int PTHREAD_BARRIER_SERIAL_THREAD = -1;
#endif
const int MAP_FIXED = 0x10;
typedef long long_t;  // NOLINT

// From /usr/include/unistd.h
# define F_ULOCK 0      /* Unlock a previously locked region.  */
# define F_LOCK  1      /* Lock a region for exclusive use.  */
# define F_TLOCK 2      /* Test and lock a region for exclusive use.  */
# define F_TEST  3      /* Test a region for other processes locks.  */

#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_NETBSD
const int SA_SIGINFO = 0x40;
const int SIG_SETMASK = 3;
#elif defined(__mips__)
const int SA_SIGINFO = 8;
const int SIG_SETMASK = 3;
#else
const int SA_SIGINFO = 4;
const int SIG_SETMASK = 2;
#endif

#define COMMON_INTERCEPTOR_NOTHING_IS_INITIALIZED \
  (!cur_thread()->is_inited)

namespace __tsan {
struct SignalDesc {
  bool armed;
  bool sigaction;
  __sanitizer_siginfo siginfo;
  ucontext_t ctx;
};

struct ThreadSignalContext {
  int int_signal_send;
  atomic_uintptr_t in_blocking_func;
  atomic_uintptr_t have_pending_signals;
  SignalDesc pending_signals[kSigCount];
  // emptyset and oldset are too big for stack.
  __sanitizer_sigset_t emptyset;
  __sanitizer_sigset_t oldset;
};

// The sole reason tsan wraps atexit callbacks is to establish synchronization
// between callback setup and callback execution.
struct AtExitCtx {
  void (*f)();
  void *arg;
};

// InterceptorContext holds all global data required for interceptors.
// It's explicitly constructed in InitializeInterceptors with placement new
// and is never destroyed. This allows usage of members with non-trivial
// constructors and destructors.
struct InterceptorContext {
  // The object is 64-byte aligned, because we want hot data to be located
  // in a single cache line if possible (it's accessed in every interceptor).
  ALIGNED(64) LibIgnore libignore;
  __sanitizer_sigaction sigactions[kSigCount];
#if !SANITIZER_MAC && !SANITIZER_NETBSD
  unsigned finalize_key;
#endif

  BlockingMutex atexit_mu;
  Vector<struct AtExitCtx *> AtExitStack;

  InterceptorContext()
      : libignore(LINKER_INITIALIZED), AtExitStack() {
  }
};

static ALIGNED(64) char interceptor_placeholder[sizeof(InterceptorContext)];
InterceptorContext *interceptor_ctx() {
  return reinterpret_cast<InterceptorContext*>(&interceptor_placeholder[0]);
}

LibIgnore *libignore() {
  return &interceptor_ctx()->libignore;
}

void InitializeLibIgnore() {
  const SuppressionContext &supp = *Suppressions();
  const uptr n = supp.SuppressionCount();
  for (uptr i = 0; i < n; i++) {
    const Suppression *s = supp.SuppressionAt(i);
    if (0 == internal_strcmp(s->type, kSuppressionLib))
      libignore()->AddIgnoredLibrary(s->templ);
  }
  if (flags()->ignore_noninstrumented_modules)
    libignore()->IgnoreNoninstrumentedModules(true);
  libignore()->OnLibraryLoaded(0);
}

// The following two hooks can be used for cooperative scheduling when
// locking.
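// A minimal external hook implementation might look like this (illustrative
// sketch only; scheduler_yield_hint() is a hypothetical cooperative-scheduler
// call, not part of tsan):
//   void __tsan::OnPotentiallyBlockingRegionBegin() { scheduler_yield_hint(); }
//   void __tsan::OnPotentiallyBlockingRegionEnd() {}
// The hooks are invoked around the spin-wait in __cxa_guard_acquire below.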
#ifdef TSAN_EXTERNAL_HOOKS
void OnPotentiallyBlockingRegionBegin();
void OnPotentiallyBlockingRegionEnd();
#else
SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionBegin() {}
SANITIZER_WEAK_CXX_DEFAULT_IMPL void OnPotentiallyBlockingRegionEnd() {}
#endif

}  // namespace __tsan

static ThreadSignalContext *SigCtx(ThreadState *thr) {
  ThreadSignalContext *ctx = (ThreadSignalContext*)thr->signal_ctx;
  if (ctx == 0 && !thr->is_dead) {
    ctx = (ThreadSignalContext*)MmapOrDie(sizeof(*ctx), "ThreadSignalContext");
    MemoryResetRange(thr, (uptr)&SigCtx, (uptr)ctx, sizeof(*ctx));
    thr->signal_ctx = ctx;
  }
  return ctx;
}

ScopedInterceptor::ScopedInterceptor(ThreadState *thr, const char *fname,
                                     uptr pc)
    : thr_(thr), pc_(pc), in_ignored_lib_(false), ignoring_(false) {
  Initialize(thr);
  if (!thr_->is_inited) return;
  if (!thr_->ignore_interceptors) FuncEntry(thr, pc);
  DPrintf("#%d: intercept %s()\n", thr_->tid, fname);
  ignoring_ =
      !thr_->in_ignored_lib && (flags()->ignore_interceptors_accesses ||
                                libignore()->IsIgnored(pc, &in_ignored_lib_));
  EnableIgnores();
}

ScopedInterceptor::~ScopedInterceptor() {
  if (!thr_->is_inited) return;
  DisableIgnores();
  if (!thr_->ignore_interceptors) {
    ProcessPendingSignals(thr_);
    FuncExit(thr_);
    CheckNoLocks(thr_);
  }
}

void ScopedInterceptor::EnableIgnores() {
  if (ignoring_) {
    ThreadIgnoreBegin(thr_, pc_, /*save_stack=*/false);
    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports++;
    if (in_ignored_lib_) {
      DCHECK(!thr_->in_ignored_lib);
      thr_->in_ignored_lib = true;
    }
  }
}

void ScopedInterceptor::DisableIgnores() {
  if (ignoring_) {
    ThreadIgnoreEnd(thr_, pc_);
    if (flags()->ignore_noninstrumented_modules) thr_->suppress_reports--;
    if (in_ignored_lib_) {
      DCHECK(thr_->in_ignored_lib);
      thr_->in_ignored_lib = false;
    }
  }
}

#define TSAN_INTERCEPT(func) INTERCEPT_FUNCTION(func)
#if SANITIZER_FREEBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#elif SANITIZER_NETBSD
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func) \
    INTERCEPT_FUNCTION(__libc_##func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func) \
    INTERCEPT_FUNCTION(__libc_thr_##func)
#else
# define TSAN_INTERCEPT_VER(func, ver) INTERCEPT_FUNCTION_VER(func, ver)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(func)
# define TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(func)
#endif

#define READ_STRING_OF_LEN(thr, pc, s, len, n)    \
  MemoryAccessRange((thr), (pc), (uptr)(s),       \
    common_flags()->strict_string_checks ? (len) + 1 : (n), false)

#define READ_STRING(thr, pc, s, n)    \
    READ_STRING_OF_LEN((thr), (pc), (s), internal_strlen(s), (n))

#define BLOCK_REAL(name) (BlockingCall(thr), REAL(name))

struct BlockingCall {
  explicit BlockingCall(ThreadState *thr)
      : thr(thr)
      , ctx(SigCtx(thr)) {
    for (;;) {
      atomic_store(&ctx->in_blocking_func, 1, memory_order_relaxed);
      if (atomic_load(&ctx->have_pending_signals, memory_order_relaxed) == 0)
        break;
      atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
      ProcessPendingSignals(thr);
    }
    // When we are in a "blocking call", we process signals asynchronously
    // (right when they arrive). In this context we do not expect to be
    // executing any user/runtime code. The known interceptor sequence when
    // this is not true is: pthread_join -> munmap(stack). It's fine
    // to ignore munmap in this case -- we handle stack shadow separately.
    thr->ignore_interceptors++;
  }

  ~BlockingCall() {
    thr->ignore_interceptors--;
    atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  }

  ThreadState *thr;
  ThreadSignalContext *ctx;
};

TSAN_INTERCEPTOR(unsigned, sleep, unsigned sec) {
  SCOPED_TSAN_INTERCEPTOR(sleep, sec);
  unsigned res = BLOCK_REAL(sleep)(sec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, usleep, long_t usec) {
  SCOPED_TSAN_INTERCEPTOR(usleep, usec);
  int res = BLOCK_REAL(usleep)(usec);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, nanosleep, void *req, void *rem) {
  SCOPED_TSAN_INTERCEPTOR(nanosleep, req, rem);
  int res = BLOCK_REAL(nanosleep)(req, rem);
  AfterSleep(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, pause, int fake) {
  SCOPED_TSAN_INTERCEPTOR(pause, fake);
  return BLOCK_REAL(pause)(fake);
}

static void at_exit_wrapper() {
  AtExitCtx *ctx;
  {
    // Ensure thread-safety.
    BlockingMutexLock l(&interceptor_ctx()->atexit_mu);

    // Pop AtExitCtx from the top of the stack of callback functions
    uptr element = interceptor_ctx()->AtExitStack.Size() - 1;
    ctx = interceptor_ctx()->AtExitStack[element];
    interceptor_ctx()->AtExitStack.PopBack();
  }

  Acquire(cur_thread(), (uptr)0, (uptr)ctx);
  ((void(*)())ctx->f)();
  InternalFree(ctx);
}

static void cxa_at_exit_wrapper(void *arg) {
  Acquire(cur_thread(), 0, (uptr)arg);
  AtExitCtx *ctx = (AtExitCtx*)arg;
  ((void(*)(void *arg))ctx->f)(ctx->arg);
  InternalFree(ctx);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
      void *arg, void *dso);

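// The user-visible effect of the wrappers above and the interceptors below
// (illustrative sketch only; `g` and `cb` are hypothetical user code):
//   int g;
//   void cb() { use(g); }    // atexit callback
//   g = 1;                   // (1)
//   atexit(cb);              // setup_at_exit_wrapper does Release(ctx)
//   // At process exit, at_exit_wrapper/cxa_at_exit_wrapper does Acquire(ctx)
//   // before invoking cb, so the write (1) is ordered before the read of g
//   // in cb and is not reported as a race.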
#if !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, atexit, void (*f)()) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return 0;
  // We want to set up the atexit callback even if we are in an ignored lib
  // or after fork.
  SCOPED_INTERCEPTOR_RAW(atexit, f);
  return setup_at_exit_wrapper(thr, pc, (void(*)())f, 0, 0);
}
#endif

TSAN_INTERCEPTOR(int, __cxa_atexit, void (*f)(void *a), void *arg, void *dso) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return 0;
  SCOPED_TSAN_INTERCEPTOR(__cxa_atexit, f, arg, dso);
  return setup_at_exit_wrapper(thr, pc, (void(*)())f, arg, dso);
}

static int setup_at_exit_wrapper(ThreadState *thr, uptr pc, void(*f)(),
      void *arg, void *dso) {
  AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
  ctx->f = f;
  ctx->arg = arg;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res;
  if (!dso) {
    // NetBSD does not preserve the 2nd argument if dso is equal to 0.
    // Store ctx in a local stack-like structure.

    // Ensure thread-safety.
    BlockingMutexLock l(&interceptor_ctx()->atexit_mu);

    res = REAL(__cxa_atexit)((void (*)(void *a))at_exit_wrapper, 0, 0);
    // Push AtExitCtx on the top of the stack of callback functions
    if (!res) {
      interceptor_ctx()->AtExitStack.PushBack(ctx);
    }
  } else {
    res = REAL(__cxa_atexit)(cxa_at_exit_wrapper, ctx, dso);
  }
  ThreadIgnoreEnd(thr, pc);
  return res;
}

#if !SANITIZER_MAC && !SANITIZER_NETBSD
static void on_exit_wrapper(int status, void *arg) {
  ThreadState *thr = cur_thread();
  uptr pc = 0;
  Acquire(thr, pc, (uptr)arg);
  AtExitCtx *ctx = (AtExitCtx*)arg;
  ((void(*)(int status, void *arg))ctx->f)(status, ctx->arg);
  InternalFree(ctx);
}

TSAN_INTERCEPTOR(int, on_exit, void(*f)(int, void*), void *arg) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return 0;
  SCOPED_TSAN_INTERCEPTOR(on_exit, f, arg);
  AtExitCtx *ctx = (AtExitCtx*)InternalAlloc(sizeof(AtExitCtx));
  ctx->f = (void(*)())f;
  ctx->arg = arg;
  Release(thr, pc, (uptr)ctx);
  // Memory allocation in __cxa_atexit will race with free during exit,
  // because we do not see synchronization around atexit callback list.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(on_exit)(on_exit_wrapper, ctx);
  ThreadIgnoreEnd(thr, pc);
  return res;
}
#define TSAN_MAYBE_INTERCEPT_ON_EXIT TSAN_INTERCEPT(on_exit)
#else
#define TSAN_MAYBE_INTERCEPT_ON_EXIT
#endif

// Clean up old bufs.
static void JmpBufGarbageCollect(ThreadState *thr, uptr sp) {
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->sp <= sp) {
      uptr sz = thr->jmp_bufs.Size();
      internal_memcpy(buf, &thr->jmp_bufs[sz - 1], sizeof(*buf));
      thr->jmp_bufs.PopBack();
      i--;
    }
  }
}

static void SetJmp(ThreadState *thr, uptr sp, uptr mangled_sp) {
  if (!thr->is_inited)  // called from libc guts during bootstrap
    return;
  // Clean up old bufs.
  JmpBufGarbageCollect(thr, sp);
  // Remember the buf.
  JmpBuf *buf = thr->jmp_bufs.PushBack();
  buf->sp = sp;
  buf->mangled_sp = mangled_sp;
  buf->shadow_stack_pos = thr->shadow_stack_pos;
  ThreadSignalContext *sctx = SigCtx(thr);
  buf->int_signal_send = sctx ? sctx->int_signal_send : 0;
  buf->in_blocking_func = sctx ?
      atomic_load(&sctx->in_blocking_func, memory_order_relaxed) :
      false;
  buf->in_signal_handler = atomic_load(&thr->in_signal_handler,
      memory_order_relaxed);
}

static void LongJmp(ThreadState *thr, uptr *env) {
#ifdef __powerpc__
  uptr mangled_sp = env[0];
#elif SANITIZER_FREEBSD
  uptr mangled_sp = env[2];
#elif SANITIZER_NETBSD
  uptr mangled_sp = env[6];
#elif SANITIZER_MAC
# ifdef __aarch64__
  uptr mangled_sp =
      (GetMacosVersion() >= MACOS_VERSION_MOJAVE) ? env[12] : env[13];
# else
  uptr mangled_sp = env[2];
# endif
#elif SANITIZER_LINUX
# ifdef __aarch64__
  uptr mangled_sp = env[13];
# elif defined(__mips64)
  uptr mangled_sp = env[1];
# else
  uptr mangled_sp = env[6];
# endif
#endif
  // Find the saved buf by mangled_sp.
  for (uptr i = 0; i < thr->jmp_bufs.Size(); i++) {
    JmpBuf *buf = &thr->jmp_bufs[i];
    if (buf->mangled_sp == mangled_sp) {
      CHECK_GE(thr->shadow_stack_pos, buf->shadow_stack_pos);
      // Unwind the stack.
      while (thr->shadow_stack_pos > buf->shadow_stack_pos)
        FuncExit(thr);
      ThreadSignalContext *sctx = SigCtx(thr);
      if (sctx) {
        sctx->int_signal_send = buf->int_signal_send;
        atomic_store(&sctx->in_blocking_func, buf->in_blocking_func,
            memory_order_relaxed);
      }
      atomic_store(&thr->in_signal_handler, buf->in_signal_handler,
          memory_order_relaxed);
      JmpBufGarbageCollect(thr, buf->sp - 1);  // do not collect buf->sp
      return;
    }
  }
  Printf("ThreadSanitizer: can't find longjmp buf\n");
  CHECK(0);
}

// FIXME: put everything below into a common extern "C" block?
extern "C" void __tsan_setjmp(uptr sp, uptr mangled_sp) {
  SetJmp(cur_thread(), sp, mangled_sp);
}

#if SANITIZER_MAC
TSAN_INTERCEPTOR(int, setjmp, void *env);
TSAN_INTERCEPTOR(int, _setjmp, void *env);
TSAN_INTERCEPTOR(int, sigsetjmp, void *env);
#else  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define setjmp_symname __setjmp14
#define sigsetjmp_symname __sigsetjmp14
#else
#define setjmp_symname setjmp
#define sigsetjmp_symname sigsetjmp
#endif

#define TSAN_INTERCEPTOR_SETJMP_(x) __interceptor_ ## x
#define TSAN_INTERCEPTOR_SETJMP__(x) TSAN_INTERCEPTOR_SETJMP_(x)
#define TSAN_INTERCEPTOR_SETJMP TSAN_INTERCEPTOR_SETJMP__(setjmp_symname)
#define TSAN_INTERCEPTOR_SIGSETJMP TSAN_INTERCEPTOR_SETJMP__(sigsetjmp_symname)

#define TSAN_STRING_SETJMP SANITIZER_STRINGIFY(setjmp_symname)
#define TSAN_STRING_SIGSETJMP SANITIZER_STRINGIFY(sigsetjmp_symname)

// Not called. Merely to satisfy TSAN_INTERCEPT().
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SETJMP(void *env) {
  CHECK(0);
  return 0;
}

// FIXME: any reason to have a separate declaration?
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor__setjmp(void *env);
extern "C" int __interceptor__setjmp(void *env) {
  CHECK(0);
  return 0;
}

extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int TSAN_INTERCEPTOR_SIGSETJMP(void *env);
extern "C" int TSAN_INTERCEPTOR_SIGSETJMP(void *env) {
  CHECK(0);
  return 0;
}

#if !SANITIZER_NETBSD
extern "C" SANITIZER_INTERFACE_ATTRIBUTE
int __interceptor___sigsetjmp(void *env);
extern "C" int __interceptor___sigsetjmp(void *env) {
  CHECK(0);
  return 0;
}
#endif

extern "C" int setjmp_symname(void *env);
extern "C" int _setjmp(void *env);
extern "C" int sigsetjmp_symname(void *env);
#if !SANITIZER_NETBSD
extern "C" int __sigsetjmp(void *env);
#endif
DEFINE_REAL(int, setjmp_symname, void *env)
DEFINE_REAL(int, _setjmp, void *env)
DEFINE_REAL(int, sigsetjmp_symname, void *env)
#if !SANITIZER_NETBSD
DEFINE_REAL(int, __sigsetjmp, void *env)
#endif
#endif  // SANITIZER_MAC

#if SANITIZER_NETBSD
#define longjmp_symname __longjmp14
#define siglongjmp_symname __siglongjmp14
#else
#define longjmp_symname longjmp
#define siglongjmp_symname siglongjmp
#endif

TSAN_INTERCEPTOR(void, longjmp_symname, uptr *env, int val) {
  // Note: if we call REAL(longjmp) in the context of ScopedInterceptor,
  // bad things will happen. We will jump over ScopedInterceptor dtor and can
  // leave thr->in_ignored_lib set.
  {
    SCOPED_INTERCEPTOR_RAW(longjmp_symname, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(longjmp_symname)(env, val);
}

TSAN_INTERCEPTOR(void, siglongjmp_symname, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(siglongjmp_symname, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(siglongjmp_symname)(env, val);
}

#if SANITIZER_NETBSD
TSAN_INTERCEPTOR(void, _longjmp, uptr *env, int val) {
  {
    SCOPED_INTERCEPTOR_RAW(_longjmp, env, val);
  }
  LongJmp(cur_thread(), env);
  REAL(_longjmp)(env, val);
}
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, malloc, uptr size) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalAlloc(size);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(malloc, size);
    p = user_alloc(thr, pc, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void*, __libc_memalign, uptr align, uptr sz) {
  SCOPED_TSAN_INTERCEPTOR(__libc_memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}

TSAN_INTERCEPTOR(void*, calloc, uptr size, uptr n) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalCalloc(size, n);
  void *p = 0;
  {
    SCOPED_INTERCEPTOR_RAW(calloc, size, n);
    p = user_calloc(thr, pc, size, n);
  }
  invoke_malloc_hook(p, n * size);
  return p;
}

TSAN_INTERCEPTOR(void*, realloc, void *p, uptr size) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalRealloc(p, size);
  if (p)
    invoke_free_hook(p);
  {
    SCOPED_INTERCEPTOR_RAW(realloc, p, size);
    p = user_realloc(thr, pc, p, size);
  }
  invoke_malloc_hook(p, size);
  return p;
}

TSAN_INTERCEPTOR(void, free, void *p) {
  if (p == 0)
    return;
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalFree(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(free, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(void, cfree, void *p) {
  if (p == 0)
    return;
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalFree(p);
  invoke_free_hook(p);
  SCOPED_INTERCEPTOR_RAW(cfree, p);
  user_free(thr, pc, p);
}

TSAN_INTERCEPTOR(uptr, malloc_usable_size, void *p) {
  SCOPED_INTERCEPTOR_RAW(malloc_usable_size, p);
  return user_alloc_usable_size(p);
}
#endif

TSAN_INTERCEPTOR(char*, strcpy, char *dst, const char *src) {  // NOLINT
  SCOPED_TSAN_INTERCEPTOR(strcpy, dst, src);  // NOLINT
  uptr srclen = internal_strlen(src);
  MemoryAccessRange(thr, pc, (uptr)dst, srclen + 1, true);
  MemoryAccessRange(thr, pc, (uptr)src, srclen + 1, false);
  return REAL(strcpy)(dst, src);  // NOLINT
}

TSAN_INTERCEPTOR(char*, strncpy, char *dst, char *src, uptr n) {
  SCOPED_TSAN_INTERCEPTOR(strncpy, dst, src, n);
  uptr srclen = internal_strnlen(src, n);
  MemoryAccessRange(thr, pc, (uptr)dst, n, true);
  MemoryAccessRange(thr, pc, (uptr)src, min(srclen + 1, n), false);
  return REAL(strncpy)(dst, src, n);
}

TSAN_INTERCEPTOR(char*, strdup, const char *str) {
  SCOPED_TSAN_INTERCEPTOR(strdup, str);
  // strdup will call malloc, so no instrumentation is required here.
  return REAL(strdup)(str);
}

static bool fix_mmap_addr(void **addr, long_t sz, int flags) {
  if (*addr) {
    if (!IsAppMem((uptr)*addr) || !IsAppMem((uptr)*addr + sz - 1)) {
      if (flags & MAP_FIXED) {
        errno = errno_EINVAL;
        return false;
      } else {
        *addr = 0;
      }
    }
  }
  return true;
}

template <class Mmap>
static void *mmap_interceptor(ThreadState *thr, uptr pc, Mmap real_mmap,
                              void *addr, SIZE_T sz, int prot, int flags,
                              int fd, OFF64_T off) {
  if (!fix_mmap_addr(&addr, sz, flags)) return MAP_FAILED;
  void *res = real_mmap(addr, sz, prot, flags, fd, off);
  if (res != MAP_FAILED) {
    if (fd > 0) FdAccess(thr, pc, fd);
    if (thr->ignore_reads_and_writes == 0)
      MemoryRangeImitateWrite(thr, pc, (uptr)res, sz);
    else
      MemoryResetRange(thr, pc, (uptr)res, sz);
  }
  return res;
}

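// The platform-specific mmap interceptors (defined later in this file) are
// expected to pass their own real function here; a caller looks roughly like
// this (sketch only, not a definition):
//   TSAN_INTERCEPTOR(void *, mmap, void *addr, SIZE_T sz, int prot, int flags,
//                    int fd, OFF_T off) {
//     SCOPED_TSAN_INTERCEPTOR(mmap, addr, sz, prot, flags, fd, off);
//     return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd,
//                             off);
//   }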
TSAN_INTERCEPTOR(int, munmap, void *addr, long_t sz) {
  SCOPED_TSAN_INTERCEPTOR(munmap, addr, sz);
  if (sz != 0) {
    // If sz == 0, munmap will return EINVAL and not unmap any memory.
    DontNeedShadowFor((uptr)addr, sz);
    ScopedGlobalProcessor sgp;
    ctx->metamap.ResetRange(thr->proc(), (uptr)addr, (uptr)sz);
  }
  int res = REAL(munmap)(addr, sz);
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, memalign, uptr align, uptr sz) {
  SCOPED_INTERCEPTOR_RAW(memalign, align, sz);
  return user_memalign(thr, pc, align, sz);
}
#define TSAN_MAYBE_INTERCEPT_MEMALIGN TSAN_INTERCEPT(memalign)
#else
#define TSAN_MAYBE_INTERCEPT_MEMALIGN
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(void*, aligned_alloc, uptr align, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalAlloc(sz, nullptr, align);
  SCOPED_INTERCEPTOR_RAW(aligned_alloc, align, sz);
  return user_aligned_alloc(thr, pc, align, sz);
}

TSAN_INTERCEPTOR(void*, valloc, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return InternalAlloc(sz, nullptr, GetPageSizeCached());
  SCOPED_INTERCEPTOR_RAW(valloc, sz);
  return user_valloc(thr, pc, sz);
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, pvalloc, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer)) {
    uptr PageSize = GetPageSizeCached();
    sz = sz ? RoundUpTo(sz, PageSize) : PageSize;
    return InternalAlloc(sz, nullptr, PageSize);
  }
  SCOPED_INTERCEPTOR_RAW(pvalloc, sz);
  return user_pvalloc(thr, pc, sz);
}
#define TSAN_MAYBE_INTERCEPT_PVALLOC TSAN_INTERCEPT(pvalloc)
#else
#define TSAN_MAYBE_INTERCEPT_PVALLOC
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, posix_memalign, void **memptr, uptr align, uptr sz) {
  if (UNLIKELY(cur_thread()->in_symbolizer)) {
    void *p = InternalAlloc(sz, nullptr, align);
    if (!p)
      return errno_ENOMEM;
    *memptr = p;
    return 0;
  }
  SCOPED_INTERCEPTOR_RAW(posix_memalign, memptr, align, sz);
  return user_posix_memalign(thr, pc, memptr, align, sz);
}
#endif

// __cxa_guard_acquire and friends need to be intercepted in a special way -
// regular interceptors will break statically-linked libstdc++. Linux
// interceptors are specifically defined as weak functions (so that they don't
// cause link errors when the user defines them as well). So they silently
// auto-disable themselves when such a symbol is already present in the
// binary. If we link libstdc++ statically, it will bring its own
// __cxa_guard_acquire which will silently replace our interceptor. That's why
// on Linux we simply export these interceptors with INTERFACE_ATTRIBUTE.
// On OS X, we don't support static linking, so we just use a regular
// interceptor.
#if SANITIZER_MAC
#define STDCXX_INTERCEPTOR TSAN_INTERCEPTOR
#else
#define STDCXX_INTERCEPTOR(rettype, name, ...) \
  extern "C" rettype INTERFACE_ATTRIBUTE name(__VA_ARGS__)
#endif

// Used in thread-safe function static initialization.
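// The guard cell goes through three states that the loop below distinguishes:
//   0     - not initialized: the first thread CASes it to 1<<16 and runs the
//           initializer (__cxa_guard_acquire returns 1);
//   1<<16 - initialization in progress: other threads spin with
//           internal_sched_yield();
//   1     - initialized: __cxa_guard_release stored 1 after a Release, and
//           waiters return 0 after a matching Acquire.
// __cxa_guard_abort resets the guard to 0 if the initializer throws.
// Roughly, this is the protocol the compiler emits for a function-local
// static (illustrative sketch):
//   if (__cxa_guard_acquire(&guard)) {
//     obj = make();               // run the initializer exactly once
//     __cxa_guard_release(&guard);
//   }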
STDCXX_INTERCEPTOR(int, __cxa_guard_acquire, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_acquire, g);
  OnPotentiallyBlockingRegionBegin();
  auto on_exit = at_scope_exit(&OnPotentiallyBlockingRegionEnd);
  for (;;) {
    u32 cmp = atomic_load(g, memory_order_acquire);
    if (cmp == 0) {
      if (atomic_compare_exchange_strong(g, &cmp, 1<<16, memory_order_relaxed))
        return 1;
    } else if (cmp == 1) {
      Acquire(thr, pc, (uptr)g);
      return 0;
    } else {
      internal_sched_yield();
    }
  }
}

STDCXX_INTERCEPTOR(void, __cxa_guard_release, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_release, g);
  Release(thr, pc, (uptr)g);
  atomic_store(g, 1, memory_order_release);
}

STDCXX_INTERCEPTOR(void, __cxa_guard_abort, atomic_uint32_t *g) {
  SCOPED_INTERCEPTOR_RAW(__cxa_guard_abort, g);
  atomic_store(g, 0, memory_order_relaxed);
}

namespace __tsan {
void DestroyThreadState() {
  ThreadState *thr = cur_thread();
  Processor *proc = thr->proc();
  ThreadFinish(thr);
  ProcUnwire(proc, thr);
  ProcDestroy(proc);
  ThreadSignalContext *sctx = thr->signal_ctx;
  if (sctx) {
    thr->signal_ctx = 0;
    UnmapOrDie(sctx, sizeof(*sctx));
  }
  DTLS_Destroy();
  cur_thread_finalize();
}
}  // namespace __tsan

#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
static void thread_finalize(void *v) {
  uptr iter = (uptr)v;
  if (iter > 1) {
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
        (void*)(iter - 1))) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    return;
  }
  DestroyThreadState();
}
#endif


struct ThreadParam {
  void* (*callback)(void *arg);
  void *param;
  atomic_uintptr_t tid;
};

extern "C" void *__tsan_thread_start_func(void *arg) {
  ThreadParam *p = (ThreadParam*)arg;
  void* (*callback)(void *arg) = p->callback;
  void *param = p->param;
  int tid = 0;
  {
    ThreadState *thr = cur_thread();
    // Thread-local state is not initialized yet.
    ScopedIgnoreInterceptors ignore;
#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
    ThreadIgnoreBegin(thr, 0);
    if (pthread_setspecific(interceptor_ctx()->finalize_key,
                            (void *)GetPthreadDestructorIterations())) {
      Printf("ThreadSanitizer: failed to set thread key\n");
      Die();
    }
    ThreadIgnoreEnd(thr, 0);
#endif
    while ((tid = atomic_load(&p->tid, memory_order_acquire)) == 0)
      internal_sched_yield();
    Processor *proc = ProcCreate();
    ProcWire(proc, thr);
    ThreadStart(thr, tid, GetTid(), /*workerthread*/ false);
    atomic_store(&p->tid, 0, memory_order_release);
  }
  void *res = callback(param);
  // Prevent the callback from being tail called,
  // it mixes up stack traces.
  volatile int foo = 42;
  foo++;
  return res;
}

TSAN_INTERCEPTOR(int, pthread_create,
    void *th, void *attr, void *(*callback)(void*), void * param) {
  SCOPED_INTERCEPTOR_RAW(pthread_create, th, attr, callback, param);

  MaybeSpawnBackgroundThread();

  if (ctx->after_multithreaded_fork) {
    if (flags()->die_after_fork) {
      Report("ThreadSanitizer: starting new threads after multi-threaded "
             "fork is not supported. Dying (set die_after_fork=0 to override)\n");
      Die();
    } else {
      VPrintf(1, "ThreadSanitizer: starting new threads after multi-threaded "
          "fork is not supported (pid %d). Continuing because of "
          "die_after_fork=0, but you are on your own\n", internal_getpid());
    }
  }
  __sanitizer_pthread_attr_t myattr;
  if (attr == 0) {
    pthread_attr_init(&myattr);
    attr = &myattr;
  }
  int detached = 0;
  REAL(pthread_attr_getdetachstate)(attr, &detached);
  AdjustStackSize(attr);

  ThreadParam p;
  p.callback = callback;
  p.param = param;
  atomic_store(&p.tid, 0, memory_order_relaxed);
  int res = -1;
  {
    // Otherwise we see false positives in pthread stack manipulation.
    ScopedIgnoreInterceptors ignore;
    ThreadIgnoreBegin(thr, pc);
    res = REAL(pthread_create)(th, attr, __tsan_thread_start_func, &p);
    ThreadIgnoreEnd(thr, pc);
  }
  if (res == 0) {
    int tid = ThreadCreate(thr, pc, *(uptr*)th, IsStateDetached(detached));
    CHECK_NE(tid, 0);
    // Synchronization on p.tid serves two purposes:
    // 1. ThreadCreate must finish before the new thread starts.
    //    Otherwise the new thread can call pthread_detach, but the pthread_t
    //    identifier is not yet registered in ThreadRegistry by ThreadCreate.
    // 2. ThreadStart must finish before this thread continues.
    //    Otherwise, this thread can call pthread_detach and reset thr->sync
    //    before the new thread got a chance to acquire from it in ThreadStart.
    atomic_store(&p.tid, tid, memory_order_release);
    while (atomic_load(&p.tid, memory_order_acquire) != 0)
      internal_sched_yield();
  }
  if (attr == &myattr)
    pthread_attr_destroy(&myattr);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_join, void *th, void **ret) {
  SCOPED_INTERCEPTOR_RAW(pthread_join, th, ret);
  int tid = ThreadTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_join)(th, ret);
  ThreadIgnoreEnd(thr, pc);
  if (res == 0) {
    ThreadJoin(thr, pc, tid);
  }
  return res;
}

DEFINE_REAL_PTHREAD_FUNCTIONS

TSAN_INTERCEPTOR(int, pthread_detach, void *th) {
  SCOPED_TSAN_INTERCEPTOR(pthread_detach, th);
  int tid = ThreadTid(thr, pc, (uptr)th);
  int res = REAL(pthread_detach)(th);
  if (res == 0) {
    ThreadDetach(thr, pc, tid);
  }
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, pthread_tryjoin_np, void *th, void **ret) {
  SCOPED_TSAN_INTERCEPTOR(pthread_tryjoin_np, th, ret);
  int tid = ThreadTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(pthread_tryjoin_np)(th, ret);
  ThreadIgnoreEnd(thr, pc);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_timedjoin_np, void *th, void **ret,
                 const struct timespec *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_timedjoin_np, th, ret, abstime);
  int tid = ThreadTid(thr, pc, (uptr)th);
  ThreadIgnoreBegin(thr, pc);
  int res = BLOCK_REAL(pthread_timedjoin_np)(th, ret, abstime);
  ThreadIgnoreEnd(thr, pc);
  if (res == 0)
    ThreadJoin(thr, pc, tid);
  else
    ThreadNotJoined(thr, pc, tid, (uptr)th);
  return res;
}
#endif

// Problem:
// NPTL implementation of pthread_cond has 2 versions (2.2.5 and 2.3.2).
// pthread_cond_t has a different size in the different versions.
// If we call new REAL functions for old pthread_cond_t, they will corrupt
// memory after pthread_cond_t (old cond is smaller).
// If we call old REAL functions for new pthread_cond_t, we will lose some
// functionality (e.g. old functions do not support waiting against
// CLOCK_REALTIME).
// Proper handling would require having 2 versions of interceptors as well.
// But this is messy, in particular it requires linker scripts when the
// sanitizer runtime is linked into a shared library.
// Instead we assume we don't have dynamic libraries built against the old
// pthread (2.2.5 dates back to 2002) and provide a legacy_pthread_cond flag
// that allows working with old libraries (but this mode does not support
// some features, e.g. pthread_condattr_getpshared).
static void *init_cond(void *c, bool force = false) {
  // sizeof(pthread_cond_t) >= sizeof(uptr) in both versions.
  // So we allocate additional memory on the side large enough to hold
  // any pthread_cond_t object. Always call new REAL functions, but pass
  // the aux object to them.
  // Note: the code assumes that PTHREAD_COND_INITIALIZER initializes
  // the first word of pthread_cond_t to zero.
  // It's all relevant only for linux.
  if (!common_flags()->legacy_pthread_cond)
    return c;
  atomic_uintptr_t *p = (atomic_uintptr_t*)c;
  uptr cond = atomic_load(p, memory_order_acquire);
  if (!force && cond != 0)
    return (void*)cond;
  void *newcond = WRAP(malloc)(pthread_cond_t_sz);
  internal_memset(newcond, 0, pthread_cond_t_sz);
  if (atomic_compare_exchange_strong(p, &cond, (uptr)newcond,
      memory_order_acq_rel))
    return newcond;
  WRAP(free)(newcond);
  return (void*)cond;
}

struct CondMutexUnlockCtx {
  ScopedInterceptor *si;
  ThreadState *thr;
  uptr pc;
  void *m;
};

static void cond_mutex_unlock(CondMutexUnlockCtx *arg) {
  // pthread_cond_wait interceptor has enabled async signal delivery
  // (see BlockingCall below). Disable async signals since we are running
  // tsan code. Also ScopedInterceptor and BlockingCall destructors won't run
  // since the thread is cancelled, so we have to manually execute them
  // (the thread still can run some user code due to pthread_cleanup_push).
  ThreadSignalContext *ctx = SigCtx(arg->thr);
  CHECK_EQ(atomic_load(&ctx->in_blocking_func, memory_order_relaxed), 1);
  atomic_store(&ctx->in_blocking_func, 0, memory_order_relaxed);
  MutexPostLock(arg->thr, arg->pc, (uptr)arg->m, MutexFlagDoPreLockOnPostLock);
  // Undo BlockingCall ctor effects.
  arg->thr->ignore_interceptors--;
  arg->si->~ScopedInterceptor();
}

INTERCEPTOR(int, pthread_cond_init, void *c, void *a) {
  void *cond = init_cond(c, true);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_init, cond, a);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  return REAL(pthread_cond_init)(cond, a);
}

static int cond_wait(ThreadState *thr, uptr pc, ScopedInterceptor *si,
                     int (*fn)(void *c, void *m, void *abstime), void *c,
                     void *m, void *t) {
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  MutexUnlock(thr, pc, (uptr)m);
  CondMutexUnlockCtx arg = {si, thr, pc, m};
  int res = 0;
  // This ensures that we handle mutex lock even in case of pthread_cancel.
  // See test/tsan/cond_cancel.cc.
  {
    // Enable signal delivery while the thread is blocked.
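    // BlockingCall also drains any already-pending signals before we block
    // and marks the thread as being in a blocking call, so signals that
    // arrive while we wait are processed asynchronously, right when they
    // arrive (see BlockingCall above).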
    BlockingCall bc(thr);
    res = call_pthread_cancel_with_cleanup(
        fn, c, m, t, (void (*)(void *arg))cond_mutex_unlock, &arg);
  }
  if (res == errno_EOWNERDEAD) MutexRepair(thr, pc, (uptr)m);
  MutexPostLock(thr, pc, (uptr)m, MutexFlagDoPreLockOnPostLock);
  return res;
}

INTERCEPTOR(int, pthread_cond_wait, void *c, void *m) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_wait, cond, m);
  return cond_wait(thr, pc, &si,
                   (int (*)(void *c, void *m, void *abstime))REAL(
                       pthread_cond_wait),
                   cond, m, 0);
}

INTERCEPTOR(int, pthread_cond_timedwait, void *c, void *m, void *abstime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait, cond, m, abstime);
  return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait), cond, m,
                   abstime);
}

#if SANITIZER_MAC
INTERCEPTOR(int, pthread_cond_timedwait_relative_np, void *c, void *m,
            void *reltime) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_timedwait_relative_np, cond, m, reltime);
  return cond_wait(thr, pc, &si, REAL(pthread_cond_timedwait_relative_np),
                   cond, m, reltime);
}
#endif

INTERCEPTOR(int, pthread_cond_signal, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_signal, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_signal)(cond);
}

INTERCEPTOR(int, pthread_cond_broadcast, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_broadcast, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), false);
  return REAL(pthread_cond_broadcast)(cond);
}

INTERCEPTOR(int, pthread_cond_destroy, void *c) {
  void *cond = init_cond(c);
  SCOPED_TSAN_INTERCEPTOR(pthread_cond_destroy, cond);
  MemoryAccessRange(thr, pc, (uptr)c, sizeof(uptr), true);
  int res = REAL(pthread_cond_destroy)(cond);
  if (common_flags()->legacy_pthread_cond) {
    // Free our aux cond and zero the pointer to not leave dangling pointers.
    WRAP(free)(cond);
    atomic_store((atomic_uintptr_t*)c, 0, memory_order_relaxed);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_init, m, a);
  int res = REAL(pthread_mutex_init)(m, a);
  if (res == 0) {
    u32 flagz = 0;
    if (a) {
      int type = 0;
      if (REAL(pthread_mutexattr_gettype)(a, &type) == 0)
        if (type == PTHREAD_MUTEX_RECURSIVE ||
            type == PTHREAD_MUTEX_RECURSIVE_NP)
          flagz |= MutexFlagWriteReentrant;
    }
    MutexCreate(thr, pc, (uptr)m, flagz);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_destroy, m);
  int res = REAL(pthread_mutex_destroy)(m);
  if (res == 0 || res == errno_EBUSY) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_mutex_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_trylock, m);
  int res = REAL(pthread_mutex_trylock)(m);
  if (res == errno_EOWNERDEAD)
    MutexRepair(thr, pc, (uptr)m);
  if (res == 0 || res == errno_EOWNERDEAD)
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_mutex_timedlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_mutex_timedlock, m, abstime);
  int res = REAL(pthread_mutex_timedlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_spin_init, void *m, int pshared) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_init, m, pshared);
  int res = REAL(pthread_spin_init)(m, pshared);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_destroy, m);
  int res = REAL(pthread_spin_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_lock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_lock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_lock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_trylock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_trylock, m);
  int res = REAL(pthread_spin_trylock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_spin_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_spin_unlock, m);
  MutexUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_spin_unlock)(m);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_init, void *m, void *a) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_init, m, a);
  int res = REAL(pthread_rwlock_init)(m, a);
  if (res == 0) {
    MutexCreate(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_destroy, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_destroy, m);
  int res = REAL(pthread_rwlock_destroy)(m);
  if (res == 0) {
    MutexDestroy(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_rdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_rdlock, m);
  MutexPreReadLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_rdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_tryrdlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_tryrdlock, m);
  int res = REAL(pthread_rwlock_tryrdlock)(m);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedrdlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedrdlock, m, abstime);
  int res = REAL(pthread_rwlock_timedrdlock)(m, abstime);
  if (res == 0) {
    MutexPostReadLock(thr, pc, (uptr)m);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_wrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_wrlock, m);
  MutexPreLock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_wrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m);
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_rwlock_trywrlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_trywrlock, m);
  int res = REAL(pthread_rwlock_trywrlock)(m);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_rwlock_timedwrlock, void *m, void *abstime) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_timedwrlock, m, abstime);
  int res = REAL(pthread_rwlock_timedwrlock)(m, abstime);
  if (res == 0) {
    MutexPostLock(thr, pc, (uptr)m, MutexFlagTryLock);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_rwlock_unlock, void *m) {
  SCOPED_TSAN_INTERCEPTOR(pthread_rwlock_unlock, m);
  MutexReadOrWriteUnlock(thr, pc, (uptr)m);
  int res = REAL(pthread_rwlock_unlock)(m);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pthread_barrier_init, void *b, void *a, unsigned count) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_init, b, a, count);
  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_init)(b, a, count);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_destroy, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_destroy, b);
  MemoryWrite(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_destroy)(b);
  return res;
}

TSAN_INTERCEPTOR(int, pthread_barrier_wait, void *b) {
  SCOPED_TSAN_INTERCEPTOR(pthread_barrier_wait, b);
  Release(thr, pc, (uptr)b);
  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
  int res = REAL(pthread_barrier_wait)(b);
  MemoryRead(thr, pc, (uptr)b, kSizeLog1);
  if (res == 0 || res == PTHREAD_BARRIER_SERIAL_THREAD) {
    Acquire(thr, pc, (uptr)b);
  }
  return res;
}
#endif

TSAN_INTERCEPTOR(int, pthread_once, void *o, void (*f)()) {
  SCOPED_INTERCEPTOR_RAW(pthread_once, o, f);
  if (o == 0 || f == 0)
    return errno_EINVAL;
  atomic_uint32_t *a;

  if (SANITIZER_MAC)
    a = static_cast<atomic_uint32_t*>((void *)((char *)o + sizeof(long_t)));
  else if (SANITIZER_NETBSD)
    a = static_cast<atomic_uint32_t*>
          ((void *)((char *)o + __sanitizer::pthread_mutex_t_sz));
  else
    a = static_cast<atomic_uint32_t*>(o);

  u32 v = atomic_load(a, memory_order_acquire);
  if (v == 0 && atomic_compare_exchange_strong(a, &v, 1,
                                               memory_order_relaxed)) {
    (*f)();
    if (!thr->in_ignored_lib)
      Release(thr, pc, (uptr)o);
    atomic_store(a, 2, memory_order_release);
  } else {
    while (v != 2) {
      internal_sched_yield();
      v = atomic_load(a, memory_order_acquire);
    }
    if (!thr->in_ignored_lib)
      Acquire(thr, pc, (uptr)o);
  }
  return 0;
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT TSAN_INTERCEPT(__fxstat)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT
#endif

TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
  SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(fstat)(fd, buf);
#else
  SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat)(0, fd, buf);
#endif
}

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, __fxstat64, int version, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, version, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(version, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT___FXSTAT64 TSAN_INTERCEPT(__fxstat64)
#else
#define TSAN_MAYBE_INTERCEPT___FXSTAT64
#endif

#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(int, fstat64, int fd, void *buf) {
  SCOPED_TSAN_INTERCEPTOR(__fxstat64, 0, fd, buf);
  if (fd > 0)
    FdAccess(thr, pc, fd);
  return REAL(__fxstat64)(0, fd, buf);
}
#define TSAN_MAYBE_INTERCEPT_FSTAT64 TSAN_INTERCEPT(fstat64)
#else
#define TSAN_MAYBE_INTERCEPT_FSTAT64
#endif

TSAN_INTERCEPTOR(int, open, const char *name, int flags, int mode) {
  SCOPED_TSAN_INTERCEPTOR(open, name, flags, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open)(name, flags, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, open64, const char *name, int flags, int mode) {
  SCOPED_TSAN_INTERCEPTOR(open64, name, flags, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(open64)(name, flags, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_OPEN64 TSAN_INTERCEPT(open64)
#else
#define TSAN_MAYBE_INTERCEPT_OPEN64
#endif

TSAN_INTERCEPTOR(int, creat, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, creat64, const char *name, int mode) {
  SCOPED_TSAN_INTERCEPTOR(creat64, name, mode);
  READ_STRING(thr, pc, name, 0);
  int fd = REAL(creat64)(name, mode);
  if (fd >= 0)
    FdFileCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_CREAT64 TSAN_INTERCEPT(creat64)
#else
#define TSAN_MAYBE_INTERCEPT_CREAT64
#endif

TSAN_INTERCEPTOR(int, dup, int oldfd) {
  SCOPED_TSAN_INTERCEPTOR(dup, oldfd);
  int newfd = REAL(dup)(oldfd);
  if (oldfd >= 0 && newfd >= 0 && newfd != oldfd)
    FdDup(thr, pc, oldfd, newfd, true);
  return newfd;
}

TSAN_INTERCEPTOR(int, dup2, int oldfd, int newfd) {
  SCOPED_TSAN_INTERCEPTOR(dup2, oldfd, newfd);
  int newfd2 = REAL(dup2)(oldfd, newfd);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, dup3, int oldfd, int newfd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(dup3, oldfd, newfd, flags);
  int newfd2 = REAL(dup3)(oldfd, newfd, flags);
  if (oldfd >= 0 && newfd2 >= 0 && newfd2 != oldfd)
    FdDup(thr, pc, oldfd, newfd2, false);
  return newfd2;
}
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, eventfd, unsigned initval, int flags) {
  SCOPED_TSAN_INTERCEPTOR(eventfd, initval, flags);
  int fd = REAL(eventfd)(initval, flags);
  if (fd >= 0)
    FdEventCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_EVENTFD TSAN_INTERCEPT(eventfd)
#else
#define TSAN_MAYBE_INTERCEPT_EVENTFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, signalfd, int fd, void *mask, int flags) {
  SCOPED_TSAN_INTERCEPTOR(signalfd, fd, mask, flags);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  fd = REAL(signalfd)(fd, mask, flags);
  if (fd >= 0)
    FdSignalCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_SIGNALFD TSAN_INTERCEPT(signalfd)
#else
#define TSAN_MAYBE_INTERCEPT_SIGNALFD
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init, int fake) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init, fake);
  int fd = REAL(inotify_init)(fake);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT TSAN_INTERCEPT(inotify_init)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT
#endif

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, inotify_init1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(inotify_init1, flags);
  int fd = REAL(inotify_init1)(flags);
  if (fd >= 0)
    FdInotifyCreate(thr, pc, fd);
  return fd;
}
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1 TSAN_INTERCEPT(inotify_init1)
#else
#define TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1
#endif

TSAN_INTERCEPTOR(int, socket, int domain, int type, int protocol) {
  SCOPED_TSAN_INTERCEPTOR(socket, domain, type, protocol);
  int fd = REAL(socket)(domain, type, protocol);
  if (fd >= 0)
    FdSocketCreate(thr, pc, fd);
  return fd;
}

TSAN_INTERCEPTOR(int, socketpair, int domain, int type, int protocol, int *fd) {
  SCOPED_TSAN_INTERCEPTOR(socketpair, domain, type, protocol, fd);
  int res = REAL(socketpair)(domain, type, protocol, fd);
  if (res == 0 && fd[0] >= 0 && fd[1] >= 0)
    FdPipeCreate(thr, pc, fd[0], fd[1]);
  return res;
}

TSAN_INTERCEPTOR(int, connect, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(connect, fd, addr, addrlen);
  FdSocketConnecting(thr, pc, fd);
  int res = REAL(connect)(fd, addr, addrlen);
  if (res == 0 && fd >= 0)
    FdSocketConnect(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, bind, int fd, void *addr, unsigned addrlen) {
  SCOPED_TSAN_INTERCEPTOR(bind, fd, addr, addrlen);
  int res = REAL(bind)(fd, addr, addrlen);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, listen, int fd, int backlog) {
  SCOPED_TSAN_INTERCEPTOR(listen, fd, backlog);
  int res = REAL(listen)(fd, backlog);
  if (fd > 0 && res == 0)
    FdAccess(thr, pc, fd);
  return res;
}

TSAN_INTERCEPTOR(int, close, int fd) {
  SCOPED_TSAN_INTERCEPTOR(close, fd);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  return REAL(close)(fd);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, __close, int fd) {
  SCOPED_TSAN_INTERCEPTOR(__close, fd);
  if (fd >= 0)
    FdClose(thr, pc, fd);
  return REAL(__close)(fd);
}
#define TSAN_MAYBE_INTERCEPT___CLOSE TSAN_INTERCEPT(__close)
#else
#define TSAN_MAYBE_INTERCEPT___CLOSE
#endif

// glibc guts
#if SANITIZER_LINUX && !SANITIZER_ANDROID
TSAN_INTERCEPTOR(void, __res_iclose, void *state, bool free_addr) {
  SCOPED_TSAN_INTERCEPTOR(__res_iclose, state, free_addr);
  int fds[64];
  int cnt = ExtractResolvFDs(state, fds, ARRAY_SIZE(fds));
  for (int i = 0; i < cnt; i++) {
    if (fds[i] > 0)
      FdClose(thr, pc, fds[i]);
  }
  REAL(__res_iclose)(state, free_addr);
}
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE TSAN_INTERCEPT(__res_iclose)
#else
#define TSAN_MAYBE_INTERCEPT___RES_ICLOSE
#endif

TSAN_INTERCEPTOR(int, pipe, int *pipefd) {
  SCOPED_TSAN_INTERCEPTOR(pipe, pipefd);
  int res = REAL(pipe)(pipefd);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}

#if !SANITIZER_MAC
TSAN_INTERCEPTOR(int, pipe2, int *pipefd, int flags) {
  SCOPED_TSAN_INTERCEPTOR(pipe2, pipefd, flags);
  int res = REAL(pipe2)(pipefd, flags);
  if (res == 0 && pipefd[0] >= 0 && pipefd[1] >= 0)
    FdPipeCreate(thr, pc, pipefd[0], pipefd[1]);
  return res;
}
#endif

TSAN_INTERCEPTOR(int, unlink, char *path) {
  SCOPED_TSAN_INTERCEPTOR(unlink, path);
  Release(thr, pc, File2addr(path));
  int res = REAL(unlink)(path);
  return res;
}

TSAN_INTERCEPTOR(void*, tmpfile, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile, fake);
  void *res = REAL(tmpfile)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(void*, tmpfile64, int fake) {
  SCOPED_TSAN_INTERCEPTOR(tmpfile64, fake);
  void *res = REAL(tmpfile64)(fake);
  if (res) {
    int fd = fileno_unlocked(res);
    if (fd >= 0)
      FdFileCreate(thr, pc, fd);
  }
  return res;
}
#define TSAN_MAYBE_INTERCEPT_TMPFILE64 TSAN_INTERCEPT(tmpfile64)
#else
#define TSAN_MAYBE_INTERCEPT_TMPFILE64
#endif

static void FlushStreams() {
  // Flushing all the streams here may freeze the process if a child thread is
  // performing file stream operations at the same time.
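  // Only stdout and stderr are flushed (rather than fflush(nullptr), which
  // would walk and lock every open stream), presumably to keep that window
  // as small as possible.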
  REAL(fflush)(stdout);
  REAL(fflush)(stderr);
}

TSAN_INTERCEPTOR(void, abort, int fake) {
  SCOPED_TSAN_INTERCEPTOR(abort, fake);
  FlushStreams();
  REAL(abort)(fake);
}

TSAN_INTERCEPTOR(int, rmdir, char *path) {
  SCOPED_TSAN_INTERCEPTOR(rmdir, path);
  Release(thr, pc, Dir2addr(path));
  int res = REAL(rmdir)(path);
  return res;
}

TSAN_INTERCEPTOR(int, closedir, void *dirp) {
  SCOPED_TSAN_INTERCEPTOR(closedir, dirp);
  if (dirp) {
    int fd = dirfd(dirp);
    FdClose(thr, pc, fd);
  }
  return REAL(closedir)(dirp);
}

#if SANITIZER_LINUX
TSAN_INTERCEPTOR(int, epoll_create, int size) {
  SCOPED_TSAN_INTERCEPTOR(epoll_create, size);
  int fd = REAL(epoll_create)(size);
  if (fd >= 0)
    FdPollCreate(thr, pc, fd);
  return fd;
}

TSAN_INTERCEPTOR(int, epoll_create1, int flags) {
  SCOPED_TSAN_INTERCEPTOR(epoll_create1, flags);
  int fd = REAL(epoll_create1)(flags);
  if (fd >= 0)
    FdPollCreate(thr, pc, fd);
  return fd;
}

TSAN_INTERCEPTOR(int, epoll_ctl, int epfd, int op, int fd, void *ev) {
  SCOPED_TSAN_INTERCEPTOR(epoll_ctl, epfd, op, fd, ev);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  if (epfd >= 0 && fd >= 0)
    FdAccess(thr, pc, fd);
  if (op == EPOLL_CTL_ADD && epfd >= 0)
    FdRelease(thr, pc, epfd);
  int res = REAL(epoll_ctl)(epfd, op, fd, ev);
  return res;
}

TSAN_INTERCEPTOR(int, epoll_wait, int epfd, void *ev, int cnt, int timeout) {
  SCOPED_TSAN_INTERCEPTOR(epoll_wait, epfd, ev, cnt, timeout);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  int res = BLOCK_REAL(epoll_wait)(epfd, ev, cnt, timeout);
  if (res > 0 && epfd >= 0)
    FdAcquire(thr, pc, epfd);
  return res;
}

TSAN_INTERCEPTOR(int, epoll_pwait, int epfd, void *ev, int cnt, int timeout,
                 void *sigmask) {
  SCOPED_TSAN_INTERCEPTOR(epoll_pwait, epfd, ev, cnt, timeout, sigmask);
  if (epfd >= 0)
    FdAccess(thr, pc, epfd);
  int res = BLOCK_REAL(epoll_pwait)(epfd, ev, cnt, timeout, sigmask);
  if (res > 0 && epfd >= 0)
    FdAcquire(thr, pc, epfd);
  return res;
}

#define TSAN_MAYBE_INTERCEPT_EPOLL \
    TSAN_INTERCEPT(epoll_create); \
    TSAN_INTERCEPT(epoll_create1); \
    TSAN_INTERCEPT(epoll_ctl); \
    TSAN_INTERCEPT(epoll_wait); \
    TSAN_INTERCEPT(epoll_pwait)
#else
#define TSAN_MAYBE_INTERCEPT_EPOLL
#endif

// The following functions are intercepted merely to process pending signals.
// If the program blocks signal X, we must deliver the signal before the
// function returns. Similarly, if the program unblocks a signal (or returns
// from sigsuspend), it's better to deliver the signal straight away.
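// Illustrative sketch (not part of the runtime): the wrappers below add no
// logic of their own. The assumption here is that the scoped object created
// by SCOPED_TSAN_INTERCEPTOR runs ProcessPendingSignals() when it goes out of
// scope (see ScopedInterceptor elsewhere in this file), so a user sequence
// such as
//
//   sigset_t all;
//   sigfillset(&all);
//   pthread_sigmask(SIG_SETMASK, &all, 0);  // signals start queueing
//   ...
//   pthread_sigmask(SIG_UNBLOCK, &all, 0);  // interceptor below runs; queued
//                                           // handlers are delivered on
//                                           // scope exit
//
// gets its handlers invoked promptly instead of at the next arbitrary
// interceptor call.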
TSAN_INTERCEPTOR(int, sigsuspend, const __sanitizer_sigset_t *mask) {
  SCOPED_TSAN_INTERCEPTOR(sigsuspend, mask);
  return REAL(sigsuspend)(mask);
}

TSAN_INTERCEPTOR(int, sigblock, int mask) {
  SCOPED_TSAN_INTERCEPTOR(sigblock, mask);
  return REAL(sigblock)(mask);
}

TSAN_INTERCEPTOR(int, sigsetmask, int mask) {
  SCOPED_TSAN_INTERCEPTOR(sigsetmask, mask);
  return REAL(sigsetmask)(mask);
}

TSAN_INTERCEPTOR(int, pthread_sigmask, int how, const __sanitizer_sigset_t *set,
                 __sanitizer_sigset_t *oldset) {
  SCOPED_TSAN_INTERCEPTOR(pthread_sigmask, how, set, oldset);
  return REAL(pthread_sigmask)(how, set, oldset);
}

namespace __tsan {

static void CallUserSignalHandler(ThreadState *thr, bool sync, bool acquire,
                                  bool sigact, int sig,
                                  __sanitizer_siginfo *info, void *uctx) {
  __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
  if (acquire)
    Acquire(thr, 0, (uptr)&sigactions[sig]);
  // Signals are generally asynchronous, so if we receive a signal when
  // ignores are enabled we should disable ignores. This is critical for sync
  // and interceptors, because otherwise we can miss synchronization and report
  // false races.
  int ignore_reads_and_writes = thr->ignore_reads_and_writes;
  int ignore_interceptors = thr->ignore_interceptors;
  int ignore_sync = thr->ignore_sync;
  if (!ctx->after_multithreaded_fork) {
    thr->ignore_reads_and_writes = 0;
    thr->fast_state.ClearIgnoreBit();
    thr->ignore_interceptors = 0;
    thr->ignore_sync = 0;
  }
  // Ensure that the handler does not spoil errno.
  const int saved_errno = errno;
  errno = 99;
  // This code races with sigaction. Be careful to not read sa_sigaction twice.
  // Also need to remember pc for reporting before the call,
  // because the handler can reset it.
  volatile uptr pc =
      sigact ? (uptr)sigactions[sig].sigaction : (uptr)sigactions[sig].handler;
  if (pc != sig_dfl && pc != sig_ign) {
    if (sigact)
      ((__sanitizer_sigactionhandler_ptr)pc)(sig, info, uctx);
    else
      ((__sanitizer_sighandler_ptr)pc)(sig);
  }
  if (!ctx->after_multithreaded_fork) {
    thr->ignore_reads_and_writes = ignore_reads_and_writes;
    if (ignore_reads_and_writes)
      thr->fast_state.SetIgnoreBit();
    thr->ignore_interceptors = ignore_interceptors;
    thr->ignore_sync = ignore_sync;
  }
  // We do not detect errno spoiling for SIGTERM,
  // because some SIGTERM handlers do spoil errno but reraise SIGTERM,
  // and tsan would report a false positive in such a case.
  // It's difficult to properly detect this situation (reraise),
  // because in the async signal processing case (when the handler is called
  // directly from rtl_generic_sighandler) we have not yet received the
  // reraised signal; and it looks too fragile to intercept all ways to
  // reraise a signal.
  if (flags()->report_bugs && !sync && sig != SIGTERM && errno != 99) {
    VarSizeStackTrace stack;
    // StackTrace::GetNextInstructionPc(pc) is used because a return address is
    // expected, OutputReport() will undo this.
    ObtainCurrentStack(thr, StackTrace::GetNextInstructionPc(pc), &stack);
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeErrnoInSignal);
    if (!IsFiredSuppression(ctx, ReportTypeErrnoInSignal, stack)) {
      rep.AddStack(stack, true);
      OutputReport(thr, rep);
    }
  }
  errno = saved_errno;
}

void ProcessPendingSignals(ThreadState *thr) {
  ThreadSignalContext *sctx = SigCtx(thr);
  if (sctx == 0 ||
      atomic_load(&sctx->have_pending_signals, memory_order_relaxed) == 0)
    return;
  atomic_store(&sctx->have_pending_signals, 0, memory_order_relaxed);
  atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
  internal_sigfillset(&sctx->emptyset);
  int res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->emptyset, &sctx->oldset);
  CHECK_EQ(res, 0);
  for (int sig = 0; sig < kSigCount; sig++) {
    SignalDesc *signal = &sctx->pending_signals[sig];
    if (signal->armed) {
      signal->armed = false;
      CallUserSignalHandler(thr, false, true, signal->sigaction, sig,
                            &signal->siginfo, &signal->ctx);
    }
  }
  res = REAL(pthread_sigmask)(SIG_SETMASK, &sctx->oldset, 0);
  CHECK_EQ(res, 0);
  atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
}

}  // namespace __tsan

static bool is_sync_signal(ThreadSignalContext *sctx, int sig) {
  return sig == SIGSEGV || sig == SIGBUS || sig == SIGILL ||
         sig == SIGABRT || sig == SIGFPE || sig == SIGPIPE || sig == SIGSYS ||
         // If we are sending a signal to ourselves, we must process it now.
         (sctx && sig == sctx->int_signal_send);
}

void ALWAYS_INLINE rtl_generic_sighandler(bool sigact, int sig,
                                          __sanitizer_siginfo *info,
                                          void *ctx) {
  ThreadState *thr = cur_thread();
  ThreadSignalContext *sctx = SigCtx(thr);
  if (sig < 0 || sig >= kSigCount) {
    VPrintf(1, "ThreadSanitizer: ignoring signal %d\n", sig);
    return;
  }
  // Don't mess with synchronous signals.
  const bool sync = is_sync_signal(sctx, sig);
  if (sync ||
      // If we are in a blocking function, we can safely process it now
      // (but check if we are in a recursive interceptor,
      // i.e. pthread_join()->munmap()).
      (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed))) {
    atomic_fetch_add(&thr->in_signal_handler, 1, memory_order_relaxed);
    if (sctx && atomic_load(&sctx->in_blocking_func, memory_order_relaxed)) {
      atomic_store(&sctx->in_blocking_func, 0, memory_order_relaxed);
      CallUserSignalHandler(thr, sync, true, sigact, sig, info, ctx);
      atomic_store(&sctx->in_blocking_func, 1, memory_order_relaxed);
    } else {
      // Be very conservative with when we do acquire in this case.
      // It's unsafe to do acquire in async handlers, because ThreadState
      // can be in an inconsistent state.
      // SIGSYS looks relatively safe -- it's synchronous and may actually
      // need some global state.
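      // Note: when acq is true, CallUserSignalHandler() performs an acquire on
      // &sigactions[sig]; this appears to pair with the ReleaseStore() on the
      // same address in sigaction_impl() below, so the handler observes a
      // consistent sigactions[sig] entry.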
      bool acq = (sig == SIGSYS);
      CallUserSignalHandler(thr, sync, acq, sigact, sig, info, ctx);
    }
    atomic_fetch_add(&thr->in_signal_handler, -1, memory_order_relaxed);
    return;
  }

  if (sctx == 0)
    return;
  SignalDesc *signal = &sctx->pending_signals[sig];
  if (signal->armed == false) {
    signal->armed = true;
    signal->sigaction = sigact;
    if (info)
      internal_memcpy(&signal->siginfo, info, sizeof(*info));
    if (ctx)
      internal_memcpy(&signal->ctx, ctx, sizeof(signal->ctx));
    atomic_store(&sctx->have_pending_signals, 1, memory_order_relaxed);
  }
}

static void rtl_sighandler(int sig) {
  rtl_generic_sighandler(false, sig, 0, 0);
}

static void rtl_sigaction(int sig, __sanitizer_siginfo *info, void *ctx) {
  rtl_generic_sighandler(true, sig, info, ctx);
}

TSAN_INTERCEPTOR(int, raise, int sig) {
  SCOPED_TSAN_INTERCEPTOR(raise, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  sctx->int_signal_send = sig;
  int res = REAL(raise)(sig);
  CHECK_EQ(sctx->int_signal_send, sig);
  sctx->int_signal_send = prev;
  return res;
}

TSAN_INTERCEPTOR(int, kill, int pid, int sig) {
  SCOPED_TSAN_INTERCEPTOR(kill, pid, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  if (pid == (int)internal_getpid()) {
    sctx->int_signal_send = sig;
  }
  int res = REAL(kill)(pid, sig);
  if (pid == (int)internal_getpid()) {
    CHECK_EQ(sctx->int_signal_send, sig);
    sctx->int_signal_send = prev;
  }
  return res;
}

TSAN_INTERCEPTOR(int, pthread_kill, void *tid, int sig) {
  SCOPED_TSAN_INTERCEPTOR(pthread_kill, tid, sig);
  ThreadSignalContext *sctx = SigCtx(thr);
  CHECK_NE(sctx, 0);
  int prev = sctx->int_signal_send;
  if (tid == pthread_self()) {
    sctx->int_signal_send = sig;
  }
  int res = REAL(pthread_kill)(tid, sig);
  if (tid == pthread_self()) {
    CHECK_EQ(sctx->int_signal_send, sig);
    sctx->int_signal_send = prev;
  }
  return res;
}

TSAN_INTERCEPTOR(int, gettimeofday, void *tv, void *tz) {
  SCOPED_TSAN_INTERCEPTOR(gettimeofday, tv, tz);
  // It's intercepted merely to process pending signals.
  return REAL(gettimeofday)(tv, tz);
}

TSAN_INTERCEPTOR(int, getaddrinfo, void *node, void *service,
                 void *hints, void *rv) {
  SCOPED_TSAN_INTERCEPTOR(getaddrinfo, node, service, hints, rv);
  // We miss atomic synchronization in getaddrinfo,
  // and can report a false race between malloc and free
  // inside of getaddrinfo. So ignore memory accesses.
  ThreadIgnoreBegin(thr, pc);
  int res = REAL(getaddrinfo)(node, service, hints, rv);
  ThreadIgnoreEnd(thr, pc);
  return res;
}

TSAN_INTERCEPTOR(int, fork, int fake) {
  if (UNLIKELY(cur_thread()->in_symbolizer))
    return REAL(fork)(fake);
  SCOPED_INTERCEPTOR_RAW(fork, fake);
  ForkBefore(thr, pc);
  int pid;
  {
    // On OS X, REAL(fork) can call intercepted functions (OSSpinLockLock), and
    // we'll assert in CheckNoLocks() unless we ignore interceptors.
    ScopedIgnoreInterceptors ignore;
    pid = REAL(fork)(fake);
  }
  if (pid == 0) {
    // child
    ForkChildAfter(thr, pc);
    FdOnFork(thr, pc);
  } else if (pid > 0) {
    // parent
    ForkParentAfter(thr, pc);
  } else {
    // error
    ForkParentAfter(thr, pc);
  }
  return pid;
}

TSAN_INTERCEPTOR(int, vfork, int fake) {
  // Some programs (e.g. openjdk) call close for all file descriptors
  // in the child process. Under tsan it leads to false positives, because
  // the address space is shared, so the parent process also thinks that
  // the descriptors are closed (while they are actually not).
  // This leads to false positives due to missed synchronization.
  // Strictly speaking, this is undefined behavior, because the vfork child is
  // not allowed to call any functions other than exec/exit. But this is what
  // openjdk does, so we want to handle it.
  // We could disable interceptors in the child process. But it's not possible
  // to simply intercept and wrap vfork, because the vfork child is not allowed
  // to return from the function that calls vfork, and that's exactly what
  // we would do. So this would require some assembly trickery as well.
  // Instead we simply turn vfork into fork.
  return WRAP(fork)(fake);
}

#if !SANITIZER_MAC && !SANITIZER_ANDROID
typedef int (*dl_iterate_phdr_cb_t)(__sanitizer_dl_phdr_info *info, SIZE_T size,
                                    void *data);
struct dl_iterate_phdr_data {
  ThreadState *thr;
  uptr pc;
  dl_iterate_phdr_cb_t cb;
  void *data;
};

static bool IsAppNotRodata(uptr addr) {
  return IsAppMem(addr) && *(u64*)MemToShadow(addr) != kShadowRodata;
}

static int dl_iterate_phdr_cb(__sanitizer_dl_phdr_info *info, SIZE_T size,
                              void *data) {
  dl_iterate_phdr_data *cbdata = (dl_iterate_phdr_data *)data;
  // dlopen/dlclose allocate/free dynamic-linker-internal memory, which is
  // later accessible in the dl_iterate_phdr callback. But we don't see
  // synchronization inside of the dynamic linker, so we "unpoison" it here
  // in order to not produce false reports. Ignoring malloc/free in
  // dlopen/dlclose is not enough because some libc functions call
  // __libc_dlopen.
  if (info && IsAppNotRodata((uptr)info->dlpi_name))
    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
                     internal_strlen(info->dlpi_name));
  int res = cbdata->cb(info, size, cbdata->data);
  // Perform the check one more time in case info->dlpi_name was overwritten
  // by the user callback.
  if (info && IsAppNotRodata((uptr)info->dlpi_name))
    MemoryResetRange(cbdata->thr, cbdata->pc, (uptr)info->dlpi_name,
                     internal_strlen(info->dlpi_name));
  return res;
}

TSAN_INTERCEPTOR(int, dl_iterate_phdr, dl_iterate_phdr_cb_t cb, void *data) {
  SCOPED_TSAN_INTERCEPTOR(dl_iterate_phdr, cb, data);
  dl_iterate_phdr_data cbdata;
  cbdata.thr = thr;
  cbdata.pc = pc;
  cbdata.cb = cb;
  cbdata.data = data;
  int res = REAL(dl_iterate_phdr)(dl_iterate_phdr_cb, &cbdata);
  return res;
}
#endif

static int OnExit(ThreadState *thr) {
  int status = Finalize(thr);
  FlushStreams();
  return status;
}

struct TsanInterceptorContext {
  ThreadState *thr;
  const uptr caller_pc;
  const uptr pc;
};

#if !SANITIZER_MAC
static void HandleRecvmsg(ThreadState *thr, uptr pc,
                          __sanitizer_msghdr *msg) {
  int fds[64];
  int cnt = ExtractRecvmsgFDs(msg, fds, ARRAY_SIZE(fds));
  for (int i = 0; i < cnt; i++)
    FdEventCreate(thr, pc, fds[i]);
}
#endif

#include "sanitizer_common/sanitizer_platform_interceptors.h"
// Causes interceptor recursion (getaddrinfo() and fopen()).
#undef SANITIZER_INTERCEPT_GETADDRINFO
// These interceptors do not seem to be strictly necessary for tsan.
// But we see cases where the interceptors consume 70% of execution time.
// Memory blocks passed to fgetgrent_r are "written to" by tsan several times.
// First, there is some recursion (getgrnam_r calls fgetgrent_r), and each
// function "writes to" the buffer. Then, the same memory is "written to"
// twice, first as buf and then as pwbufp (both of them refer to the same
// addresses).
#undef SANITIZER_INTERCEPT_GETPWENT
#undef SANITIZER_INTERCEPT_GETPWENT_R
#undef SANITIZER_INTERCEPT_FGETPWENT
#undef SANITIZER_INTERCEPT_GETPWNAM_AND_FRIENDS
#undef SANITIZER_INTERCEPT_GETPWNAM_R_AND_FRIENDS
// We define our own.
#if SANITIZER_INTERCEPT_TLS_GET_ADDR
#define NEED_TLS_GET_ADDR
#endif
#undef SANITIZER_INTERCEPT_TLS_GET_ADDR

#define COMMON_INTERCEPT_FUNCTION(name) INTERCEPT_FUNCTION(name)
#define COMMON_INTERCEPT_FUNCTION_VER(name, ver) \
  INTERCEPT_FUNCTION_VER(name, ver)

#define COMMON_INTERCEPTOR_WRITE_RANGE(ctx, ptr, size)                    \
  MemoryAccessRange(((TsanInterceptorContext *)ctx)->thr,                 \
                    ((TsanInterceptorContext *)ctx)->pc, (uptr)ptr, size, \
                    true)

#define COMMON_INTERCEPTOR_READ_RANGE(ctx, ptr, size)                       \
  MemoryAccessRange(((TsanInterceptorContext *) ctx)->thr,                  \
                    ((TsanInterceptorContext *) ctx)->pc, (uptr) ptr, size, \
                    false)

#define COMMON_INTERCEPTOR_ENTER(ctx, func, ...)      \
  SCOPED_TSAN_INTERCEPTOR(func, __VA_ARGS__);         \
  TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
  ctx = (void *)&_ctx;                                \
  (void) ctx;

#define COMMON_INTERCEPTOR_ENTER_NOIGNORE(ctx, func, ...) \
  SCOPED_INTERCEPTOR_RAW(func, __VA_ARGS__);          \
  TsanInterceptorContext _ctx = {thr, caller_pc, pc}; \
  ctx = (void *)&_ctx;                                \
  (void) ctx;

#define COMMON_INTERCEPTOR_FILE_OPEN(ctx, file, path) \
  if (path)                                           \
    Acquire(thr, pc, File2addr(path));                \
  if (file) {                                         \
    int fd = fileno_unlocked(file);                   \
    if (fd >= 0) FdFileCreate(thr, pc, fd);           \
  }

#define COMMON_INTERCEPTOR_FILE_CLOSE(ctx, file) \
  if (file) {                                    \
    int fd = fileno_unlocked(file);              \
    if (fd >= 0) FdClose(thr, pc, fd);           \
  }

#define COMMON_INTERCEPTOR_LIBRARY_LOADED(filename, handle) \
  libignore()->OnLibraryLoaded(filename)

#define COMMON_INTERCEPTOR_LIBRARY_UNLOADED() \
  libignore()->OnLibraryUnloaded()

#define COMMON_INTERCEPTOR_ACQUIRE(ctx, u) \
  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, u)

#define COMMON_INTERCEPTOR_RELEASE(ctx, u) \
  Release(((TsanInterceptorContext *) ctx)->thr, pc, u)

#define COMMON_INTERCEPTOR_DIR_ACQUIRE(ctx, path) \
  Acquire(((TsanInterceptorContext *) ctx)->thr, pc, Dir2addr(path))

#define COMMON_INTERCEPTOR_FD_ACQUIRE(ctx, fd) \
  FdAcquire(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_RELEASE(ctx, fd) \
  FdRelease(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_ACCESS(ctx, fd) \
  FdAccess(((TsanInterceptorContext *) ctx)->thr, pc, fd)

#define COMMON_INTERCEPTOR_FD_SOCKET_ACCEPT(ctx, fd, newfd) \
  FdSocketAccept(((TsanInterceptorContext *) ctx)->thr, pc, fd, newfd)

#define COMMON_INTERCEPTOR_SET_THREAD_NAME(ctx, name) \
  ThreadSetName(((TsanInterceptorContext *) ctx)->thr, name)

#define COMMON_INTERCEPTOR_SET_PTHREAD_NAME(ctx, thread, name) \
  __tsan::ctx->thread_registry->SetThreadNameByUserId(thread, name)

#define COMMON_INTERCEPTOR_BLOCK_REAL(name) BLOCK_REAL(name)

#define COMMON_INTERCEPTOR_ON_EXIT(ctx) \
  OnExit(((TsanInterceptorContext *) ctx)->thr)

#define COMMON_INTERCEPTOR_MUTEX_PRE_LOCK(ctx, m)      \
  MutexPreLock(((TsanInterceptorContext *)ctx)->thr,   \
               ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_POST_LOCK(ctx, m)     \
  MutexPostLock(((TsanInterceptorContext *)ctx)->thr,  \
                ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_UNLOCK(ctx, m)        \
  MutexUnlock(((TsanInterceptorContext *)ctx)->thr,    \
              ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_REPAIR(ctx, m)        \
  MutexRepair(((TsanInterceptorContext *)ctx)->thr,    \
              ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MUTEX_INVALID(ctx, m)            \
  MutexInvalidAccess(((TsanInterceptorContext *)ctx)->thr,  \
                     ((TsanInterceptorContext *)ctx)->pc, (uptr)m)

#define COMMON_INTERCEPTOR_MMAP_IMPL(ctx, mmap, addr, sz, prot, flags, fd,   \
                                     off)                                    \
  do {                                                                       \
    return mmap_interceptor(thr, pc, REAL(mmap), addr, sz, prot, flags, fd,  \
                            off);                                            \
  } while (false)

#if !SANITIZER_MAC
#define COMMON_INTERCEPTOR_HANDLE_RECVMSG(ctx, msg)     \
  HandleRecvmsg(((TsanInterceptorContext *)ctx)->thr,   \
                ((TsanInterceptorContext *)ctx)->pc, msg)
#endif

#define COMMON_INTERCEPTOR_GET_TLS_RANGE(begin, end) \
  if (TsanThread *t = GetCurrentThread()) {          \
    *begin = t->tls_begin();                         \
    *end = t->tls_end();                             \
  } else {                                           \
    *begin = *end = 0;                               \
  }

#define COMMON_INTERCEPTOR_USER_CALLBACK_START() \
  SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_START()

#define COMMON_INTERCEPTOR_USER_CALLBACK_END() \
  SCOPED_TSAN_INTERCEPTOR_USER_CALLBACK_END()

#include "sanitizer_common/sanitizer_common_interceptors.inc"

static int sigaction_impl(int sig, const __sanitizer_sigaction *act,
                          __sanitizer_sigaction *old);
static __sanitizer_sighandler_ptr signal_impl(int sig,
                                              __sanitizer_sighandler_ptr h);

#define SIGNAL_INTERCEPTOR_SIGACTION_IMPL(signo, act, oldact) \
  { return sigaction_impl(signo, act, oldact); }

#define SIGNAL_INTERCEPTOR_SIGNAL_IMPL(func, signo, handler) \
  { return (uptr)signal_impl(signo, (__sanitizer_sighandler_ptr)handler); }

#include "sanitizer_common/sanitizer_signal_interceptors.inc"

int sigaction_impl(int sig, const __sanitizer_sigaction *act,
                   __sanitizer_sigaction *old) {
  // Note: if we call REAL(sigaction) directly for any reason without proxying
  // the signal handler through rtl_sigaction, very bad things will happen.
  // The handler will run synchronously and corrupt tsan per-thread state.
  SCOPED_INTERCEPTOR_RAW(sigaction, sig, act, old);
  __sanitizer_sigaction *sigactions = interceptor_ctx()->sigactions;
  __sanitizer_sigaction old_stored;
  if (old) internal_memcpy(&old_stored, &sigactions[sig], sizeof(old_stored));
  __sanitizer_sigaction newact;
  if (act) {
    // Copy act into sigactions[sig].
    // Can't use struct copy, because the compiler can emit a call to memcpy.
    // Can't use internal_memcpy, because it copies byte-by-byte,
    // and the signal handler reads the handler concurrently. So it can read
    // some bytes from the old value and some bytes from the new value.
    // Use volatile to prevent insertion of memcpy.
    sigactions[sig].handler =
        *(volatile __sanitizer_sighandler_ptr const *)&act->handler;
    sigactions[sig].sa_flags = *(volatile int const *)&act->sa_flags;
    internal_memcpy(&sigactions[sig].sa_mask, &act->sa_mask,
                    sizeof(sigactions[sig].sa_mask));
#if !SANITIZER_FREEBSD && !SANITIZER_MAC && !SANITIZER_NETBSD
    sigactions[sig].sa_restorer = act->sa_restorer;
#endif
    internal_memcpy(&newact, act, sizeof(newact));
    internal_sigfillset(&newact.sa_mask);
    if ((uptr)act->handler != sig_ign && (uptr)act->handler != sig_dfl) {
      if (newact.sa_flags & SA_SIGINFO)
        newact.sigaction = rtl_sigaction;
      else
        newact.handler = rtl_sighandler;
    }
    ReleaseStore(thr, pc, (uptr)&sigactions[sig]);
    act = &newact;
  }
  int res = REAL(sigaction)(sig, act, old);
  if (res == 0 && old) {
    uptr cb = (uptr)old->sigaction;
    if (cb == (uptr)rtl_sigaction || cb == (uptr)rtl_sighandler) {
      internal_memcpy(old, &old_stored, sizeof(*old));
    }
  }
  return res;
}

static __sanitizer_sighandler_ptr signal_impl(int sig,
                                              __sanitizer_sighandler_ptr h) {
  __sanitizer_sigaction act;
  act.handler = h;
  internal_memset(&act.sa_mask, -1, sizeof(act.sa_mask));
  act.sa_flags = 0;
  __sanitizer_sigaction old;
  int res = sigaction_symname(sig, &act, &old);
  if (res) return (__sanitizer_sighandler_ptr)sig_err;
  return old.handler;
}

#define TSAN_SYSCALL() \
  ThreadState *thr = cur_thread(); \
  if (thr->ignore_interceptors) \
    return; \
  ScopedSyscall scoped_syscall(thr) \
/**/

struct ScopedSyscall {
  ThreadState *thr;

  explicit ScopedSyscall(ThreadState *thr)
      : thr(thr) {
    Initialize(thr);
  }

  ~ScopedSyscall() {
    ProcessPendingSignals(thr);
  }
};

#if !SANITIZER_FREEBSD && !SANITIZER_MAC
static void syscall_access_range(uptr pc, uptr p, uptr s, bool write) {
  TSAN_SYSCALL();
  MemoryAccessRange(thr, pc, p, s, write);
}

static void syscall_acquire(uptr pc, uptr addr) {
  TSAN_SYSCALL();
  Acquire(thr, pc, addr);
  DPrintf("syscall_acquire(%p)\n", addr);
}

static void syscall_release(uptr pc, uptr addr) {
  TSAN_SYSCALL();
  DPrintf("syscall_release(%p)\n", addr);
  Release(thr, pc, addr);
}

static void syscall_fd_close(uptr pc, int fd) {
  TSAN_SYSCALL();
  FdClose(thr, pc, fd);
}

static USED void syscall_fd_acquire(uptr pc, int fd) {
  TSAN_SYSCALL();
  FdAcquire(thr, pc, fd);
  DPrintf("syscall_fd_acquire(%p)\n", fd);
}

static USED void syscall_fd_release(uptr pc, int fd) {
  TSAN_SYSCALL();
  DPrintf("syscall_fd_release(%p)\n", fd);
  FdRelease(thr, pc, fd);
}

static void syscall_pre_fork(uptr pc) {
  TSAN_SYSCALL();
  ForkBefore(thr, pc);
}

static void syscall_post_fork(uptr pc, int pid) {
  TSAN_SYSCALL();
  if (pid == 0) {
    // child
    ForkChildAfter(thr, pc);
    FdOnFork(thr, pc);
  } else if (pid > 0) {
    // parent
    ForkParentAfter(thr, pc);
  } else {
    // error
    ForkParentAfter(thr, pc);
  }
}
#endif

#define COMMON_SYSCALL_PRE_READ_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), false)

#define COMMON_SYSCALL_PRE_WRITE_RANGE(p, s) \
  syscall_access_range(GET_CALLER_PC(), (uptr)(p), (uptr)(s), true)

#define COMMON_SYSCALL_POST_READ_RANGE(p, s) \
  do {                                       \
    (void)(p);                               \
    (void)(s);                               \
  } while (false)

#define COMMON_SYSCALL_POST_WRITE_RANGE(p, s) \
  do {                                        \
    (void)(p);                                \
    (void)(s);                                \
  } while (false)

#define COMMON_SYSCALL_ACQUIRE(addr) \
  syscall_acquire(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_RELEASE(addr) \
  syscall_release(GET_CALLER_PC(), (uptr)(addr))

#define COMMON_SYSCALL_FD_CLOSE(fd) syscall_fd_close(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_ACQUIRE(fd) syscall_fd_acquire(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_FD_RELEASE(fd) syscall_fd_release(GET_CALLER_PC(), fd)

#define COMMON_SYSCALL_PRE_FORK() \
  syscall_pre_fork(GET_CALLER_PC())

#define COMMON_SYSCALL_POST_FORK(res) \
  syscall_post_fork(GET_CALLER_PC(), res)

#include "sanitizer_common/sanitizer_common_syscalls.inc"
#include "sanitizer_common/sanitizer_syscalls_netbsd.inc"

#ifdef NEED_TLS_GET_ADDR
// Define our own interceptor instead of sanitizer_common's for three reasons:
// 1. It must not process pending signals.
//    Signal handlers may contain MOVDQA instruction (see below).
// 2. It must be as simple as possible to not contain MOVDQA.
// 3. The sanitizer_common version uses COMMON_INTERCEPTOR_INITIALIZE_RANGE
//    which is empty for tsan (meant only for msan).
// Note: __tls_get_addr can be called with a mis-aligned stack due to:
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58066
// So the interceptor must work with a mis-aligned stack; in particular, it
// must not execute MOVDQA with stack addresses.
TSAN_INTERCEPTOR(void *, __tls_get_addr, void *arg) {
  void *res = REAL(__tls_get_addr)(arg);
  ThreadState *thr = cur_thread();
  if (!thr)
    return res;
  DTLS::DTV *dtv = DTLS_on_tls_get_addr(arg, res, thr->tls_addr,
                                        thr->tls_addr + thr->tls_size);
  if (!dtv)
    return res;
  // New DTLS block has been allocated.
  MemoryResetRange(thr, 0, dtv->beg, dtv->size);
  return res;
}
#endif

#if SANITIZER_NETBSD
TSAN_INTERCEPTOR(void, _lwp_exit) {
  SCOPED_TSAN_INTERCEPTOR(_lwp_exit);
  DestroyThreadState();
  REAL(_lwp_exit)();
}
#define TSAN_MAYBE_INTERCEPT__LWP_EXIT TSAN_INTERCEPT(_lwp_exit)
#else
#define TSAN_MAYBE_INTERCEPT__LWP_EXIT
#endif

#if SANITIZER_FREEBSD
TSAN_INTERCEPTOR(void, thr_exit, tid_t *state) {
  SCOPED_TSAN_INTERCEPTOR(thr_exit, state);
  DestroyThreadState();
  REAL(thr_exit)(state);
}
#define TSAN_MAYBE_INTERCEPT_THR_EXIT TSAN_INTERCEPT(thr_exit)
#else
#define TSAN_MAYBE_INTERCEPT_THR_EXIT
#endif

TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_init, void *c, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_signal, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_broadcast, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_wait, void *c, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, cond_destroy, void *c)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, mutex_trylock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_init, void *m, void *a)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_destroy, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_rdlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_tryrdlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_wrlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_trywrlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS(int, rwlock_unlock, void *m)
TSAN_INTERCEPTOR_NETBSD_ALIAS_THR(int, once, void *o, void (*f)())
TSAN_INTERCEPTOR_NETBSD_ALIAS_THR2(int, sigsetmask, sigmask, int a, void *b,
                                   void *c)

namespace __tsan {

static void finalize(void *arg) {
  ThreadState *thr = cur_thread();
  int status = Finalize(thr);
  // Make sure the output is not lost.
  FlushStreams();
  if (status)
    Die();
}

#if !SANITIZER_MAC && !SANITIZER_ANDROID
static void unreachable() {
  Report("FATAL: ThreadSanitizer: unreachable called\n");
  Die();
}
#endif

void InitializeInterceptors() {
#if !SANITIZER_MAC
  // We need to set these up early, because functions like dlsym() can call
  // them.
  REAL(memset) = internal_memset;
  REAL(memcpy) = internal_memcpy;
#endif

  // Instruct libc malloc to consume less memory.
#if SANITIZER_LINUX
  mallopt(1, 0);  // M_MXFAST
  mallopt(-3, 32*1024);  // M_MMAP_THRESHOLD
#endif

  new(interceptor_ctx()) InterceptorContext();

  InitializeCommonInterceptors();
  InitializeSignalInterceptors();

#if !SANITIZER_MAC
  // We cannot use TSAN_INTERCEPT to get the setjmp address, because it does
  // &setjmp and setjmp is not present in some versions of libc.
  using __interception::GetRealFunctionAddress;
  GetRealFunctionAddress(TSAN_STRING_SETJMP,
                         (uptr*)&REAL(setjmp_symname), 0, 0);
  GetRealFunctionAddress("_setjmp", (uptr*)&REAL(_setjmp), 0, 0);
  GetRealFunctionAddress(TSAN_STRING_SIGSETJMP,
                         (uptr*)&REAL(sigsetjmp_symname), 0, 0);
#if !SANITIZER_NETBSD
  GetRealFunctionAddress("__sigsetjmp", (uptr*)&REAL(__sigsetjmp), 0, 0);
#endif
#endif

  TSAN_INTERCEPT(longjmp_symname);
  TSAN_INTERCEPT(siglongjmp_symname);
#if SANITIZER_NETBSD
  TSAN_INTERCEPT(_longjmp);
#endif

  TSAN_INTERCEPT(malloc);
  TSAN_INTERCEPT(__libc_memalign);
  TSAN_INTERCEPT(calloc);
  TSAN_INTERCEPT(realloc);
  TSAN_INTERCEPT(free);
  TSAN_INTERCEPT(cfree);
  TSAN_INTERCEPT(munmap);
  TSAN_MAYBE_INTERCEPT_MEMALIGN;
  TSAN_INTERCEPT(valloc);
  TSAN_MAYBE_INTERCEPT_PVALLOC;
  TSAN_INTERCEPT(posix_memalign);

  TSAN_INTERCEPT(strcpy);  // NOLINT
  TSAN_INTERCEPT(strncpy);
  TSAN_INTERCEPT(strdup);

  TSAN_INTERCEPT(pthread_create);
  TSAN_INTERCEPT(pthread_join);
  TSAN_INTERCEPT(pthread_detach);
#if SANITIZER_LINUX
  TSAN_INTERCEPT(pthread_tryjoin_np);
  TSAN_INTERCEPT(pthread_timedjoin_np);
#endif

  TSAN_INTERCEPT_VER(pthread_cond_init, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_signal, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_broadcast, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_wait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_timedwait, PTHREAD_ABI_BASE);
  TSAN_INTERCEPT_VER(pthread_cond_destroy, PTHREAD_ABI_BASE);

  TSAN_INTERCEPT(pthread_mutex_init);
  TSAN_INTERCEPT(pthread_mutex_destroy);
  TSAN_INTERCEPT(pthread_mutex_trylock);
  TSAN_INTERCEPT(pthread_mutex_timedlock);

  TSAN_INTERCEPT(pthread_spin_init);
  TSAN_INTERCEPT(pthread_spin_destroy);
  TSAN_INTERCEPT(pthread_spin_lock);
  TSAN_INTERCEPT(pthread_spin_trylock);
  TSAN_INTERCEPT(pthread_spin_unlock);

  TSAN_INTERCEPT(pthread_rwlock_init);
  TSAN_INTERCEPT(pthread_rwlock_destroy);
  TSAN_INTERCEPT(pthread_rwlock_rdlock);
  TSAN_INTERCEPT(pthread_rwlock_tryrdlock);
  TSAN_INTERCEPT(pthread_rwlock_timedrdlock);
  TSAN_INTERCEPT(pthread_rwlock_wrlock);
  TSAN_INTERCEPT(pthread_rwlock_trywrlock);
  TSAN_INTERCEPT(pthread_rwlock_timedwrlock);
  TSAN_INTERCEPT(pthread_rwlock_unlock);

  TSAN_INTERCEPT(pthread_barrier_init);
  TSAN_INTERCEPT(pthread_barrier_destroy);
  TSAN_INTERCEPT(pthread_barrier_wait);

  TSAN_INTERCEPT(pthread_once);

  TSAN_INTERCEPT(fstat);
  TSAN_MAYBE_INTERCEPT___FXSTAT;
  TSAN_MAYBE_INTERCEPT_FSTAT64;
  TSAN_MAYBE_INTERCEPT___FXSTAT64;
  TSAN_INTERCEPT(open);
  TSAN_MAYBE_INTERCEPT_OPEN64;
  TSAN_INTERCEPT(creat);
  TSAN_MAYBE_INTERCEPT_CREAT64;
  TSAN_INTERCEPT(dup);
  TSAN_INTERCEPT(dup2);
  TSAN_INTERCEPT(dup3);
  TSAN_MAYBE_INTERCEPT_EVENTFD;
  TSAN_MAYBE_INTERCEPT_SIGNALFD;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT;
  TSAN_MAYBE_INTERCEPT_INOTIFY_INIT1;
  TSAN_INTERCEPT(socket);
  TSAN_INTERCEPT(socketpair);
  TSAN_INTERCEPT(connect);
  TSAN_INTERCEPT(bind);
  TSAN_INTERCEPT(listen);
  TSAN_MAYBE_INTERCEPT_EPOLL;
  TSAN_INTERCEPT(close);
  TSAN_MAYBE_INTERCEPT___CLOSE;
  TSAN_MAYBE_INTERCEPT___RES_ICLOSE;
  TSAN_INTERCEPT(pipe);
  TSAN_INTERCEPT(pipe2);

  TSAN_INTERCEPT(unlink);
  TSAN_INTERCEPT(tmpfile);
  TSAN_MAYBE_INTERCEPT_TMPFILE64;
  TSAN_INTERCEPT(abort);
  TSAN_INTERCEPT(rmdir);
  TSAN_INTERCEPT(closedir);

  TSAN_INTERCEPT(sigsuspend);
  TSAN_INTERCEPT(sigblock);
  TSAN_INTERCEPT(sigsetmask);
  TSAN_INTERCEPT(pthread_sigmask);
  TSAN_INTERCEPT(raise);
  TSAN_INTERCEPT(kill);
  TSAN_INTERCEPT(pthread_kill);
  TSAN_INTERCEPT(sleep);
  TSAN_INTERCEPT(usleep);
  TSAN_INTERCEPT(nanosleep);
  TSAN_INTERCEPT(pause);
  TSAN_INTERCEPT(gettimeofday);
  TSAN_INTERCEPT(getaddrinfo);

  TSAN_INTERCEPT(fork);
  TSAN_INTERCEPT(vfork);
#if !SANITIZER_ANDROID
  TSAN_INTERCEPT(dl_iterate_phdr);
#endif
  TSAN_MAYBE_INTERCEPT_ON_EXIT;
  TSAN_INTERCEPT(__cxa_atexit);
  TSAN_INTERCEPT(_exit);

#ifdef NEED_TLS_GET_ADDR
  TSAN_INTERCEPT(__tls_get_addr);
#endif

  TSAN_MAYBE_INTERCEPT__LWP_EXIT;
  TSAN_MAYBE_INTERCEPT_THR_EXIT;

#if !SANITIZER_MAC && !SANITIZER_ANDROID
  // Need to set this up, because interceptors check that the function is
  // resolved. But atexit is emitted directly into the module, so it can't be
  // resolved.
  REAL(atexit) = (int(*)(void(*)()))unreachable;
#endif

  if (REAL(__cxa_atexit)(&finalize, 0, 0)) {
    Printf("ThreadSanitizer: failed to setup atexit callback\n");
    Die();
  }

#if !SANITIZER_MAC && !SANITIZER_NETBSD && !SANITIZER_FREEBSD
  if (pthread_key_create(&interceptor_ctx()->finalize_key, &thread_finalize)) {
    Printf("ThreadSanitizer: failed to create thread key\n");
    Die();
  }
#endif

  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_signal);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_broadcast);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_wait);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(cond_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(mutex_trylock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_init);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_destroy);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_rdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_tryrdlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_wrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_trywrlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS(rwlock_unlock);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(once);
  TSAN_MAYBE_INTERCEPT_NETBSD_ALIAS_THR(sigsetmask);

  FdInit();
}

}  // namespace __tsan

// Invisible barrier for tests.
// There were several unsuccessful iterations for this functionality:
// 1. Initially it was implemented in user code using
//    REAL(pthread_barrier_wait). But pthread_barrier_wait is not supported on
//    MacOS. Futexes are linux-specific for this matter.
// 2. Then we switched to atomics+usleep(10). But usleep produced parasitic
//    "as-if synchronized via sleep" messages in reports which failed some
//    output tests.
// 3. Then we switched to atomics+sched_yield. But this produced tons of
//    tsan-visible events, which led to "failed to restore stack trace"
//    failures.
// Note that the no_sanitize_thread attribute does not turn off atomic
// interception, so attaching it to a function defined in user code does not
// help.
// That's why we now have what we have.
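// Worked example of the encoding used below (for illustration only): with
// count = 4, __tsan_testonly_barrier_init() stores 0x004. Each call to
// __tsan_testonly_barrier_wait() adds 1 << 8, so the first four callers
// observe old_epoch = 0 (0..3 entered / 4). Once the word reaches 0x404,
// every spinning caller computes cur_epoch = 4 / 4 = 1 != 0 and returns.
// The same word then serves the next round with epoch 2, and so on.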
2842 extern "C" SANITIZER_INTERFACE_ATTRIBUTE 2843 void __tsan_testonly_barrier_init(u64 *barrier, u32 count) { 2844 if (count >= (1 << 8)) { 2845 Printf("barrier_init: count is too large (%d)\n", count); 2846 Die(); 2847 } 2848 // 8 lsb is thread count, the remaining are count of entered threads. 2849 *barrier = count; 2850 } 2851 2852 extern "C" SANITIZER_INTERFACE_ATTRIBUTE 2853 void __tsan_testonly_barrier_wait(u64 *barrier) { 2854 unsigned old = __atomic_fetch_add(barrier, 1 << 8, __ATOMIC_RELAXED); 2855 unsigned old_epoch = (old >> 8) / (old & 0xff); 2856 for (;;) { 2857 unsigned cur = __atomic_load_n(barrier, __ATOMIC_RELAXED); 2858 unsigned cur_epoch = (cur >> 8) / (cur & 0xff); 2859 if (cur_epoch != old_epoch) 2860 return; 2861 internal_sched_yield(); 2862 } 2863 } 2864