//===-- asan_thread.cc ----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// Thread-related code.
//===----------------------------------------------------------------------===//
#include "asan_allocator.h"
#include "asan_interceptors.h"
#include "asan_poisoning.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "asan_mapping.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"
#include "lsan/lsan_common.h"

namespace __asan {

// AsanThreadContext implementation.

void AsanThreadContext::OnCreated(void *arg) {
  CreateThreadContextArgs *args = static_cast<CreateThreadContextArgs*>(arg);
  if (args->stack)
    stack_id = StackDepotPut(*args->stack);
  thread = args->thread;
  thread->set_context(this);
}

void AsanThreadContext::OnFinished() {
  // Drop the link to the AsanThread object.
  thread = nullptr;
}

// MIPS requires aligned addresses.
static ALIGNED(16) char thread_registry_placeholder[sizeof(ThreadRegistry)];
static ThreadRegistry *asan_thread_registry;

static BlockingMutex mu_for_thread_context(LINKER_INITIALIZED);
static LowLevelAllocator allocator_for_thread_context;

static ThreadContextBase *GetAsanThreadContext(u32 tid) {
  BlockingMutexLock lock(&mu_for_thread_context);
  return new(allocator_for_thread_context) AsanThreadContext(tid);
}

ThreadRegistry &asanThreadRegistry() {
  static bool initialized;
  // Don't worry about thread safety - this is supposed to be called when
  // there is only a single thread.
  if (!initialized) {
    // Never reuse ASan threads: we store a pointer to the AsanThreadContext
    // in TSD and can't reliably tell when no more TSD destructors will
    // be called. It would be wrong to reuse an AsanThreadContext for another
    // thread before all TSD destructors have been called for it.
    asan_thread_registry = new(thread_registry_placeholder) ThreadRegistry(
        GetAsanThreadContext, kMaxNumberOfThreads, kMaxNumberOfThreads);
    initialized = true;
  }
  return *asan_thread_registry;
}

AsanThreadContext *GetThreadContextByTidLocked(u32 tid) {
  return static_cast<AsanThreadContext *>(
      asanThreadRegistry().GetThreadLocked(tid));
}

// AsanThread implementation.

AsanThread *AsanThread::Create(thread_callback_t start_routine, void *arg,
                               u32 parent_tid, StackTrace *stack,
                               bool detached) {
  uptr PageSize = GetPageSizeCached();
  uptr size = RoundUpTo(sizeof(AsanThread), PageSize);
  AsanThread *thread = (AsanThread*)MmapOrDie(size, __func__);
  thread->start_routine_ = start_routine;
  thread->arg_ = arg;
  AsanThreadContext::CreateThreadContextArgs args = {thread, stack};
  asanThreadRegistry().CreateThread(*reinterpret_cast<uptr *>(thread), detached,
                                    parent_tid, &args);

  return thread;
}

void AsanThread::TSDDtor(void *tsd) {
  AsanThreadContext *context = (AsanThreadContext*)tsd;
  VReport(1, "T%d TSDDtor\n", context->tid);
  if (context->thread)
    context->thread->Destroy();
}

void AsanThread::Destroy() {
  int tid = this->tid();
  VReport(1, "T%d exited\n", tid);

  malloc_storage().CommitBack();
  if (common_flags()->use_sigaltstack) UnsetAlternateSignalStack();
  asanThreadRegistry().FinishThread(tid);
  FlushToDeadThreadStats(&stats_);
  // We also clear the shadow on thread destruction because
  // some code may still be executing in later TSD destructors
  // and we don't want it to have any poisoned stack.
  ClearShadowForThreadStackAndTLS();
  DeleteFakeStack(tid);
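  // The AsanThread object itself was mmapped in Create(); release it here.
  // `this` must not be touched after UnmapOrDie(); DTLS_Destroy() below only
  // operates on thread-local state.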
  uptr size = RoundUpTo(sizeof(AsanThread), GetPageSizeCached());
  UnmapOrDie(this, size);
  DTLS_Destroy();
}

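// Fiber support. StartSwitchFiber records the bounds of the stack we are
// about to run on and publishes them via stack_switching_; FinishSwitchFiber
// commits them as the current bounds and optionally restores the saved fake
// stack. Both are driven by the __sanitizer_start_switch_fiber /
// __sanitizer_finish_switch_fiber entry points at the end of this file.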
void AsanThread::StartSwitchFiber(FakeStack **fake_stack_save, uptr bottom,
                                  uptr size) {
  if (atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: starting fiber switch while in fiber switch\n");
    Die();
  }

  next_stack_bottom_ = bottom;
  next_stack_top_ = bottom + size;
  atomic_store(&stack_switching_, 1, memory_order_release);

  FakeStack *current_fake_stack = fake_stack_;
  if (fake_stack_save)
    *fake_stack_save = fake_stack_;
  fake_stack_ = nullptr;
  SetTLSFakeStack(nullptr);
  // If fake_stack_save is null, the fiber is about to die; delete its fake
  // stack now.
  if (!fake_stack_save && current_fake_stack)
    current_fake_stack->Destroy(this->tid());
}

void AsanThread::FinishSwitchFiber(FakeStack *fake_stack_save,
                                   uptr *bottom_old,
                                   uptr *size_old) {
  if (!atomic_load(&stack_switching_, memory_order_relaxed)) {
    Report("ERROR: finishing a fiber switch that has not started\n");
    Die();
  }

  if (fake_stack_save) {
    SetTLSFakeStack(fake_stack_save);
    fake_stack_ = fake_stack_save;
  }

  if (bottom_old)
    *bottom_old = stack_bottom_;
  if (size_old)
    *size_old = stack_top_ - stack_bottom_;
  stack_bottom_ = next_stack_bottom_;
  stack_top_ = next_stack_top_;
  atomic_store(&stack_switching_, 0, memory_order_release);
  next_stack_top_ = 0;
  next_stack_bottom_ = 0;
}

inline AsanThread::StackBounds AsanThread::GetStackBounds() const {
  if (!atomic_load(&stack_switching_, memory_order_acquire)) {
    // Make sure the stack bounds are fully initialized.
    if (stack_bottom_ >= stack_top_) return {0, 0};
    return {stack_bottom_, stack_top_};
  }
  char local;
  const uptr cur_stack = (uptr)&local;
  // Note: we need to check the next stack first, because FinishSwitchFiber
  // may be in the process of overwriting stack_top_/stack_bottom_. But in
  // that case we are already running on the next stack.
  if (cur_stack >= next_stack_bottom_ && cur_stack < next_stack_top_)
    return {next_stack_bottom_, next_stack_top_};
  return {stack_bottom_, stack_top_};
}

uptr AsanThread::stack_top() {
  return GetStackBounds().top;
}

uptr AsanThread::stack_bottom() {
  return GetStackBounds().bottom;
}

uptr AsanThread::stack_size() {
  const auto bounds = GetStackBounds();
  return bounds.top - bounds.bottom;
}

// We want to create the FakeStack lazily on the first use, but not earlier
// than the stack size is known, and the procedure has to be async-signal
// safe. Racing callers that lose the CAS below get nullptr.
FakeStack *AsanThread::AsyncSignalSafeLazyInitFakeStack() {
  uptr stack_size = this->stack_size();
  if (stack_size == 0)  // stack_size is not yet available, don't use FakeStack.
    return nullptr;
  uptr old_val = 0;
  // fake_stack_ has 3 states:
  // 0   -- not initialized
  // 1   -- being initialized
  // ptr -- initialized
  // This CAS checks if the state was 0 and, if so, changes it to state 1;
  // if that succeeds, this caller initializes the pointer.
  if (atomic_compare_exchange_strong(
      reinterpret_cast<atomic_uintptr_t *>(&fake_stack_), &old_val, 1UL,
      memory_order_relaxed)) {
    uptr stack_size_log = Log2(RoundUpToPowerOfTwo(stack_size));
    CHECK_LE(flags()->min_uar_stack_size_log, flags()->max_uar_stack_size_log);
    stack_size_log =
        Min(stack_size_log, static_cast<uptr>(flags()->max_uar_stack_size_log));
    stack_size_log =
        Max(stack_size_log, static_cast<uptr>(flags()->min_uar_stack_size_log));
    fake_stack_ = FakeStack::Create(stack_size_log);
    SetTLSFakeStack(fake_stack_);
    return fake_stack_;
  }
  return nullptr;
}

void AsanThread::Init(const InitOptions *options) {
  next_stack_top_ = next_stack_bottom_ = 0;
  atomic_store(&stack_switching_, false, memory_order_release);
  CHECK_EQ(this->stack_size(), 0U);
  SetThreadStackAndTls(options);
  if (stack_top_ != stack_bottom_) {
    CHECK_GT(this->stack_size(), 0U);
    CHECK(AddrIsInMem(stack_bottom_));
    CHECK(AddrIsInMem(stack_top_ - 1));
  }
  ClearShadowForThreadStackAndTLS();
  fake_stack_ = nullptr;
  if (__asan_option_detect_stack_use_after_return)
    AsyncSignalSafeLazyInitFakeStack();
  int local = 0;
  VReport(1, "T%d: stack [%p,%p) size 0x%zx; local=%p\n", tid(),
          (void *)stack_bottom_, (void *)stack_top_, stack_top_ - stack_bottom_,
          &local);
}

// Fuchsia and RTEMS don't use ThreadStart.
// asan_fuchsia.c/asan_rtems.c define CreateMainThread and
// SetThreadStackAndTls.
#if !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

thread_return_t AsanThread::ThreadStart(
    tid_t os_id, atomic_uintptr_t *signal_thread_is_registered) {
  Init();
  asanThreadRegistry().StartThread(tid(), os_id, /*workerthread*/ false,
                                   nullptr);
  if (signal_thread_is_registered)
    atomic_store(signal_thread_is_registered, 1, memory_order_release);

  if (common_flags()->use_sigaltstack) SetAlternateSignalStack();

  if (!start_routine_) {
    // start_routine_ == 0 if we're on the main thread or on one of the
    // OS X libdispatch worker threads. But nobody is supposed to call
    // ThreadStart() for the worker threads.
    CHECK_EQ(tid(), 0);
    return 0;
  }

  thread_return_t res = start_routine_(arg_);

  // On POSIX systems we defer this to the TSD destructor. LSan will consider
  // the thread's memory as non-live from the moment we call Destroy(), even
  // though that memory might contain pointers to heap objects which will be
  // cleaned up by a user-defined TSD destructor. Thus, calling Destroy() before
  // the TSD destructors have run might cause false positives in LSan.
  if (!SANITIZER_POSIX)
    this->Destroy();

  return res;
}

AsanThread *CreateMainThread() {
  AsanThread *main_thread = AsanThread::Create(
      /* start_routine */ nullptr, /* arg */ nullptr, /* parent_tid */ 0,
      /* stack */ nullptr, /* detached */ true);
  SetCurrentThread(main_thread);
  main_thread->ThreadStart(internal_getpid(),
                           /* signal_thread_is_registered */ nullptr);
  return main_thread;
}

// This implementation doesn't use the argument, which is just passed down
// from the caller of Init (which see, above).  It's only there to support
// OS-specific implementations that need more information passed through.
void AsanThread::SetThreadStackAndTls(const InitOptions *options) {
  DCHECK_EQ(options, nullptr);
  uptr tls_size = 0;
  uptr stack_size = 0;
  GetThreadStackAndTls(tid() == 0, &stack_bottom_, &stack_size, &tls_begin_,
                       &tls_size);
  stack_top_ = stack_bottom_ + stack_size;
  tls_end_ = tls_begin_ + tls_size;
  dtls_ = DTLS_Get();

  if (stack_top_ != stack_bottom_) {
    int local;
    CHECK(AddrIsInStack((uptr)&local));
  }
}

#endif  // !SANITIZER_FUCHSIA && !SANITIZER_RTEMS

void AsanThread::ClearShadowForThreadStackAndTLS() {
  if (stack_top_ != stack_bottom_)
    PoisonShadow(stack_bottom_, stack_top_ - stack_bottom_, 0);
  if (tls_begin_ != tls_end_) {
    uptr tls_begin_aligned = RoundDownTo(tls_begin_, SHADOW_GRANULARITY);
    uptr tls_end_aligned = RoundUpTo(tls_end_, SHADOW_GRANULARITY);
    FastPoisonShadowPartialRightRedzone(tls_begin_aligned,
                                        tls_end_ - tls_begin_aligned,
                                        tls_end_aligned - tls_end_, 0);
  }
}

bool AsanThread::GetStackFrameAccessByAddr(uptr addr,
                                           StackFrameAccess *access) {
  if (stack_top_ == stack_bottom_)
    return false;

  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
    access->offset = addr - bottom;
    access->frame_pc = ((uptr*)bottom)[2];
    access->frame_descr = (const char *)((uptr*)bottom)[1];
    return true;
  }
  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  uptr mem_ptr = RoundDownTo(aligned_addr, SHADOW_GRANULARITY);
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr != kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  while (shadow_ptr >= shadow_bottom &&
         *shadow_ptr == kAsanStackLeftRedzoneMagic) {
    shadow_ptr--;
    mem_ptr -= SHADOW_GRANULARITY;
  }

  if (shadow_ptr < shadow_bottom) {
    return false;
  }

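  // mem_ptr + SHADOW_GRANULARITY is the base of the frame's left redzone,
  // where the instrumentation stores the frame header read below:
  // [0] kCurrentStackFrameMagic, [1] pointer to the frame description string,
  // [2] the function's PC.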
  uptr* ptr = (uptr*)(mem_ptr + SHADOW_GRANULARITY);
  CHECK(ptr[0] == kCurrentStackFrameMagic);
  access->offset = addr - (uptr)ptr;
  access->frame_pc = ptr[2];
  access->frame_descr = (const char*)ptr[1];
  return true;
}

uptr AsanThread::GetStackVariableShadowStart(uptr addr) {
  uptr bottom = 0;
  if (AddrIsInStack(addr)) {
    bottom = stack_bottom();
  } else if (has_fake_stack()) {
    bottom = fake_stack()->AddrIsInFakeStack(addr);
    CHECK(bottom);
  } else
    return 0;

  uptr aligned_addr = RoundDownTo(addr, SANITIZER_WORDSIZE / 8);  // align addr.
  u8 *shadow_ptr = (u8*)MemToShadow(aligned_addr);
  u8 *shadow_bottom = (u8*)MemToShadow(bottom);

  while (shadow_ptr >= shadow_bottom &&
         (*shadow_ptr != kAsanStackLeftRedzoneMagic &&
          *shadow_ptr != kAsanStackMidRedzoneMagic &&
          *shadow_ptr != kAsanStackRightRedzoneMagic))
    shadow_ptr--;

  return (uptr)shadow_ptr + 1;
}

bool AsanThread::AddrIsInStack(uptr addr) {
  const auto bounds = GetStackBounds();
  return addr >= bounds.bottom && addr < bounds.top;
}

static bool ThreadStackContainsAddress(ThreadContextBase *tctx_base,
                                       void *addr) {
  AsanThreadContext *tctx = static_cast<AsanThreadContext*>(tctx_base);
  AsanThread *t = tctx->thread;
  if (!t) return false;
  if (t->AddrIsInStack((uptr)addr)) return true;
  if (t->has_fake_stack() && t->fake_stack()->AddrIsInFakeStack((uptr)addr))
    return true;
  return false;
}

AsanThread *GetCurrentThread() {
  if (SANITIZER_RTEMS && !asan_inited)
    return nullptr;

  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (!context) {
    if (SANITIZER_ANDROID) {
      // On Android, the libc constructor is called _after_ asan_init, and
      // cleans up TSD. Try to figure out whether this is still the main
      // thread by the stack address. We are not entirely sure that we have
      // the correct main thread limits, so only do this magic on Android,
      // and only if the found thread is the main thread.
      AsanThreadContext *tctx = GetThreadContextByTidLocked(0);
      if (tctx && ThreadStackContainsAddress(tctx, &context)) {
        SetCurrentThread(tctx->thread);
        return tctx->thread;
      }
    }
    return nullptr;
  }
  return context->thread;
}

void SetCurrentThread(AsanThread *t) {
  CHECK(t->context());
  VReport(2, "SetCurrentThread: %p for thread %p\n", t->context(),
          (void *)GetThreadSelf());
  // Make sure we do not reset the current AsanThread.
  CHECK_EQ(0, AsanTSDGet());
  AsanTSDSet(t->context());
  CHECK_EQ(t->context(), AsanTSDGet());
}

u32 GetCurrentTidOrInvalid() {
  AsanThread *t = GetCurrentThread();
  return t ? t->tid() : kInvalidTid;
}

AsanThread *FindThreadByStackAddress(uptr addr) {
  asanThreadRegistry().CheckLocked();
  AsanThreadContext *tctx = static_cast<AsanThreadContext *>(
      asanThreadRegistry().FindThreadContextLocked(ThreadStackContainsAddress,
                                                   (void *)addr));
  return tctx ? tctx->thread : nullptr;
}

void EnsureMainThreadIDIsCorrect() {
  AsanThreadContext *context =
      reinterpret_cast<AsanThreadContext *>(AsanTSDGet());
  if (context && (context->tid == 0))
    context->os_id = GetTid();
}

__asan::AsanThread *GetAsanThreadByOsIDLocked(tid_t os_id) {
  __asan::AsanThreadContext *context = static_cast<__asan::AsanThreadContext *>(
      __asan::asanThreadRegistry().FindThreadContextByOsIDLocked(os_id));
  if (!context) return nullptr;
  return context->thread;
}
} // namespace __asan

// --- Implementation of LSan-specific functions --- {{{1
namespace __lsan {
bool GetThreadRangesLocked(tid_t os_id, uptr *stack_begin, uptr *stack_end,
                           uptr *tls_begin, uptr *tls_end, uptr *cache_begin,
                           uptr *cache_end, DTLS **dtls) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (!t) return false;
  *stack_begin = t->stack_bottom();
  *stack_end = t->stack_top();
  *tls_begin = t->tls_begin();
  *tls_end = t->tls_end();
  // ASan doesn't keep allocator caches in TLS, so these are unused.
  *cache_begin = 0;
  *cache_end = 0;
  *dtls = t->dtls();
  return true;
}

void ForEachExtraStackRange(tid_t os_id, RangeIteratorCallback callback,
                            void *arg) {
  __asan::AsanThread *t = __asan::GetAsanThreadByOsIDLocked(os_id);
  if (t && t->has_fake_stack())
    t->fake_stack()->ForEachFakeFrame(callback, arg);
}

void LockThreadRegistry() {
  __asan::asanThreadRegistry().Lock();
}

void UnlockThreadRegistry() {
  __asan::asanThreadRegistry().Unlock();
}

ThreadRegistry *GetThreadRegistryLocked() {
  __asan::asanThreadRegistry().CheckLocked();
  return &__asan::asanThreadRegistry();
}

void EnsureMainThreadIDIsCorrect() {
  __asan::EnsureMainThreadIDIsCorrect();
}
} // namespace __lsan

// ---------------------- Interface ---------------- {{{1
using namespace __asan;  // NOLINT

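// Illustrative usage of the fiber-switching interface below (a minimal
// sketch, not part of this file's logic). A user-space fiber scheduler is
// expected to bracket every stack switch with these two calls; the Fiber
// type, SwitchTo() helper and swapcontext-based switch are assumptions made
// for the example only:
//
//   #include <sanitizer/common_interface_defs.h>
//   #include <ucontext.h>
//
//   struct Fiber {            // hypothetical fiber descriptor
//     ucontext_t ctx;
//     void *stack_base;       // lowest address of the fiber's stack
//     size_t stack_size;
//   };
//
//   void SwitchTo(Fiber *from, Fiber *to) {
//     void *fake_stack_save = nullptr;
//     // Announce the stack we are about to run on; pass nullptr instead of
//     // &fake_stack_save if `from` is exiting for good.
//     __sanitizer_start_switch_fiber(&fake_stack_save, to->stack_base,
//                                    to->stack_size);
//     swapcontext(&from->ctx, &to->ctx);
//     // Back on `from`'s stack: commit the switch and restore its saved
//     // fake stack; the old bounds are not needed here.
//     __sanitizer_finish_switch_fiber(fake_stack_save, nullptr, nullptr);
//   }
//
// A newly created fiber must also call __sanitizer_finish_switch_fiber at
// its entry point before running user code.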
extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_start_switch_fiber(void **fakestacksave, const void *bottom,
                                    uptr size) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_start_switch_fiber called from unknown thread\n");
    return;
  }
  t->StartSwitchFiber((FakeStack**)fakestacksave, (uptr)bottom, size);
}

SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_finish_switch_fiber(void* fakestack,
                                     const void **bottom_old,
                                     uptr *size_old) {
  AsanThread *t = GetCurrentThread();
  if (!t) {
    VReport(1, "__sanitizer_finish_switch_fiber called from unknown thread\n");
    return;
  }
  t->FinishSwitchFiber((FakeStack*)fakestack,
                       (uptr*)bottom_old,
                       (uptr*)size_old);
}
}