Home | History | Annotate | Line # | Download | only in tsan
      1  1.1  mrg //===-- tsan_rtl_thread.cpp -----------------------------------------------===//
      2  1.1  mrg //
      3  1.1  mrg // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
      4  1.1  mrg // See https://llvm.org/LICENSE.txt for license information.
      5  1.1  mrg // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      6  1.1  mrg //
      7  1.1  mrg //===----------------------------------------------------------------------===//
      8  1.1  mrg //
      9  1.1  mrg // This file is a part of ThreadSanitizer (TSan), a race detector.
     10  1.1  mrg //
     11  1.1  mrg //===----------------------------------------------------------------------===//
     12  1.1  mrg 
     13  1.1  mrg #include "sanitizer_common/sanitizer_placement_new.h"
     14  1.1  mrg #include "tsan_rtl.h"
     15  1.1  mrg #include "tsan_mman.h"
     16  1.1  mrg #include "tsan_platform.h"
     17  1.1  mrg #include "tsan_report.h"
     18  1.1  mrg #include "tsan_sync.h"
     19  1.1  mrg 
     20  1.1  mrg namespace __tsan {
     21  1.1  mrg 
     22  1.1  mrg // ThreadContext implementation.
     23  1.1  mrg 
// Constructs a context for thread `tid` with no live ThreadState (`thr`)
// and no pending synchronization clock (`sync`).
ThreadContext::ThreadContext(Tid tid) : ThreadContextBase(tid), thr(), sync() {}
     25  1.1  mrg 
#if !SANITIZER_GO
// Out-of-line empty destructor; only the C/C++ runtime build declares it.
ThreadContext::~ThreadContext() {
}
#endif
     30  1.1  mrg 
// Called when the context is recycled for reuse; the exit/creation clock
// must already have been consumed (join/detach), otherwise it would leak.
void ThreadContext::OnReset() { CHECK(!sync); }
     32  1.1  mrg 
     33  1.1  mrg #if !SANITIZER_GO
// One leak-report entry: a representative finished-but-never-joined thread
// and how many threads share its creation stack.
struct ThreadLeak {
  ThreadContext *tctx;
  int count;
};
     38  1.1  mrg 
     39  1.3  mrg static void CollectThreadLeaks(ThreadContextBase *tctx_base, void *arg) {
     40  1.3  mrg   auto &leaks = *static_cast<Vector<ThreadLeak> *>(arg);
     41  1.3  mrg   auto *tctx = static_cast<ThreadContext *>(tctx_base);
     42  1.1  mrg   if (tctx->detached || tctx->status != ThreadStatusFinished)
     43  1.1  mrg     return;
     44  1.1  mrg   for (uptr i = 0; i < leaks.Size(); i++) {
     45  1.1  mrg     if (leaks[i].tctx->creation_stack_id == tctx->creation_stack_id) {
     46  1.1  mrg       leaks[i].count++;
     47  1.1  mrg       return;
     48  1.1  mrg     }
     49  1.1  mrg   }
     50  1.3  mrg   leaks.PushBack({tctx, 1});
     51  1.1  mrg }
     52  1.1  mrg #endif
     53  1.1  mrg 
     54  1.4  mrg // Disabled on Mac because lldb test TestTsanBasic fails:
     55  1.4  mrg // https://reviews.llvm.org/D112603#3163158
     56  1.4  mrg #if !SANITIZER_GO && !SANITIZER_APPLE
// Reports that thread `tctx` terminated while ignores were still enabled,
// prints the stack where each ignore in `set` was begun, and aborts.
static void ReportIgnoresEnabled(ThreadContext *tctx, IgnoreSet *set) {
  if (tctx->tid == kMainTid) {
    Printf("ThreadSanitizer: main thread finished with ignores enabled\n");
  } else {
    Printf("ThreadSanitizer: thread T%d %s finished with ignores enabled,"
      " created at:\n", tctx->tid, tctx->name);
    PrintStack(SymbolizeStackId(tctx->creation_stack_id));
  }
  Printf("  One of the following ignores was not ended"
      " (in order of probability)\n");
  for (uptr i = 0; i < set->Size(); i++) {
    Printf("  Ignore was enabled at:\n");
    PrintStack(SymbolizeStackId(set->At(i)));
  }
  Die();
}
     73  1.1  mrg 
// If the thread still has memory-access or sync ignores enabled, report and
// die. Skipped after a multithreaded fork, where ignores are enabled
// deliberately (see ThreadStart).
static void ThreadCheckIgnore(ThreadState *thr) {
  if (ctx->after_multithreaded_fork)
    return;
  if (thr->ignore_reads_and_writes)
    ReportIgnoresEnabled(thr->tctx, &thr->mop_ignore_set);
  if (thr->ignore_sync)
    ReportIgnoresEnabled(thr->tctx, &thr->sync_ignore_set);
}
     82  1.1  mrg #else
// Ignore checking is disabled in this configuration; nothing to do.
static void ThreadCheckIgnore(ThreadState *thr) {}
     84  1.1  mrg #endif
     85  1.1  mrg 
// Called at process shutdown: checks for dangling ignores and (in the
// C/C++ runtime) reports threads that finished but were never joined
// or detached.
void ThreadFinalize(ThreadState *thr) {
  ThreadCheckIgnore(thr);
#if !SANITIZER_GO
  if (!ShouldReport(thr, ReportTypeThreadLeak))
    return;
  ThreadRegistryLock l(&ctx->thread_registry);
  Vector<ThreadLeak> leaks;
  ctx->thread_registry.RunCallbackForEachThreadLocked(CollectThreadLeaks,
                                                      &leaks);
  // One report per unique creation stack; `count` carries the multiplicity.
  for (uptr i = 0; i < leaks.Size(); i++) {
    ScopedReport rep(ReportTypeThreadLeak);
    rep.AddThread(leaks[i].tctx, true);
    rep.SetCount(leaks[i].count);
    OutputReport(thr, rep);
  }
#endif
}
    103  1.1  mrg 
    104  1.1  mrg int ThreadCount(ThreadState *thr) {
    105  1.1  mrg   uptr result;
    106  1.3  mrg   ctx->thread_registry.GetNumberOfThreads(0, 0, &result);
    107  1.1  mrg   return (int)result;
    108  1.1  mrg }
    109  1.1  mrg 
// Payload handed from ThreadCreate (parent thread) to
// ThreadContext::OnCreated via the thread registry.
struct OnCreatedArgs {
  VectorClock *sync;  // parent's clock released at the creation point (or null)
  uptr sync_epoch;    // ctx->global_epoch value when `sync` was captured
  StackID stack;      // parent's stack id at the creation site
};
    115  1.3  mrg 
// Registers a new thread in the registry and captures the parent's vector
// clock so the child can acquire it in ThreadStart, establishing
// happens-before from the creation point to the child's first action.
Tid ThreadCreate(ThreadState *thr, uptr pc, uptr uid, bool detached) {
  // The main thread and GCD workers don't have a parent thread.
  Tid parent = kInvalidTid;
  OnCreatedArgs arg = {nullptr, 0, kInvalidStackID};
  if (thr) {
    parent = thr->tid;
    arg.stack = CurrentStackId(thr, pc);
    if (!thr->ignore_sync) {
      SlotLocker locker(thr);
      thr->clock.ReleaseStore(&arg.sync);
      // Remember the epoch: if the global epoch changes before the child
      // starts, the captured clock is stale and is skipped (ThreadStart
      // checks sync_epoch == ctx->global_epoch).
      arg.sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
  Tid tid = ctx->thread_registry.CreateThread(uid, detached, parent, &arg);
  DPrintf("#%d: ThreadCreate tid=%d uid=%zu\n", parent, tid, uid);
  return tid;
}
    134  1.1  mrg 
    135  1.3  mrg void ThreadContext::OnCreated(void *arg) {
    136  1.3  mrg   OnCreatedArgs *args = static_cast<OnCreatedArgs *>(arg);
    137  1.4  mrg   sync = args->sync;
    138  1.4  mrg   sync_epoch = args->sync_epoch;
    139  1.4  mrg   creation_stack_id = args->stack;
    140  1.3  mrg }
    141  1.3  mrg 
// Empty marker function: its address is used as the synthetic PC for the
// imitated stack-initialization writes in ThreadStart, so that reports
// show a recognizable frame name.
extern "C" void __tsan_stack_initialization() {}
    143  1.3  mrg 
// NOTE(review): this struct appears unused in this file — ThreadStart now
// passes ThreadState* directly to StartThread and OnStarted casts it back.
// Candidate for removal; verify no other TU references it first.
struct OnStartedArgs {
  ThreadState *thr;
  uptr stk_addr;
  uptr stk_size;
  uptr tls_addr;
  uptr tls_size;
};
    151  1.3  mrg 
// Runs on behalf of the newly started thread: marks it started in the
// registry, acquires the creation-time clock from the parent (if still
// fresh), records stack/TLS bounds, and imitates initializing writes over
// the stack and TLS ranges.
void ThreadStart(ThreadState *thr, Tid tid, tid_t os_id,
                 ThreadType thread_type) {
  ctx->thread_registry.StartThread(tid, os_id, thread_type, thr);
  if (!thr->ignore_sync) {
    SlotAttachAndLock(thr);
    // The parent's clock is valid only if the global epoch has not changed
    // since ThreadCreate captured it.
    if (thr->tctx->sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(thr->tctx->sync);
    SlotUnlock(thr);
  }
  // Consumed or stale either way; release the creation clock.
  Free(thr->tctx->sync);

  uptr stk_addr = 0;
  uptr stk_size = 0;
  uptr tls_addr = 0;
  uptr tls_size = 0;
#if !SANITIZER_GO
  // For fibers the OS stack/TLS are not queried; bounds stay zero.
  if (thread_type != ThreadType::Fiber)
    GetThreadStackAndTls(tid == kMainTid, &stk_addr, &stk_size, &tls_addr,
                         &tls_size);
#endif
  thr->stk_addr = stk_addr;
  thr->stk_size = stk_size;
  thr->tls_addr = tls_addr;
  thr->tls_size = tls_size;

#if !SANITIZER_GO
  if (ctx->after_multithreaded_fork) {
    // After a multithreaded fork, interceptors, memory accesses and sync
    // are all ignored in newly started threads.
    thr->ignore_interceptors++;
    ThreadIgnoreBegin(thr, 0);
    ThreadIgnoreSyncBegin(thr, 0);
  }
#endif

#if !SANITIZER_GO
  // Don't imitate stack/TLS writes for the main thread,
  // because its initialization is synchronized with all
  // subsequent threads anyway.
  if (tid != kMainTid) {
    if (stk_addr && stk_size) {
      // Attribute the synthetic writes to __tsan_stack_initialization so
      // they are recognizable in reports.
      const uptr pc = StackTrace::GetNextInstructionPc(
          reinterpret_cast<uptr>(__tsan_stack_initialization));
      MemoryRangeImitateWrite(thr, pc, stk_addr, stk_size);
    }

    if (tls_addr && tls_size)
      ImitateTlsWrite(thr, tls_addr, tls_size);
  }
#endif
}
    201  1.3  mrg 
// Registry callback on thread start: binds the caller-provided ThreadState
// storage to this context and initializes per-thread machinery.
void ThreadContext::OnStarted(void *arg) {
  thr = static_cast<ThreadState *>(arg);
  DPrintf("#%d: ThreadStart\n", tid);
  // `arg` is raw storage supplied by the caller; construct in place.
  new (thr) ThreadState(tid);
  if (common_flags()->detect_deadlocks)
    thr->dd_lt = ctx->dd->CreateLogicalThread(tid);
  thr->tctx = this;
#if !SANITIZER_GO
  thr->is_inited = true;
#endif
}
    213  1.1  mrg 
// Tears down the calling thread: checks for dangling ignores, releases
// shadow for its stack/TLS, publishes its exit clock for a future join,
// frees the shadow stack and detaches from the slot and registry.
void ThreadFinish(ThreadState *thr) {
  DPrintf("#%d: ThreadFinish\n", thr->tid);
  ThreadCheckIgnore(thr);
  // The stack/TLS shadow is not needed anymore; let the OS reclaim it.
  if (thr->stk_addr && thr->stk_size)
    DontNeedShadowFor(thr->stk_addr, thr->stk_size);
  if (thr->tls_addr && thr->tls_size)
    DontNeedShadowFor(thr->tls_addr, thr->tls_size);
  thr->is_dead = true;
#if !SANITIZER_GO
  thr->is_inited = false;
  thr->ignore_interceptors++;
  PlatformCleanUpThreadState(thr);
#endif
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    ThreadRegistryLock lock(&ctx->thread_registry);
    // Note: detached is protected by the thread registry mutex,
    // the thread may be detaching concurrently in another thread.
    if (!thr->tctx->detached) {
      // Publish the exit clock; the joiner picks it up in OnJoined.
      thr->clock.ReleaseStore(&thr->tctx->sync);
      thr->tctx->sync_epoch = ctx->global_epoch;
      IncrementEpoch(thr);
    }
  }
#if !SANITIZER_GO
  UnmapOrDie(thr->shadow_stack, kShadowStackSize * sizeof(uptr));
#else
  Free(thr->shadow_stack);
#endif
  thr->shadow_stack = nullptr;
  thr->shadow_stack_pos = nullptr;
  thr->shadow_stack_end = nullptr;
  if (common_flags()->detect_deadlocks)
    ctx->dd->DestroyLogicalThread(thr->dd_lt);
  SlotDetach(thr);
  ctx->thread_registry.FinishThread(thr->tid);
  thr->~ThreadState();
}
    252  1.4  mrg 
// Registry callback on thread finish: recycles the thread's trace parts
// into the global queue and severs the ThreadContext <-> ThreadState link.
void ThreadContext::OnFinished() {
  Lock lock(&ctx->slot_mtx);
  Lock lock1(&trace.mtx);
  // Queue all trace parts into the global recycle queue.
  auto parts = &trace.parts;
  while (trace.local_head) {
    CHECK(parts->Queued(trace.local_head));
    ctx->trace_part_recycle.PushBack(trace.local_head);
    trace.local_head = parts->Next(trace.local_head);
  }
  ctx->trace_part_recycle_finished += parts->Size();
  // Bound memory retained by finished threads: above the high watermark
  // keep no parts allocated for this trace; above the low watermark keep
  // at most one.
  if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadHi) {
    ctx->trace_part_finished_excess += parts->Size();
    trace.parts_allocated = 0;
  } else if (ctx->trace_part_recycle_finished > Trace::kFinishedThreadLo &&
             parts->Size() > 1) {
    ctx->trace_part_finished_excess += parts->Size() - 1;
    trace.parts_allocated = 1;
  }
  // From now on replay will use trace->final_pos.
  trace.final_pos = (Event *)atomic_load_relaxed(&thr->trace_pos);
  atomic_store_relaxed(&thr->trace_pos, 0);
  thr->tctx = nullptr;
  thr = nullptr;
}
    278  1.1  mrg 
// NOTE(review): appears unused in this file — ThreadConsumeTid calls
// ConsumeThreadUserId directly. Verify no other references before removing.
struct ConsumeThreadContext {
  uptr uid;
  ThreadContextBase *tctx;
};
    283  1.3  mrg 
// Detaches user id `uid` from its thread in the registry and returns the
// corresponding tid. `thr` and `pc` are unused here.
Tid ThreadConsumeTid(ThreadState *thr, uptr pc, uptr uid) {
  return ctx->thread_registry.ConsumeThreadUserId(uid);
}
    287  1.1  mrg 
// Carries the joined thread's exit clock (and the epoch it was captured
// in) from ThreadContext::OnJoined back to ThreadJoin.
struct JoinArg {
  VectorClock *sync;
  uptr sync_epoch;
};
    292  1.1  mrg 
// Joins thread `tid`: takes ownership of its exit clock (moved over in
// OnJoined) and acquires it, establishing happens-before from the child's
// exit to the join point.
void ThreadJoin(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  DPrintf("#%d: ThreadJoin tid=%d\n", thr->tid, tid);
  JoinArg arg = {};
  ctx->thread_registry.JoinThread(tid, &arg);
  if (!thr->ignore_sync) {
    SlotLocker locker(thr);
    // Skip stale clocks (global epoch changed since the child exited).
    if (arg.sync_epoch == ctx->global_epoch)
      thr->clock.Acquire(arg.sync);
  }
  // We own the clock now (OnJoined nulled the context's pointer).
  Free(arg.sync);
}
    305  1.3  mrg 
// Registry callback: transfer this context's exit clock to the joiner.
void ThreadContext::OnJoined(void *ptr) {
  auto arg = static_cast<JoinArg *>(ptr);
  arg->sync = sync;
  arg->sync_epoch = sync_epoch;
  // Ownership moves to the joiner; ThreadJoin frees the clock.
  sync = nullptr;
  sync_epoch = 0;
}
    313  1.1  mrg 
// A dead (reusable) context must not still hold a sync clock.
void ThreadContext::OnDead() { CHECK_EQ(sync, nullptr); }
    315  1.3  mrg 
// Marks thread `tid` as detached in the registry; its exit clock is then
// freed rather than joined (see OnDetached). `pc` is unused.
void ThreadDetach(ThreadState *thr, uptr pc, Tid tid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.DetachThread(tid, thr);
}
    320  1.1  mrg 
// Registry callback: a detached thread's exit clock is never acquired,
// so release it here. `arg` is unused.
void ThreadContext::OnDetached(void *arg) { Free(sync); }
    322  1.3  mrg 
// Restores the user id association for thread `tid` (counterpart of
// ThreadConsumeTid, for join attempts that do not complete). `pc` unused.
void ThreadNotJoined(ThreadState *thr, uptr pc, Tid tid, uptr uid) {
  CHECK_GT(tid, 0);
  ctx->thread_registry.SetThreadUserId(tid, uid);
}
    327  1.1  mrg 
// Records the current thread's name in the registry (shown in reports,
// e.g. by ReportIgnoresEnabled).
void ThreadSetName(ThreadState *thr, const char *name) {
  ctx->thread_registry.SetThreadName(thr->tid, name);
}
    331  1.1  mrg 
    332  1.1  mrg #if !SANITIZER_GO
    333  1.1  mrg void FiberSwitchImpl(ThreadState *from, ThreadState *to) {
    334  1.1  mrg   Processor *proc = from->proc();
    335  1.1  mrg   ProcUnwire(proc, from);
    336  1.1  mrg   ProcWire(proc, to);
    337  1.1  mrg   set_cur_thread(to);
    338  1.1  mrg }
    339  1.1  mrg 
// Allocates and starts a new fiber ThreadState, registered as a detached
// thread; temporarily switches to the fiber to run its ThreadStart.
// `flags` is currently unused here.
ThreadState *FiberCreate(ThreadState *thr, uptr pc, unsigned flags) {
  void *mem = Alloc(sizeof(ThreadState));
  ThreadState *fiber = static_cast<ThreadState *>(mem);
  internal_memset(fiber, 0, sizeof(*fiber));
  Tid tid = ThreadCreate(thr, pc, 0, true);
  FiberSwitchImpl(thr, fiber);
  ThreadStart(fiber, tid, 0, ThreadType::Fiber);
  FiberSwitchImpl(fiber, thr);
  return fiber;
}
    350  1.1  mrg 
// Finishes and frees a fiber created by FiberCreate, switching to it so
// ThreadFinish runs in the fiber's own context. `pc` is unused.
void FiberDestroy(ThreadState *thr, uptr pc, ThreadState *fiber) {
  FiberSwitchImpl(thr, fiber);
  ThreadFinish(fiber);
  FiberSwitchImpl(fiber, thr);
  Free(fiber);
}
    357  1.1  mrg 
    358  1.1  mrg void FiberSwitch(ThreadState *thr, uptr pc,
    359  1.1  mrg                  ThreadState *fiber, unsigned flags) {
    360  1.1  mrg   if (!(flags & FiberSwitchFlagNoSync))
    361  1.1  mrg     Release(thr, pc, (uptr)fiber);
    362  1.1  mrg   FiberSwitchImpl(thr, fiber);
    363  1.1  mrg   if (!(flags & FiberSwitchFlagNoSync))
    364  1.1  mrg     Acquire(fiber, pc, (uptr)fiber);
    365  1.1  mrg }
    366  1.1  mrg #endif
    367  1.1  mrg 
    368  1.1  mrg }  // namespace __tsan
    369