//===-- tsan_rtl_mutex.cc -------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//

#include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
#include <sanitizer_common/sanitizer_stackdepot.h>

#include "tsan_rtl.h"
#include "tsan_flags.h"
#include "tsan_sync.h"
#include "tsan_report.h"
#include "tsan_symbolize.h"
#include "tsan_platform.h"

namespace __tsan {

void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);

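// Glue between TSan and the common deadlock detector (DD): forwards the
// per-processor and per-thread detector state, and lets the detector unwind
// the current stack and identify the thread by its never-reused unique id.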
struct Callback : DDCallback {
  ThreadState *thr;
  uptr pc;

  Callback(ThreadState *thr, uptr pc)
      : thr(thr)
      , pc(pc) {
    DDCallback::pt = thr->proc()->dd_pt;
    DDCallback::lt = thr->dd_lt;
  }

  u32 Unwind() override { return CurrentStackId(thr, pc); }
  int UniqueTid() override { return thr->unique_id; }
};

void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
  Callback cb(thr, pc);
  ctx->dd->MutexInit(&cb, &s->dd);
  s->dd.ctx = s->GetId();
}

static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
    uptr addr, u64 mid) {
  // In Go, these misuses are either impossible, or detected by std lib,
  // or false positives (e.g. unlock in a different thread).
  if (SANITIZER_GO)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(typ);
  rep.AddMutex(mid);
  VarSizeStackTrace trace;
  ObtainCurrentStack(thr, pc, &trace);
  rep.AddStack(trace, true);
  rep.AddLocation(addr, 1);
  OutputReport(thr, rep);
}

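// MutexCreate reports the mutex to the race detector. The shadow write below
// lets TSan catch races between mutex initialization and other accesses to
// the same memory; it is skipped for linker-initialized mutexes, which can
// legitimately be used before any constructor runs.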
void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexCreate);
  if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->SetFlags(flagz & MutexCreationFlagMask);
  if (!SANITIZER_GO && s->creation_stack_id == 0)
    s->creation_stack_id = CurrentStackId(thr, pc);
  s->mtx.Unlock();
}

void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexDestroy);
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
  if (s == 0)
    return;
  if ((flagz & MutexFlagLinkerInit)
      || s->IsFlagSet(MutexFlagLinkerInit)
      || ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
    // Destroy is no-op for linker-initialized mutexes.
    s->mtx.Unlock();
    return;
  }
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ctx->dd->MutexDestroy(&cb, &s->dd);
    ctx->dd->MutexInit(&cb, &s->dd);
  }
  bool unlock_locked = false;
  if (flags()->report_destroy_locked
      && s->owner_tid != SyncVar::kInvalidTid
      && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    unlock_locked = true;
  }
  u64 mid = s->GetId();
  u64 last_lock = s->last_lock;
  if (!unlock_locked)
    s->Reset(thr->proc());  // must not reset it before the report is printed
  s->mtx.Unlock();
  if (unlock_locked) {
    ThreadRegistryLock l(ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, true);
    if (s != 0) {
      s->Reset(thr->proc());
      s->mtx.Unlock();
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr)) {
    CHECK(!thr->is_freeing);
    thr->is_freeing = true;
    MemoryWrite(thr, pc, addr, kSizeLog1);
    thr->is_freeing = false;
  }
  // s will be destroyed and freed in MetaMap::FreeBlock.
}

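// MutexPreLock/MutexPostLock bracket the real lock operation. A minimal
// sketch of the intended calling convention (the real interceptors live in
// tsan_interceptors.cc and additionally handle trylocks, timed locks and
// robust-mutex error codes):
//
//   TSAN_INTERCEPTOR(int, pthread_mutex_lock, void *m) {
//     SCOPED_TSAN_INTERCEPTOR(pthread_mutex_lock, m);
//     MutexPreLock(thr, pc, (uptr)m);     // deadlock check before blocking
//     int res = REAL(pthread_mutex_lock)(m);
//     if (res == 0)
//       MutexPostLock(thr, pc, (uptr)m);  // acquire happens-before edges
//     return res;
//   }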
void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    if (s->owner_tid != thr->tid) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
      s->mtx.ReadUnlock();
      ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    } else {
      s->mtx.ReadUnlock();
    }
  }
}

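// On the first (non-recursive) acquisition the owner acquires both the write
// clock and the read clock: it must observe the memory effects of previous
// writers as well as of readers that released the mutex before it.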
void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
  DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
      thr->tid, addr, flagz, rec);
  if (flagz & MutexFlagRecursiveLock)
    CHECK_GT(rec, 0);
  else
    rec = 1;
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
  bool report_double_lock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    CHECK_EQ(s->recursion, 0);
    s->owner_tid = thr->tid;
    s->last_lock = thr->fast_state.raw();
  } else if (s->owner_tid == thr->tid) {
    CHECK_GT(s->recursion, 0);
  } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_double_lock = true;
  }
  const bool first = s->recursion == 0;
  s->recursion += rec;
  if (first) {
    StatInc(thr, StatMutexLock);
    AcquireImpl(thr, pc, &s->clock);
    AcquireImpl(thr, pc, &s->read_clock);
  } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    StatInc(thr, StatMutexRecLock);
  }
  thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
  bool pre_lock = false;
  if (first && common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  s = 0;
  if (report_double_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
  if (first && pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

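// Returns the number of recursion levels released. With
// MutexFlagRecursiveUnlock the mutex is released completely; a caller that
// must temporarily drop a recursive mutex (e.g. around a condition wait) can
// save the returned count and pass it back to MutexPostLock together with
// MutexFlagRecursiveLock to restore the previous recursion depth.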
int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
  int rec = 0;
  bool report_bad_unlock = false;
  if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  } else {
    rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    s->recursion -= rec;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  }
  thr->mset.Del(s->GetId(), true);
  if (common_flags()->detect_deadlocks && s->recursion == 0 &&
      !report_bad_unlock) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
  return rec;
}

void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
    s->UpdateFlags(flagz);
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    s->mtx.ReadUnlock();
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  StatInc(thr, StatMutexReadLock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, false);
  s->UpdateFlags(flagz);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
  bool report_bad_lock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_lock = true;
    }
  }
  AcquireImpl(thr, pc, &s->clock);
  s->last_lock = thr->fast_state.raw();
  thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
  bool pre_lock = false;
  if (common_flags()->detect_deadlocks) {
    pre_lock = (flagz & MutexFlagDoPreLockOnPostLock) &&
        !(flagz & MutexFlagTryLock);
    Callback cb(thr, pc);
    if (pre_lock)
      ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
  }
  u64 mid = s->GetId();
  s->mtx.ReadUnlock();
  // Can't touch s after this point.
  s = 0;
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

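// Read unlock releases into the separate read_clock rather than the write
// clock: readers do not synchronize with each other, only with the next
// writer, which acquires read_clock in MutexPostLock.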
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  StatInc(thr, StatMutexReadUnlock);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
  bool report_bad_unlock = false;
  if (s->owner_tid != SyncVar::kInvalidTid) {
    if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
      s->SetFlags(MutexFlagBroken);
      report_bad_unlock = true;
    }
  }
  ReleaseImpl(thr, pc, &s->read_clock);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

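// Unlock for callers that do not know whether the mutex is held for reading
// or for writing (e.g. rwlock unlock): the mode is inferred from owner_tid,
// which is set only by write locks.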
void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryReadAtomic(thr, pc, addr, kSizeLog1);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  bool write = true;
  bool report_bad_unlock = false;
  if (s->owner_tid == SyncVar::kInvalidTid) {
    // Seems to be read unlock.
    write = false;
    StatInc(thr, StatMutexReadUnlock);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    ReleaseImpl(thr, pc, &s->read_clock);
  } else if (s->owner_tid == thr->tid) {
    // Seems to be write unlock.
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    CHECK_GT(s->recursion, 0);
    s->recursion--;
    if (s->recursion == 0) {
      StatInc(thr, StatMutexUnlock);
      s->owner_tid = SyncVar::kInvalidTid;
      ReleaseStoreImpl(thr, pc, &s->clock);
    } else {
      StatInc(thr, StatMutexRecUnlock);
    }
  } else if (!s->IsFlagSet(MutexFlagBroken)) {
    s->SetFlags(MutexFlagBroken);
    report_bad_unlock = true;
  }
  thr->mset.Del(s->GetId(), write);
  if (common_flags()->detect_deadlocks && s->recursion == 0) {
    Callback cb(thr, pc);
    ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
  }
  u64 mid = s->GetId();
  s->mtx.Unlock();
  // Can't touch s after this point.
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}

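// Forcibly resets ownership so that tracking can continue; intended for
// recovery paths such as a robust mutex acquired after its owner died
// (EOWNERDEAD), where the lock is held but has no live owner.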
void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  s->owner_tid = SyncVar::kInvalidTid;
  s->recursion = 0;
  s->mtx.Unlock();
}

void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  u64 mid = s->GetId();
  s->mtx.Unlock();
  ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, mid);
}

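// Acquire/Release/ReleaseStore below are the generic happens-before
// primitives keyed by an arbitrary address; besides mutexes they serve other
// synchronization interceptors and the happens-before annotations.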
void Acquire(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetIfExistsAndLock(addr, false);
  if (!s)
    return;
  AcquireImpl(thr, pc, &s->clock);
  s->mtx.ReadUnlock();
}

static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

void AcquireGlobal(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AcquireGlobal\n", thr->tid);
  if (thr->ignore_sync)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateClockCallback, thr);
}

void Release(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: Release %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
  if (thr->ignore_sync)
    return;
  SyncVar *s = ctx->metamap.GetOrCreateAndLock(thr, pc, addr, true);
  thr->fast_state.IncrementEpoch();
  // Can't increment epoch w/o writing to the trace as well.
  TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
  ReleaseStoreImpl(thr, pc, &s->clock);
  s->mtx.Unlock();
}

#if !SANITIZER_GO
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}

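// AfterSleep snapshots every thread's clock into last_sleep_clock. Reports
// use it to flag accesses that are ordered only by a sleep ("as if
// synchronized via sleep"), a common symptom of timing-based synchronization.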
void AfterSleep(ThreadState *thr, uptr pc) {
  DPrintf("#%d: AfterSleep\n", thr->tid);
  if (thr->ignore_sync)
    return;
  thr->last_sleep_stack_id = CurrentStackId(thr, pc);
  ThreadRegistryLock l(ctx->thread_registry);
  ctx->thread_registry->RunCallbackForEachThreadLocked(
      UpdateSleepClockCallback, thr);
}
#endif

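// The *Impl helpers operate directly on vector clocks; callers hold the
// corresponding SyncVar lock. Acquire imports clock c into the thread via an
// element-wise max, release exports the thread's clock into c. A small
// worked example with two threads:
//
//   thr->clock = [5, 2], c = [3, 7]
//   after acquire: thr->clock = [max(5,3), max(2,7)] = [5, 7]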
void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->clock.acquire(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
}

void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.release(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncRelease);
}

void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
  if (thr->ignore_sync)
    return;
  thr->clock.set(thr->fast_state.epoch());
  thr->fast_synch_epoch = thr->fast_state.epoch();
  thr->clock.acq_rel(&thr->proc()->clock_cache, c);
  StatInc(thr, StatSyncAcquire);
  StatInc(thr, StatSyncRelease);
}

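// Assembles a deadlock report from the detector's lock cycle: one mutex and
// one thread per edge, plus one stack per edge (two if
// flags()->second_deadlock_stack is set).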
void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
  if (r == 0)
    return;
  ThreadRegistryLock l(ctx->thread_registry);
  ScopedReport rep(ReportTypeDeadlock);
  for (int i = 0; i < r->n; i++) {
    rep.AddMutex(r->loop[i].mtx_ctx0);
    rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    rep.AddThread((int)r->loop[i].thr_ctx);
  }
  uptr dummy_pc = 0x42;
  for (int i = 0; i < r->n; i++) {
    for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
      u32 stk = r->loop[i].stk[j];
      if (stk && stk != 0xffffffff) {
        rep.AddStack(StackDepotGet(stk), true);
      } else {
        // Sometimes we fail to extract the stack trace (FIXME: investigate),
        // but we should still produce some stack trace in the report.
        rep.AddStack(StackTrace(&dummy_pc, 1), true);
      }
    }
  }
  OutputReport(thr, rep);
}

}  // namespace __tsan