//===-- tsan_rtl_mutex.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
//===----------------------------------------------------------------------===//
     12  1.1  mrg 
     13  1.1  mrg #include <sanitizer_common/sanitizer_deadlock_detector_interface.h>
     14  1.1  mrg #include <sanitizer_common/sanitizer_stackdepot.h>
     15  1.1  mrg 
     16  1.1  mrg #include "tsan_rtl.h"
     17  1.1  mrg #include "tsan_flags.h"
     18  1.1  mrg #include "tsan_sync.h"
     19  1.1  mrg #include "tsan_report.h"
     20  1.1  mrg #include "tsan_symbolize.h"
     21  1.1  mrg #include "tsan_platform.h"
     22  1.1  mrg 
     23  1.1  mrg namespace __tsan {
     24  1.1  mrg 
     25  1.1  mrg void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r);
     26  1.1  mrg 
     27  1.1  mrg struct Callback final : public DDCallback {
     28  1.1  mrg   ThreadState *thr;
     29  1.1  mrg   uptr pc;
     30  1.1  mrg 
     31  1.1  mrg   Callback(ThreadState *thr, uptr pc)
     32  1.1  mrg       : thr(thr)
     33  1.1  mrg       , pc(pc) {
     34  1.1  mrg     DDCallback::pt = thr->proc()->dd_pt;
     35  1.1  mrg     DDCallback::lt = thr->dd_lt;
     36  1.1  mrg   }
     37  1.1  mrg 
     38  1.1  mrg   StackID Unwind() override { return CurrentStackId(thr, pc); }
     39  1.1  mrg   int UniqueTid() override { return thr->unique_id; }
     40  1.1  mrg };
     41  1.1  mrg 
     42  1.1  mrg void DDMutexInit(ThreadState *thr, uptr pc, SyncVar *s) {
     43  1.1  mrg   Callback cb(thr, pc);
     44  1.1  mrg   ctx->dd->MutexInit(&cb, &s->dd);
     45  1.1  mrg   s->dd.ctx = s->GetId();
     46  1.1  mrg }
     47  1.1  mrg 
     48  1.1  mrg static void ReportMutexMisuse(ThreadState *thr, uptr pc, ReportType typ,
     49  1.1  mrg     uptr addr, u64 mid) {
     50  1.1  mrg   // In Go, these misuses are either impossible, or detected by std lib,
     51  1.1  mrg   // or false positives (e.g. unlock in a different thread).
     52  1.1  mrg   if (SANITIZER_GO)
     53  1.1  mrg     return;
     54  1.1  mrg   if (!ShouldReport(thr, typ))
     55  1.1  mrg     return;
     56  1.1  mrg   ThreadRegistryLock l(&ctx->thread_registry);
     57  1.1  mrg   ScopedReport rep(typ);
     58  1.1  mrg   rep.AddMutex(mid);
     59  1.1  mrg   VarSizeStackTrace trace;
     60  1.1  mrg   ObtainCurrentStack(thr, pc, &trace);
     61  1.1  mrg   rep.AddStack(trace, true);
     62  1.1  mrg   rep.AddLocation(addr, 1);
     63  1.1  mrg   OutputReport(thr, rep);
     64  1.1  mrg }
     65  1.1  mrg 
     66  1.1  mrg void MutexCreate(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
     67  1.1  mrg   DPrintf("#%d: MutexCreate %zx flagz=0x%x\n", thr->tid, addr, flagz);
     68  1.1  mrg   if (!(flagz & MutexFlagLinkerInit) && IsAppMem(addr)) {
     69  1.1  mrg     CHECK(!thr->is_freeing);
     70  1.1  mrg     thr->is_freeing = true;
     71  1.1  mrg     MemoryAccess(thr, pc, addr, 1, kAccessWrite);
     72  1.1  mrg     thr->is_freeing = false;
     73  1.1  mrg   }
     74  1.1  mrg   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
     75  1.1  mrg   Lock l(&s->mtx);
     76  1.1  mrg   s->SetFlags(flagz & MutexCreationFlagMask);
     77  1.1  mrg   // Save stack in the case the sync object was created before as atomic.
     78  1.1  mrg   if (!SANITIZER_GO && s->creation_stack_id == 0)
     79  1.1  mrg     s->creation_stack_id = CurrentStackId(thr, pc);
     80  1.1  mrg }
     81  1.1  mrg 
// Models mutex destruction: tears down the deadlock-detector state,
// optionally reports destruction of a still-locked mutex, resets the
// sync object, and imitates a write to catch unlock-destroy races.
// Destruction of linker-initialized mutexes is deliberately a no-op.
void MutexDestroy(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexDestroy %zx\n", thr->tid, addr);
  bool unlock_locked = false;
  u64 mid = 0;
  u64 last_lock = 0;
  {
    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
    // No sync object means the mutex was never used; nothing to tear down.
    if (s == 0)
      return;
    Lock l(&s->mtx);
    if ((flagz & MutexFlagLinkerInit) || s->IsFlagSet(MutexFlagLinkerInit) ||
        ((flagz & MutexFlagNotStatic) && !s->IsFlagSet(MutexFlagNotStatic))) {
      // Destroy is no-op for linker-initialized mutexes.
      return;
    }
    if (common_flags()->detect_deadlocks) {
      Callback cb(thr, pc);
      ctx->dd->MutexDestroy(&cb, &s->dd);
      // Re-init so the detector state is usable if this address is reused.
      ctx->dd->MutexInit(&cb, &s->dd);
    }
    if (flags()->report_destroy_locked && s->owner_tid != kInvalidTid &&
        !s->IsFlagSet(MutexFlagBroken)) {
      // Mark broken so follow-up misuses of the same mutex are not
      // reported again.
      s->SetFlags(MutexFlagBroken);
      unlock_locked = true;
    }
    mid = s->GetId();
    last_lock = s->last_lock;
    if (!unlock_locked)
      s->Reset(thr->proc());  // must not reset it before the report is printed
  }
  if (unlock_locked && ShouldReport(thr, ReportTypeMutexDestroyLocked)) {
    ThreadRegistryLock l(&ctx->thread_registry);
    ScopedReport rep(ReportTypeMutexDestroyLocked);
    rep.AddMutex(mid);
    VarSizeStackTrace trace;
    ObtainCurrentStack(thr, pc, &trace);
    rep.AddStack(trace, true);
    // Attach the stack of the last lock operation, restored from the
    // trace at the epoch captured in last_lock.
    FastState last(last_lock);
    RestoreStack(last.tid(), last.epoch(), &trace, 0);
    rep.AddStack(trace, true);
    rep.AddLocation(addr, 1);
    OutputReport(thr, rep);

    // The report is out; now it is safe to reset the sync object.
    // Re-lookup because it may have been removed concurrently.
    SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
    if (s != 0) {
      Lock l(&s->mtx);
      s->Reset(thr->proc());
    }
  }
  thr->mset.Remove(mid);
  // Imitate a memory write to catch unlock-destroy races.
  // Do this outside of sync mutex, because it can report a race which locks
  // sync mutexes.
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessWrite | kAccessFree);
  // s will be destroyed and freed in MetaMap::FreeBlock.
}
    139  1.1  mrg 
    140  1.1  mrg void MutexPreLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
    141  1.1  mrg   DPrintf("#%d: MutexPreLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
    142  1.1  mrg   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    143  1.1  mrg     SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    144  1.1  mrg     {
    145  1.1  mrg       ReadLock l(&s->mtx);
    146  1.1  mrg       s->UpdateFlags(flagz);
    147  1.1  mrg       if (s->owner_tid != thr->tid) {
    148  1.1  mrg         Callback cb(thr, pc);
    149  1.1  mrg         ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    150  1.1  mrg       }
    151  1.1  mrg     }
    152  1.1  mrg     Callback cb(thr, pc);
    153  1.1  mrg     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    154  1.1  mrg   }
    155  1.1  mrg }
    156  1.1  mrg 
    157  1.1  mrg void MutexPostLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz, int rec) {
    158  1.1  mrg   DPrintf("#%d: MutexPostLock %zx flag=0x%x rec=%d\n",
    159  1.1  mrg       thr->tid, addr, flagz, rec);
    160  1.1  mrg   if (flagz & MutexFlagRecursiveLock)
    161  1.1  mrg     CHECK_GT(rec, 0);
    162  1.1  mrg   else
    163  1.1  mrg     rec = 1;
    164  1.1  mrg   if (IsAppMem(addr))
    165  1.1  mrg     MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
    166  1.1  mrg   u64 mid = 0;
    167  1.1  mrg   bool pre_lock = false;
    168  1.1  mrg   bool first = false;
    169  1.1  mrg   bool report_double_lock = false;
    170  1.1  mrg   {
    171  1.1  mrg     SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    172  1.1  mrg     Lock l(&s->mtx);
    173  1.1  mrg     s->UpdateFlags(flagz);
    174  1.1  mrg     thr->fast_state.IncrementEpoch();
    175  1.1  mrg     TraceAddEvent(thr, thr->fast_state, EventTypeLock, s->GetId());
    176  1.1  mrg     if (s->owner_tid == kInvalidTid) {
    177  1.1  mrg       CHECK_EQ(s->recursion, 0);
    178  1.1  mrg       s->owner_tid = thr->tid;
    179  1.1  mrg       s->last_lock = thr->fast_state.raw();
    180  1.1  mrg     } else if (s->owner_tid == thr->tid) {
    181  1.1  mrg       CHECK_GT(s->recursion, 0);
    182  1.1  mrg     } else if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    183  1.1  mrg       s->SetFlags(MutexFlagBroken);
    184  1.1  mrg       report_double_lock = true;
    185  1.1  mrg     }
    186  1.1  mrg     first = s->recursion == 0;
    187  1.1  mrg     s->recursion += rec;
    188  1.1  mrg     if (first) {
    189  1.1  mrg       AcquireImpl(thr, pc, &s->clock);
    190  1.1  mrg       AcquireImpl(thr, pc, &s->read_clock);
    191  1.1  mrg     } else if (!s->IsFlagSet(MutexFlagWriteReentrant)) {
    192  1.1  mrg     }
    193  1.1  mrg     thr->mset.Add(s->GetId(), true, thr->fast_state.epoch());
    194  1.1  mrg     if (first && common_flags()->detect_deadlocks) {
    195  1.1  mrg       pre_lock =
    196  1.1  mrg           (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
    197  1.1  mrg       Callback cb(thr, pc);
    198  1.1  mrg       if (pre_lock)
    199  1.1  mrg         ctx->dd->MutexBeforeLock(&cb, &s->dd, true);
    200  1.1  mrg       ctx->dd->MutexAfterLock(&cb, &s->dd, true, flagz & MutexFlagTryLock);
    201  1.1  mrg     }
    202  1.1  mrg     mid = s->GetId();
    203  1.1  mrg   }
    204  1.1  mrg   if (report_double_lock)
    205  1.1  mrg     ReportMutexMisuse(thr, pc, ReportTypeMutexDoubleLock, addr, mid);
    206  1.1  mrg   if (first && pre_lock && common_flags()->detect_deadlocks) {
    207  1.1  mrg     Callback cb(thr, pc);
    208  1.1  mrg     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    209  1.1  mrg   }
    210  1.1  mrg }
    211  1.1  mrg 
    212  1.1  mrg int MutexUnlock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
    213  1.1  mrg   DPrintf("#%d: MutexUnlock %zx flagz=0x%x\n", thr->tid, addr, flagz);
    214  1.1  mrg   if (IsAppMem(addr))
    215  1.1  mrg     MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
    216  1.1  mrg   u64 mid = 0;
    217  1.1  mrg   bool report_bad_unlock = false;
    218  1.1  mrg   int rec = 0;
    219  1.1  mrg   {
    220  1.1  mrg     SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    221  1.1  mrg     Lock l(&s->mtx);
    222  1.1  mrg     thr->fast_state.IncrementEpoch();
    223  1.1  mrg     TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    224  1.1  mrg     if (!SANITIZER_GO && (s->recursion == 0 || s->owner_tid != thr->tid)) {
    225  1.1  mrg       if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
    226  1.1  mrg         s->SetFlags(MutexFlagBroken);
    227  1.1  mrg         report_bad_unlock = true;
    228  1.1  mrg       }
    229  1.1  mrg     } else {
    230  1.1  mrg       rec = (flagz & MutexFlagRecursiveUnlock) ? s->recursion : 1;
    231  1.1  mrg       s->recursion -= rec;
    232  1.1  mrg       if (s->recursion == 0) {
    233  1.1  mrg         s->owner_tid = kInvalidTid;
    234  1.1  mrg         ReleaseStoreImpl(thr, pc, &s->clock);
    235  1.1  mrg       } else {
    236  1.1  mrg       }
    237  1.1  mrg     }
    238  1.1  mrg     thr->mset.Del(s->GetId(), true);
    239  1.1  mrg     if (common_flags()->detect_deadlocks && s->recursion == 0 &&
    240  1.1  mrg         !report_bad_unlock) {
    241  1.1  mrg       Callback cb(thr, pc);
    242  1.1  mrg       ctx->dd->MutexBeforeUnlock(&cb, &s->dd, true);
    243  1.1  mrg     }
    244  1.1  mrg     mid = s->GetId();
    245  1.1  mrg   }
    246  1.1  mrg   if (report_bad_unlock)
    247  1.1  mrg     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
    248  1.1  mrg   if (common_flags()->detect_deadlocks && !report_bad_unlock) {
    249  1.1  mrg     Callback cb(thr, pc);
    250  1.1  mrg     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    251  1.1  mrg   }
    252  1.1  mrg   return rec;
    253  1.1  mrg }
    254  1.1  mrg 
    255  1.1  mrg void MutexPreReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
    256  1.1  mrg   DPrintf("#%d: MutexPreReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
    257  1.1  mrg   if (!(flagz & MutexFlagTryLock) && common_flags()->detect_deadlocks) {
    258  1.1  mrg     {
    259  1.1  mrg       SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    260  1.1  mrg       ReadLock l(&s->mtx);
    261  1.1  mrg       s->UpdateFlags(flagz);
    262  1.1  mrg       Callback cb(thr, pc);
    263  1.1  mrg       ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
    264  1.1  mrg     }
    265  1.1  mrg     Callback cb(thr, pc);
    266  1.1  mrg     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    267  1.1  mrg   }
    268  1.1  mrg }
    269  1.1  mrg 
// Models a (successful) read-lock acquisition: traces the event,
// acquires the mutex clock, and notifies the deadlock detector.
// Reports a read lock taken while the mutex is write-held.
void MutexPostReadLock(ThreadState *thr, uptr pc, uptr addr, u32 flagz) {
  DPrintf("#%d: MutexPostReadLock %zx flagz=0x%x\n", thr->tid, addr, flagz);
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  u64 mid = 0;
  bool report_bad_lock = false;
  bool pre_lock = false;
  {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    // Read lock: multiple readers may run this path concurrently.
    ReadLock l(&s->mtx);
    s->UpdateFlags(flagz);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRLock, s->GetId());
    // A recorded owner means the mutex is write-locked: bad read lock.
    if (s->owner_tid != kInvalidTid) {
      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_lock = true;
      }
    }
    // Synchronize with prior write critical sections.
    AcquireImpl(thr, pc, &s->clock);
    s->last_lock = thr->fast_state.raw();
    thr->mset.Add(s->GetId(), false, thr->fast_state.epoch());
    if (common_flags()->detect_deadlocks) {
      // Try-locks got no MutexPreReadLock call, so run BeforeLock here
      // when requested.
      pre_lock =
          (flagz & MutexFlagDoPreLockOnPostLock) && !(flagz & MutexFlagTryLock);
      Callback cb(thr, pc);
      if (pre_lock)
        ctx->dd->MutexBeforeLock(&cb, &s->dd, false);
      ctx->dd->MutexAfterLock(&cb, &s->dd, false, flagz & MutexFlagTryLock);
    }
    mid = s->GetId();
  }
  if (report_bad_lock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadLock, addr, mid);
  if (pre_lock  && common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
    309  1.1  mrg 
// Models a read-unlock: traces the event, releases into the mutex's
// read clock, and notifies the deadlock detector. Reports a read
// unlock of a write-held mutex.
void MutexReadUnlock(ThreadState *thr, uptr pc, uptr addr) {
  DPrintf("#%d: MutexReadUnlock %zx\n", thr->tid, addr);
  if (IsAppMem(addr))
    MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
  u64 mid = 0;
  bool report_bad_unlock = false;
  {
    SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    Lock l(&s->mtx);
    thr->fast_state.IncrementEpoch();
    TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    // A recorded owner means the mutex is write-locked, so a read
    // unlock is bogus.
    if (s->owner_tid != kInvalidTid) {
      if (flags()->report_mutex_bugs && !s->IsFlagSet(MutexFlagBroken)) {
        s->SetFlags(MutexFlagBroken);
        report_bad_unlock = true;
      }
    }
    // Readers release into read_clock; writers acquire it on lock.
    ReleaseImpl(thr, pc, &s->read_clock);
    if (common_flags()->detect_deadlocks && s->recursion == 0) {
      Callback cb(thr, pc);
      ctx->dd->MutexBeforeUnlock(&cb, &s->dd, false);
    }
    mid = s->GetId();
  }
  thr->mset.Del(mid, false);
  if (report_bad_unlock)
    ReportMutexMisuse(thr, pc, ReportTypeMutexBadReadUnlock, addr, mid);
  if (common_flags()->detect_deadlocks) {
    Callback cb(thr, pc);
    ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
  }
}
    342  1.1  mrg 
    343  1.1  mrg void MutexReadOrWriteUnlock(ThreadState *thr, uptr pc, uptr addr) {
    344  1.1  mrg   DPrintf("#%d: MutexReadOrWriteUnlock %zx\n", thr->tid, addr);
    345  1.1  mrg   if (IsAppMem(addr))
    346  1.1  mrg     MemoryAccess(thr, pc, addr, 1, kAccessRead | kAccessAtomic);
    347  1.1  mrg   u64 mid = 0;
    348  1.1  mrg   bool report_bad_unlock = false;
    349  1.1  mrg   {
    350  1.1  mrg     SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    351  1.1  mrg     Lock l(&s->mtx);
    352  1.1  mrg     bool write = true;
    353  1.1  mrg     if (s->owner_tid == kInvalidTid) {
    354  1.1  mrg       // Seems to be read unlock.
    355  1.1  mrg       write = false;
    356  1.1  mrg       thr->fast_state.IncrementEpoch();
    357  1.1  mrg       TraceAddEvent(thr, thr->fast_state, EventTypeRUnlock, s->GetId());
    358  1.1  mrg       ReleaseImpl(thr, pc, &s->read_clock);
    359  1.1  mrg     } else if (s->owner_tid == thr->tid) {
    360  1.1  mrg       // Seems to be write unlock.
    361  1.1  mrg       thr->fast_state.IncrementEpoch();
    362  1.1  mrg       TraceAddEvent(thr, thr->fast_state, EventTypeUnlock, s->GetId());
    363  1.1  mrg       CHECK_GT(s->recursion, 0);
    364  1.1  mrg       s->recursion--;
    365  1.1  mrg       if (s->recursion == 0) {
    366  1.1  mrg         s->owner_tid = kInvalidTid;
    367  1.1  mrg         ReleaseStoreImpl(thr, pc, &s->clock);
    368  1.1  mrg       } else {
    369  1.1  mrg       }
    370  1.1  mrg     } else if (!s->IsFlagSet(MutexFlagBroken)) {
    371  1.1  mrg       s->SetFlags(MutexFlagBroken);
    372  1.1  mrg       report_bad_unlock = true;
    373  1.1  mrg     }
    374  1.1  mrg     thr->mset.Del(s->GetId(), write);
    375  1.1  mrg     if (common_flags()->detect_deadlocks && s->recursion == 0) {
    376  1.1  mrg       Callback cb(thr, pc);
    377  1.1  mrg       ctx->dd->MutexBeforeUnlock(&cb, &s->dd, write);
    378  1.1  mrg     }
    379  1.1  mrg     mid = s->GetId();
    380  1.1  mrg   }
    381  1.1  mrg   if (report_bad_unlock)
    382  1.1  mrg     ReportMutexMisuse(thr, pc, ReportTypeMutexBadUnlock, addr, mid);
    383  1.1  mrg   if (common_flags()->detect_deadlocks) {
    384  1.1  mrg     Callback cb(thr, pc);
    385  1.1  mrg     ReportDeadlock(thr, pc, ctx->dd->GetReport(&cb));
    386  1.1  mrg   }
    387  1.1  mrg }
    388  1.1  mrg 
    389  1.1  mrg void MutexRepair(ThreadState *thr, uptr pc, uptr addr) {
    390  1.1  mrg   DPrintf("#%d: MutexRepair %zx\n", thr->tid, addr);
    391  1.1  mrg   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    392  1.1  mrg   Lock l(&s->mtx);
    393  1.1  mrg   s->owner_tid = kInvalidTid;
    394  1.1  mrg   s->recursion = 0;
    395  1.1  mrg }
    396  1.1  mrg 
    397  1.1  mrg void MutexInvalidAccess(ThreadState *thr, uptr pc, uptr addr) {
    398  1.1  mrg   DPrintf("#%d: MutexInvalidAccess %zx\n", thr->tid, addr);
    399  1.1  mrg   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, true);
    400  1.1  mrg   ReportMutexMisuse(thr, pc, ReportTypeMutexInvalidAccess, addr, s->GetId());
    401  1.1  mrg }
    402  1.1  mrg 
    403  1.1  mrg void Acquire(ThreadState *thr, uptr pc, uptr addr) {
    404  1.1  mrg   DPrintf("#%d: Acquire %zx\n", thr->tid, addr);
    405  1.1  mrg   if (thr->ignore_sync)
    406  1.1  mrg     return;
    407  1.1  mrg   SyncVar *s = ctx->metamap.GetSyncIfExists(addr);
    408  1.1  mrg   if (!s)
    409  1.1  mrg     return;
    410  1.1  mrg   ReadLock l(&s->mtx);
    411  1.1  mrg   AcquireImpl(thr, pc, &s->clock);
    412  1.1  mrg }
    413  1.1  mrg 
// ThreadRegistry callback for AcquireGlobal: folds |tctx|'s epoch into
// the calling thread's vector clock (|arg|). Finished threads
// contribute their final epoch (epoch1); running threads contribute
// their current epoch and note the global acquire on their own clock.
static void UpdateClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning) {
    epoch = tctx->thr->fast_state.epoch();
    tctx->thr->clock.NoteGlobalAcquire(epoch);
  }
  thr->clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
    424  1.1  mrg 
    425  1.1  mrg void AcquireGlobal(ThreadState *thr) {
    426  1.1  mrg   DPrintf("#%d: AcquireGlobal\n", thr->tid);
    427  1.1  mrg   if (thr->ignore_sync)
    428  1.1  mrg     return;
    429  1.1  mrg   ThreadRegistryLock l(&ctx->thread_registry);
    430  1.1  mrg   ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateClockCallback, thr);
    431  1.1  mrg }
    432  1.1  mrg 
    433  1.1  mrg void ReleaseStoreAcquire(ThreadState *thr, uptr pc, uptr addr) {
    434  1.1  mrg   DPrintf("#%d: ReleaseStoreAcquire %zx\n", thr->tid, addr);
    435  1.1  mrg   if (thr->ignore_sync)
    436  1.1  mrg     return;
    437  1.1  mrg   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    438  1.1  mrg   Lock l(&s->mtx);
    439  1.1  mrg   thr->fast_state.IncrementEpoch();
    440  1.1  mrg   // Can't increment epoch w/o writing to the trace as well.
    441  1.1  mrg   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    442  1.1  mrg   ReleaseStoreAcquireImpl(thr, pc, &s->clock);
    443  1.1  mrg }
    444  1.1  mrg 
    445  1.1  mrg void Release(ThreadState *thr, uptr pc, uptr addr) {
    446  1.1  mrg   DPrintf("#%d: Release %zx\n", thr->tid, addr);
    447  1.1  mrg   if (thr->ignore_sync)
    448  1.1  mrg     return;
    449  1.1  mrg   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    450  1.1  mrg   Lock l(&s->mtx);
    451  1.1  mrg   thr->fast_state.IncrementEpoch();
    452  1.1  mrg   // Can't increment epoch w/o writing to the trace as well.
    453  1.1  mrg   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    454  1.1  mrg   ReleaseImpl(thr, pc, &s->clock);
    455  1.1  mrg }
    456  1.1  mrg 
    457  1.1  mrg void ReleaseStore(ThreadState *thr, uptr pc, uptr addr) {
    458  1.1  mrg   DPrintf("#%d: ReleaseStore %zx\n", thr->tid, addr);
    459  1.1  mrg   if (thr->ignore_sync)
    460  1.1  mrg     return;
    461  1.1  mrg   SyncVar *s = ctx->metamap.GetSyncOrCreate(thr, pc, addr, false);
    462  1.1  mrg   Lock l(&s->mtx);
    463  1.1  mrg   thr->fast_state.IncrementEpoch();
    464  1.1  mrg   // Can't increment epoch w/o writing to the trace as well.
    465  1.1  mrg   TraceAddEvent(thr, thr->fast_state, EventTypeMop, 0);
    466  1.1  mrg   ReleaseStoreImpl(thr, pc, &s->clock);
    467  1.1  mrg }
    468  1.1  mrg 
    469  1.1  mrg #if !SANITIZER_GO
// ThreadRegistry callback for AfterSleep: records |tctx|'s epoch into
// the calling thread's last_sleep_clock. Running threads contribute
// their current epoch; finished threads their final epoch (epoch1).
static void UpdateSleepClockCallback(ThreadContextBase *tctx_base, void *arg) {
  ThreadState *thr = reinterpret_cast<ThreadState*>(arg);
  ThreadContext *tctx = static_cast<ThreadContext*>(tctx_base);
  u64 epoch = tctx->epoch1;
  if (tctx->status == ThreadStatusRunning)
    epoch = tctx->thr->fast_state.epoch();
  thr->last_sleep_clock.set(&thr->proc()->clock_cache, tctx->tid, epoch);
}
    478  1.1  mrg 
    479  1.1  mrg void AfterSleep(ThreadState *thr, uptr pc) {
    480  1.1  mrg   DPrintf("#%d: AfterSleep\n", thr->tid);
    481  1.1  mrg   if (thr->ignore_sync)
    482  1.1  mrg     return;
    483  1.1  mrg   thr->last_sleep_stack_id = CurrentStackId(thr, pc);
    484  1.1  mrg   ThreadRegistryLock l(&ctx->thread_registry);
    485  1.1  mrg   ctx->thread_registry.RunCallbackForEachThreadLocked(UpdateSleepClockCallback,
    486  1.1  mrg                                                       thr);
    487  1.1  mrg }
    488  1.1  mrg #endif
    489  1.1  mrg 
    490  1.1  mrg void AcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
    491  1.1  mrg   if (thr->ignore_sync)
    492  1.1  mrg     return;
    493  1.1  mrg   thr->clock.set(thr->fast_state.epoch());
    494  1.1  mrg   thr->clock.acquire(&thr->proc()->clock_cache, c);
    495  1.1  mrg }
    496  1.1  mrg 
    497  1.1  mrg void ReleaseStoreAcquireImpl(ThreadState *thr, uptr pc, SyncClock *c) {
    498  1.1  mrg   if (thr->ignore_sync)
    499  1.1  mrg     return;
    500  1.1  mrg   thr->clock.set(thr->fast_state.epoch());
    501  1.1  mrg   thr->fast_synch_epoch = thr->fast_state.epoch();
    502  1.1  mrg   thr->clock.releaseStoreAcquire(&thr->proc()->clock_cache, c);
    503  1.1  mrg }
    504  1.1  mrg 
    505  1.1  mrg void ReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
    506  1.1  mrg   if (thr->ignore_sync)
    507  1.1  mrg     return;
    508  1.1  mrg   thr->clock.set(thr->fast_state.epoch());
    509  1.1  mrg   thr->fast_synch_epoch = thr->fast_state.epoch();
    510  1.1  mrg   thr->clock.release(&thr->proc()->clock_cache, c);
    511  1.1  mrg }
    512  1.1  mrg 
    513  1.1  mrg void ReleaseStoreImpl(ThreadState *thr, uptr pc, SyncClock *c) {
    514  1.1  mrg   if (thr->ignore_sync)
    515  1.1  mrg     return;
    516  1.1  mrg   thr->clock.set(thr->fast_state.epoch());
    517  1.1  mrg   thr->fast_synch_epoch = thr->fast_state.epoch();
    518  1.1  mrg   thr->clock.ReleaseStore(&thr->proc()->clock_cache, c);
    519  1.1  mrg }
    520  1.1  mrg 
    521  1.1  mrg void AcquireReleaseImpl(ThreadState *thr, uptr pc, SyncClock *c) {
    522  1.1  mrg   if (thr->ignore_sync)
    523  1.1  mrg     return;
    524  1.1  mrg   thr->clock.set(thr->fast_state.epoch());
    525  1.1  mrg   thr->fast_synch_epoch = thr->fast_state.epoch();
    526  1.1  mrg   thr->clock.acq_rel(&thr->proc()->clock_cache, c);
    527  1.1  mrg }
    528  1.1  mrg 
    529  1.1  mrg void ReportDeadlock(ThreadState *thr, uptr pc, DDReport *r) {
    530  1.1  mrg   if (r == 0 || !ShouldReport(thr, ReportTypeDeadlock))
    531  1.1  mrg     return;
    532  1.1  mrg   ThreadRegistryLock l(&ctx->thread_registry);
    533  1.1  mrg   ScopedReport rep(ReportTypeDeadlock);
    534  1.1  mrg   for (int i = 0; i < r->n; i++) {
    535  1.1  mrg     rep.AddMutex(r->loop[i].mtx_ctx0);
    536  1.1  mrg     rep.AddUniqueTid((int)r->loop[i].thr_ctx);
    537  1.1  mrg     rep.AddThread((int)r->loop[i].thr_ctx);
    538  1.1  mrg   }
    539  1.1  mrg   uptr dummy_pc = 0x42;
    540  1.1  mrg   for (int i = 0; i < r->n; i++) {
    541  1.1  mrg     for (int j = 0; j < (flags()->second_deadlock_stack ? 2 : 1); j++) {
    542  1.1  mrg       u32 stk = r->loop[i].stk[j];
    543  1.1  mrg       if (stk && stk != 0xffffffff) {
    544  1.1  mrg         rep.AddStack(StackDepotGet(stk), true);
    545  1.1  mrg       } else {
    546  1.1  mrg         // Sometimes we fail to extract the stack trace (FIXME: investigate),
    547  1.1  mrg         // but we should still produce some stack trace in the report.
    548  1.1  mrg         rep.AddStack(StackTrace(&dummy_pc, 1), true);
    549  1.1  mrg       }
    550  1.1  mrg     }
    551  1.1  mrg   }
    552  1.1  mrg   OutputReport(thr, rep);
    553  1.1  mrg }
    554  1.1  mrg 
    555  1.1  mrg }  // namespace __tsan
    556