//=-- lsan_common.cc ------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
BlockingMutex global_mutex(LINKER_INITIALIZED);

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#include "lsan_flags.inc"
#undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#define LSAN_FLAG(Type, Name, DefaultValue, Description) \
  RegisterFlag(parser, #Name, Description, &f->Name);
#include "lsan_flags.inc"
#undef LSAN_FLAG
}
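
// Both methods above expand the LSAN_FLAG X-macro over lsan_flags.inc: once to
// assign defaults and once to register each flag with the parser. As an
// illustration (not a verbatim copy of lsan_flags.inc), an entry has the shape
//   LSAN_FLAG(bool, use_registers, true, "Scan thread registers for pointers.")
// so adding a flag to that file wires up both paths automatically.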

#define LOG_POINTERS(...)                           \
  do {                                              \
    if (flags()->log_pointers) Report(__VA_ARGS__); \
  } while (0)

#define LOG_THREADS(...)                           \
  do {                                             \
    if (flags()->log_threads) Report(__VA_ARGS__); \
  } while (0)
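
// These macros reduce to a flag check plus Report(), so pointer/thread tracing
// is silent unless explicitly enabled at run time, e.g. (illustrative):
//   LSAN_OPTIONS=log_pointers=1:log_threads=1 ./a.out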

ALIGNED(64) static char suppression_placeholder[sizeof(SuppressionContext)];
static SuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = { kSuppressionLeak };
static const char kStdSuppressions[] =
#if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
  // definition.
  "leak:*pthread_exit*\n"
#endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#if SANITIZER_MAC
  // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
  "leak:*_os_trace*\n"
#endif
  // TLS leak in some glibc versions, described in
  // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
  "leak:*tls_get_addr*\n";

void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder) // NOLINT
      SuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
  suppression_ctx->ParseFromFile(flags()->suppressions);
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
  suppression_ctx->Parse(kStdSuppressions);
}

static SuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

static InternalMmapVector<RootRegion> *root_regions;

InternalMmapVector<RootRegion> const *GetRootRegions() { return root_regions; }

void InitializeRootRegions() {
  CHECK(!root_regions);
  ALIGNED(64) static char placeholder[sizeof(InternalMmapVector<RootRegion>)];
  root_regions = new (placeholder) InternalMmapVector<RootRegion>();  // NOLINT
}

const char *MaybeCallLsanDefaultOptions() {
  return (&__lsan_default_options) ? __lsan_default_options() : "";
}
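
// __lsan_default_options and __lsan_default_suppressions are weak interface
// functions; taking their address tests whether the application provides an
// override. A minimal client-side override looks like the following sketch
// (illustrative; the suppressed library name is hypothetical):
//   extern "C" const char *__lsan_default_options() {
//     return "print_suppressions=0";
//   }
//   extern "C" const char *__lsan_default_suppressions() {
//     return "leak:libfoo.so\n";
//   }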

void InitCommonLsan() {
  InitializeRootRegions();
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator: public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() { }
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#if defined(__x86_64__)
  // Accept only canonical form user-space addresses.
  return ((p >> 47) == 0);
#elif defined(__mips64)
  return ((p >> 40) == 0);
#elif defined(__aarch64__)
  unsigned runtimeVMA =
    (MostSignificantSetBitIndex(GET_CURRENT_FRAME()) + 1);
  return ((p >> runtimeVMA) == 0);
#else
  return true;
#endif
}
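
// Worked example for CanBeAHeapPointer() above: kMinAddress is 4 * 4096 =
// 0x4000, so values in the first four pages (null and near-null) are rejected
// outright. On x86_64, (p >> 47) == 0 keeps only lower-half canonical
// user-space addresses; on aarch64 the usable VA width is not fixed, so it is
// estimated at run time from the current frame address instead.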

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void **>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin) continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored) continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          pp, p, chunk, chunk + m.requested_size(), m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
                 chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}
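
// The two usage modes described above correspond to the two call shapes found
// later in this file (shown here for orientation):
//   ScanRangeForPointers(stack_begin, stack_end, &frontier, "STACK", kReachable);
//   ScanRangeForPointers(chunk, chunk + size, /* frontier */ nullptr, "HEAP",
//                        kIndirectlyLeaked);
// With a null frontier the newly tagged chunks are not queued, so no flood
// fill follows.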

// Scans a global range for pointers.
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ForEachExtraStackRangeCb(uptr begin, uptr end, void* arg) {
  Frontier *frontier = reinterpret_cast<Frontier *>(arg);
  ScanRangeForPointers(begin, end, frontier, "FAKE STACK", kReachable);
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalMmapVector<uptr> registers(suspended_threads.RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end =
      reinterpret_cast<uptr>(registers.data() + registers.size());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, registers.data(), &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %d.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL) continue;
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, stack_begin, stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      ForEachExtraStackRange(os_id, ForEachExtraStackRangeCb, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", tls_begin, tls_end);
        // If the tls and cache ranges don't overlap, scan the full tls range;
        // otherwise, only scan the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
      if (dtls && !DTLSInDestruction(dtls)) {
        for (uptr j = 0; j < dtls->dtv_size; ++j) {
          uptr dtls_beg = dtls->dtv[j].beg;
          uptr dtls_end = dtls_beg + dtls->dtv[j].size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %zu at %p-%p.\n", j, dtls_beg, dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        }
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %d has DTLS under destruction.\n", os_id);
      }
    }
  }
}

void ScanRootRegion(Frontier *frontier, const RootRegion &root_region,
                    uptr region_begin, uptr region_end, bool is_readable) {
  uptr intersection_begin = Max(root_region.begin, region_begin);
  uptr intersection_end = Min(region_end, root_region.begin + root_region.size);
  if (intersection_begin >= intersection_end) return;
  LOG_POINTERS("Root region %p-%p intersects with mapped region %p-%p (%s)\n",
               root_region.begin, root_region.begin + root_region.size,
               region_begin, region_end,
               is_readable ? "readable" : "unreadable");
  if (is_readable)
    ScanRangeForPointers(intersection_begin, intersection_end, frontier, "ROOT",
                         kReachable);
}
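
// Worked example for ScanRootRegion() above (hypothetical addresses): for a
// registered root region [0x1000, 0x3000) and a mapped segment
// [0x2000, 0x4000), the intersection is [Max(0x1000, 0x2000),
// Min(0x4000, 0x3000)) = [0x2000, 0x3000), which is scanned only if the
// segment is readable. Empty intersections return before any logging.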

static void ProcessRootRegion(Frontier *frontier,
                              const RootRegion &root_region) {
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  while (proc_maps.Next(&segment)) {
    ScanRootRegion(frontier, root_region, segment.start, segment.end,
                   segment.IsReadable());
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions) return;
  CHECK(root_regions);
  for (uptr i = 0; i < root_regions->size(); i++) {
    ProcessRootRegion(frontier, (*root_regions)[i]);
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n",
                 chunk, chunk + m.requested_size(), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

static uptr GetCallerPC(u32 stack_id, StackDepotReverseMap *map) {
  CHECK(stack_id);
  StackTrace stack = map->Get(stack_id);
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

struct InvalidPCParam {
  Frontier *frontier;
  StackDepotReverseMap *stack_depot_reverse_map;
  bool skip_linker_allocations;
};

// ForEachChunk callback. If the caller pc is invalid or is within the linker,
// mark as reachable. Called by ProcessPlatformSpecificAllocations.
static void MarkInvalidPCCb(uptr chunk, void *arg) {
  CHECK(arg);
  InvalidPCParam *param = reinterpret_cast<InvalidPCParam *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable && m.tag() != kIgnored) {
    u32 stack_id = m.stack_trace_id();
    uptr caller_pc = 0;
    if (stack_id > 0)
      caller_pc = GetCallerPC(stack_id, param->stack_depot_reverse_map);
    // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
    // it as reachable, as we can't properly report its allocation stack anyway.
    if (caller_pc == 0 || (param->skip_linker_allocations &&
                           GetLinker()->containsAddress(caller_pc))) {
      m.set_tag(kReachable);
      param->frontier->push_back(chunk);
    }
  }
}

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, internal dynamic loader's loaded
// modules accounting etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
void ProcessPC(Frontier *frontier) {
  StackDepotReverseMap stack_depot_reverse_map;
  InvalidPCParam arg;
  arg.frontier = frontier;
  arg.stack_depot_reverse_map = &stack_depot_reverse_map;
  arg.skip_linker_allocations =
      flags()->use_tls && flags()->use_ld_allocations && GetLinker() != nullptr;
  ForEachChunk(MarkInvalidPCCb, &arg);
}

// Sets the appropriate tag on each chunk.
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier;

  ForEachChunk(CollectIgnoredCb, &frontier);
  ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  ProcessRootRegions(&frontier);
  FloodFillTag(&frontier, kReachable);

  CHECK_EQ(0, frontier.size());
  ProcessPC(&frontier);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}
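
// In summary, ClassifyAllChunks() above builds the root set in stages (ignored
// chunks, globals, thread stacks/registers/TLS, user root regions), flood
// fills reachability from it, then runs a second, cheaper-to-skip pass for
// chunks with invalid or linker-originated caller PCs and for
// platform-specific roots, and finally re-tags chunks that are reachable only
// from other leaked chunks as indirectly leaked.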

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  StackDepotGet(stack_trace_id).Print();
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    u32 resolution = flags()->resolution;
    u32 stack_trace_id = 0;
    if (resolution > 0) {
      StackTrace stack = StackDepotGet(m.stack_trace_id());
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    } else {
      stack_trace_id = m.stack_trace_id();
    }
    leak_report->AddLeakedChunk(chunk, stack_trace_id, m.requested_size(),
                                m.tag());
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  GetSuppressionContext()->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(atomic_load_relaxed(
        &matched[i]->hit_count)), matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

struct CheckForLeaksParam {
  bool success;
  LeakReport leak_report;
};

static void ReportIfNotSuspended(ThreadContextBase *tctx, void *arg) {
  const InternalMmapVector<tid_t> &suspended_threads =
      *(const InternalMmapVector<tid_t> *)arg;
  if (tctx->status == ThreadStatusRunning) {
    uptr i = InternalLowerBound(suspended_threads, 0, suspended_threads.size(),
                                tctx->os_id, CompareLess<int>());
    if (i >= suspended_threads.size() || suspended_threads[i] != tctx->os_id)
      Report("Running thread %d was not suspended. False leaks are possible.\n",
             tctx->os_id);
  }
}

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  GetThreadRegistryLocked()->RunCallbackForEachThreadLocked(
      &ReportIfNotSuspended, &threads);
}

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return false;
  EnsureMainThreadIDIsCorrect();
  CheckForLeaksParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  DoStopTheWorld(CheckForLeaksCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Report(
        "HINT: For debugging, try setting environment variable "
        "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
    Report(
        "HINT: LeakSanitizer does not work under ptrace (strace, gdb, etc)\n");
    Die();
  }
  param.leak_report.ApplySuppressions();
  uptr unsuppressed_count = param.leak_report.UnsuppressedLeakCount();
  if (unsuppressed_count > 0) {
    Decorator d;
    Printf("\n"
           "================================================================="
           "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    param.leak_report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    param.leak_report.PrintSummary();
    return true;
  }
  return false;
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  if (already_done) return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks) HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }
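
// DoLeakCheck() above is the one-shot, end-of-process check: it latches
// already_done so repeated calls are no-ops, records whether leaks were
// reported, and lets HandleLeaks() decide how to terminate. The recoverable
// variant can run any number of times and only returns a status. From user
// code the usual entry points are the C interface functions defined near the
// end of this file, e.g. (illustrative):
//   __lsan_do_leak_check();                           // report, maybe die
//   int leaked = __lsan_do_recoverable_leak_check();  // report, keep running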

static Suppression *GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  SuppressionContext *suppressions = GetSuppressionContext();
  if (const char *module_name =
          Symbolizer::GetOrInit()->GetModuleNameForPc(addr))
    if (suppressions->Match(module_name, kSuppressionLeak, &s))
      return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (suppressions->Match(cur->info.function, kSuppressionLeak, &s) ||
        suppressions->Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  StackTrace stack = StackDepotGet(stack_trace_id);
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) return s;
  }
  return nullptr;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunk(uptr chunk, u32 stack_trace_id,
                                uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  uptr i;
  for (i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      break;
    }
  }
  if (i == leaks_.size()) {
    if (leaks_.size() == kMaxLeaksConsidered) return;
    Leak leak = { next_id_++, /* hit_count */ 1, leaked_size, stack_trace_id,
                  is_directly_leaked, /* is_suppressed */ false };
    leaks_.push_back(leak);
  }
  if (flags()->report_objects) {
    LeakedObject obj = {leaks_[i].id, chunk, leaked_size};
    leaked_objects_.push_back(obj);
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report) break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  PrintStackTraceById(leaks_[index].stack_trace_id);

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary(kMaxSummaryLength);
  summary.append("%zu byte(s) leaked in %zu allocation(s).", bytes,
                 allocations);
  ReportErrorSummary(summary.data());
}

void LeakReport::ApplySuppressions() {
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      atomic_store_relaxed(&s->hit_count, atomic_load_relaxed(&s->hit_count) +
          leaks_[i].hit_count);
      leaks_[i].is_suppressed = true;
    }
  }
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) result++;
  return result;
}

} // namespace __lsan
#else // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() { }
void DoLeakCheck() { }
void DoRecoverableLeakCheckVoid() { }
void DisableInThisThread() { }
void EnableInThisThread() { }
}
#endif // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1, "__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif // CAN_SANITIZE_LEAKS
}
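
// Usage sketch for __lsan_ignore_object() (illustrative; "leak_me" is a
// hypothetical allocation the client deliberately never frees):
//   #include <sanitizer/lsan_interface.h>
//   void *leak_me = malloc(64);
//   __lsan_ignore_object(leak_me);  // excluded from leak reports
// In effect the chunk is tagged kIgnored, and CollectIgnoredCb() later treats
// it as a root for the reachability scan.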

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  RootRegion region = {reinterpret_cast<uptr>(begin), size};
  root_regions->push_back(region);
  VReport(1, "Registered root region at %p of size %llu\n", begin, size);
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  BlockingMutexLock l(&global_mutex);
  CHECK(root_regions);
  bool removed = false;
  for (uptr i = 0; i < root_regions->size(); i++) {
    RootRegion region = (*root_regions)[i];
    if (region.begin == reinterpret_cast<uptr>(begin) && region.size == size) {
      removed = true;
      uptr last_index = root_regions->size() - 1;
      (*root_regions)[i] = (*root_regions)[last_index];
      root_regions->pop_back();
      VReport(1, "Unregistered root region at %p of size %llu\n", begin, size);
      break;
    }
  }
  if (!removed) {
    Report(
        "__lsan_unregister_root_region(): region at %p of size %llu has not "
        "been registered.\n",
        begin, size);
    Die();
  }
#endif // CAN_SANITIZE_LEAKS
}
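
// Usage sketch for the root-region interface (illustrative; the mmap'd arena
// is a hypothetical example of memory that holds pointers LSan would not
// otherwise scan):
//   void *arena = mmap(nullptr, arena_size, PROT_READ | PROT_WRITE,
//                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
//   __lsan_register_root_region(arena, arena_size);
//   ...                                    // pointers stored inside the arena
//   __lsan_unregister_root_region(arena, arena_size);
// Unregistering must use the exact (begin, size) pair that was registered;
// unregistering an unknown region is a fatal error, as seen above.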

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}
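
// Usage sketch for the disable/enable pair (illustrative): allocations made
// while the current thread is disabled are not reported as leaks. Calls must
// be balanced; an extra __lsan_enable() triggers DisableCounterUnderflow()
// above.
//   __lsan_disable();
//   void *intentional = malloc(128);  // hypothetical intentional allocation
//   __lsan_enable();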

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif // CAN_SANITIZE_LEAKS
  return 0;
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char * __lsan_default_options() {
  return "";
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}

SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
const char *__lsan_default_suppressions() {
  return "";
}
#endif
} // extern "C"