//=-- lsan_common.cpp -----------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_suppressions.h"
#include "sanitizer_common/sanitizer_thread_registry.h"
#include "sanitizer_common/sanitizer_tls_get_addr.h"

#if CAN_SANITIZE_LEAKS

#  if SANITIZER_APPLE
// https://github.com/apple-oss-distributions/objc4/blob/8701d5672d3fd3cd817aeb84db1077aafe1a1604/runtime/objc-runtime-new.h#L127
#    if SANITIZER_IOS && !SANITIZER_IOSSIM
#      define OBJC_DATA_MASK 0x0000007ffffffff8UL
#    else
#      define OBJC_DATA_MASK 0x00007ffffffffff8UL
#    endif
#  endif

namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject, and
// also to protect the global list of root regions.
static Mutex global_mutex;

Flags lsan_flags;

void DisableCounterUnderflow() {
  if (common_flags()->detect_leaks) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
}

void Flags::SetDefaults() {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) Name = DefaultValue;
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

void RegisterLsanFlags(FlagParser *parser, Flags *f) {
#  define LSAN_FLAG(Type, Name, DefaultValue, Description) \
    RegisterFlag(parser, #Name, Description, &f->Name);
#  include "lsan_flags.inc"
#  undef LSAN_FLAG
}

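// Logging helpers, gated by run-time flags. The do { ... } while (0) wrapper
// makes each macro expand to a single statement, so it composes safely with
// surrounding if/else.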
#  define LOG_POINTERS(...)      \
    do {                         \
      if (flags()->log_pointers) \
        Report(__VA_ARGS__);     \
    } while (0)

#  define LOG_THREADS(...)      \
    do {                        \
      if (flags()->log_threads) \
        Report(__VA_ARGS__);    \
    } while (0)

class LeakSuppressionContext {
  bool parsed = false;
  SuppressionContext context;
  bool suppressed_stacks_sorted = true;
  InternalMmapVector<u32> suppressed_stacks;
  const LoadedModule *suppress_module = nullptr;

  void LazyInit();
  Suppression *GetSuppressionForAddr(uptr addr);
  bool SuppressInvalid(const StackTrace &stack);
  bool SuppressByRule(const StackTrace &stack, uptr hit_count, uptr total_size);

 public:
  LeakSuppressionContext(const char *suppression_types[],
                         int suppression_types_num)
      : context(suppression_types, suppression_types_num) {}

  bool Suppress(u32 stack_trace_id, uptr hit_count, uptr total_size);

  const InternalMmapVector<u32> &GetSortedSuppressedStacks() {
    if (!suppressed_stacks_sorted) {
      suppressed_stacks_sorted = true;
      SortAndDedup(suppressed_stacks);
    }
    return suppressed_stacks;
  }
  void PrintMatchedSuppressions();
};

ALIGNED(64) static char suppression_placeholder[sizeof(LeakSuppressionContext)];
static LeakSuppressionContext *suppression_ctx = nullptr;
static const char kSuppressionLeak[] = "leak";
static const char *kSuppressionTypes[] = {kSuppressionLeak};
static const char kStdSuppressions[] =
#  if SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // For more details refer to the SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
    // definition.
    "leak:*pthread_exit*\n"
#  endif  // SANITIZER_SUPPRESS_LEAK_ON_PTHREAD_EXIT
#  if SANITIZER_APPLE
    // For Darwin and os_log/os_trace: https://reviews.llvm.org/D35173
    "leak:*_os_trace*\n"
#  endif
    // TLS leak in some glibc versions, described in
    // https://sourceware.org/bugzilla/show_bug.cgi?id=12650.
    "leak:*tls_get_addr*\n";

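// Suppression files consist of lines of the form "leak:<glob>"; each pattern
// is matched against the module, function, and source file names on a leak's
// allocation stack (see GetSuppressionForAddr below). Illustrative entries
// (patterns hypothetical):
//   leak:libfoo.so
//   leak:MyType::Leaky*
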
void InitializeSuppressions() {
  CHECK_EQ(nullptr, suppression_ctx);
  suppression_ctx = new (suppression_placeholder)
      LeakSuppressionContext(kSuppressionTypes, ARRAY_SIZE(kSuppressionTypes));
}

void LeakSuppressionContext::LazyInit() {
  if (!parsed) {
    parsed = true;
    context.ParseFromFile(flags()->suppressions);
    if (&__lsan_default_suppressions)
      context.Parse(__lsan_default_suppressions());
    context.Parse(kStdSuppressions);
    if (flags()->use_tls && flags()->use_ld_allocations)
      suppress_module = GetLinker();
  }
}

Suppression *LeakSuppressionContext::GetSuppressionForAddr(uptr addr) {
  Suppression *s = nullptr;

  // Suppress by module name.
  const char *module_name = Symbolizer::GetOrInit()->GetModuleNameForPc(addr);
  if (!module_name)
    module_name = "<unknown module>";
  if (context.Match(module_name, kSuppressionLeak, &s))
    return s;

  // Suppress by file or function name.
  SymbolizedStack *frames = Symbolizer::GetOrInit()->SymbolizePC(addr);
  for (SymbolizedStack *cur = frames; cur; cur = cur->next) {
    if (context.Match(cur->info.function, kSuppressionLeak, &s) ||
        context.Match(cur->info.file, kSuppressionLeak, &s)) {
      break;
    }
  }
  frames->ClearAll();
  return s;
}

static uptr GetCallerPC(const StackTrace &stack) {
  // The top frame is our malloc/calloc/etc. The next frame is the caller.
  if (stack.size >= 2)
    return stack.trace[1];
  return 0;
}

#  if SANITIZER_APPLE
// Several pointers in the Objective-C runtime (method cache and class_rw_t,
// for example) are tagged with additional bits we need to strip.
static inline void *TransformPointer(void *p) {
  uptr ptr = reinterpret_cast<uptr>(p);
  return reinterpret_cast<void *>(ptr & OBJC_DATA_MASK);
}
#  endif

// On Linux, treats all chunks allocated from ld-linux.so as reachable, which
// covers dynamically allocated TLS blocks, the dynamic loader's internal
// bookkeeping for loaded modules, etc.
// Dynamic TLS blocks contain the TLS variables of dynamically loaded modules.
// They are allocated with a __libc_memalign() call in allocate_and_init()
// (elf/dl-tls.c). Glibc won't tell us the address ranges occupied by those
// blocks, but we can make sure they come from our own allocator by intercepting
// __libc_memalign(). On top of that, there is no easy way to reach them. Their
// addresses are stored in a dynamically allocated array (the DTV) which is
// referenced from the static TLS. Unfortunately, we can't just rely on the DTV
// being reachable from the static TLS, and the dynamic TLS being reachable from
// the DTV. This is because the initial DTV is allocated before our interception
// mechanism kicks in, and thus we don't recognize it as allocated memory. We
// can't special-case it either, since we don't know its size.
// Our solution is to include in the root set all allocations made from
// ld-linux.so (which is where allocate_and_init() is implemented). This is
// guaranteed to include all dynamic TLS blocks (and possibly other allocations
// which we don't care about).
// On all other platforms, this simply checks to ensure that the caller pc is
// valid before reporting chunks as leaked.
bool LeakSuppressionContext::SuppressInvalid(const StackTrace &stack) {
  uptr caller_pc = GetCallerPC(stack);
  // If caller_pc is unknown, this chunk may be allocated in a coroutine. Mark
  // it as reachable, as we can't properly report its allocation stack anyway.
  return !caller_pc ||
         (suppress_module && suppress_module->containsAddress(caller_pc));
}

bool LeakSuppressionContext::SuppressByRule(const StackTrace &stack,
                                            uptr hit_count, uptr total_size) {
  for (uptr i = 0; i < stack.size; i++) {
    Suppression *s = GetSuppressionForAddr(
        StackTrace::GetPreviousInstructionPc(stack.trace[i]));
    if (s) {
      s->weight += total_size;
      atomic_fetch_add(&s->hit_count, hit_count, memory_order_relaxed);
      return true;
    }
  }
  return false;
}

bool LeakSuppressionContext::Suppress(u32 stack_trace_id, uptr hit_count,
                                      uptr total_size) {
  LazyInit();
  StackTrace stack = StackDepotGet(stack_trace_id);
  if (!SuppressInvalid(stack) && !SuppressByRule(stack, hit_count, total_size))
    return false;
  suppressed_stacks_sorted = false;
  suppressed_stacks.push_back(stack_trace_id);
  return true;
}

static LeakSuppressionContext *GetSuppressionContext() {
  CHECK(suppression_ctx);
  return suppression_ctx;
}

void InitCommonLsan() {
  if (common_flags()->detect_leaks) {
    // Initialization which can fail or print warnings should only be done if
    // LSan is actually enabled.
    InitializeSuppressions();
    InitializePlatformSpecificModules();
  }
}

class Decorator : public __sanitizer::SanitizerCommonDecorator {
 public:
  Decorator() : SanitizerCommonDecorator() {}
  const char *Error() { return Red(); }
  const char *Leak() { return Blue(); }
};

static inline bool MaybeUserPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress)
    return false;
#  if defined(__x86_64__)
  // TODO: support LAM48 and 5 level page tables.
  // LAM_U57 mask format
  //  * top byte: 0x81 because the format is: [0] [6-bit tag] [0]
  //  * top-1 byte: 0xff because it should be 0
  //  * top-2 byte: 0x80 because Linux uses 128 TB VMA ending at 0x7fffffffffff
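  // Illustrative examples of the resulting check (values assumed):
  //   0x00007fffffffe000  47-bit user pointer:         p & mask == 0 -> accept
  //   0x7e007fffffffe000  LAM_U57 tag in bits 62:57:   p & mask == 0 -> accept
  //   0xffff800000000000  kernel address (bit 63 set): rejected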
  constexpr uptr kLAM_U57Mask = 0x81ff80;
  constexpr uptr kPointerMask = kLAM_U57Mask << 40;
  return ((p & kPointerMask) == 0);
#  elif defined(__mips64) && defined(_LP64)
  return ((p >> 40) == 0);
#  elif defined(__aarch64__)
  // TBI (Top Byte Ignore) feature of AArch64: bits [63:56] are ignored in
  // address translation and can be used to store a tag.
  constexpr uptr kPointerMask = 255ULL << 48;
  // Accept up to 48 bit VMA.
  return ((p & kPointerMask) == 0);
#  elif defined(__loongarch_lp64)
  // Allow a 47-bit user-space VMA for now.
  return ((p >> 47) == 0);
#  else
  return true;
#  endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable chunks
// (|tag| = kReachable) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end, Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  CHECK(tag == kReachable || tag == kIndirectlyLeaked);
  const uptr alignment = flags()->pointer_alignment();
  LOG_POINTERS("Scanning %s range %p-%p.\n", region_type, (void *)begin,
               (void *)end);
  uptr pp = begin;
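  // Round pp up to the next |alignment| boundary, e.g. 0x1003 with an
  // alignment of 8 becomes 0x1008.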
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {
    void *p = *reinterpret_cast<void **>(pp);
#  if SANITIZER_APPLE
    p = TransformPointer(p);
#  endif
    if (!MaybeUserPointer(reinterpret_cast<uptr>(p)))
      continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk)
      continue;
    // Pointers to self don't count. This matters when tag == kIndirectlyLeaked.
    if (chunk == begin)
      continue;
    LsanMetadata m(chunk);
    if (m.tag() == kReachable || m.tag() == kIgnored)
      continue;

    // Do this check relatively late so we can log only the interesting cases.
    if (!flags()->use_poisoned && WordIsPoisoned(pp)) {
      LOG_POINTERS(
          "%p is poisoned: ignoring %p pointing into chunk %p-%p of size "
          "%zu.\n",
          (void *)pp, p, (void *)chunk, (void *)(chunk + m.requested_size()),
          m.requested_size());
      continue;
    }

    m.set_tag(tag);
    LOG_POINTERS("%p: found %p pointing into chunk %p-%p of size %zu.\n",
                 (void *)pp, p, (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans a global range for pointers
void ScanGlobalRange(uptr begin, uptr end, Frontier *frontier) {
  uptr allocator_begin = 0, allocator_end = 0;
  GetAllocatorGlobalRange(&allocator_begin, &allocator_end);
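  // Carve the allocator's own global object out of the scan: pointers it
  // holds internally (e.g. in free lists) should not make chunks reachable.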
  if (begin <= allocator_begin && allocator_begin < end) {
    CHECK_LE(allocator_begin, allocator_end);
    CHECK_LE(allocator_end, end);
    if (begin < allocator_begin)
      ScanRangeForPointers(begin, allocator_begin, frontier, "GLOBAL",
                           kReachable);
    if (allocator_end < end)
      ScanRangeForPointers(allocator_end, end, frontier, "GLOBAL", kReachable);
  } else {
    ScanRangeForPointers(begin, end, frontier, "GLOBAL", kReachable);
  }
}

void ScanExtraStackRanges(const InternalMmapVector<Range> &ranges,
                          Frontier *frontier) {
  for (uptr i = 0; i < ranges.size(); i++) {
    ScanRangeForPointers(ranges[i].begin, ranges[i].end, frontier, "FAKE STACK",
                         kReachable);
  }
}

#  if SANITIZER_FUCHSIA

// Fuchsia handles all threads together with its own callback.
static void ProcessThreads(SuspendedThreadsList const &, Frontier *, tid_t,
                           uptr) {}

#  else

#    if SANITIZER_ANDROID
// FIXME: Move this out into *libcdep.cpp
extern "C" SANITIZER_WEAK_ATTRIBUTE void __libc_iterate_dynamic_tls(
    pid_t, void (*cb)(void *, void *, uptr, void *), void *);
#    endif

static void ProcessThreadRegistry(Frontier *frontier) {
  InternalMmapVector<uptr> ptrs;
  GetAdditionalThreadContextPtrsLocked(&ptrs);

  for (uptr i = 0; i < ptrs.size(); ++i) {
    void *ptr = reinterpret_cast<void *>(ptrs[i]);
    uptr chunk = PointsIntoChunk(ptr);
    if (!chunk)
      continue;
    LsanMetadata m(chunk);
    if (!m.allocated())
      continue;

    // Mark as reachable and add to frontier.
    LOG_POINTERS("Treating pointer %p from ThreadContext as reachable\n", ptr);
    m.set_tag(kReachable);
    frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier, tid_t caller_tid,
                           uptr caller_sp) {
  InternalMmapVector<uptr> registers;
  InternalMmapVector<Range> extra_ranges;
  for (uptr i = 0; i < suspended_threads.ThreadCount(); i++) {
    tid_t os_id = static_cast<tid_t>(suspended_threads.GetThreadID(i));
    LOG_THREADS("Processing thread %llu.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    DTLS *dtls;
    bool thread_found =
        GetThreadRangesLocked(os_id, &stack_begin, &stack_end, &tls_begin,
                              &tls_end, &cache_begin, &cache_end, &dtls);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      LOG_THREADS("Thread %llu not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    PtraceRegistersStatus have_registers =
        suspended_threads.GetRegistersAndSP(i, &registers, &sp);
    if (have_registers != REGISTERS_AVAILABLE) {
      Report("Unable to get registers from thread %llu.\n", os_id);
      // If unable to get SP, consider the entire stack to be reachable unless
      // GetRegistersAndSP failed with ESRCH.
      if (have_registers == REGISTERS_UNAVAILABLE_FATAL)
        continue;
      sp = stack_begin;
    }
    if (suspended_threads.GetThreadID(i) == caller_tid) {
      sp = caller_sp;
    }

    if (flags()->use_registers && have_registers) {
      uptr registers_begin = reinterpret_cast<uptr>(registers.data());
      uptr registers_end =
          reinterpret_cast<uptr>(registers.data() + registers.size());
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);
    }

    if (flags()->use_stacks) {
      LOG_THREADS("Stack at %p-%p (SP = %p).\n", (void *)stack_begin,
                  (void *)stack_end, (void *)sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack, or swapcontext was used).
        // Again, consider the entire stack range to be reachable.
        LOG_THREADS("WARNING: stack pointer not in stack range.\n");
        uptr page_size = GetPageSizeCached();
        int skipped = 0;
        while (stack_begin < stack_end &&
               !IsAccessibleMemoryRange(stack_begin, 1)) {
          skipped++;
          stack_begin += page_size;
        }
        LOG_THREADS("Skipped %d guard page(s) to obtain stack %p-%p.\n",
                    skipped, (void *)stack_begin, (void *)stack_end);
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
      extra_ranges.clear();
      GetThreadExtraStackRangesLocked(os_id, &extra_ranges);
      ScanExtraStackRanges(extra_ranges, frontier);
    }

    if (flags()->use_tls) {
      if (tls_begin) {
        LOG_THREADS("TLS at %p-%p.\n", (void *)tls_begin, (void *)tls_end);
        // If the TLS and cache ranges don't overlap, scan the full TLS range;
        // otherwise, scan only the non-overlapping portions.
        if (cache_begin == cache_end || tls_end < cache_begin ||
            tls_begin > cache_end) {
          ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
        } else {
          if (tls_begin < cache_begin)
            ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                                 kReachable);
          if (tls_end > cache_end)
            ScanRangeForPointers(cache_end, tls_end, frontier, "TLS",
                                 kReachable);
        }
      }
#    if SANITIZER_ANDROID
      auto *cb = +[](void *dtls_begin, void *dtls_end, uptr /*dso_id*/,
                     void *arg) -> void {
        ScanRangeForPointers(reinterpret_cast<uptr>(dtls_begin),
                             reinterpret_cast<uptr>(dtls_end),
                             reinterpret_cast<Frontier *>(arg), "DTLS",
                             kReachable);
      };

      // FIXME: There might be a race-condition here (and in Bionic) if the
      // thread is suspended in the middle of updating its DTLS. IOWs, we
      // could scan already freed memory. (probably fine for now)
      __libc_iterate_dynamic_tls(os_id, cb, frontier);
#    else
      if (dtls && !DTLSInDestruction(dtls)) {
        ForEachDVT(dtls, [&](const DTLS::DTV &dtv, int id) {
          uptr dtls_beg = dtv.beg;
          uptr dtls_end = dtls_beg + dtv.size;
          if (dtls_beg < dtls_end) {
            LOG_THREADS("DTLS %d at %p-%p.\n", id, (void *)dtls_beg,
                        (void *)dtls_end);
            ScanRangeForPointers(dtls_beg, dtls_end, frontier, "DTLS",
                                 kReachable);
          }
        });
      } else {
        // We are handling a thread with DTLS under destruction. Log about
        // this and continue.
        LOG_THREADS("Thread %llu has DTLS under destruction.\n", os_id);
      }
#    endif
    }
  }

  // Add pointers reachable from ThreadContexts
  ProcessThreadRegistry(frontier);
}

#  endif  // SANITIZER_FUCHSIA

// A map from [region_begin, region_end) pairs to the number of times each
// region was registered.
using RootRegions = DenseMap<detail::DenseMapPair<uptr, uptr>, uptr>;

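// Lazily constructed in static storage via placement new, so it needs no
// global constructor and is never destroyed on exit.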
static RootRegions &GetRootRegionsLocked() {
  global_mutex.CheckLocked();
  static RootRegions *regions = nullptr;
  alignas(RootRegions) static char placeholder[sizeof(RootRegions)];
  if (!regions)
    regions = new (placeholder) RootRegions();
  return *regions;
}

bool HasRootRegions() { return !GetRootRegionsLocked().empty(); }

void ScanRootRegions(Frontier *frontier,
                     const InternalMmapVectorNoCtor<Region> &mapped_regions) {
  if (!flags()->use_root_regions)
    return;

  InternalMmapVector<Region> regions;
  GetRootRegionsLocked().forEach([&](const auto &kv) {
    regions.push_back({kv.first.first, kv.first.second});
    return true;
  });

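  // Scan only the parts of root regions that are actually mapped readable:
  // Intersect computes the pairwise overlaps between the registered regions
  // and the readable segments passed in by the caller.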
  InternalMmapVector<Region> intersection;
  Intersect(mapped_regions, regions, intersection);

  for (const Region &r : intersection) {
    LOG_POINTERS("Root region intersects with mapped region at %p-%p\n",
                 (void *)r.begin, (void *)r.end);
    ScanRangeForPointers(r.begin, r.end, frontier, "ROOT", kReachable);
  }
}

// Scans root regions for heap pointers.
static void ProcessRootRegions(Frontier *frontier) {
  if (!flags()->use_root_regions || !HasRootRegions())
    return;
  MemoryMappingLayout proc_maps(/*cache_enabled*/ true);
  MemoryMappedSegment segment;
  InternalMmapVector<Region> mapped_regions;
  while (proc_maps.Next(&segment))
    if (segment.IsReadable())
      mapped_regions.push_back({segment.start, segment.end});
  ScanRootRegions(frontier, mapped_regions);
}

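// Worklist-based flood fill: pop a chunk off |frontier| and scan its payload;
// ScanRangeForPointers tags any chunks it discovers and pushes them back onto
// |frontier|, so the loop terminates once the tag has propagated to every
// chunk transitively reachable from the initial roots.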
static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ nullptr, "HEAP", kIndirectlyLeaked);
  }
}

static void IgnoredSuppressedCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated() || m.tag() == kIgnored)
    return;

  const InternalMmapVector<u32> &suppressed =
      *static_cast<const InternalMmapVector<u32> *>(arg);
  uptr idx = InternalLowerBound(suppressed, m.stack_trace_id());
  if (idx >= suppressed.size() || m.stack_trace_id() != suppressed[idx])
    return;

  LOG_POINTERS("Suppressed: chunk %p-%p of size %zu.\n", (void *)chunk,
               (void *)(chunk + m.requested_size()), m.requested_size());
  m.set_tag(kIgnored);
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored) {
    LOG_POINTERS("Ignored: chunk %p-%p of size %zu.\n", (void *)chunk,
                 (void *)(chunk + m.requested_size()), m.requested_size());
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
  }
}

// Sets the appropriate tag on each chunk.
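// The phases below: (1) re-apply previously matched suppressions as kIgnored,
// (2) seed the frontier with ignored chunks plus global, thread, and root
// region roots, (3) flood-fill kReachable, (4) repeat the fill from
// platform-specific allocations, and (5) mark chunks reachable only from
// leaked chunks as kIndirectlyLeaked.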
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads,
                              Frontier *frontier, tid_t caller_tid,
                              uptr caller_sp) {
  const InternalMmapVector<u32> &suppressed_stacks =
      GetSuppressionContext()->GetSortedSuppressedStacks();
  if (!suppressed_stacks.empty()) {
    ForEachChunk(IgnoredSuppressedCb,
                 const_cast<InternalMmapVector<u32> *>(&suppressed_stacks));
  }
  ForEachChunk(CollectIgnoredCb, frontier);
  ProcessGlobalRegions(frontier);
  ProcessThreads(suspended_threads, frontier, caller_tid, caller_sp);
  ProcessRootRegions(frontier);
  FloodFillTag(frontier, kReachable);

  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  LOG_POINTERS("Processing platform-specific allocations.\n");
  ProcessPlatformSpecificAllocations(frontier);
  FloodFillTag(frontier, kReachable);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  LOG_POINTERS("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, nullptr);
}

// ForEachChunk callback. Resets the tags to pre-leak-check state.
static void ResetTagsCb(uptr chunk, void *arg) {
  (void)arg;
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kIgnored)
    m.set_tag(kDirectlyLeaked);
}

// ForEachChunk callback. Aggregates information about unreachable chunks into
// a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakedChunks *leaks = reinterpret_cast<LeakedChunks *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated())
    return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked)
    leaks->push_back({chunk, m.stack_trace_id(), m.requested_size(), m.tag()});
}

void LeakSuppressionContext::PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched;
  context.GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++) {
    Printf("%7zu %10zu %s\n",
           static_cast<uptr>(atomic_load_relaxed(&matched[i]->hit_count)),
           matched[i]->weight, matched[i]->templ);
  }
  Printf("%s\n\n", line);
}

#  if SANITIZER_FUCHSIA

// Fuchsia provides a libc interface that guarantees all threads are
// covered, and SuspendedThreadList is never really used.
static void ReportUnsuspendedThreads(const SuspendedThreadsList &) {}

#  else  // !SANITIZER_FUCHSIA

static void ReportUnsuspendedThreads(
    const SuspendedThreadsList &suspended_threads) {
  InternalMmapVector<tid_t> threads(suspended_threads.ThreadCount());
  for (uptr i = 0; i < suspended_threads.ThreadCount(); ++i)
    threads[i] = suspended_threads.GetThreadID(i);

  Sort(threads.data(), threads.size());

  InternalMmapVector<tid_t> unsuspended;
  GetRunningThreadsLocked(&unsuspended);

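  // For each running thread, binary-search the sorted list of suspended
  // threads; any thread that is missing was not stopped and may still mutate
  // the heap during the scan, which can produce false leak reports.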
  for (auto os_id : unsuspended) {
    uptr i = InternalLowerBound(threads, os_id);
    if (i >= threads.size() || threads[i] != os_id)
      Report(
          "Running thread %llu was not suspended. False leaks are possible.\n",
          os_id);
  }
}

#  endif  // !SANITIZER_FUCHSIA

static void CheckForLeaksCallback(const SuspendedThreadsList &suspended_threads,
                                  void *arg) {
  CheckForLeaksParam *param = reinterpret_cast<CheckForLeaksParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  ReportUnsuspendedThreads(suspended_threads);
  ClassifyAllChunks(suspended_threads, &param->frontier, param->caller_tid,
                    param->caller_sp);
  ForEachChunk(CollectLeaksCb, &param->leaks);
  // Clean up for subsequent leak checks. This assumes we did not overwrite any
  // kIgnored tags.
  ForEachChunk(ResetTagsCb, nullptr);
  param->success = true;
}

static bool PrintResults(LeakReport &report) {
  uptr unsuppressed_count = report.UnsuppressedLeakCount();
  if (unsuppressed_count) {
    Decorator d;
    Printf(
        "\n"
        "================================================================="
        "\n");
    Printf("%s", d.Error());
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    Printf("%s", d.Default());
    report.ReportTopLeaks(flags()->max_leaks);
  }
  if (common_flags()->print_suppressions)
    GetSuppressionContext()->PrintMatchedSuppressions();
  if (unsuppressed_count > 0) {
    report.PrintSummary();
    return true;
  }
  return false;
}

static bool CheckForLeaks() {
  if (&__lsan_is_turned_off && __lsan_is_turned_off()) {
    VReport(1, "LeakSanitizer is disabled\n");
    return false;
  }
  VReport(1, "LeakSanitizer: checking for leaks\n");
  // Inside LockStuffAndStopTheWorld we can't run symbolizer, so we can't match
  // suppressions. However if a stack id was previously suppressed, it should be
  // suppressed in future checks as well.
  for (int i = 0;; ++i) {
    EnsureMainThreadIDIsCorrect();
    CheckForLeaksParam param;
    // Capture the calling thread's stack pointer early, to avoid false
    // negatives: an old frame with dead pointers might be overlapped by a new
    // frame inside CheckForLeaks that does not overwrite those pointer bytes
    // before the threads are suspended and their stack pointers captured.
    param.caller_tid = GetTid();
    param.caller_sp = reinterpret_cast<uptr>(__builtin_frame_address(0));
    LockStuffAndStopTheWorld(CheckForLeaksCallback, &param);
    if (!param.success) {
      Report("LeakSanitizer has encountered a fatal error.\n");
      Report(
          "HINT: For debugging, try setting environment variable "
          "LSAN_OPTIONS=verbosity=1:log_threads=1\n");
      Report(
          "HINT: LeakSanitizer does not work under ptrace (strace, gdb, "
          "etc)\n");
      Die();
    }
    LeakReport leak_report;
    leak_report.AddLeakedChunks(param.leaks);

    // No new suppressed stacks, so a rerun will not help and we can report.
    if (!leak_report.ApplySuppressions())
      return PrintResults(leak_report);

    // No indirect leaks to report, so we are done here.
    if (!leak_report.IndirectUnsuppressedLeakCount())
      return PrintResults(leak_report);

    if (i >= 8) {
      Report("WARNING: LeakSanitizer gave up on indirect leaks suppression.\n");
      return PrintResults(leak_report);
    }

    // We found a new previously unseen suppressed call stack. Rerun to make
    // sure it does not hold indirect leaks.
    VReport(1, "Rerun with %zu suppressed stacks.\n",
            GetSuppressionContext()->GetSortedSuppressedStacks().size());
  }
}

static bool has_reported_leaks = false;
bool HasReportedLeaks() { return has_reported_leaks; }

void DoLeakCheck() {
  Lock l(&global_mutex);
  static bool already_done;
  if (already_done)
    return;
  already_done = true;
  has_reported_leaks = CheckForLeaks();
  if (has_reported_leaks)
    HandleLeaks();
}

static int DoRecoverableLeakCheck() {
  Lock l(&global_mutex);
  bool have_leaks = CheckForLeaks();
  return have_leaks ? 1 : 0;
}

void DoRecoverableLeakCheckVoid() { DoRecoverableLeakCheck(); }

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::AddLeakedChunk(). We don't expect to ever see this many leaks
// in real-world applications.
// FIXME: Get rid of this limit by moving logic into DedupLeaks.
const uptr kMaxLeaksConsidered = 5000;

void LeakReport::AddLeakedChunks(const LeakedChunks &chunks) {
  for (const LeakedChunk &leak : chunks) {
    uptr chunk = leak.chunk;
    u32 stack_trace_id = leak.stack_trace_id;
    uptr leaked_size = leak.leaked_size;
    ChunkTag tag = leak.tag;
    CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);

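    // If a stack resolution cap is set, coarsen the stack to its top
    // |resolution| frames so that leaks differing only in deeper callers are
    // deduplicated together.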
    if (u32 resolution = flags()->resolution) {
      StackTrace stack = StackDepotGet(stack_trace_id);
      stack.size = Min(stack.size, resolution);
      stack_trace_id = StackDepotPut(stack);
    }

    bool is_directly_leaked = (tag == kDirectlyLeaked);
    uptr i;
    for (i = 0; i < leaks_.size(); i++) {
      if (leaks_[i].stack_trace_id == stack_trace_id &&
          leaks_[i].is_directly_leaked == is_directly_leaked) {
        leaks_[i].hit_count++;
        leaks_[i].total_size += leaked_size;
        break;
      }
    }
    if (i == leaks_.size()) {
      if (leaks_.size() == kMaxLeaksConsidered)
        return;
      Leak leak = {next_id_++,         /* hit_count */ 1,
                   leaked_size,        stack_trace_id,
                   is_directly_leaked, /* is_suppressed */ false};
      leaks_.push_back(leak);
    }
    if (flags()->report_objects) {
      LeakedObject obj = {leaks_[i].id, GetUserAddr(chunk), leaked_size};
      leaked_objects_.push_back(obj);
    }
  }
}

static bool LeakComparator(const Leak &leak1, const Leak &leak2) {
  if (leak1.is_directly_leaked == leak2.is_directly_leaked)
    return leak1.total_size > leak2.total_size;
  else
    return leak1.is_directly_leaked;
}

void LeakReport::ReportTopLeaks(uptr num_leaks_to_report) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf(
        "Too many leaks! Only the first %zu leaks encountered will be "
        "reported.\n",
        kMaxLeaksConsidered);

  uptr unsuppressed_count = UnsuppressedLeakCount();
  if (num_leaks_to_report > 0 && num_leaks_to_report < unsuppressed_count)
    Printf("The %zu top leak(s):\n", num_leaks_to_report);
  Sort(leaks_.data(), leaks_.size(), &LeakComparator);
  uptr leaks_reported = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    PrintReportForLeak(i);
    leaks_reported++;
    if (leaks_reported == num_leaks_to_report)
      break;
  }
  if (leaks_reported < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_reported;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintReportForLeak(uptr index) {
  Decorator d;
  Printf("%s", d.Leak());
  Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
         leaks_[index].is_directly_leaked ? "Direct" : "Indirect",
         leaks_[index].total_size, leaks_[index].hit_count);
  Printf("%s", d.Default());

  CHECK(leaks_[index].stack_trace_id);
  StackDepotGet(leaks_[index].stack_trace_id).Print();

  if (flags()->report_objects) {
    Printf("Objects leaked above:\n");
    PrintLeakedObjectsForLeak(index);
    Printf("\n");
  }
}

void LeakReport::PrintLeakedObjectsForLeak(uptr index) {
  u32 leak_id = leaks_[index].id;
  for (uptr j = 0; j < leaked_objects_.size(); j++) {
    if (leaked_objects_[j].leak_id == leak_id)
      Printf("%p (%zu bytes)\n", (void *)leaked_objects_[j].addr,
             leaked_objects_[j].size);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed)
      continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  InternalScopedString summary;
  summary.AppendF("%zu byte(s) leaked in %zu allocation(s).", bytes,
                  allocations);
  ReportErrorSummary(summary.data());
}

uptr LeakReport::ApplySuppressions() {
  LeakSuppressionContext *suppressions = GetSuppressionContext();
  uptr new_suppressions = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (suppressions->Suppress(leaks_[i].stack_trace_id, leaks_[i].hit_count,
                               leaks_[i].total_size)) {
      leaks_[i].is_suppressed = true;
      ++new_suppressions;
    }
  }
  return new_suppressions;
}

uptr LeakReport::UnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed)
      result++;
  return result;
}

uptr LeakReport::IndirectUnsuppressedLeakCount() {
  uptr result = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed && !leaks_[i].is_directly_leaked)
      result++;
  return result;
}

}  // namespace __lsan
#else   // CAN_SANITIZE_LEAKS
namespace __lsan {
void InitCommonLsan() {}
void DoLeakCheck() {}
void DoRecoverableLeakCheckVoid() {}
void DisableInThisThread() {}
void EnableInThisThread() {}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  if (!common_flags()->detect_leaks)
    return;
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  Lock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObject(p);
  if (res == kIgnoreObjectInvalid)
    VReport(1, "__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored)
    VReport(1,
            "__lsan_ignore_object(): "
            "heap object at %p is already being ignored\n",
            p);
  if (res == kIgnoreObjectSuccess)
    VReport(1, "__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

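// Root regions are address ranges the client asks LSan to always treat as
// live roots. Illustrative usage from client code (names hypothetical):
//   static char arena[1 << 20];
//   __lsan_register_root_region(arena, sizeof(arena));
//   ...
//   __lsan_unregister_root_region(arena, sizeof(arena));
// Registrations are counted, so a region must be unregistered as many times
// as it was registered.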
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_register_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  VReport(1, "Registered root region at %p of size %zu\n", begin, size);
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);

  Lock l(&global_mutex);
  ++GetRootRegionsLocked()[{b, e}];
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_unregister_root_region(const void *begin, uptr size) {
#if CAN_SANITIZE_LEAKS
  uptr b = reinterpret_cast<uptr>(begin);
  uptr e = b + size;
  CHECK_LT(b, e);
  VReport(1, "Unregistered root region at %p of size %zu\n", begin, size);

  {
    Lock l(&global_mutex);
    if (auto *f = GetRootRegionsLocked().find({b, e})) {
      if (--(f->second) == 0)
        GetRootRegionsLocked().erase(f);
      return;
    }
  }
  Report(
      "__lsan_unregister_root_region(): region at %p of size %zu has not "
      "been registered.\n",
      begin, size);
  Die();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::DisableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  __lsan::EnableInThisThread();
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_do_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    __lsan::DoLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_do_recoverable_leak_check() {
#if CAN_SANITIZE_LEAKS
  if (common_flags()->detect_leaks)
    return __lsan::DoRecoverableLeakCheck();
#endif  // CAN_SANITIZE_LEAKS
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_options, void) {
  return "";
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_INTERFACE_WEAK_DEF(int, __lsan_is_turned_off, void) {
  return 0;
}

SANITIZER_INTERFACE_WEAK_DEF(const char *, __lsan_default_suppressions, void) {
  return "";
}
#endif
}  // extern "C"