Home | History | Annotate | Line # | Download | only in sanitizer_common
      1  1.1  mrg //===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
      2  1.1  mrg //
      3  1.9  mrg // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
      4  1.9  mrg // See https://llvm.org/LICENSE.txt for license information.
      5  1.9  mrg // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
      6  1.1  mrg //
      7  1.1  mrg //===----------------------------------------------------------------------===//
      8  1.1  mrg //
      9  1.4  mrg // This file is shared between run-time libraries of sanitizers.
     10  1.4  mrg //
     11  1.1  mrg // It declares common functions and classes that are used in both runtimes.
     12  1.1  mrg // Implementation of some functions are provided in sanitizer_common, while
     13  1.1  mrg // others must be defined by run-time library itself.
     14  1.1  mrg //===----------------------------------------------------------------------===//
     15  1.1  mrg #ifndef SANITIZER_COMMON_H
     16  1.1  mrg #define SANITIZER_COMMON_H
     17  1.1  mrg 
     18  1.4  mrg #include "sanitizer_flags.h"
     19  1.4  mrg #include "sanitizer_interface_internal.h"
     20  1.1  mrg #include "sanitizer_internal_defs.h"
     21  1.3  mrg #include "sanitizer_libc.h"
     22  1.4  mrg #include "sanitizer_list.h"
     23  1.3  mrg #include "sanitizer_mutex.h"
     24  1.4  mrg 
     25  1.6  mrg #if defined(_MSC_VER) && !defined(__clang__)
     26  1.4  mrg extern "C" void _ReadWriteBarrier();
     27  1.4  mrg #pragma intrinsic(_ReadWriteBarrier)
     28  1.4  mrg #endif
     29  1.1  mrg 
     30  1.1  mrg namespace __sanitizer {
     31  1.7  mrg 
     32  1.7  mrg struct AddressInfo;
     33  1.7  mrg struct BufferedStackTrace;
     34  1.7  mrg struct SignalContext;
     35  1.1  mrg struct StackTrace;
     36  1.1  mrg 
     37  1.1  mrg // Constants.
     38  1.1  mrg const uptr kWordSize = SANITIZER_WORDSIZE / 8;
     39  1.1  mrg const uptr kWordSizeInBits = 8 * kWordSize;
     40  1.1  mrg 
     41  1.8  mrg const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE;
     42  1.1  mrg 
     43  1.4  mrg const uptr kMaxPathLength = 4096;
     44  1.4  mrg 
     45  1.6  mrg const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb
     46  1.3  mrg 
     47  1.9  mrg const uptr kErrorMessageBufferSize = 1 << 16;
     48  1.3  mrg 
     49  1.4  mrg // Denotes fake PC values that come from JIT/JAVA/etc.
     50  1.8  mrg // For such PC values __tsan_symbolize_external_ex() will be called.
     51  1.4  mrg const u64 kExternalPCBit = 1ULL << 60;
     52  1.4  mrg 
     53  1.1  mrg extern const char *SanitizerToolName;  // Can be changed by the tool.
     54  1.1  mrg 
     55  1.4  mrg extern atomic_uint32_t current_verbosity;
// Sets the global verbosity level consumed by VReport/VPrintf.
// Stored in an atomic (relaxed) so it can be updated/read without locking.
inline void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
// Returns the current verbosity level (relaxed atomic read).
inline int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
     62  1.4  mrg 
#if SANITIZER_ANDROID
// Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array,
// so the page size is hard-coded instead of queried from the OS.
inline uptr GetPageSize() {
  return 4096;
}
inline uptr GetPageSizeCached() {
  return 4096;
}
#else
uptr GetPageSize();
extern uptr PageSizeCached;
// Lazily caches GetPageSize() in a global on first call.
// NOTE(review): the lazy init is unsynchronized — presumably benign because
// every thread would compute and store the same value; confirm if ever racy.
inline uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
#endif
     80  1.1  mrg uptr GetMmapGranularity();
     81  1.3  mrg uptr GetMaxVirtualAddress();
     82  1.8  mrg uptr GetMaxUserVirtualAddress();
     83  1.1  mrg // Threads
     84  1.7  mrg tid_t GetTid();
     85  1.8  mrg int TgKill(pid_t pid, tid_t tid, int sig);
     86  1.1  mrg uptr GetThreadSelf();
     87  1.1  mrg void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
     88  1.1  mrg                                 uptr *stack_bottom);
     89  1.3  mrg void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
     90  1.3  mrg                           uptr *tls_addr, uptr *tls_size);
     91  1.1  mrg 
     92  1.1  mrg // Memory management
     93  1.6  mrg void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
// Same as MmapOrDie, but requests a raw (terse) report on failure
// (raw_report == true).
inline void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
     97  1.1  mrg void UnmapOrDie(void *addr, uptr size);
     98  1.7  mrg // Behaves just like MmapOrDie, but tolerates out of memory condition, in that
     99  1.7  mrg // case returns nullptr.
    100  1.7  mrg void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
    101  1.8  mrg bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr)
    102  1.8  mrg      WARN_UNUSED_RESULT;
    103  1.9  mrg bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size,
    104  1.9  mrg                              const char *name = nullptr) WARN_UNUSED_RESULT;
    105  1.3  mrg void *MmapNoReserveOrDie(uptr size, const char *mem_type);
    106  1.9  mrg void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
    107  1.7  mrg // Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
    108  1.7  mrg // that case returns nullptr.
    109  1.9  mrg void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size,
    110  1.9  mrg                                  const char *name = nullptr);
    111  1.6  mrg void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
    112  1.6  mrg void *MmapNoAccess(uptr size);
    113  1.1  mrg // Map aligned chunk of address space; size and alignment are powers of two.
    114  1.7  mrg // Dies on all but out of memory errors, in the latter case returns nullptr.
    115  1.7  mrg void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
    116  1.7  mrg                                    const char *mem_type);
    117  1.6  mrg // Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
    118  1.4  mrg // unaccessible memory.
    119  1.4  mrg bool MprotectNoAccess(uptr addr, uptr size);
    120  1.6  mrg bool MprotectReadOnly(uptr addr, uptr size);
    121  1.6  mrg 
    122  1.8  mrg void MprotectMallocZones(void *addr, int prot);
    123  1.8  mrg 
    124  1.9  mrg #if SANITIZER_LINUX
    125  1.9  mrg // Unmap memory. Currently only used on Linux.
    126  1.9  mrg void UnmapFromTo(uptr from, uptr to);
    127  1.9  mrg #endif
    128  1.9  mrg 
    129  1.9  mrg // Maps shadow_size_bytes of shadow memory and returns shadow address. It will
    130  1.9  mrg // be aligned to the mmap granularity * 2^shadow_scale, or to
    131  1.9  mrg // 2^min_shadow_base_alignment if that is larger. The returned address will
    132  1.9  mrg // have max(2^min_shadow_base_alignment, mmap granularity) on the left, and
    133  1.9  mrg // shadow_size_bytes bytes on the right, which on linux is mapped no access.
    134  1.9  mrg // The high_mem_end may be updated if the original shadow size doesn't fit.
    135  1.9  mrg uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale,
    136  1.9  mrg                       uptr min_shadow_base_alignment, uptr &high_mem_end);
    137  1.9  mrg 
    138  1.9  mrg // Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size).
    139  1.9  mrg // Reserves 2*S bytes of address space to the right of the returned address and
    140  1.9  mrg // ring_buffer_size bytes to the left.  The returned address is aligned to 2*S.
    141  1.9  mrg // Also creates num_aliases regions of accessible memory starting at offset S
    142  1.9  mrg // from the returned address.  Each region has size alias_size and is backed by
    143  1.9  mrg // the same physical memory.
    144  1.9  mrg uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size,
    145  1.9  mrg                                 uptr num_aliases, uptr ring_buffer_size);
    146  1.9  mrg 
    147  1.9  mrg // Reserve memory range [beg, end]. If madvise_shadow is true then apply
    148  1.9  mrg // madvise (e.g. hugepages, core dumping) requested by options.
    149  1.9  mrg void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name,
    150  1.9  mrg                               bool madvise_shadow = true);
    151  1.9  mrg 
    152  1.9  mrg // Protect size bytes of memory starting at addr. Also try to protect
    153  1.9  mrg // several pages at the start of the address space as specified by
    154  1.9  mrg // zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start.
    155  1.9  mrg void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start,
    156  1.9  mrg                 uptr zero_base_max_shadow_start);
    157  1.9  mrg 
    158  1.6  mrg // Find an available address space.
    159  1.7  mrg uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
    160  1.8  mrg                               uptr *largest_gap_found, uptr *max_occupied_addr);
    161  1.4  mrg 
    162  1.1  mrg // Used to check if we can map shadow memory to a fixed location.
    163  1.1  mrg bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
    164  1.7  mrg // Releases memory pages entirely within the [beg, end] address range. Noop if
    165  1.7  mrg // the provided range does not contain at least one entire page.
    166  1.7  mrg void ReleaseMemoryPagesToOS(uptr beg, uptr end);
    167  1.3  mrg void IncreaseTotalMmap(uptr size);
    168  1.3  mrg void DecreaseTotalMmap(uptr size);
    169  1.4  mrg uptr GetRSS();
    170  1.9  mrg void SetShadowRegionHugePageMode(uptr addr, uptr length);
    171  1.8  mrg bool DontDumpShadowMemory(uptr addr, uptr length);
    172  1.4  mrg // Check if the built VMA size matches the runtime one.
    173  1.4  mrg void CheckVMASize();
    174  1.6  mrg void RunMallocHooks(const void *ptr, uptr size);
    175  1.6  mrg void RunFreeHooks(const void *ptr);
    176  1.1  mrg 
// Reserves a contiguous region of address space up front; pieces of it are
// then mapped with Map/MapOrDie and returned with Unmap.
class ReservedAddressRange {
 public:
  // Reserves |size| bytes, optionally at |fixed_addr|; returns the base.
  uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0);
  // Like Init, but the returned base is aligned to |align|.
  uptr InitAligned(uptr size, uptr align, const char *name = nullptr);
  // Maps |size| bytes at |fixed_addr| within the reserved range.
  // MapOrDie terminates on failure instead of returning an error.
  uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr);
  uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr);
  void Unmap(uptr addr, uptr size);
  void *base() const { return base_; }
  uptr size() const { return size_; }

 private:
  void* base_;        // Start of the reserved range.
  uptr size_;         // Size of the reserved range in bytes.
  const char* name_;  // Name used for reporting/annotation.
  uptr os_handle_;    // Opaque handle for the platform implementation.
};
    193  1.8  mrg 
    194  1.7  mrg typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
    195  1.9  mrg                                /*out*/ uptr *stats);
    196  1.7  mrg 
    197  1.7  mrg // Parse the contents of /proc/self/smaps and generate a memory profile.
    198  1.9  mrg // |cb| is a tool-specific callback that fills the |stats| array.
    199  1.9  mrg void GetMemoryProfile(fill_profile_f cb, uptr *stats);
    200  1.9  mrg void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps,
    201  1.9  mrg                             uptr smaps_len);
    202  1.7  mrg 
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock (the allocator itself is not thread-safe).
  void *Allocate(uptr size);
 private:
  // NOTE(review): presumably [allocated_current_, allocated_end_) is the
  // unused tail of the most recent mmap'ed chunk — confirm in the .cpp.
  char *allocated_end_;
  char *allocated_current_;
};
    214  1.8  mrg // Set the min alignment of LowLevelAllocator to at least alignment.
    215  1.8  mrg void SetLowLevelAllocateMinAlignment(uptr alignment);
    216  1.1  mrg typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
    217  1.1  mrg // Allows to register tool-specific callbacks for LowLevelAllocator.
    218  1.1  mrg // Passing NULL removes the callback.
    219  1.1  mrg void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
    220  1.1  mrg 
    221  1.1  mrg // IO
    222  1.7  mrg void CatastrophicErrorWrite(const char *buffer, uptr length);
    223  1.1  mrg void RawWrite(const char *buffer);
    224  1.3  mrg bool ColorizeReports();
    225  1.6  mrg void RemoveANSIEscapeSequencesFromString(char *buffer);
    226  1.9  mrg void Printf(const char *format, ...) FORMAT(1, 2);
    227  1.9  mrg void Report(const char *format, ...) FORMAT(1, 2);
    228  1.1  mrg void SetPrintfAndReportCallback(void (*callback)(const char *));
    229  1.3  mrg #define VReport(level, ...)                                              \
    230  1.3  mrg   do {                                                                   \
    231  1.4  mrg     if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
    232  1.3  mrg   } while (0)
    233  1.3  mrg #define VPrintf(level, ...)                                              \
    234  1.3  mrg   do {                                                                   \
    235  1.4  mrg     if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
    236  1.3  mrg   } while (0)
    237  1.3  mrg 
// Lock sanitizer error reporting and protects against nested errors.
// RAII wrapper over the static Lock/Unlock pair; the ACQUIRE/RELEASE/
// CHECK_LOCKED thread-safety annotations all refer to the static mutex_.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock() ACQUIRE(mutex_) { Lock(); }
  ~ScopedErrorReportLock() RELEASE(mutex_) { Unlock(); }

  static void Lock() ACQUIRE(mutex_);
  static void Unlock() RELEASE(mutex_);
  static void CheckLocked() CHECK_LOCKED(mutex_);

 private:
  // NOTE(review): presumably records which thread currently holds the report
  // lock, to tolerate re-entrant/nested errors — confirm in implementation.
  static atomic_uintptr_t reporting_thread_;
  static StaticSpinMutex mutex_;
};
    252  1.4  mrg 
    253  1.3  mrg extern uptr stoptheworld_tracer_pid;
    254  1.3  mrg extern uptr stoptheworld_tracer_ppid;
    255  1.1  mrg 
    256  1.3  mrg bool IsAccessibleMemoryRange(uptr beg, uptr size);
    257  1.3  mrg 
    258  1.3  mrg // Error report formatting.
    259  1.3  mrg const char *StripPathPrefix(const char *filepath,
    260  1.3  mrg                             const char *strip_file_prefix);
    261  1.3  mrg // Strip the directories from the module name.
    262  1.3  mrg const char *StripModuleName(const char *module);
    263  1.1  mrg 
    264  1.1  mrg // OS
    265  1.4  mrg uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
    266  1.4  mrg uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
    267  1.9  mrg uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len);
    268  1.4  mrg uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
    269  1.4  mrg const char *GetProcessName();
    270  1.4  mrg void UpdateProcessName();
    271  1.4  mrg void CacheBinaryName();
    272  1.3  mrg void DisableCoreDumperIfNecessary();
    273  1.1  mrg void DumpProcessMap();
    274  1.1  mrg const char *GetEnv(const char *name);
    275  1.3  mrg bool SetEnv(const char *name, const char *value);
    276  1.4  mrg 
    277  1.1  mrg u32 GetUid();
    278  1.1  mrg void ReExec();
    279  1.8  mrg void CheckASLR();
    280  1.9  mrg void CheckMPROTECT();
    281  1.6  mrg char **GetArgv();
    282  1.9  mrg char **GetEnviron();
    283  1.6  mrg void PrintCmdline();
    284  1.1  mrg bool StackSizeIsUnlimited();
    285  1.1  mrg void SetStackSizeLimitInBytes(uptr limit);
    286  1.3  mrg bool AddressSpaceIsUnlimited();
    287  1.3  mrg void SetAddressSpaceUnlimited();
    288  1.3  mrg void AdjustStackSize(void *attr);
    289  1.8  mrg void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
    290  1.3  mrg void SetSandboxingCallback(void (*f)());
    291  1.3  mrg 
    292  1.4  mrg void InitializeCoverage(bool enabled, const char *coverage_dir);
    293  1.4  mrg 
    294  1.3  mrg void InitTlsSize();
    295  1.3  mrg uptr GetTlsSize();
    296  1.1  mrg 
    297  1.1  mrg // Other
    298  1.9  mrg void SleepForSeconds(unsigned seconds);
    299  1.9  mrg void SleepForMillis(unsigned millis);
    300  1.3  mrg u64 NanoTime();
    301  1.8  mrg u64 MonotonicNanoTime();
    302  1.1  mrg int Atexit(void (*function)(void));
    303  1.4  mrg bool TemplateMatch(const char *templ, const char *str);
    304  1.1  mrg 
    305  1.1  mrg // Exit
    306  1.1  mrg void NORETURN Abort();
    307  1.1  mrg void NORETURN Die();
    308  1.3  mrg void NORETURN
    309  1.1  mrg CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
    310  1.4  mrg void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
    311  1.6  mrg                                       const char *mmap_type, error_t err,
    312  1.6  mrg                                       bool raw_report = false);
    313  1.1  mrg 
    314  1.9  mrg // Specific tools may override behavior of "Die" function to do tool-specific
    315  1.9  mrg // job.
    316  1.3  mrg typedef void (*DieCallbackType)(void);
    317  1.4  mrg 
    318  1.4  mrg // It's possible to add several callbacks that would be run when "Die" is
    319  1.4  mrg // called. The callbacks will be run in the opposite order. The tools are
    320  1.4  mrg // strongly recommended to setup all callbacks during initialization, when there
    321  1.4  mrg // is only a single thread.
    322  1.4  mrg bool AddDieCallback(DieCallbackType callback);
    323  1.4  mrg bool RemoveDieCallback(DieCallbackType callback);
    324  1.4  mrg 
    325  1.4  mrg void SetUserDieCallback(DieCallbackType callback);
    326  1.4  mrg 
    327  1.9  mrg void SetCheckUnwindCallback(void (*callback)());
    328  1.1  mrg 
    329  1.4  mrg // Callback will be called if soft_rss_limit_mb is given and the limit is
    330  1.4  mrg // exceeded (exceeded==true) or if rss went down below the limit
    331  1.4  mrg // (exceeded==false).
    332  1.4  mrg // The callback should be registered once at the tool init time.
    333  1.4  mrg void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));
    334  1.4  mrg 
    335  1.3  mrg // Functions related to signal handling.
    336  1.3  mrg typedef void (*SignalHandlerType)(int, void *, void *);
    337  1.7  mrg HandleSignalMode GetHandleSignalMode(int signum);
    338  1.3  mrg void InstallDeadlySignalHandlers(SignalHandlerType handler);
    339  1.7  mrg 
    340  1.7  mrg // Signal reporting.
    341  1.7  mrg // Each sanitizer uses slightly different implementation of stack unwinding.
    342  1.7  mrg typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
    343  1.7  mrg                                               const void *callback_context,
    344  1.7  mrg                                               BufferedStackTrace *stack);
    345  1.7  mrg // Print deadly signal report and die.
    346  1.7  mrg void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
    347  1.7  mrg                         UnwindSignalStackCallbackType unwind,
    348  1.7  mrg                         const void *unwind_context);
    349  1.7  mrg 
    350  1.7  mrg // Part of HandleDeadlySignal, exposed for asan.
    351  1.7  mrg void StartReportDeadlySignal();
    352  1.7  mrg // Part of HandleDeadlySignal, exposed for asan.
    353  1.7  mrg void ReportDeadlySignal(const SignalContext &sig, u32 tid,
    354  1.7  mrg                         UnwindSignalStackCallbackType unwind,
    355  1.7  mrg                         const void *unwind_context);
    356  1.7  mrg 
    357  1.3  mrg // Alternative signal stack (POSIX-only).
    358  1.3  mrg void SetAlternateSignalStack();
    359  1.3  mrg void UnsetAlternateSignalStack();
    360  1.3  mrg 
    361  1.3  mrg // Construct a one-line string:
    362  1.3  mrg //   SUMMARY: SanitizerToolName: error_message
    363  1.3  mrg // and pass it to __sanitizer_report_error_summary.
    364  1.7  mrg // If alt_tool_name is provided, it's used in place of SanitizerToolName.
    365  1.7  mrg void ReportErrorSummary(const char *error_message,
    366  1.7  mrg                         const char *alt_tool_name = nullptr);
    367  1.3  mrg // Same as above, but construct error_message as:
    368  1.4  mrg //   error_type file:line[:column][ function]
    369  1.7  mrg void ReportErrorSummary(const char *error_type, const AddressInfo &info,
    370  1.7  mrg                         const char *alt_tool_name = nullptr);
    371  1.4  mrg // Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
    372  1.7  mrg void ReportErrorSummary(const char *error_type, const StackTrace *trace,
    373  1.7  mrg                         const char *alt_tool_name = nullptr);
    374  1.1  mrg 
    375  1.9  mrg void ReportMmapWriteExec(int prot, int mflags);
    376  1.8  mrg 
    377  1.1  mrg // Math
    378  1.3  mrg #if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
    379  1.1  mrg extern "C" {
    380  1.9  mrg unsigned char _BitScanForward(unsigned long *index, unsigned long mask);
    381  1.9  mrg unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);
    382  1.1  mrg #if defined(_WIN64)
    383  1.9  mrg unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);
    384  1.9  mrg unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);
    385  1.1  mrg #endif
    386  1.1  mrg }
    387  1.1  mrg #endif
    388  1.1  mrg 
// Returns the 0-based index (counted from the least significant bit) of the
// highest set bit of x. Precondition: x != 0.
inline uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  // 64-bit: use the long-long builtin so the full width of x is scanned.
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  // MSVC without builtins: fall back to the bit-scan intrinsic.
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
    405  1.1  mrg 
// Returns the 0-based index of the lowest set bit of x.
// Precondition: x != 0.
inline uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  // MSVC without builtins: fall back to the bit-scan intrinsic.
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
    422  1.3  mrg 
    423  1.9  mrg inline constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; }
    424  1.1  mrg 
    425  1.9  mrg inline uptr RoundUpToPowerOfTwo(uptr size) {
    426  1.1  mrg   CHECK(size);
    427  1.1  mrg   if (IsPowerOfTwo(size)) return size;
    428  1.1  mrg 
    429  1.1  mrg   uptr up = MostSignificantSetBitIndex(size);
    430  1.6  mrg   CHECK_LT(size, (1ULL << (up + 1)));
    431  1.6  mrg   CHECK_GT(size, (1ULL << up));
    432  1.4  mrg   return 1ULL << (up + 1);
    433  1.1  mrg }
    434  1.1  mrg 
// Rounds size up to a multiple of boundary; boundary must be a power of two.
inline constexpr uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}
    439  1.1  mrg 
// Rounds x down to a multiple of boundary (boundary must be a power of two;
// unlike RoundUpTo this is not asserted here).
inline constexpr uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}
    443  1.1  mrg 
// True iff a is a multiple of alignment (alignment must be a power of two).
inline constexpr bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}
    447  1.1  mrg 
// Returns log2(x). Precondition: x is a non-zero power of two.
inline uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
    452  1.1  mrg 
    453  1.1  mrg // Don't use std::min, std::max or std::swap, to minimize dependency
    454  1.1  mrg // on libstdc++.
// Minimum of two values; local replacement for std::min to avoid libstdc++.
template <class T>
constexpr T Min(T a, T b) {
  return a < b ? a : b;
}
// Maximum of two values; local replacement for std::max to avoid libstdc++.
template <class T>
constexpr T Max(T a, T b) {
  return a > b ? a : b;
}
// Exchanges a and b; local replacement for std::swap to avoid libstdc++.
template <class T>
void Swap(T &a, T &b) {
  T old_a = a;
  a = b;
  b = old_a;
}
    468  1.1  mrg 
    469  1.1  mrg // Char handling
// Locale-independent replacement for isspace(): true for exactly the six
// standard ASCII whitespace characters.
inline bool IsSpace(int c) {
  switch (c) {
    case ' ':
    case '\n':
    case '\t':
    case '\f':
    case '\r':
    case '\v':
      return true;
    default:
      return false;
  }
}
// Locale-independent replacement for isdigit(): ASCII '0'..'9' only.
inline bool IsDigit(int c) {
  return '0' <= c && c <= '9';
}
// Locale-independent replacement for tolower(): folds ASCII 'A'..'Z' to
// lower case and leaves everything else unchanged.
inline int ToLower(int c) {
  if (c < 'A' || c > 'Z')
    return c;
  return c - 'A' + 'a';
}
    480  1.1  mrg 
// A low-level vector based on mmap. May incur a significant memory overhead
// for small vectors (capacity is rounded up to whole pages).
// WARNING: The current implementation supports only POD types — elements are
// copied with raw memcpy/memset and never constructed or destroyed.
template<typename T>
class InternalMmapVectorNoCtor {
 public:
  using value_type = T;
  // Prepares the vector for use. There is deliberately no constructor so
  // instances can be linker-initialized; call Initialize before anything else.
  void Initialize(uptr initial_capacity) {
    capacity_bytes_ = 0;
    size_ = 0;
    data_ = 0;
    reserve(initial_capacity);
  }
  // Returns the backing mapping to the OS; the vector is unusable afterwards.
  void Destroy() { UnmapOrDie(data_, capacity_bytes_); }
  T &operator[](uptr i) {
    CHECK_LT(i, size_);
    return data_[i];
  }
  const T &operator[](uptr i) const {
    CHECK_LT(i, size_);
    return data_[i];
  }
  // Appends a copy of element, growing the mapping to the next power of two
  // (in elements) when full. POD-only: the copy is a raw memcpy.
  void push_back(const T &element) {
    CHECK_LE(size_, capacity());
    if (size_ == capacity()) {
      uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
      Realloc(new_capacity);
    }
    internal_memcpy(&data_[size_++], &element, sizeof(T));
  }
  T &back() {
    CHECK_GT(size_, 0);
    return data_[size_ - 1];
  }
  void pop_back() {
    CHECK_GT(size_, 0);
    size_--;
  }
  uptr size() const {
    return size_;
  }
  const T *data() const {
    return data_;
  }
  T *data() {
    return data_;
  }
  // Capacity in elements. Byte capacity is page-rounded, so this can exceed
  // what was requested via reserve/Initialize.
  uptr capacity() const { return capacity_bytes_ / sizeof(T); }
  void reserve(uptr new_size) {
    // Never downsize internal buffer.
    if (new_size > capacity())
      Realloc(new_size);
  }
  // Changes the logical size; a growing resize zero-fills the new tail.
  // The backing mapping is never shrunk.
  void resize(uptr new_size) {
    if (new_size > size_) {
      reserve(new_size);
      internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    }
    size_ = new_size;
  }

  void clear() { size_ = 0; }
  bool empty() const { return size() == 0; }

  const T *begin() const {
    return data();
  }
  T *begin() {
    return data();
  }
  const T *end() const {
    return data() + size();
  }
  T *end() {
    return data() + size();
  }

  void swap(InternalMmapVectorNoCtor &other) {
    Swap(data_, other.data_);
    Swap(capacity_bytes_, other.capacity_bytes_);
    Swap(size_, other.size_);
  }

 private:
  // Replaces the backing mapping with one of >= new_capacity elements
  // (rounded up to whole pages), copying the live elements across.
  void Realloc(uptr new_capacity) {
    CHECK_GT(new_capacity, 0);
    CHECK_LE(size_, new_capacity);
    uptr new_capacity_bytes =
        RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached());
    T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector");
    internal_memcpy(new_data, data_, size_ * sizeof(T));
    UnmapOrDie(data_, capacity_bytes_);
    data_ = new_data;
    capacity_bytes_ = new_capacity_bytes;
  }

  T *data_;            // mmap-backed element storage.
  uptr capacity_bytes_;  // Size of the mapping in bytes (page multiple).
  uptr size_;            // Number of live elements.
};
    581  1.3  mrg 
// Element-wise equality via raw memory compare — valid because the vector
// supports only POD element types.
template <typename T>
bool operator==(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  if (lhs.size() != rhs.size()) return false;
  return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0;
}
    588  1.8  mrg 
// Inequality, defined as the negation of operator== for
// InternalMmapVectorNoCtor.
template <typename T>
bool operator!=(const InternalMmapVectorNoCtor<T> &lhs,
                const InternalMmapVectorNoCtor<T> &rhs) {
  return !(lhs == rhs);
}
    594  1.8  mrg 
// RAII variant of InternalMmapVectorNoCtor: the constructor initializes the
// vector and the destructor releases the mapping. Non-copyable/non-movable.
template<typename T>
class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
 public:
  InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); }
  // Creates a vector holding cnt zero-initialized elements.
  explicit InternalMmapVector(uptr cnt) {
    InternalMmapVectorNoCtor<T>::Initialize(cnt);
    this->resize(cnt);
  }
  ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
  // Disallow copies and moves.
  InternalMmapVector(const InternalMmapVector &) = delete;
  InternalMmapVector &operator=(const InternalMmapVector &) = delete;
  InternalMmapVector(InternalMmapVector &&) = delete;
  InternalMmapVector &operator=(InternalMmapVector &&) = delete;
};
    610  1.8  mrg 
// Growable NUL-terminated string built on InternalMmapVector<char>.
// The terminating NUL lives inside the buffer, so length() == size() - 1.
class InternalScopedString {
 public:
  InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; }

  // Length of the string, excluding the terminating NUL.
  uptr length() const { return buffer_.size() - 1; }
  // Resets to the empty string (buffer keeps its mapping).
  void clear() {
    buffer_.resize(1);
    buffer_[0] = '\0';
  }
  // Appends printf-style formatted text; format checking via FORMAT(2, 3).
  void append(const char *format, ...) FORMAT(2, 3);
  const char *data() const { return buffer_.data(); }
  char *data() { return buffer_.data(); }

 private:
  InternalMmapVector<char> buffer_;  // Characters plus trailing NUL.
};
    627  1.8  mrg 
// Default ordering functor for Sort/SortAndDedup/InternalLowerBound:
// plain operator< on T.
template <class T>
struct CompareLess {
  bool operator()(const T &lhs, const T &rhs) const { return lhs < rhs; }
};
    632  1.4  mrg 
    633  1.3  mrg // HeapSort for arrays and InternalMmapVector.
    634  1.8  mrg template <class T, class Compare = CompareLess<T>>
    635  1.8  mrg void Sort(T *v, uptr size, Compare comp = {}) {
    636  1.3  mrg   if (size < 2)
    637  1.3  mrg     return;
    638  1.3  mrg   // Stage 1: insert elements to the heap.
    639  1.3  mrg   for (uptr i = 1; i < size; i++) {
    640  1.3  mrg     uptr j, p;
    641  1.3  mrg     for (j = i; j > 0; j = p) {
    642  1.3  mrg       p = (j - 1) / 2;
    643  1.8  mrg       if (comp(v[p], v[j]))
    644  1.8  mrg         Swap(v[j], v[p]);
    645  1.3  mrg       else
    646  1.3  mrg         break;
    647  1.3  mrg     }
    648  1.3  mrg   }
    649  1.3  mrg   // Stage 2: swap largest element with the last one,
    650  1.3  mrg   // and sink the new top.
    651  1.3  mrg   for (uptr i = size - 1; i > 0; i--) {
    652  1.8  mrg     Swap(v[0], v[i]);
    653  1.3  mrg     uptr j, max_ind;
    654  1.3  mrg     for (j = 0; j < i; j = max_ind) {
    655  1.3  mrg       uptr left = 2 * j + 1;
    656  1.3  mrg       uptr right = 2 * j + 2;
    657  1.3  mrg       max_ind = j;
    658  1.8  mrg       if (left < i && comp(v[max_ind], v[left]))
    659  1.3  mrg         max_ind = left;
    660  1.8  mrg       if (right < i && comp(v[max_ind], v[right]))
    661  1.3  mrg         max_ind = right;
    662  1.3  mrg       if (max_ind != j)
    663  1.8  mrg         Swap(v[j], v[max_ind]);
    664  1.3  mrg       else
    665  1.3  mrg         break;
    666  1.3  mrg     }
    667  1.3  mrg   }
    668  1.3  mrg }
    669  1.3  mrg 
    670  1.7  mrg // Works like std::lower_bound: finds the first element that is not less
    671  1.7  mrg // than the val.
    672  1.9  mrg template <class Container,
    673  1.9  mrg           class Compare = CompareLess<typename Container::value_type>>
    674  1.9  mrg uptr InternalLowerBound(const Container &v,
    675  1.9  mrg                         const typename Container::value_type &val,
    676  1.9  mrg                         Compare comp = {}) {
    677  1.9  mrg   uptr first = 0;
    678  1.9  mrg   uptr last = v.size();
    679  1.7  mrg   while (last > first) {
    680  1.3  mrg     uptr mid = (first + last) / 2;
    681  1.3  mrg     if (comp(v[mid], val))
    682  1.3  mrg       first = mid + 1;
    683  1.3  mrg     else
    684  1.7  mrg       last = mid;
    685  1.7  mrg   }
    686  1.7  mrg   return first;
    687  1.7  mrg }
    688  1.7  mrg 
// Architectures a loaded module can be built for.  The canonical textual
// spelling of each value is produced by ModuleArchToString() below.
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64,
  kModuleArchRISCV64,
  kModuleArchHexagon
};
    702  1.7  mrg 
    703  1.9  mrg // Sorts and removes duplicates from the container.
    704  1.9  mrg template <class Container,
    705  1.9  mrg           class Compare = CompareLess<typename Container::value_type>>
    706  1.9  mrg void SortAndDedup(Container &v, Compare comp = {}) {
    707  1.9  mrg   Sort(v.data(), v.size(), comp);
    708  1.9  mrg   uptr size = v.size();
    709  1.9  mrg   if (size < 2)
    710  1.9  mrg     return;
    711  1.9  mrg   uptr last = 0;
    712  1.9  mrg   for (uptr i = 1; i < size; ++i) {
    713  1.9  mrg     if (comp(v[last], v[i])) {
    714  1.9  mrg       ++last;
    715  1.9  mrg       if (last != i)
    716  1.9  mrg         v[last] = v[i];
    717  1.9  mrg     } else {
    718  1.9  mrg       CHECK(!comp(v[i], v[last]));
    719  1.9  mrg     }
    720  1.9  mrg   }
    721  1.9  mrg   v.resize(last + 1);
    722  1.9  mrg }
    723  1.9  mrg 
// Default cap on how much of a file the ReadFileTo* helpers will read:
// 64 MiB on 32-bit targets, 256 MiB on 64-bit ones.
constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// Returns true if file was successfully opened and read.
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// file multiple times to avoid mmap during read attempts. It's used to read
// procmap, so short reads with mmap in between can produce inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);
    745  1.8  mrg 
    746  1.7  mrg // When adding a new architecture, don't forget to also update
    747  1.9  mrg // script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
    748  1.7  mrg inline const char *ModuleArchToString(ModuleArch arch) {
    749  1.7  mrg   switch (arch) {
    750  1.7  mrg     case kModuleArchUnknown:
    751  1.7  mrg       return "";
    752  1.7  mrg     case kModuleArchI386:
    753  1.7  mrg       return "i386";
    754  1.7  mrg     case kModuleArchX86_64:
    755  1.7  mrg       return "x86_64";
    756  1.7  mrg     case kModuleArchX86_64H:
    757  1.7  mrg       return "x86_64h";
    758  1.7  mrg     case kModuleArchARMV6:
    759  1.7  mrg       return "armv6";
    760  1.7  mrg     case kModuleArchARMV7:
    761  1.7  mrg       return "armv7";
    762  1.7  mrg     case kModuleArchARMV7S:
    763  1.7  mrg       return "armv7s";
    764  1.7  mrg     case kModuleArchARMV7K:
    765  1.7  mrg       return "armv7k";
    766  1.7  mrg     case kModuleArchARM64:
    767  1.7  mrg       return "arm64";
    768  1.9  mrg     case kModuleArchRISCV64:
    769  1.9  mrg       return "riscv64";
    770  1.9  mrg     case kModuleArchHexagon:
    771  1.9  mrg       return "hexagon";
    772  1.3  mrg   }
    773  1.7  mrg   CHECK(0 && "Invalid module arch");
    774  1.7  mrg   return "";
    775  1.3  mrg }
    776  1.3  mrg 
    777  1.7  mrg const uptr kModuleUUIDSize = 16;
    778  1.7  mrg const uptr kMaxSegName = 16;
    779  1.7  mrg 
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_executable_address_(0),
        arch_(kModuleArchUnknown),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  // Populate the module description; implementations are out of line.
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  // Resets the module; out-of-line (full_name_ is owned, see below).
  void clear();
  // Registers the [beg, end) mapped range with its protection flags and an
  // optional segment name.
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_executable_address() const { return max_executable_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  bool instrumented() const { return instrumented_; }

  // One mapped segment of the module; linked into an intrusive list.
  struct AddressRange {
    AddressRange *next;  // Intrusive-list link.
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      // NOTE(review): strncpy leaves 'name' without a NUL terminator when
      // the source is kMaxSegName chars or longer -- presumably readers
      // bound their reads to kMaxSegName; confirm.
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_executable_address_;
  ModuleArch arch_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};
    838  1.3  mrg 
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  // Bounds-checked element access.
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  // Releases per-module data, then empties the vector.
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  // First call allocates the backing storage; later calls clear it instead.
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};
    872  1.3  mrg 
    873  1.3  mrg // Callback type for iterating over a set of memory ranges.
    874  1.3  mrg typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);
    875  1.3  mrg 
// Android API levels this runtime distinguishes; the non-zero values are the
// platform's numeric API levels (0 is reserved for "not Android").
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
    882  1.4  mrg 
void WriteToSyslog(const char *buffer);

// Windows tracing support requires the MSVC toolchain proper; clang-cl
// (which defines __clang__) takes the generic path.
#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_MAC || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
// No-op on platforms without a full-report logging backend.
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
// No-op stubs for platforms without syslog integration.
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
// Non-Android builds get inline no-op stubs so call sites need no #ifdefs.
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
    923  1.4  mrg 
// Number of passes made over thread-specific-data destructors at thread
// exit.  4 matches the usual POSIX PTHREAD_DESTRUCTOR_ITERATIONS; Android
// L MR1 (API 22) used 8 -- presumably a platform quirk, hence the special
// case.
inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}
    934  1.4  mrg 
    935  1.9  mrg void *internal_start_thread(void *(*func)(void*), void *arg);
    936  1.4  mrg void internal_join_thread(void *th);
    937  1.4  mrg void MaybeStartBackgroudThread();
    938  1.4  mrg 
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  // MSVC has no GNU inline asm; _ReadWriteBarrier is a compiler-level fence
  // that blocks reordering/elision across this point.
  _ReadWriteBarrier();
#else
  // Empty asm with 'arg' as an input and a "memory" clobber: the compiler
  // must assume arbitrary memory is touched here, so it cannot recognize or
  // fold the surrounding loop.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
    950  1.4  mrg 
// Platform-independent view of a signal/exception: the raw OS records plus
// fault information decoded from them.
struct SignalContext {
  void *siginfo;  // OS signal info record (not owned).
  void *context;  // OS machine context (not owned).
  uptr addr;      // Faulting address; 0 when the kernel cannot provide it.
  uptr pc;        // Filled in by InitPcSpBp().
  uptr sp;        // Filled in by InitPcSpBp().
  uptr bp;        // Filled in by InitPcSpBp().
  bool is_memory_access;
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then.  This field allows to distinguish between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};
   1002  1.4  mrg 
   1003  1.9  mrg void InitializePlatformEarly();
   1004  1.6  mrg void MaybeReexec();
   1005  1.6  mrg 
// Scope guard: stores a callable and invokes it exactly once from the
// destructor.  Typically constructed through at_scope_exit() below.
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn hook) : hook_(hook) {}
  ~RunOnDestruction() { hook_(); }

 private:
  Fn hook_;
};
   1015  1.6  mrg 
// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
// The callable runs exactly once, when 'cleanup' is destroyed.
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
   1022  1.6  mrg 
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
// No-op everywhere except Linux on s390x.
inline void AvoidCVE_2016_2143() {}
#endif
   1032  1.6  mrg 
// Usage statistics reported by the stack depot.
struct StackDepotStats {
  uptr n_uniq_ids;  // Number of unique ids stored.
  uptr allocated;   // Amount of memory allocated (presumably bytes -- confirm
                    // against the depot implementation).
};
   1037  1.6  mrg 
   1038  1.7  mrg // The default value for allocator_release_to_os_interval_ms common flag to
   1039  1.7  mrg // indicate that sanitizer allocator should not attempt to release memory to OS.
   1040  1.7  mrg const s32 kReleaseToOSIntervalNever = -1;
   1041  1.7  mrg 
   1042  1.7  mrg void CheckNoDeepBind(const char *filename, int flag);
   1043  1.7  mrg 
   1044  1.7  mrg // Returns the requested amount of random data (up to 256 bytes) that can then
   1045  1.7  mrg // be used to seed a PRNG. Defaults to blocking like the underlying syscall.
   1046  1.7  mrg bool GetRandom(void *buffer, uptr length, bool blocking = true);
   1047  1.7  mrg 
// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
// Cached variant: queries the OS on first use, then serves the stored value.
// NOTE(review): NumberOfCPUsCached is read and written without
// synchronization -- racing callers would each store the same value, so this
// is presumably benign, but confirm if TSan-clean behavior is required.
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}
   1056  1.8  mrg 
// Minimal non-owning view over a contiguous half-open range [begin, end)
// of T.  Default-constructed views are empty (both pointers null); the view
// never allocates or frees the underlying storage.
template <typename T>
class ArrayRef {
 public:
  ArrayRef() {}
  ArrayRef(T *begin, T *end) : first_(begin), last_(end) {}

  T *begin() { return first_; }
  T *end() { return last_; }

 private:
  T *first_ = nullptr;
  T *last_ = nullptr;
};
   1070  1.9  mrg 
   1071  1.1  mrg }  // namespace __sanitizer
   1072  1.1  mrg 
// Placement-style operator new that draws memory from a LowLevelAllocator
// instead of the global heap.  No matching operator delete is declared here;
// presumably such allocations are never individually freed -- confirm
// against LowLevelAllocator's contract.
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
   1077  1.3  mrg 
   1078  1.1  mrg #endif  // SANITIZER_COMMON_H
   1079