Home | History | Annotate | Line # | Download | only in sanitizer_common
sanitizer_common.h revision 1.1.1.5
      1 //===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
      2 //
      3 // This file is distributed under the University of Illinois Open Source
      4 // License. See LICENSE.TXT for details.
      5 //
      6 //===----------------------------------------------------------------------===//
      7 //
      8 // This file is shared between run-time libraries of sanitizers.
      9 //
     10 // It declares common functions and classes that are used in both runtimes.
     11 // Implementation of some functions are provided in sanitizer_common, while
     12 // others must be defined by run-time library itself.
     13 //===----------------------------------------------------------------------===//
     14 #ifndef SANITIZER_COMMON_H
     15 #define SANITIZER_COMMON_H
     16 
     17 #include "sanitizer_flags.h"
     18 #include "sanitizer_interface_internal.h"
     19 #include "sanitizer_internal_defs.h"
     20 #include "sanitizer_libc.h"
     21 #include "sanitizer_list.h"
     22 #include "sanitizer_mutex.h"
     23 
     24 #if defined(_MSC_VER) && !defined(__clang__)
     25 extern "C" void _ReadWriteBarrier();
     26 #pragma intrinsic(_ReadWriteBarrier)
     27 #endif
     28 
     29 namespace __sanitizer {
     30 
struct AddressInfo;
struct BufferedStackTrace;
struct SignalContext;
struct StackTrace;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

#if defined(__powerpc__) || defined(__powerpc64__)
  // PowerPC cores use 128-byte cache lines.
  const uptr kCacheLineSize = 128;
#else
  const uptr kCacheLineSize = 64;
#endif

// Buffer size for file-system paths (matches the common PATH_MAX of 4096).
const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

// Size of the buffer used when formatting fatal error messages.
static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;
     55 
extern const char *SanitizerToolName;  // Can be changed by the tool.

// Current verbosity level. Accessed with relaxed atomics: updates are cheap
// and are not ordered with respect to any other state.
extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
     65 
// Returns the OS page size.
uptr GetPageSize();
extern uptr PageSizeCached;
// Cached wrapper around GetPageSize(). NOTE(review): the lazy init is an
// unsynchronized check-then-store; presumed benign since every racing thread
// stores the same value -- confirm if ever used pre-main on exotic targets.
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
tid_t GetTid();
uptr GetThreadSelf();
// Retrieves the current thread's stack bounds; 'at_initialization' is true
// only during early process/thread startup.
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
// Retrieves both the stack range and the TLS range of the current thread;
// 'main' selects the main thread's bookkeeping.
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);
     82 
// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
// Same as MmapOrDie, but reports failure in "raw" (undecorated) form.
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
// Behaves just like MmapOrDie, but tolerates out of memory condition, in that
// case returns nullptr.
void *MmapOrDieOnFatalError(uptr size, const char *mem_type);
// Maps at a fixed address; 'name' presumably labels the mapping where the OS
// supports it -- see the platform implementations.
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
// Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in
// that case returns nullptr.
void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
// Dies on all but out of memory errors, in the latter case returns nullptr.
void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment,
                                   const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding,
                              uptr *largest_gap_found);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
// Releases memory pages entirely within the [beg, end] address range. Noop if
// the provided range does not contain at least one entire page.
void ReleaseMemoryPagesToOS(uptr beg, uptr end);
// Book-keeping of the total amount of memory mmaped by the runtime.
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
// Returns the current resident set size (RSS) of the process.
uptr GetRSS();
// Advisory hints for the given region; presumably no-ops where unsupported.
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
// Invoke user-installed allocation hooks, if any.
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);

// Signature of the tool callback that fills a memory-profile 'stats' array.
typedef void (*fill_profile_f)(uptr start, uptr rss, bool file,
                               /*out*/uptr *stats, uptr stats_size);

// Parse the contents of /proc/self/smaps and generate a memory profile.
// |cb| is a tool-specific callback that fills the |stats| array containing
// |stats_size| elements.
void GetMemoryProfile(fill_profile_f cb, uptr *stats, uptr stats_size);
    136 
    137 // InternalScopedBuffer can be used instead of large stack arrays to
    138 // keep frame size low.
    139 // FIXME: use InternalAlloc instead of MmapOrDie once
    140 // InternalAlloc is made libc-free.
    141 template <typename T>
    142 class InternalScopedBuffer {
    143  public:
    144   explicit InternalScopedBuffer(uptr cnt) {
    145     cnt_ = cnt;
    146     ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
    147   }
    148   ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
    149   T &operator[](uptr i) { return ptr_[i]; }
    150   T *data() { return ptr_; }
    151   uptr size() { return cnt_ * sizeof(T); }
    152 
    153  private:
    154   T *ptr_;
    155   uptr cnt_;
    156   // Disallow copies and moves.
    157   InternalScopedBuffer(const InternalScopedBuffer &) = delete;
    158   InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
    159   InternalScopedBuffer(InternalScopedBuffer &&) = delete;
    160   InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
    161 };
    162 
// Scoped fixed-capacity character buffer with printf-style append.
class InternalScopedString : public InternalScopedBuffer<char> {
 public:
  // Maps a buffer of 'max_length' bytes holding the empty string.
  explicit InternalScopedString(uptr max_length)
      : InternalScopedBuffer<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  // Current string length, excluding the terminating NUL.
  uptr length() { return length_; }
  // Resets to the empty string; the mapping is kept.
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  // Appends printf-formatted text; defined in the implementation file.
  void append(const char *format, ...);

 private:
  uptr length_;
};
    179 
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  // Allocation cursor state; see the implementation file for how the
  // [allocated_current_, allocated_end_) window is maintained.
  char *allocated_end_;
  char *allocated_current_;
};
// Invoked for every chunk the allocator maps.
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows to register tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);
    195 
// IO
// Writes the buffer on the fatal-error path, bypassing normal output.
void CatastrophicErrorWrite(const char *buffer, uptr length);
void RawWrite(const char *buffer);
// Returns true if reports should be colorized with ANSI escapes.
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
// Report/Print only when the runtime verbosity is at least 'level'.
#define VReport(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)

// Can be used to prevent mixing error reports from different sanitizers.
// FIXME: Replace with ScopedErrorReportLock and hide.
extern StaticSpinMutex CommonSanitizerReportMutex;

// Lock sanitizer error reporting and protects against nested errors.
class ScopedErrorReportLock {
 public:
  ScopedErrorReportLock();
  ~ScopedErrorReportLock();

  static void CheckLocked();
};
    225 
// NOTE(review): presumably set by the stop-the-world machinery so the tracer
// process can be told apart from the rest -- verify in sanitizer_stoptheworld.
extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Opens the file 'file_name" and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);

// Returns true if [beg, beg + size) can be accessed without faulting.
bool IsAccessibleMemoryRange(uptr beg, uptr size);

// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
void PrintModuleMap();
// Environment access; SetEnv returns false on failure.
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);

u32 GetUid();
void ReExec();
char **GetArgv();
void PrintCmdline();
// Stack-size and address-space limit queries/manipulation.
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
// Sandbox support (see __sanitizer_sandbox_on_notify in the interface).
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

void InitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();
    276 
// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
void SortArray(u32 *array, uptr size);
// Returns true if 'str' matches the pattern 'templ'; the exact wildcard
// semantics live in the implementation file.
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
// Invoked by the CHECK_* machinery on failure; never returns.
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. name should have space for at least max_len+1 bytes.
bool SanitizerGetThreadName(char *name, int max_len);
    301 
// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the opposite order. The tools are
// strongly recommended to setup all callbacks during initialization, when there
// is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

// Installs a single user-provided Die callback; NOTE(review): its ordering
// relative to the AddDieCallback list is defined in the implementation.
void SetUserDieCallback(DieCallbackType callback);

// (file, line, condition text, and the two compared values.)
typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                       u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
HandleSignalMode GetHandleSignalMode(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);

// Signal reporting.
// Each sanitizer uses slightly different implementation of stack unwinding.
typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig,
                                              const void *callback_context,
                                              BufferedStackTrace *stack);
// Print deadly signal report and die.
void HandleDeadlySignal(void *siginfo, void *context, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Part of HandleDeadlySignal, exposed for asan.
void StartReportDeadlySignal();
// Part of HandleDeadlySignal, exposed for asan.
void ReportDeadlySignal(const SignalContext &sig, u32 tid,
                        UnwindSignalStackCallbackType unwind,
                        const void *unwind_context);

// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
// If alt_tool_name is provided, it's used in place of SanitizerToolName.
void ReportErrorSummary(const char *error_message,
                        const char *alt_tool_name = nullptr);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info,
                        const char *alt_tool_name = nullptr);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace,
                        const char *alt_tool_name = nullptr);
    366 
// Math
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
// MSVC (non-clang) has no __builtin_clz/__builtin_ctz; declare the BitScan
// intrinsics used as fallbacks below.
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif
    378 
// Returns the 0-based index of the highest set bit of x, i.e. floor(log2(x)).
// x must be non-zero.
INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
    395 
// Returns the 0-based index of the lowest set bit of x (count of trailing
// zeros). x must be non-zero.
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
    412 
    413 INLINE bool IsPowerOfTwo(uptr x) {
    414   return (x & (x - 1)) == 0;
    415 }
    416 
    417 INLINE uptr RoundUpToPowerOfTwo(uptr size) {
    418   CHECK(size);
    419   if (IsPowerOfTwo(size)) return size;
    420 
    421   uptr up = MostSignificantSetBitIndex(size);
    422   CHECK_LT(size, (1ULL << (up + 1)));
    423   CHECK_GT(size, (1ULL << up));
    424   return 1ULL << (up + 1);
    425 }
    426 
    427 INLINE uptr RoundUpTo(uptr size, uptr boundary) {
    428   RAW_CHECK(IsPowerOfTwo(boundary));
    429   return (size + boundary - 1) & ~(boundary - 1);
    430 }
    431 
    432 INLINE uptr RoundDownTo(uptr x, uptr boundary) {
    433   return x & ~(boundary - 1);
    434 }
    435 
    436 INLINE bool IsAligned(uptr a, uptr alignment) {
    437   return (a & (alignment - 1)) == 0;
    438 }
    439 
    440 INLINE uptr Log2(uptr x) {
    441   CHECK(IsPowerOfTwo(x));
    442   return LeastSignificantSetBitIndex(x);
    443 }
    444 
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.

// Returns the smaller of a and b (b when they compare equivalent, matching
// the `a < b ? a : b` formulation).
template <class T>
T Min(T a, T b) {
  if (a < b)
    return a;
  return b;
}

// Returns the larger of a and b (b when they compare equivalent).
template <class T>
T Max(T a, T b) {
  if (a > b)
    return a;
  return b;
}

// Exchanges the values of a and b through a temporary copy.
template <class T>
void Swap(T &a, T &b) {
  T tmp(a);
  a = b;
  b = tmp;
}
    454 
    455 // Char handling
    456 INLINE bool IsSpace(int c) {
    457   return (c == ' ') || (c == '\n') || (c == '\t') ||
    458          (c == '\f') || (c == '\r') || (c == '\v');
    459 }
    460 INLINE bool IsDigit(int c) {
    461   return (c >= '0') && (c <= '9');
    462 }
    463 INLINE int ToLower(int c) {
    464   return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
    465 }
    466 
    467 // A low-level vector based on mmap. May incur a significant memory overhead for
    468 // small vectors.
    469 // WARNING: The current implementation supports only POD types.
    470 template<typename T>
    471 class InternalMmapVectorNoCtor {
    472  public:
    473   void Initialize(uptr initial_capacity) {
    474     capacity_ = Max(initial_capacity, (uptr)1);
    475     size_ = 0;
    476     data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
    477   }
    478   void Destroy() {
    479     UnmapOrDie(data_, capacity_ * sizeof(T));
    480   }
    481   T &operator[](uptr i) {
    482     CHECK_LT(i, size_);
    483     return data_[i];
    484   }
    485   const T &operator[](uptr i) const {
    486     CHECK_LT(i, size_);
    487     return data_[i];
    488   }
    489   void push_back(const T &element) {
    490     CHECK_LE(size_, capacity_);
    491     if (size_ == capacity_) {
    492       uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
    493       Resize(new_capacity);
    494     }
    495     internal_memcpy(&data_[size_++], &element, sizeof(T));
    496   }
    497   T &back() {
    498     CHECK_GT(size_, 0);
    499     return data_[size_ - 1];
    500   }
    501   void pop_back() {
    502     CHECK_GT(size_, 0);
    503     size_--;
    504   }
    505   uptr size() const {
    506     return size_;
    507   }
    508   const T *data() const {
    509     return data_;
    510   }
    511   T *data() {
    512     return data_;
    513   }
    514   uptr capacity() const {
    515     return capacity_;
    516   }
    517   void resize(uptr new_size) {
    518     Resize(new_size);
    519     if (new_size > size_) {
    520       internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_));
    521     }
    522     size_ = new_size;
    523   }
    524 
    525   void clear() { size_ = 0; }
    526   bool empty() const { return size() == 0; }
    527 
    528   const T *begin() const {
    529     return data();
    530   }
    531   T *begin() {
    532     return data();
    533   }
    534   const T *end() const {
    535     return data() + size();
    536   }
    537   T *end() {
    538     return data() + size();
    539   }
    540 
    541  private:
    542   void Resize(uptr new_capacity) {
    543     CHECK_GT(new_capacity, 0);
    544     CHECK_LE(size_, new_capacity);
    545     T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
    546                                  "InternalMmapVector");
    547     internal_memcpy(new_data, data_, size_ * sizeof(T));
    548     T *old_data = data_;
    549     data_ = new_data;
    550     UnmapOrDie(old_data, capacity_ * sizeof(T));
    551     capacity_ = new_capacity;
    552   }
    553 
    554   T *data_;
    555   uptr capacity_;
    556   uptr size_;
    557 };
    558 
    559 template<typename T>
    560 class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
    561  public:
    562   explicit InternalMmapVector(uptr initial_capacity) {
    563     InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
    564   }
    565   ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
    566   // Disallow evil constructors.
    567   InternalMmapVector(const InternalMmapVector&);
    568   void operator=(const InternalMmapVector&);
    569 };
    570 
// HeapSort for arrays and InternalMmapVector.
// Sorts (*v)[0..size) in ascending order of 'comp' (a less-than predicate);
// works on anything providing operator[].
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    // Sift (*v)[i] up towards the root while it exceeds its parent.
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;  // parent index in the implicit binary heap
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    // The heap occupies [0, i]; the maximum moves to its final slot i.
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    // Sift the new root down within the shrunken heap [0, i).
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}
    607 
    608 // Works like std::lower_bound: finds the first element that is not less
    609 // than the val.
    610 template <class Container, class Value, class Compare>
    611 uptr InternalLowerBound(const Container &v, uptr first, uptr last,
    612                         const Value &val, Compare comp) {
    613   while (last > first) {
    614     uptr mid = (first + last) / 2;
    615     if (comp(v[mid], val))
    616       first = mid + 1;
    617     else
    618       last = mid;
    619   }
    620   return first;
    621 }
    622 
// CPU architecture of a loaded module; kModuleArchUnknown is used when the
// architecture cannot be determined.
enum ModuleArch {
  kModuleArchUnknown,
  kModuleArchI386,
  kModuleArchX86_64,
  kModuleArchX86_64H,
  kModuleArchARMV6,
  kModuleArchARMV7,
  kModuleArchARMV7S,
  kModuleArchARMV7K,
  kModuleArchARM64
};
    634 
    635 // When adding a new architecture, don't forget to also update
    636 // script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cc.
    637 inline const char *ModuleArchToString(ModuleArch arch) {
    638   switch (arch) {
    639     case kModuleArchUnknown:
    640       return "";
    641     case kModuleArchI386:
    642       return "i386";
    643     case kModuleArchX86_64:
    644       return "x86_64";
    645     case kModuleArchX86_64H:
    646       return "x86_64h";
    647     case kModuleArchARMV6:
    648       return "armv6";
    649     case kModuleArchARMV7:
    650       return "armv7";
    651     case kModuleArchARMV7S:
    652       return "armv7s";
    653     case kModuleArchARMV7K:
    654       return "armv7k";
    655     case kModuleArchARM64:
    656       return "arm64";
    657   }
    658   CHECK(0 && "Invalid module arch");
    659   return "";
    660 }
    661 
    662 const uptr kModuleUUIDSize = 16;
    663 const uptr kMaxSegName = 16;
    664 
    665 // Represents a binary loaded into virtual memory (e.g. this can be an
    666 // executable or a shared object).
    667 class LoadedModule {
    668  public:
    669   LoadedModule()
    670       : full_name_(nullptr),
    671         base_address_(0),
    672         max_executable_address_(0),
    673         arch_(kModuleArchUnknown),
    674         instrumented_(false) {
    675     internal_memset(uuid_, 0, kModuleUUIDSize);
    676     ranges_.clear();
    677   }
    678   void set(const char *module_name, uptr base_address);
    679   void set(const char *module_name, uptr base_address, ModuleArch arch,
    680            u8 uuid[kModuleUUIDSize], bool instrumented);
    681   void clear();
    682   void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
    683                        const char *name = nullptr);
    684   bool containsAddress(uptr address) const;
    685 
    686   const char *full_name() const { return full_name_; }
    687   uptr base_address() const { return base_address_; }
    688   uptr max_executable_address() const { return max_executable_address_; }
    689   ModuleArch arch() const { return arch_; }
    690   const u8 *uuid() const { return uuid_; }
    691   bool instrumented() const { return instrumented_; }
    692 
    693   struct AddressRange {
    694     AddressRange *next;
    695     uptr beg;
    696     uptr end;
    697     bool executable;
    698     bool writable;
    699     char name[kMaxSegName];
    700 
    701     AddressRange(uptr beg, uptr end, bool executable, bool writable,
    702                  const char *name)
    703         : next(nullptr),
    704           beg(beg),
    705           end(end),
    706           executable(executable),
    707           writable(writable) {
    708       internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    709     }
    710   };
    711 
    712   const IntrusiveList<AddressRange> &ranges() const { return ranges_; }
    713 
    714  private:
    715   char *full_name_;  // Owned.
    716   uptr base_address_;
    717   uptr max_executable_address_;
    718   ModuleArch arch_;
    719   u8 uuid_[kModuleUUIDSize];
    720   bool instrumented_;
    721   IntrusiveList<AddressRange> ranges_;
    722 };
    723 
    724 // List of LoadedModules. OS-dependent implementation is responsible for
    725 // filling this information.
    726 class ListOfModules {
    727  public:
    728   ListOfModules() : initialized(false) {}
    729   ~ListOfModules() { clear(); }
    730   void init();
    731   void fallbackInit();  // Uses fallback init if available, otherwise clears
    732   const LoadedModule *begin() const { return modules_.begin(); }
    733   LoadedModule *begin() { return modules_.begin(); }
    734   const LoadedModule *end() const { return modules_.end(); }
    735   LoadedModule *end() { return modules_.end(); }
    736   uptr size() const { return modules_.size(); }
    737   const LoadedModule &operator[](uptr i) const {
    738     CHECK_LT(i, modules_.size());
    739     return modules_[i];
    740   }
    741 
    742  private:
    743   void clear() {
    744     for (auto &module : modules_) module.clear();
    745     modules_.clear();
    746   }
    747   void clearOrInit() {
    748     initialized ? clear() : modules_.Initialize(kInitialCapacity);
    749     initialized = true;
    750   }
    751 
    752   InternalMmapVectorNoCtor<LoadedModule> modules_;
    753   // We rarely have more than 16K loaded modules.
    754   static const uptr kInitialCapacity = 1 << 14;
    755   bool initialized;
    756 };
    757 
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

// Android API levels that alter runtime behavior; the non-zero values match
// the official Android SDK version numbers.
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};
    767 
void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
// No-op everywhere except Mac.
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
// No-op stubs on platforms without syslog support.
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
INLINE void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
INLINE void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
    802 
// Returns how many rounds of thread-specific-data destructor calls the
// platform's pthread implementation performs at thread exit
// (cf. PTHREAD_DESTRUCTOR_ITERATIONS).
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  // Bionic on Android 5.1 (LOLLIPOP_MR1) performs 8 iterations; other
  // versions match the POSIX-typical 4.
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}
    813 
// Starts a new thread running |func(arg)|; returns an opaque handle to be
// passed to internal_join_thread().
void *internal_start_thread(void(*func)(void*), void *arg);
// Waits for a thread created by internal_start_thread() to finish.
void internal_join_thread(void *th);
// (Sic: "Backgroud" -- the typo is part of the public name; do not fix here.)
void MaybeStartBackgroudThread();
    817 
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  // MSVC: compiler-only barrier that prevents reordering of memory accesses.
  _ReadWriteBarrier();
#else
  // GCC/Clang: empty asm with a "memory" clobber; |arg| is listed as an
  // input so the compiler must consider its value used.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
    829 
// Captures the machine state at the point a signal/exception was raised.
// Filled in by platform-specific code; the siginfo/context pointers are
// borrowed from the signal handler, never owned.
struct SignalContext {
  void *siginfo;  // Platform signal info (e.g. siginfo_t*); not owned.
  void *context;  // Platform machine context (e.g. ucontext_t*); not owned.
  uptr addr;      // Faulting address, obtained via GetAddress().
  uptr pc;        // Program counter; set by InitPcSpBp().
  uptr sp;        // Stack pointer; set by InitPcSpBp().
  uptr bp;        // Frame/base pointer; set by InitPcSpBp().
  bool is_memory_access;  // True if the signal was caused by a memory access.
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;  // Access direction.

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()) {
    // pc/sp/bp are not in the init list; they are filled in here.
    InitPcSpBp();
  }

  // Dumps all register values from |context| to the report.
  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
};
    874 
// Re-executes the current process when the platform requires it
// (NOTE(review): on Mac this historically re-execs with the runtime inserted
// via DYLD_INSERT_LIBRARIES -- confirm in the platform implementation).
void MaybeReexec();
    876 
// Holds a callable and invokes it from the destructor, i.e. when control
// leaves the scope owning this object. See at_scope_exit() for typical use.
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn callback) : callback_(callback) {}
  ~RunOnDestruction() { callback_(); }

 private:
  Fn callback_;
};
    886 
    887 // A simple scope guard. Usage:
    888 // auto cleanup = at_scope_exit([]{ do_cleanup; });
    889 template <typename Fn>
    890 RunOnDestruction<Fn> at_scope_exit(Fn fn) {
    891   return RunOnDestruction<Fn>(fn);
    892 }
    893 
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
// Other platforms are not affected; no-op.
INLINE void AvoidCVE_2016_2143() {}
#endif
    903 
// Statistics reported by a stack depot (deduplicated stack-trace storage).
struct StackDepotStats {
  uptr n_uniq_ids;  // Number of unique stack traces stored.
  uptr allocated;   // Memory consumed by the depot (presumably bytes --
                    // confirm at the reporting site).
};
    908 
// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;

// Checks a dlopen(|filename|, |flag|) call; NOTE(review): presumably rejects
// RTLD_DEEPBIND, which is incompatible with sanitizer interceptors --
// confirm at the definition.
void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);
    918 
    919 }  // namespace __sanitizer
    920 
// Placement-style operator new that carves |size| bytes out of a
// LowLevelAllocator. There is no matching operator delete
// (NOTE(review): LowLevelAllocator appears to have no deallocation path --
// confirm in sanitizer_common.cc).
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}
    925 
    926 #endif  // SANITIZER_COMMON_H
    927