Home | History | Annotate | Line # | Download | only in sanitizer_common
sanitizer_common.h revision 1.6
      1 //===-- sanitizer_common.h --------------------------------------*- C++ -*-===//
      2 //
      3 // This file is distributed under the University of Illinois Open Source
      4 // License. See LICENSE.TXT for details.
      5 //
      6 //===----------------------------------------------------------------------===//
      7 //
      8 // This file is shared between run-time libraries of sanitizers.
      9 //
     10 // It declares common functions and classes that are used in both runtimes.
     11 // Implementation of some functions are provided in sanitizer_common, while
     12 // others must be defined by run-time library itself.
     13 //===----------------------------------------------------------------------===//
     14 #ifndef SANITIZER_COMMON_H
     15 #define SANITIZER_COMMON_H
     16 
     17 #include "sanitizer_flags.h"
     18 #include "sanitizer_interface_internal.h"
     19 #include "sanitizer_internal_defs.h"
     20 #include "sanitizer_libc.h"
     21 #include "sanitizer_list.h"
     22 #include "sanitizer_mutex.h"
     23 
     24 #if defined(_MSC_VER) && !defined(__clang__)
     25 extern "C" void _ReadWriteBarrier();
     26 #pragma intrinsic(_ReadWriteBarrier)
     27 #endif
     28 
namespace __sanitizer {
// Forward declarations; full definitions live in the stacktrace/symbolizer
// headers.
struct StackTrace;
struct AddressInfo;

// Constants.
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;

// PowerPC typically has 128-byte cache lines; assume 64 bytes elsewhere.
#if defined(__powerpc__) || defined(__powerpc64__)
  const uptr kCacheLineSize = 128;
#else
  const uptr kCacheLineSize = 64;
#endif

const uptr kMaxPathLength = 4096;

const uptr kMaxThreadStackSize = 1 << 30;  // 1Gb

static const uptr kErrorMessageBufferSize = 1 << 16;

// Denotes fake PC values that come from JIT/JAVA/etc.
// For such PC values __tsan_symbolize_external() will be called.
const u64 kExternalPCBit = 1ULL << 60;

extern const char *SanitizerToolName;  // Can be changed by the tool.

// Runtime verbosity level; read/written with relaxed atomics so that
// concurrent VReport/VPrintf calls are race-free.
extern atomic_uint32_t current_verbosity;
INLINE void SetVerbosity(int verbosity) {
  atomic_store(&current_verbosity, verbosity, memory_order_relaxed);
}
INLINE int Verbosity() {
  return atomic_load(&current_verbosity, memory_order_relaxed);
}
     62 
uptr GetPageSize();
extern uptr PageSizeCached;
// Returns the OS page size, caching it in PageSizeCached on first use.
// NOTE(review): the lazy init is unsynchronized; presumably racing threads
// can only store the same value, so the race is benign -- confirm.
INLINE uptr GetPageSizeCached() {
  if (!PageSizeCached)
    PageSizeCached = GetPageSize();
  return PageSizeCached;
}
uptr GetMmapGranularity();
uptr GetMaxVirtualAddress();
// Threads
uptr GetTid();
uptr GetThreadSelf();
void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top,
                                uptr *stack_bottom);
void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size,
                          uptr *tls_addr, uptr *tls_size);

// Memory management
void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false);
// Convenience wrapper: MmapOrDie with raw_report set.
INLINE void *MmapOrDieQuietly(uptr size, const char *mem_type) {
  return MmapOrDie(size, mem_type, /*raw_report*/ true);
}
void UnmapOrDie(void *addr, uptr size);
void *MmapFixedNoReserve(uptr fixed_addr, uptr size,
                         const char *name = nullptr);
void *MmapNoReserveOrDie(uptr size, const char *mem_type);
void *MmapFixedOrDie(uptr fixed_addr, uptr size);
void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr);
void *MmapNoAccess(uptr size);
// Map aligned chunk of address space; size and alignment are powers of two.
void *MmapAlignedOrDie(uptr size, uptr alignment, const char *mem_type);
// Disallow access to a memory range.  Use MmapFixedNoAccess to allocate an
// unaccessible memory.
bool MprotectNoAccess(uptr addr, uptr size);
bool MprotectReadOnly(uptr addr, uptr size);

// Find an available address space.
uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding);

// Used to check if we can map shadow memory to a fixed location.
bool MemoryRangeIsAvailable(uptr range_start, uptr range_end);
void ReleaseMemoryToOS(uptr addr, uptr size);
void IncreaseTotalMmap(uptr size);
void DecreaseTotalMmap(uptr size);
uptr GetRSS();
void NoHugePagesInRegion(uptr addr, uptr length);
void DontDumpShadowMemory(uptr addr, uptr length);
// Check if the built VMA size matches the runtime one.
void CheckVMASize();
// Allocator hook dispatchers, invoked on every malloc/free-like event.
void RunMallocHooks(const void *ptr, uptr size);
void RunFreeHooks(const void *ptr);
    114 
    115 // InternalScopedBuffer can be used instead of large stack arrays to
    116 // keep frame size low.
    117 // FIXME: use InternalAlloc instead of MmapOrDie once
    118 // InternalAlloc is made libc-free.
    119 template <typename T>
    120 class InternalScopedBuffer {
    121  public:
    122   explicit InternalScopedBuffer(uptr cnt) {
    123     cnt_ = cnt;
    124     ptr_ = (T *)MmapOrDie(cnt * sizeof(T), "InternalScopedBuffer");
    125   }
    126   ~InternalScopedBuffer() { UnmapOrDie(ptr_, cnt_ * sizeof(T)); }
    127   T &operator[](uptr i) { return ptr_[i]; }
    128   T *data() { return ptr_; }
    129   uptr size() { return cnt_ * sizeof(T); }
    130 
    131  private:
    132   T *ptr_;
    133   uptr cnt_;
    134   // Disallow copies and moves.
    135   InternalScopedBuffer(const InternalScopedBuffer &) = delete;
    136   InternalScopedBuffer &operator=(const InternalScopedBuffer &) = delete;
    137   InternalScopedBuffer(InternalScopedBuffer &&) = delete;
    138   InternalScopedBuffer &operator=(InternalScopedBuffer &&) = delete;
    139 };
    140 
// Fixed-capacity character buffer (mmap-backed, see base class) with
// printf-style append; used to build reports without the heap.
class InternalScopedString : public InternalScopedBuffer<char> {
 public:
  explicit InternalScopedString(uptr max_length)
      : InternalScopedBuffer<char>(max_length), length_(0) {
    (*this)[0] = '\0';
  }
  // Current string length, not counting the terminating NUL.
  uptr length() { return length_; }
  // Resets to the empty string; capacity is unchanged.
  void clear() {
    (*this)[0] = '\0';
    length_ = 0;
  }
  // Appends printf-style formatted text; implemented in the runtime library.
  void append(const char *format, ...);

 private:
  uptr length_;
};
    157 
// Simple low-level (mmap-based) allocator for internal use. Doesn't have
// constructor, so all instances of LowLevelAllocator should be
// linker initialized.  Memory is never freed.
class LowLevelAllocator {
 public:
  // Requires an external lock.
  void *Allocate(uptr size);
 private:
  // Bump-pointer state: [allocated_current_, allocated_end_) is the
  // unused tail of the current mapping.
  char *allocated_end_;
  char *allocated_current_;
};
typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size);
// Allows to register tool-specific callbacks for LowLevelAllocator.
// Passing NULL removes the callback.
void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback);

// IO
void RawWrite(const char *buffer);
bool ColorizeReports();
void RemoveANSIEscapeSequencesFromString(char *buffer);
void Printf(const char *format, ...);
void Report(const char *format, ...);
void SetPrintfAndReportCallback(void (*callback)(const char *));
// Report/Printf only when the runtime verbosity is at least `level`.
#define VReport(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \
  } while (0)
#define VPrintf(level, ...)                                              \
  do {                                                                   \
    if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \
  } while (0)
    189 
// Can be used to prevent mixing error reports from different sanitizers.
extern StaticSpinMutex CommonSanitizerReportMutex;

struct ReportFile {
  void Write(const char *buffer, uptr length);
  bool SupportsColors();
  void SetReportPath(const char *path);

  // Don't use fields directly. They are only declared public to allow
  // aggregate initialization.

  // Protects fields below.
  StaticSpinMutex *mu;
  // Opened file descriptor. Defaults to stderr. It may be equal to
  // kInvalidFd, in which case new file will be opened when necessary.
  fd_t fd;
  // Path prefix of report file, set via __sanitizer_set_report_path.
  char path_prefix[kMaxPathLength];
  // Full path to report, obtained as <path_prefix>.PID
  char full_path[kMaxPathLength];
  // PID of the process that opened fd. If a fork() occurs,
  // the PID of child will be different from fd_pid.
  uptr fd_pid;

 private:
  void ReopenIfNecessary();
};
extern ReportFile report_file;

extern uptr stoptheworld_tracer_pid;
extern uptr stoptheworld_tracer_ppid;

// Access mode for OpenFile().
enum FileAccessMode {
  RdOnly,
  WrOnly,
  RdWr
};

// Returns kInvalidFd on error.
fd_t OpenFile(const char *filename, FileAccessMode mode,
              error_t *errno_p = nullptr);
void CloseFile(fd_t);

// Return true on success, false on error.
bool ReadFromFile(fd_t fd, void *buff, uptr buff_size,
                  uptr *bytes_read = nullptr, error_t *error_p = nullptr);
bool WriteToFile(fd_t fd, const void *buff, uptr buff_size,
                 uptr *bytes_written = nullptr, error_t *error_p = nullptr);

bool RenameFile(const char *oldpath, const char *newpath,
                error_t *error_p = nullptr);

// Scoped file handle closer.
// NOTE(review): not copy-protected -- copying one of these would close the
// same fd twice; keep instances strictly scoped.
struct FileCloser {
  explicit FileCloser(fd_t fd) : fd(fd) {}
  ~FileCloser() { CloseFile(fd); }
  fd_t fd;
};

bool SupportsColoredOutput(fd_t fd);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = 1 << 26,
                      error_t *errno_p = nullptr);
// Maps given file to virtual memory, and returns pointer to it
// (or NULL if mapping fails). Stores the size of mmaped region
// in '*buff_size'.
void *MapFileToMemory(const char *file_name, uptr *buff_size);
void *MapWritableFileToMemory(void *addr, uptr size, fd_t fd, OFF_T offset);

bool IsAccessibleMemoryRange(uptr beg, uptr size);
    266 
// Error report formatting.
const char *StripPathPrefix(const char *filepath,
                            const char *strip_file_prefix);
// Strip the directories from the module name.
const char *StripModuleName(const char *module);

// OS
uptr ReadBinaryName(/*out*/char *buf, uptr buf_len);
uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len);
uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len);
const char *GetProcessName();
void UpdateProcessName();
void CacheBinaryName();
void DisableCoreDumperIfNecessary();
void DumpProcessMap();
bool FileExists(const char *filename);
const char *GetEnv(const char *name);
bool SetEnv(const char *name, const char *value);
const char *GetPwd();
char *FindPathToBinary(const char *name);
bool IsPathSeparator(const char c);
bool IsAbsolutePath(const char *path);
// Starts a subprocess and returns its pid.
// If *_fd parameters are not kInvalidFd their corresponding input/output
// streams will be redirect to the file. The files will always be closed
// in parent process even in case of an error.
// The child process will close all fds after STDERR_FILENO
// before passing control to a program.
pid_t StartSubprocess(const char *filename, const char *const argv[],
                      fd_t stdin_fd = kInvalidFd, fd_t stdout_fd = kInvalidFd,
                      fd_t stderr_fd = kInvalidFd);
// Checks if specified process is still running
bool IsProcessRunning(pid_t pid);
// Waits for the process to finish and returns its exit code.
// Returns -1 in case of an error.
int WaitForProcess(pid_t pid);

u32 GetUid();
void ReExec();
char **GetArgv();
void PrintCmdline();
// Resource-limit helpers (stack size / address space).
bool StackSizeIsUnlimited();
uptr GetStackSizeLimitInBytes();
void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
void PrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void CovPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
void SetSandboxingCallback(void (*f)());

// Coverage support hooks.
void CoverageUpdateMapping();
void CovBeforeFork();
void CovAfterFork(int child_pid);

void InitializeCoverage(bool enabled, const char *coverage_dir);
void ReInitializeCoverage(bool enabled, const char *coverage_dir);

void InitTlsSize();
uptr GetTlsSize();

// Other
void SleepForSeconds(int seconds);
void SleepForMillis(int millis);
u64 NanoTime();
int Atexit(void (*function)(void));
void SortArray(uptr *array, uptr size);
void SortArray(u32 *array, uptr size);
bool TemplateMatch(const char *templ, const char *str);

// Exit
void NORETURN Abort();
void NORETURN Die();
void NORETURN
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2);
void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type,
                                      const char *mmap_type, error_t err,
                                      bool raw_report = false);

// Set the name of the current thread to 'name', return true on success.
// The name may be truncated to a system-dependent limit.
bool SanitizerSetThreadName(const char *name);
// Get the name of the current thread (no more than max_len bytes),
// return true on success. name should have space for at least max_len+1 bytes.
bool SanitizerGetThreadName(char *name, int max_len);

// Specific tools may override behavior of "Die" and "CheckFailed" functions
// to do tool-specific job.
typedef void (*DieCallbackType)(void);

// It's possible to add several callbacks that would be run when "Die" is
// called. The callbacks will be run in the opposite order. The tools are
// strongly recommended to setup all callbacks during initialization, when there
// is only a single thread.
bool AddDieCallback(DieCallbackType callback);
bool RemoveDieCallback(DieCallbackType callback);

void SetUserDieCallback(DieCallbackType callback);

typedef void (*CheckFailedCallbackType)(const char *, int, const char *,
                                       u64, u64);
void SetCheckFailedCallback(CheckFailedCallbackType callback);

// Callback will be called if soft_rss_limit_mb is given and the limit is
// exceeded (exceeded==true) or if rss went down below the limit
// (exceeded==false).
// The callback should be registered once at the tool init time.
void SetSoftRssLimitExceededCallback(void (*Callback)(bool exceeded));

// Callback to be called when we want to try releasing unused allocator memory
// back to the OS.
typedef void (*AllocatorReleaseToOSCallback)();
// The callback should be registered once at the tool init time.
void SetAllocatorReleaseToOSCallback(AllocatorReleaseToOSCallback Callback);

// Functions related to signal handling.
typedef void (*SignalHandlerType)(int, void *, void *);
bool IsHandledDeadlySignal(int signum);
void InstallDeadlySignalHandlers(SignalHandlerType handler);
// Alternative signal stack (POSIX-only).
void SetAlternateSignalStack();
void UnsetAlternateSignalStack();

// We don't want a summary too long.
const int kMaxSummaryLength = 1024;
// Construct a one-line string:
//   SUMMARY: SanitizerToolName: error_message
// and pass it to __sanitizer_report_error_summary.
void ReportErrorSummary(const char *error_message);
// Same as above, but construct error_message as:
//   error_type file:line[:column][ function]
void ReportErrorSummary(const char *error_type, const AddressInfo &info);
// Same as above, but obtains AddressInfo by symbolizing top stack trace frame.
void ReportErrorSummary(const char *error_type, const StackTrace *trace);
    401 
// Math
// Plain MSVC has no __builtin_clz/__builtin_ctz; declare the BitScan
// intrinsics used by the helpers below instead.
#if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__)
extern "C" {
unsigned char _BitScanForward(unsigned long *index, unsigned long mask);  // NOLINT
unsigned char _BitScanReverse(unsigned long *index, unsigned long mask);  // NOLINT
#if defined(_WIN64)
unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask);  // NOLINT
unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask);  // NOLINT
#endif
}
#endif
    413 
// Returns the 0-based index of the most significant set bit of x.
// Precondition: x != 0 (checked).
INLINE uptr MostSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  // On 64-bit Windows `unsigned long` is only 32 bits, so the `long long`
  // builtin is needed to cover all of uptr.
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x);
# else
  up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x);
# endif
#elif defined(_WIN64)
  _BitScanReverse64(&up, x);
#else
  _BitScanReverse(&up, x);
#endif
  return up;
}
    430 
// Returns the 0-based index of the least significant set bit of x.
// Precondition: x != 0 (checked).
INLINE uptr LeastSignificantSetBitIndex(uptr x) {
  CHECK_NE(x, 0U);
  unsigned long up;  // NOLINT
#if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__)
# ifdef _WIN64
  // See MostSignificantSetBitIndex: 64-bit Windows needs the ll builtin.
  up = __builtin_ctzll(x);
# else
  up = __builtin_ctzl(x);
# endif
#elif defined(_WIN64)
  _BitScanForward64(&up, x);
#else
  _BitScanForward(&up, x);
#endif
  return up;
}
    447 
// Returns true iff x is a power of two.  NOTE: also returns true for x == 0;
// callers that care must check for zero themselves.
INLINE bool IsPowerOfTwo(uptr x) {
  return (x & (x - 1)) == 0;
}

// Returns the smallest power of two that is >= size.
// Precondition: size != 0 (checked).  If the top bit of size is set and size
// is not a power of two, the `1ULL << (up + 1)` below shifts by the full
// word width, so the result must be representable in uptr.
INLINE uptr RoundUpToPowerOfTwo(uptr size) {
  CHECK(size);
  if (IsPowerOfTwo(size)) return size;

  uptr up = MostSignificantSetBitIndex(size);
  CHECK_LT(size, (1ULL << (up + 1)));
  CHECK_GT(size, (1ULL << up));
  return 1ULL << (up + 1);
}

// Rounds size up to a multiple of boundary, which must be a power of two
// (checked).
INLINE uptr RoundUpTo(uptr size, uptr boundary) {
  RAW_CHECK(IsPowerOfTwo(boundary));
  return (size + boundary - 1) & ~(boundary - 1);
}

// Rounds x down to a multiple of boundary (assumed to be a power of two;
// not checked).
INLINE uptr RoundDownTo(uptr x, uptr boundary) {
  return x & ~(boundary - 1);
}

// Returns true iff a is a multiple of alignment (assumed power of two).
INLINE bool IsAligned(uptr a, uptr alignment) {
  return (a & (alignment - 1)) == 0;
}

// Base-2 logarithm; x must be a power of two (checked).
INLINE uptr Log2(uptr x) {
  CHECK(IsPowerOfTwo(x));
  return LeastSignificantSetBitIndex(x);
}
    479 
// Don't use std::min, std::max or std::swap, to minimize dependency
// on libstdc++.

// Returns the smaller of the two values (the second one on ties).
template<class T> T Min(T a, T b) {
  if (a < b)
    return a;
  return b;
}
// Returns the larger of the two values (the second one on ties).
template<class T> T Max(T a, T b) {
  if (a > b)
    return a;
  return b;
}
// Exchanges the values of a and b.
template<class T> void Swap(T& a, T& b) {
  T saved = a;
  a = b;
  b = saved;
}
    489 
    490 // Char handling
    491 INLINE bool IsSpace(int c) {
    492   return (c == ' ') || (c == '\n') || (c == '\t') ||
    493          (c == '\f') || (c == '\r') || (c == '\v');
    494 }
    495 INLINE bool IsDigit(int c) {
    496   return (c >= '0') && (c <= '9');
    497 }
    498 INLINE int ToLower(int c) {
    499   return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c;
    500 }
    501 
    502 // A low-level vector based on mmap. May incur a significant memory overhead for
    503 // small vectors.
    504 // WARNING: The current implementation supports only POD types.
    505 template<typename T>
    506 class InternalMmapVectorNoCtor {
    507  public:
    508   void Initialize(uptr initial_capacity) {
    509     capacity_ = Max(initial_capacity, (uptr)1);
    510     size_ = 0;
    511     data_ = (T *)MmapOrDie(capacity_ * sizeof(T), "InternalMmapVectorNoCtor");
    512   }
    513   void Destroy() {
    514     UnmapOrDie(data_, capacity_ * sizeof(T));
    515   }
    516   T &operator[](uptr i) {
    517     CHECK_LT(i, size_);
    518     return data_[i];
    519   }
    520   const T &operator[](uptr i) const {
    521     CHECK_LT(i, size_);
    522     return data_[i];
    523   }
    524   void push_back(const T &element) {
    525     CHECK_LE(size_, capacity_);
    526     if (size_ == capacity_) {
    527       uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1);
    528       Resize(new_capacity);
    529     }
    530     internal_memcpy(&data_[size_++], &element, sizeof(T));
    531   }
    532   T &back() {
    533     CHECK_GT(size_, 0);
    534     return data_[size_ - 1];
    535   }
    536   void pop_back() {
    537     CHECK_GT(size_, 0);
    538     size_--;
    539   }
    540   uptr size() const {
    541     return size_;
    542   }
    543   const T *data() const {
    544     return data_;
    545   }
    546   T *data() {
    547     return data_;
    548   }
    549   uptr capacity() const {
    550     return capacity_;
    551   }
    552 
    553   void clear() { size_ = 0; }
    554   bool empty() const { return size() == 0; }
    555 
    556   const T *begin() const {
    557     return data();
    558   }
    559   T *begin() {
    560     return data();
    561   }
    562   const T *end() const {
    563     return data() + size();
    564   }
    565   T *end() {
    566     return data() + size();
    567   }
    568 
    569  private:
    570   void Resize(uptr new_capacity) {
    571     CHECK_GT(new_capacity, 0);
    572     CHECK_LE(size_, new_capacity);
    573     T *new_data = (T *)MmapOrDie(new_capacity * sizeof(T),
    574                                  "InternalMmapVector");
    575     internal_memcpy(new_data, data_, size_ * sizeof(T));
    576     T *old_data = data_;
    577     data_ = new_data;
    578     UnmapOrDie(old_data, capacity_ * sizeof(T));
    579     capacity_ = new_capacity;
    580   }
    581 
    582   T *data_;
    583   uptr capacity_;
    584   uptr size_;
    585 };
    586 
    587 template<typename T>
    588 class InternalMmapVector : public InternalMmapVectorNoCtor<T> {
    589  public:
    590   explicit InternalMmapVector(uptr initial_capacity) {
    591     InternalMmapVectorNoCtor<T>::Initialize(initial_capacity);
    592   }
    593   ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); }
    594   // Disallow evil constructors.
    595   InternalMmapVector(const InternalMmapVector&);
    596   void operator=(const InternalMmapVector&);
    597 };
    598 
// HeapSort for arrays and InternalMmapVector.
// Sorts v[0..size-1] in place using `comp` as the strict "less than"
// predicate; uses no extra memory beyond Swap's temporary.
template<class Container, class Compare>
void InternalSort(Container *v, uptr size, Compare comp) {
  if (size < 2)
    return;
  // Stage 1: insert elements to the heap.
  for (uptr i = 1; i < size; i++) {
    uptr j, p;
    // Sift v[i] up while it compares greater than its parent.
    for (j = i; j > 0; j = p) {
      p = (j - 1) / 2;  // parent index in the implicit binary heap
      if (comp((*v)[p], (*v)[j]))
        Swap((*v)[j], (*v)[p]);
      else
        break;
    }
  }
  // Stage 2: swap largest element with the last one,
  // and sink the new top.
  for (uptr i = size - 1; i > 0; i--) {
    Swap((*v)[0], (*v)[i]);
    uptr j, max_ind;
    // Sift the new root down within the shrunken heap v[0..i-1].
    for (j = 0; j < i; j = max_ind) {
      uptr left = 2 * j + 1;
      uptr right = 2 * j + 2;
      max_ind = j;
      if (left < i && comp((*v)[max_ind], (*v)[left]))
        max_ind = left;
      if (right < i && comp((*v)[max_ind], (*v)[right]))
        max_ind = right;
      if (max_ind != j)
        Swap((*v)[j], (*v)[max_ind]);
      else
        break;
    }
  }
}
    635 
    636 template<class Container, class Value, class Compare>
    637 uptr InternalBinarySearch(const Container &v, uptr first, uptr last,
    638                           const Value &val, Compare comp) {
    639   uptr not_found = last + 1;
    640   while (last >= first) {
    641     uptr mid = (first + last) / 2;
    642     if (comp(v[mid], val))
    643       first = mid + 1;
    644     else if (comp(val, v[mid]))
    645       last = mid - 1;
    646     else
    647       return mid;
    648   }
    649   return not_found;
    650 }
    651 
// Represents a binary loaded into virtual memory (e.g. this can be an
// executable or a shared object).
class LoadedModule {
 public:
  LoadedModule() : full_name_(nullptr), base_address_(0) { ranges_.clear(); }
  // Sets the module name and load address; implementation provided by the
  // runtime.
  void set(const char *module_name, uptr base_address);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }

  // One contiguous mapping of the module; `next` links it into the
  // intrusive ranges_ list.
  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;

    AddressRange(uptr beg, uptr end, bool executable)
        : next(nullptr), beg(beg), end(end), executable(executable) {}
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  IntrusiveList<AddressRange> ranges_;
};
    682 
// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : modules_(kInitialCapacity) {}
  ~ListOfModules() { clear(); }
  // Populates the list; OS-specific, defined in the runtime.
  void init();
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  // Releases per-module resources, then empties the vector.
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }

  InternalMmapVector<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
};
    710 
// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

// Android API levels the runtimes distinguish between.
enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

#if SANITIZER_MAC
void LogFullErrorReport(const char *buffer);
#else
// No-op stub on non-Mac platforms.
INLINE void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_MAC
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
// No-op stubs on platforms without syslog support.
INLINE void WriteOneLineToSyslog(const char *s) {}
INLINE void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
#else
INLINE void AndroidLogInit() {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
// Off-Android stubs: logging is a no-op and the API level reads as
// "not Android".
INLINE void AndroidLogWrite(const char *buffer_unused) {}
INLINE void SanitizerInitializeUnwinder() {}
INLINE AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif
    752 
// Number of passes made over thread-specific-data destructors at thread
// exit.  NOTE(review): presumably mirrors PTHREAD_DESTRUCTOR_ITERATIONS of
// the target libc -- confirm against the platform implementations.
INLINE uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  // Bionic on Lollipop MR1 gets 8 passes; other Android releases get 4.
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
// Unused on Windows.
  return 0;
#endif
}

// Internal (libc-free) thread primitives; implementations are
// platform-specific.
void *internal_start_thread(void(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();
    767 
// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  // MSVC compiler barrier (declared at the top of this header).
  _ReadWriteBarrier();
#else
  // Empty asm that claims to consume `arg` and clobber memory, so the
  // optimizer must assume it has arbitrary effects.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}
    779 
// Machine state captured at the point a (deadly) signal was raised,
// normalized across platforms by Create().
struct SignalContext {
  void *context;  // OS-specific signal context, stored as received.
  uptr addr;      // Address the signal refers to (e.g. the faulting access).
  uptr pc;        // Program counter.
  uptr sp;        // Stack pointer.
  uptr bp;        // Frame/base pointer.
  bool is_memory_access;

  // Whether the faulting instruction was reading or writing, if known.
  enum WriteFlag { UNKNOWN, READ, WRITE } write_flag;

  SignalContext(void *context, uptr addr, uptr pc, uptr sp, uptr bp,
                bool is_memory_access, WriteFlag write_flag)
      : context(context),
        addr(addr),
        pc(pc),
        sp(sp),
        bp(bp),
        is_memory_access(is_memory_access),
        write_flag(write_flag) {}

  // Creates signal context in a platform-specific manner.
  static SignalContext Create(void *siginfo, void *context);

  // Returns true if the "context" indicates a memory write.
  static WriteFlag GetWriteFlag(void *context);
};

// Fills pc/sp/bp from the given OS signal context (platform-specific).
void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp);

// NOTE(review): platform hook; on some targets this re-executes the process
// during init -- see the platform implementations for the exact conditions.
void MaybeReexec();
    810 
// Stores a callable and invokes it from the destructor; the building block
// for the at_scope_exit() guard below.
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : cleanup_(fn) {}
  ~RunOnDestruction() { cleanup_(); }

 private:
  Fn cleanup_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}
    827 
// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do).  This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
INLINE void AvoidCVE_2016_2143() {}
#endif

// Usage statistics reported by a stack depot: the number of unique stack
// ids and the amount of storage allocated for them (presumably bytes --
// confirm at the definition).
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

}  // namespace __sanitizer

// Placement-style operator new that carves memory out of a
// LowLevelAllocator. The caller must hold the allocator's external lock
// (see LowLevelAllocator::Allocate).
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H
    851