1 1.1 mrg //===-- sanitizer_common.h --------------------------------------*- C++ -*-===// 2 1.1 mrg // 3 1.9 mrg // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. 4 1.9 mrg // See https://llvm.org/LICENSE.txt for license information. 5 1.9 mrg // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception 6 1.1 mrg // 7 1.1 mrg //===----------------------------------------------------------------------===// 8 1.1 mrg // 9 1.3 mrg // This file is shared between run-time libraries of sanitizers. 10 1.3 mrg // 11 1.1 mrg // It declares common functions and classes that are used in both runtimes. 12 1.1 mrg // Implementation of some functions are provided in sanitizer_common, while 13 1.1 mrg // others must be defined by run-time library itself. 14 1.1 mrg //===----------------------------------------------------------------------===// 15 1.1 mrg #ifndef SANITIZER_COMMON_H 16 1.1 mrg #define SANITIZER_COMMON_H 17 1.1 mrg 18 1.3 mrg #include "sanitizer_flags.h" 19 1.1 mrg #include "sanitizer_internal_defs.h" 20 1.2 christos #include "sanitizer_libc.h" 21 1.3 mrg #include "sanitizer_list.h" 22 1.2 christos #include "sanitizer_mutex.h" 23 1.3 mrg 24 1.4 mrg #if defined(_MSC_VER) && !defined(__clang__) 25 1.3 mrg extern "C" void _ReadWriteBarrier(); 26 1.3 mrg #pragma intrinsic(_ReadWriteBarrier) 27 1.3 mrg #endif 28 1.1 mrg 29 1.1 mrg namespace __sanitizer { 30 1.5 mrg 31 1.5 mrg struct AddressInfo; 32 1.5 mrg struct BufferedStackTrace; 33 1.5 mrg struct SignalContext; 34 1.1 mrg struct StackTrace; 35 1.1 mrg 36 1.1 mrg // Constants. 
37 1.1 mrg const uptr kWordSize = SANITIZER_WORDSIZE / 8; 38 1.1 mrg const uptr kWordSizeInBits = 8 * kWordSize; 39 1.1 mrg 40 1.6 mrg const uptr kCacheLineSize = SANITIZER_CACHE_LINE_SIZE; 41 1.1 mrg 42 1.3 mrg const uptr kMaxPathLength = 4096; 43 1.3 mrg 44 1.4 mrg const uptr kMaxThreadStackSize = 1 << 30; // 1Gb 45 1.2 christos 46 1.9 mrg const uptr kErrorMessageBufferSize = 1 << 16; 47 1.2 christos 48 1.3 mrg // Denotes fake PC values that come from JIT/JAVA/etc. 49 1.6 mrg // For such PC values __tsan_symbolize_external_ex() will be called. 50 1.3 mrg const u64 kExternalPCBit = 1ULL << 60; 51 1.3 mrg 52 1.1 mrg extern const char *SanitizerToolName; // Can be changed by the tool. 53 1.1 mrg 54 1.3 mrg extern atomic_uint32_t current_verbosity; 55 1.9 mrg inline void SetVerbosity(int verbosity) { 56 1.3 mrg atomic_store(¤t_verbosity, verbosity, memory_order_relaxed); 57 1.3 mrg } 58 1.9 mrg inline int Verbosity() { 59 1.3 mrg return atomic_load(¤t_verbosity, memory_order_relaxed); 60 1.3 mrg } 61 1.3 mrg 62 1.9 mrg #if SANITIZER_ANDROID 63 1.9 mrg inline uptr GetPageSize() { 64 1.9 mrg // Android post-M sysconf(_SC_PAGESIZE) crashes if called from .preinit_array. 
65 1.9 mrg return 4096; 66 1.9 mrg } 67 1.9 mrg inline uptr GetPageSizeCached() { 68 1.9 mrg return 4096; 69 1.9 mrg } 70 1.9 mrg #else 71 1.1 mrg uptr GetPageSize(); 72 1.4 mrg extern uptr PageSizeCached; 73 1.9 mrg inline uptr GetPageSizeCached() { 74 1.4 mrg if (!PageSizeCached) 75 1.4 mrg PageSizeCached = GetPageSize(); 76 1.4 mrg return PageSizeCached; 77 1.4 mrg } 78 1.9 mrg #endif 79 1.1 mrg uptr GetMmapGranularity(); 80 1.2 christos uptr GetMaxVirtualAddress(); 81 1.6 mrg uptr GetMaxUserVirtualAddress(); 82 1.1 mrg // Threads 83 1.5 mrg tid_t GetTid(); 84 1.6 mrg int TgKill(pid_t pid, tid_t tid, int sig); 85 1.1 mrg uptr GetThreadSelf(); 86 1.1 mrg void GetThreadStackTopAndBottom(bool at_initialization, uptr *stack_top, 87 1.1 mrg uptr *stack_bottom); 88 1.2 christos void GetThreadStackAndTls(bool main, uptr *stk_addr, uptr *stk_size, 89 1.2 christos uptr *tls_addr, uptr *tls_size); 90 1.1 mrg 91 1.1 mrg // Memory management 92 1.4 mrg void *MmapOrDie(uptr size, const char *mem_type, bool raw_report = false); 93 1.9 mrg inline void *MmapOrDieQuietly(uptr size, const char *mem_type) { 94 1.4 mrg return MmapOrDie(size, mem_type, /*raw_report*/ true); 95 1.4 mrg } 96 1.1 mrg void UnmapOrDie(void *addr, uptr size); 97 1.5 mrg // Behaves just like MmapOrDie, but tolerates out of memory condition, in that 98 1.5 mrg // case returns nullptr. 99 1.5 mrg void *MmapOrDieOnFatalError(uptr size, const char *mem_type); 100 1.6 mrg bool MmapFixedNoReserve(uptr fixed_addr, uptr size, const char *name = nullptr) 101 1.6 mrg WARN_UNUSED_RESULT; 102 1.9 mrg bool MmapFixedSuperNoReserve(uptr fixed_addr, uptr size, 103 1.9 mrg const char *name = nullptr) WARN_UNUSED_RESULT; 104 1.2 christos void *MmapNoReserveOrDie(uptr size, const char *mem_type); 105 1.9 mrg void *MmapFixedOrDie(uptr fixed_addr, uptr size, const char *name = nullptr); 106 1.5 mrg // Behaves just like MmapFixedOrDie, but tolerates out of memory condition, in 107 1.5 mrg // that case returns nullptr. 
108 1.9 mrg void *MmapFixedOrDieOnFatalError(uptr fixed_addr, uptr size, 109 1.9 mrg const char *name = nullptr); 110 1.4 mrg void *MmapFixedNoAccess(uptr fixed_addr, uptr size, const char *name = nullptr); 111 1.4 mrg void *MmapNoAccess(uptr size); 112 1.1 mrg // Map aligned chunk of address space; size and alignment are powers of two. 113 1.5 mrg // Dies on all but out of memory errors, in the latter case returns nullptr. 114 1.5 mrg void *MmapAlignedOrDieOnFatalError(uptr size, uptr alignment, 115 1.5 mrg const char *mem_type); 116 1.4 mrg // Disallow access to a memory range. Use MmapFixedNoAccess to allocate an 117 1.3 mrg // unaccessible memory. 118 1.3 mrg bool MprotectNoAccess(uptr addr, uptr size); 119 1.4 mrg bool MprotectReadOnly(uptr addr, uptr size); 120 1.10 mrg bool MprotectReadWrite(uptr addr, uptr size); 121 1.4 mrg 122 1.6 mrg void MprotectMallocZones(void *addr, int prot); 123 1.6 mrg 124 1.10 mrg #if SANITIZER_WINDOWS 125 1.10 mrg // Zero previously mmap'd memory. Currently used only on Windows. 126 1.10 mrg bool ZeroMmapFixedRegion(uptr fixed_addr, uptr size) WARN_UNUSED_RESULT; 127 1.10 mrg #endif 128 1.10 mrg 129 1.9 mrg #if SANITIZER_LINUX 130 1.9 mrg // Unmap memory. Currently only used on Linux. 131 1.9 mrg void UnmapFromTo(uptr from, uptr to); 132 1.9 mrg #endif 133 1.9 mrg 134 1.9 mrg // Maps shadow_size_bytes of shadow memory and returns shadow address. It will 135 1.9 mrg // be aligned to the mmap granularity * 2^shadow_scale, or to 136 1.9 mrg // 2^min_shadow_base_alignment if that is larger. The returned address will 137 1.9 mrg // have max(2^min_shadow_base_alignment, mmap granularity) on the left, and 138 1.9 mrg // shadow_size_bytes bytes on the right, which on linux is mapped no access. 139 1.9 mrg // The high_mem_end may be updated if the original shadow size doesn't fit. 
140 1.9 mrg uptr MapDynamicShadow(uptr shadow_size_bytes, uptr shadow_scale, 141 1.9 mrg uptr min_shadow_base_alignment, uptr &high_mem_end); 142 1.9 mrg 143 1.9 mrg // Let S = max(shadow_size, num_aliases * alias_size, ring_buffer_size). 144 1.9 mrg // Reserves 2*S bytes of address space to the right of the returned address and 145 1.9 mrg // ring_buffer_size bytes to the left. The returned address is aligned to 2*S. 146 1.9 mrg // Also creates num_aliases regions of accessible memory starting at offset S 147 1.9 mrg // from the returned address. Each region has size alias_size and is backed by 148 1.9 mrg // the same physical memory. 149 1.9 mrg uptr MapDynamicShadowAndAliases(uptr shadow_size, uptr alias_size, 150 1.9 mrg uptr num_aliases, uptr ring_buffer_size); 151 1.9 mrg 152 1.9 mrg // Reserve memory range [beg, end]. If madvise_shadow is true then apply 153 1.9 mrg // madvise (e.g. hugepages, core dumping) requested by options. 154 1.9 mrg void ReserveShadowMemoryRange(uptr beg, uptr end, const char *name, 155 1.9 mrg bool madvise_shadow = true); 156 1.9 mrg 157 1.9 mrg // Protect size bytes of memory starting at addr. Also try to protect 158 1.9 mrg // several pages at the start of the address space as specified by 159 1.9 mrg // zero_base_shadow_start, at most up to the size or zero_base_max_shadow_start. 160 1.9 mrg void ProtectGap(uptr addr, uptr size, uptr zero_base_shadow_start, 161 1.9 mrg uptr zero_base_max_shadow_start); 162 1.9 mrg 163 1.4 mrg // Find an available address space. 164 1.5 mrg uptr FindAvailableMemoryRange(uptr size, uptr alignment, uptr left_padding, 165 1.6 mrg uptr *largest_gap_found, uptr *max_occupied_addr); 166 1.3 mrg 167 1.1 mrg // Used to check if we can map shadow memory to a fixed location. 168 1.1 mrg bool MemoryRangeIsAvailable(uptr range_start, uptr range_end); 169 1.5 mrg // Releases memory pages entirely within the [beg, end] address range. 
Noop if 170 1.5 mrg // the provided range does not contain at least one entire page. 171 1.5 mrg void ReleaseMemoryPagesToOS(uptr beg, uptr end); 172 1.2 christos void IncreaseTotalMmap(uptr size); 173 1.2 christos void DecreaseTotalMmap(uptr size); 174 1.3 mrg uptr GetRSS(); 175 1.9 mrg void SetShadowRegionHugePageMode(uptr addr, uptr length); 176 1.6 mrg bool DontDumpShadowMemory(uptr addr, uptr length); 177 1.3 mrg // Check if the built VMA size matches the runtime one. 178 1.3 mrg void CheckVMASize(); 179 1.10 mrg void RunMallocHooks(void *ptr, uptr size); 180 1.10 mrg void RunFreeHooks(void *ptr); 181 1.1 mrg 182 1.6 mrg class ReservedAddressRange { 183 1.6 mrg public: 184 1.6 mrg uptr Init(uptr size, const char *name = nullptr, uptr fixed_addr = 0); 185 1.9 mrg uptr InitAligned(uptr size, uptr align, const char *name = nullptr); 186 1.9 mrg uptr Map(uptr fixed_addr, uptr size, const char *name = nullptr); 187 1.9 mrg uptr MapOrDie(uptr fixed_addr, uptr size, const char *name = nullptr); 188 1.6 mrg void Unmap(uptr addr, uptr size); 189 1.6 mrg void *base() const { return base_; } 190 1.6 mrg uptr size() const { return size_; } 191 1.6 mrg 192 1.6 mrg private: 193 1.6 mrg void* base_; 194 1.6 mrg uptr size_; 195 1.6 mrg const char* name_; 196 1.6 mrg uptr os_handle_; 197 1.6 mrg }; 198 1.6 mrg 199 1.5 mrg typedef void (*fill_profile_f)(uptr start, uptr rss, bool file, 200 1.9 mrg /*out*/ uptr *stats); 201 1.5 mrg 202 1.5 mrg // Parse the contents of /proc/self/smaps and generate a memory profile. 203 1.9 mrg // |cb| is a tool-specific callback that fills the |stats| array. 204 1.9 mrg void GetMemoryProfile(fill_profile_f cb, uptr *stats); 205 1.9 mrg void ParseUnixMemoryProfile(fill_profile_f cb, uptr *stats, char *smaps, 206 1.9 mrg uptr smaps_len); 207 1.5 mrg 208 1.1 mrg // Simple low-level (mmap-based) allocator for internal use. Doesn't have 209 1.1 mrg // constructor, so all instances of LowLevelAllocator should be 210 1.1 mrg // linker initialized. 
211 1.10 mrg // 212 1.10 mrg // NOTE: Users should instead use the singleton provided via 213 1.10 mrg // `GetGlobalLowLevelAllocator()` rather than create a new one. This way, the 214 1.10 mrg // number of mmap fragments can be reduced and use the same contiguous mmap 215 1.10 mrg // provided by this singleton. 216 1.1 mrg class LowLevelAllocator { 217 1.1 mrg public: 218 1.1 mrg // Requires an external lock. 219 1.1 mrg void *Allocate(uptr size); 220 1.10 mrg 221 1.1 mrg private: 222 1.1 mrg char *allocated_end_; 223 1.1 mrg char *allocated_current_; 224 1.1 mrg }; 225 1.6 mrg // Set the min alignment of LowLevelAllocator to at least alignment. 226 1.6 mrg void SetLowLevelAllocateMinAlignment(uptr alignment); 227 1.1 mrg typedef void (*LowLevelAllocateCallback)(uptr ptr, uptr size); 228 1.1 mrg // Allows to register tool-specific callbacks for LowLevelAllocator. 229 1.1 mrg // Passing NULL removes the callback. 230 1.1 mrg void SetLowLevelAllocateCallback(LowLevelAllocateCallback callback); 231 1.1 mrg 232 1.10 mrg LowLevelAllocator &GetGlobalLowLevelAllocator(); 233 1.10 mrg 234 1.1 mrg // IO 235 1.5 mrg void CatastrophicErrorWrite(const char *buffer, uptr length); 236 1.1 mrg void RawWrite(const char *buffer); 237 1.2 christos bool ColorizeReports(); 238 1.4 mrg void RemoveANSIEscapeSequencesFromString(char *buffer); 239 1.9 mrg void Printf(const char *format, ...) FORMAT(1, 2); 240 1.9 mrg void Report(const char *format, ...) FORMAT(1, 2); 241 1.1 mrg void SetPrintfAndReportCallback(void (*callback)(const char *)); 242 1.2 christos #define VReport(level, ...) \ 243 1.2 christos do { \ 244 1.3 mrg if ((uptr)Verbosity() >= (level)) Report(__VA_ARGS__); \ 245 1.2 christos } while (0) 246 1.2 christos #define VPrintf(level, ...) \ 247 1.2 christos do { \ 248 1.3 mrg if ((uptr)Verbosity() >= (level)) Printf(__VA_ARGS__); \ 249 1.2 christos } while (0) 250 1.2 christos 251 1.5 mrg // Lock sanitizer error reporting and protects against nested errors. 
252 1.5 mrg class ScopedErrorReportLock { 253 1.5 mrg public: 254 1.10 mrg ScopedErrorReportLock() SANITIZER_ACQUIRE(mutex_) { Lock(); } 255 1.10 mrg ~ScopedErrorReportLock() SANITIZER_RELEASE(mutex_) { Unlock(); } 256 1.9 mrg 257 1.10 mrg static void Lock() SANITIZER_ACQUIRE(mutex_); 258 1.10 mrg static void Unlock() SANITIZER_RELEASE(mutex_); 259 1.10 mrg static void CheckLocked() SANITIZER_CHECK_LOCKED(mutex_); 260 1.3 mrg 261 1.9 mrg private: 262 1.9 mrg static atomic_uintptr_t reporting_thread_; 263 1.9 mrg static StaticSpinMutex mutex_; 264 1.3 mrg }; 265 1.3 mrg 266 1.2 christos extern uptr stoptheworld_tracer_pid; 267 1.2 christos extern uptr stoptheworld_tracer_ppid; 268 1.1 mrg 269 1.2 christos bool IsAccessibleMemoryRange(uptr beg, uptr size); 270 1.2 christos 271 1.2 christos // Error report formatting. 272 1.2 christos const char *StripPathPrefix(const char *filepath, 273 1.2 christos const char *strip_file_prefix); 274 1.2 christos // Strip the directories from the module name. 
275 1.2 christos const char *StripModuleName(const char *module); 276 1.1 mrg 277 1.1 mrg // OS 278 1.3 mrg uptr ReadBinaryName(/*out*/char *buf, uptr buf_len); 279 1.3 mrg uptr ReadBinaryNameCached(/*out*/char *buf, uptr buf_len); 280 1.9 mrg uptr ReadBinaryDir(/*out*/ char *buf, uptr buf_len); 281 1.3 mrg uptr ReadLongProcessName(/*out*/ char *buf, uptr buf_len); 282 1.3 mrg const char *GetProcessName(); 283 1.3 mrg void UpdateProcessName(); 284 1.3 mrg void CacheBinaryName(); 285 1.2 christos void DisableCoreDumperIfNecessary(); 286 1.1 mrg void DumpProcessMap(); 287 1.1 mrg const char *GetEnv(const char *name); 288 1.2 christos bool SetEnv(const char *name, const char *value); 289 1.3 mrg 290 1.1 mrg u32 GetUid(); 291 1.1 mrg void ReExec(); 292 1.6 mrg void CheckASLR(); 293 1.9 mrg void CheckMPROTECT(); 294 1.4 mrg char **GetArgv(); 295 1.9 mrg char **GetEnviron(); 296 1.4 mrg void PrintCmdline(); 297 1.1 mrg bool StackSizeIsUnlimited(); 298 1.1 mrg void SetStackSizeLimitInBytes(uptr limit); 299 1.2 christos bool AddressSpaceIsUnlimited(); 300 1.2 christos void SetAddressSpaceUnlimited(); 301 1.2 christos void AdjustStackSize(void *attr); 302 1.10 mrg void PlatformPrepareForSandboxing(void *args); 303 1.2 christos void SetSandboxingCallback(void (*f)()); 304 1.2 christos 305 1.3 mrg void InitializeCoverage(bool enabled, const char *coverage_dir); 306 1.3 mrg 307 1.2 christos void InitTlsSize(); 308 1.2 christos uptr GetTlsSize(); 309 1.1 mrg 310 1.1 mrg // Other 311 1.10 mrg void WaitForDebugger(unsigned seconds, const char *label); 312 1.9 mrg void SleepForSeconds(unsigned seconds); 313 1.9 mrg void SleepForMillis(unsigned millis); 314 1.2 christos u64 NanoTime(); 315 1.6 mrg u64 MonotonicNanoTime(); 316 1.1 mrg int Atexit(void (*function)(void)); 317 1.3 mrg bool TemplateMatch(const char *templ, const char *str); 318 1.1 mrg 319 1.1 mrg // Exit 320 1.1 mrg void NORETURN Abort(); 321 1.1 mrg void NORETURN Die(); 322 1.2 christos void NORETURN 323 1.1 mrg 
CheckFailed(const char *file, int line, const char *cond, u64 v1, u64 v2); 324 1.3 mrg void NORETURN ReportMmapFailureAndDie(uptr size, const char *mem_type, 325 1.4 mrg const char *mmap_type, error_t err, 326 1.4 mrg bool raw_report = false); 327 1.10 mrg void NORETURN ReportMunmapFailureAndDie(void *ptr, uptr size, error_t err, 328 1.10 mrg bool raw_report = false); 329 1.10 mrg 330 1.10 mrg // Returns true if the platform-specific error reported is an OOM error. 331 1.10 mrg bool ErrorIsOOM(error_t err); 332 1.10 mrg 333 1.10 mrg // This reports an error in the form: 334 1.10 mrg // 335 1.10 mrg // `ERROR: {{SanitizerToolName}}: out of memory: {{err_msg}}` 336 1.10 mrg // 337 1.10 mrg // Downstream tools that read sanitizer output will know that errors starting 338 1.10 mrg // in this format are specifically OOM errors. 339 1.10 mrg #define ERROR_OOM(err_msg, ...) \ 340 1.10 mrg Report("ERROR: %s: out of memory: " err_msg, SanitizerToolName, __VA_ARGS__) 341 1.1 mrg 342 1.9 mrg // Specific tools may override behavior of "Die" function to do tool-specific 343 1.9 mrg // job. 344 1.2 christos typedef void (*DieCallbackType)(void); 345 1.3 mrg 346 1.3 mrg // It's possible to add several callbacks that would be run when "Die" is 347 1.3 mrg // called. The callbacks will be run in the opposite order. The tools are 348 1.3 mrg // strongly recommended to setup all callbacks during initialization, when there 349 1.3 mrg // is only a single thread. 350 1.3 mrg bool AddDieCallback(DieCallbackType callback); 351 1.3 mrg bool RemoveDieCallback(DieCallbackType callback); 352 1.3 mrg 353 1.3 mrg void SetUserDieCallback(DieCallbackType callback); 354 1.3 mrg 355 1.9 mrg void SetCheckUnwindCallback(void (*callback)()); 356 1.1 mrg 357 1.2 christos // Functions related to signal handling. 
358 1.2 christos typedef void (*SignalHandlerType)(int, void *, void *); 359 1.5 mrg HandleSignalMode GetHandleSignalMode(int signum); 360 1.2 christos void InstallDeadlySignalHandlers(SignalHandlerType handler); 361 1.5 mrg 362 1.5 mrg // Signal reporting. 363 1.5 mrg // Each sanitizer uses slightly different implementation of stack unwinding. 364 1.5 mrg typedef void (*UnwindSignalStackCallbackType)(const SignalContext &sig, 365 1.5 mrg const void *callback_context, 366 1.5 mrg BufferedStackTrace *stack); 367 1.5 mrg // Print deadly signal report and die. 368 1.5 mrg void HandleDeadlySignal(void *siginfo, void *context, u32 tid, 369 1.5 mrg UnwindSignalStackCallbackType unwind, 370 1.5 mrg const void *unwind_context); 371 1.5 mrg 372 1.5 mrg // Part of HandleDeadlySignal, exposed for asan. 373 1.5 mrg void StartReportDeadlySignal(); 374 1.5 mrg // Part of HandleDeadlySignal, exposed for asan. 375 1.5 mrg void ReportDeadlySignal(const SignalContext &sig, u32 tid, 376 1.5 mrg UnwindSignalStackCallbackType unwind, 377 1.5 mrg const void *unwind_context); 378 1.5 mrg 379 1.2 christos // Alternative signal stack (POSIX-only). 380 1.2 christos void SetAlternateSignalStack(); 381 1.2 christos void UnsetAlternateSignalStack(); 382 1.2 christos 383 1.2 christos // Construct a one-line string: 384 1.2 christos // SUMMARY: SanitizerToolName: error_message 385 1.2 christos // and pass it to __sanitizer_report_error_summary. 386 1.5 mrg // If alt_tool_name is provided, it's used in place of SanitizerToolName. 387 1.5 mrg void ReportErrorSummary(const char *error_message, 388 1.5 mrg const char *alt_tool_name = nullptr); 389 1.2 christos // Same as above, but construct error_message as: 390 1.3 mrg // error_type file:line[:column][ function] 391 1.5 mrg void ReportErrorSummary(const char *error_type, const AddressInfo &info, 392 1.5 mrg const char *alt_tool_name = nullptr); 393 1.3 mrg // Same as above, but obtains AddressInfo by symbolizing top stack trace frame. 
394 1.5 mrg void ReportErrorSummary(const char *error_type, const StackTrace *trace, 395 1.5 mrg const char *alt_tool_name = nullptr); 396 1.1 mrg 397 1.9 mrg void ReportMmapWriteExec(int prot, int mflags); 398 1.6 mrg 399 1.1 mrg // Math 400 1.2 christos #if SANITIZER_WINDOWS && !defined(__clang__) && !defined(__GNUC__) 401 1.1 mrg extern "C" { 402 1.9 mrg unsigned char _BitScanForward(unsigned long *index, unsigned long mask); 403 1.9 mrg unsigned char _BitScanReverse(unsigned long *index, unsigned long mask); 404 1.1 mrg #if defined(_WIN64) 405 1.9 mrg unsigned char _BitScanForward64(unsigned long *index, unsigned __int64 mask); 406 1.9 mrg unsigned char _BitScanReverse64(unsigned long *index, unsigned __int64 mask); 407 1.1 mrg #endif 408 1.1 mrg } 409 1.1 mrg #endif 410 1.1 mrg 411 1.9 mrg inline uptr MostSignificantSetBitIndex(uptr x) { 412 1.2 christos CHECK_NE(x, 0U); 413 1.9 mrg unsigned long up; 414 1.2 christos #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) 415 1.3 mrg # ifdef _WIN64 416 1.3 mrg up = SANITIZER_WORDSIZE - 1 - __builtin_clzll(x); 417 1.3 mrg # else 418 1.1 mrg up = SANITIZER_WORDSIZE - 1 - __builtin_clzl(x); 419 1.3 mrg # endif 420 1.1 mrg #elif defined(_WIN64) 421 1.1 mrg _BitScanReverse64(&up, x); 422 1.1 mrg #else 423 1.1 mrg _BitScanReverse(&up, x); 424 1.1 mrg #endif 425 1.1 mrg return up; 426 1.1 mrg } 427 1.1 mrg 428 1.9 mrg inline uptr LeastSignificantSetBitIndex(uptr x) { 429 1.2 christos CHECK_NE(x, 0U); 430 1.9 mrg unsigned long up; 431 1.2 christos #if !SANITIZER_WINDOWS || defined(__clang__) || defined(__GNUC__) 432 1.3 mrg # ifdef _WIN64 433 1.3 mrg up = __builtin_ctzll(x); 434 1.3 mrg # else 435 1.2 christos up = __builtin_ctzl(x); 436 1.3 mrg # endif 437 1.2 christos #elif defined(_WIN64) 438 1.2 christos _BitScanForward64(&up, x); 439 1.2 christos #else 440 1.2 christos _BitScanForward(&up, x); 441 1.2 christos #endif 442 1.2 christos return up; 443 1.2 christos } 444 1.2 christos 445 1.9 mrg inline 
constexpr bool IsPowerOfTwo(uptr x) { return (x & (x - 1)) == 0; } 446 1.1 mrg 447 1.9 mrg inline uptr RoundUpToPowerOfTwo(uptr size) { 448 1.1 mrg CHECK(size); 449 1.1 mrg if (IsPowerOfTwo(size)) return size; 450 1.1 mrg 451 1.1 mrg uptr up = MostSignificantSetBitIndex(size); 452 1.4 mrg CHECK_LT(size, (1ULL << (up + 1))); 453 1.4 mrg CHECK_GT(size, (1ULL << up)); 454 1.3 mrg return 1ULL << (up + 1); 455 1.1 mrg } 456 1.1 mrg 457 1.9 mrg inline constexpr uptr RoundUpTo(uptr size, uptr boundary) { 458 1.4 mrg RAW_CHECK(IsPowerOfTwo(boundary)); 459 1.1 mrg return (size + boundary - 1) & ~(boundary - 1); 460 1.1 mrg } 461 1.1 mrg 462 1.9 mrg inline constexpr uptr RoundDownTo(uptr x, uptr boundary) { 463 1.1 mrg return x & ~(boundary - 1); 464 1.1 mrg } 465 1.1 mrg 466 1.9 mrg inline constexpr bool IsAligned(uptr a, uptr alignment) { 467 1.1 mrg return (a & (alignment - 1)) == 0; 468 1.1 mrg } 469 1.1 mrg 470 1.9 mrg inline uptr Log2(uptr x) { 471 1.1 mrg CHECK(IsPowerOfTwo(x)); 472 1.3 mrg return LeastSignificantSetBitIndex(x); 473 1.1 mrg } 474 1.1 mrg 475 1.1 mrg // Don't use std::min, std::max or std::swap, to minimize dependency 476 1.1 mrg // on libstdc++. 477 1.9 mrg template <class T> 478 1.9 mrg constexpr T Min(T a, T b) { 479 1.9 mrg return a < b ? a : b; 480 1.9 mrg } 481 1.9 mrg template <class T> 482 1.9 mrg constexpr T Max(T a, T b) { 483 1.9 mrg return a > b ? a : b; 484 1.9 mrg } 485 1.10 mrg template <class T> 486 1.10 mrg constexpr T Abs(T a) { 487 1.10 mrg return a < 0 ? 
-a : a; 488 1.10 mrg } 489 1.1 mrg template<class T> void Swap(T& a, T& b) { 490 1.1 mrg T tmp = a; 491 1.1 mrg a = b; 492 1.1 mrg b = tmp; 493 1.1 mrg } 494 1.1 mrg 495 1.1 mrg // Char handling 496 1.9 mrg inline bool IsSpace(int c) { 497 1.1 mrg return (c == ' ') || (c == '\n') || (c == '\t') || 498 1.1 mrg (c == '\f') || (c == '\r') || (c == '\v'); 499 1.1 mrg } 500 1.9 mrg inline bool IsDigit(int c) { 501 1.1 mrg return (c >= '0') && (c <= '9'); 502 1.1 mrg } 503 1.9 mrg inline int ToLower(int c) { 504 1.1 mrg return (c >= 'A' && c <= 'Z') ? (c + 'a' - 'A') : c; 505 1.1 mrg } 506 1.1 mrg 507 1.2 christos // A low-level vector based on mmap. May incur a significant memory overhead for 508 1.2 christos // small vectors. 509 1.2 christos // WARNING: The current implementation supports only POD types. 510 1.2 christos template<typename T> 511 1.3 mrg class InternalMmapVectorNoCtor { 512 1.2 christos public: 513 1.9 mrg using value_type = T; 514 1.3 mrg void Initialize(uptr initial_capacity) { 515 1.6 mrg capacity_bytes_ = 0; 516 1.2 christos size_ = 0; 517 1.6 mrg data_ = 0; 518 1.6 mrg reserve(initial_capacity); 519 1.2 christos } 520 1.6 mrg void Destroy() { UnmapOrDie(data_, capacity_bytes_); } 521 1.2 christos T &operator[](uptr i) { 522 1.2 christos CHECK_LT(i, size_); 523 1.2 christos return data_[i]; 524 1.2 christos } 525 1.2 christos const T &operator[](uptr i) const { 526 1.2 christos CHECK_LT(i, size_); 527 1.2 christos return data_[i]; 528 1.2 christos } 529 1.2 christos void push_back(const T &element) { 530 1.10 mrg if (UNLIKELY(size_ >= capacity())) { 531 1.10 mrg CHECK_EQ(size_, capacity()); 532 1.2 christos uptr new_capacity = RoundUpToPowerOfTwo(size_ + 1); 533 1.6 mrg Realloc(new_capacity); 534 1.2 christos } 535 1.4 mrg internal_memcpy(&data_[size_++], &element, sizeof(T)); 536 1.2 christos } 537 1.2 christos T &back() { 538 1.2 christos CHECK_GT(size_, 0); 539 1.2 christos return data_[size_ - 1]; 540 1.2 christos } 541 1.2 christos void 
pop_back() { 542 1.2 christos CHECK_GT(size_, 0); 543 1.2 christos size_--; 544 1.2 christos } 545 1.2 christos uptr size() const { 546 1.2 christos return size_; 547 1.2 christos } 548 1.2 christos const T *data() const { 549 1.2 christos return data_; 550 1.2 christos } 551 1.3 mrg T *data() { 552 1.3 mrg return data_; 553 1.3 mrg } 554 1.6 mrg uptr capacity() const { return capacity_bytes_ / sizeof(T); } 555 1.6 mrg void reserve(uptr new_size) { 556 1.6 mrg // Never downsize internal buffer. 557 1.6 mrg if (new_size > capacity()) 558 1.6 mrg Realloc(new_size); 559 1.2 christos } 560 1.5 mrg void resize(uptr new_size) { 561 1.5 mrg if (new_size > size_) { 562 1.6 mrg reserve(new_size); 563 1.5 mrg internal_memset(&data_[size_], 0, sizeof(T) * (new_size - size_)); 564 1.5 mrg } 565 1.5 mrg size_ = new_size; 566 1.5 mrg } 567 1.2 christos 568 1.2 christos void clear() { size_ = 0; } 569 1.3 mrg bool empty() const { return size() == 0; } 570 1.2 christos 571 1.4 mrg const T *begin() const { 572 1.4 mrg return data(); 573 1.4 mrg } 574 1.4 mrg T *begin() { 575 1.4 mrg return data(); 576 1.4 mrg } 577 1.4 mrg const T *end() const { 578 1.4 mrg return data() + size(); 579 1.4 mrg } 580 1.4 mrg T *end() { 581 1.4 mrg return data() + size(); 582 1.4 mrg } 583 1.4 mrg 584 1.6 mrg void swap(InternalMmapVectorNoCtor &other) { 585 1.6 mrg Swap(data_, other.data_); 586 1.6 mrg Swap(capacity_bytes_, other.capacity_bytes_); 587 1.6 mrg Swap(size_, other.size_); 588 1.6 mrg } 589 1.6 mrg 590 1.2 christos private: 591 1.10 mrg NOINLINE void Realloc(uptr new_capacity) { 592 1.2 christos CHECK_GT(new_capacity, 0); 593 1.2 christos CHECK_LE(size_, new_capacity); 594 1.6 mrg uptr new_capacity_bytes = 595 1.6 mrg RoundUpTo(new_capacity * sizeof(T), GetPageSizeCached()); 596 1.6 mrg T *new_data = (T *)MmapOrDie(new_capacity_bytes, "InternalMmapVector"); 597 1.2 christos internal_memcpy(new_data, data_, size_ * sizeof(T)); 598 1.6 mrg UnmapOrDie(data_, capacity_bytes_); 599 1.2 christos 
data_ = new_data; 600 1.6 mrg capacity_bytes_ = new_capacity_bytes; 601 1.2 christos } 602 1.2 christos 603 1.2 christos T *data_; 604 1.6 mrg uptr capacity_bytes_; 605 1.2 christos uptr size_; 606 1.2 christos }; 607 1.2 christos 608 1.6 mrg template <typename T> 609 1.6 mrg bool operator==(const InternalMmapVectorNoCtor<T> &lhs, 610 1.6 mrg const InternalMmapVectorNoCtor<T> &rhs) { 611 1.6 mrg if (lhs.size() != rhs.size()) return false; 612 1.6 mrg return internal_memcmp(lhs.data(), rhs.data(), lhs.size() * sizeof(T)) == 0; 613 1.6 mrg } 614 1.6 mrg 615 1.6 mrg template <typename T> 616 1.6 mrg bool operator!=(const InternalMmapVectorNoCtor<T> &lhs, 617 1.6 mrg const InternalMmapVectorNoCtor<T> &rhs) { 618 1.6 mrg return !(lhs == rhs); 619 1.6 mrg } 620 1.6 mrg 621 1.3 mrg template<typename T> 622 1.3 mrg class InternalMmapVector : public InternalMmapVectorNoCtor<T> { 623 1.3 mrg public: 624 1.9 mrg InternalMmapVector() { InternalMmapVectorNoCtor<T>::Initialize(0); } 625 1.6 mrg explicit InternalMmapVector(uptr cnt) { 626 1.6 mrg InternalMmapVectorNoCtor<T>::Initialize(cnt); 627 1.6 mrg this->resize(cnt); 628 1.3 mrg } 629 1.3 mrg ~InternalMmapVector() { InternalMmapVectorNoCtor<T>::Destroy(); } 630 1.6 mrg // Disallow copies and moves. 631 1.6 mrg InternalMmapVector(const InternalMmapVector &) = delete; 632 1.6 mrg InternalMmapVector &operator=(const InternalMmapVector &) = delete; 633 1.6 mrg InternalMmapVector(InternalMmapVector &&) = delete; 634 1.6 mrg InternalMmapVector &operator=(InternalMmapVector &&) = delete; 635 1.6 mrg }; 636 1.6 mrg 637 1.9 mrg class InternalScopedString { 638 1.6 mrg public: 639 1.9 mrg InternalScopedString() : buffer_(1) { buffer_[0] = '\0'; } 640 1.9 mrg 641 1.9 mrg uptr length() const { return buffer_.size() - 1; } 642 1.6 mrg void clear() { 643 1.9 mrg buffer_.resize(1); 644 1.9 mrg buffer_[0] = '\0'; 645 1.6 mrg } 646 1.10 mrg void Append(const char *str); 647 1.10 mrg void AppendF(const char *format, ...) 
FORMAT(2, 3); 648 1.9 mrg const char *data() const { return buffer_.data(); } 649 1.9 mrg char *data() { return buffer_.data(); } 650 1.6 mrg 651 1.6 mrg private: 652 1.9 mrg InternalMmapVector<char> buffer_; 653 1.6 mrg }; 654 1.6 mrg 655 1.6 mrg template <class T> 656 1.6 mrg struct CompareLess { 657 1.6 mrg bool operator()(const T &a, const T &b) const { return a < b; } 658 1.3 mrg }; 659 1.3 mrg 660 1.2 christos // HeapSort for arrays and InternalMmapVector. 661 1.6 mrg template <class T, class Compare = CompareLess<T>> 662 1.6 mrg void Sort(T *v, uptr size, Compare comp = {}) { 663 1.2 christos if (size < 2) 664 1.2 christos return; 665 1.2 christos // Stage 1: insert elements to the heap. 666 1.2 christos for (uptr i = 1; i < size; i++) { 667 1.2 christos uptr j, p; 668 1.2 christos for (j = i; j > 0; j = p) { 669 1.2 christos p = (j - 1) / 2; 670 1.6 mrg if (comp(v[p], v[j])) 671 1.6 mrg Swap(v[j], v[p]); 672 1.2 christos else 673 1.2 christos break; 674 1.2 christos } 675 1.2 christos } 676 1.2 christos // Stage 2: swap largest element with the last one, 677 1.2 christos // and sink the new top. 678 1.2 christos for (uptr i = size - 1; i > 0; i--) { 679 1.6 mrg Swap(v[0], v[i]); 680 1.2 christos uptr j, max_ind; 681 1.2 christos for (j = 0; j < i; j = max_ind) { 682 1.2 christos uptr left = 2 * j + 1; 683 1.2 christos uptr right = 2 * j + 2; 684 1.2 christos max_ind = j; 685 1.6 mrg if (left < i && comp(v[max_ind], v[left])) 686 1.2 christos max_ind = left; 687 1.6 mrg if (right < i && comp(v[max_ind], v[right])) 688 1.2 christos max_ind = right; 689 1.2 christos if (max_ind != j) 690 1.6 mrg Swap(v[j], v[max_ind]); 691 1.2 christos else 692 1.2 christos break; 693 1.2 christos } 694 1.2 christos } 695 1.2 christos } 696 1.2 christos 697 1.5 mrg // Works like std::lower_bound: finds the first element that is not less 698 1.5 mrg // than the val. 
699 1.10 mrg template <class Container, class T, 700 1.9 mrg class Compare = CompareLess<typename Container::value_type>> 701 1.10 mrg uptr InternalLowerBound(const Container &v, const T &val, Compare comp = {}) { 702 1.9 mrg uptr first = 0; 703 1.9 mrg uptr last = v.size(); 704 1.5 mrg while (last > first) { 705 1.2 christos uptr mid = (first + last) / 2; 706 1.2 christos if (comp(v[mid], val)) 707 1.2 christos first = mid + 1; 708 1.2 christos else 709 1.5 mrg last = mid; 710 1.5 mrg } 711 1.5 mrg return first; 712 1.5 mrg } 713 1.5 mrg 714 1.5 mrg enum ModuleArch { 715 1.5 mrg kModuleArchUnknown, 716 1.5 mrg kModuleArchI386, 717 1.5 mrg kModuleArchX86_64, 718 1.5 mrg kModuleArchX86_64H, 719 1.5 mrg kModuleArchARMV6, 720 1.5 mrg kModuleArchARMV7, 721 1.5 mrg kModuleArchARMV7S, 722 1.5 mrg kModuleArchARMV7K, 723 1.9 mrg kModuleArchARM64, 724 1.10 mrg kModuleArchLoongArch64, 725 1.9 mrg kModuleArchRISCV64, 726 1.9 mrg kModuleArchHexagon 727 1.5 mrg }; 728 1.5 mrg 729 1.9 mrg // Sorts and removes duplicates from the container. 730 1.9 mrg template <class Container, 731 1.9 mrg class Compare = CompareLess<typename Container::value_type>> 732 1.9 mrg void SortAndDedup(Container &v, Compare comp = {}) { 733 1.9 mrg Sort(v.data(), v.size(), comp); 734 1.9 mrg uptr size = v.size(); 735 1.9 mrg if (size < 2) 736 1.9 mrg return; 737 1.9 mrg uptr last = 0; 738 1.9 mrg for (uptr i = 1; i < size; ++i) { 739 1.9 mrg if (comp(v[last], v[i])) { 740 1.9 mrg ++last; 741 1.9 mrg if (last != i) 742 1.9 mrg v[last] = v[i]; 743 1.9 mrg } else { 744 1.9 mrg CHECK(!comp(v[i], v[last])); 745 1.9 mrg } 746 1.9 mrg } 747 1.9 mrg v.resize(last + 1); 748 1.9 mrg } 749 1.9 mrg 750 1.9 mrg constexpr uptr kDefaultFileMaxSize = FIRST_32_SECOND_64(1 << 26, 1 << 28); 751 1.9 mrg 752 1.6 mrg // Opens the file 'file_name" and reads up to 'max_len' bytes. 753 1.6 mrg // The resulting buffer is mmaped and stored in '*buff'. 754 1.6 mrg // Returns true if file was successfully opened and read. 
bool ReadFileToVector(const char *file_name,
                      InternalMmapVectorNoCtor<char> *buff,
                      uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Opens the file 'file_name' and reads up to 'max_len' bytes.
// This function is less I/O efficient than ReadFileToVector as it may reread
// file multiple times to avoid mmap during read attempts. It's used to read
// procmap, so short reads with mmap in between can produce inconsistent result.
// The resulting buffer is mmaped and stored in '*buff'.
// The size of the mmaped region is stored in '*buff_size'.
// The total number of read bytes is stored in '*read_len'.
// Returns true if file was successfully opened and read.
bool ReadFileToBuffer(const char *file_name, char **buff, uptr *buff_size,
                      uptr *read_len, uptr max_len = kDefaultFileMaxSize,
                      error_t *errno_p = nullptr);

// Presumably resolves 'pc' to the containing module's name (copied into
// 'module_name', at most 'module_name_len' bytes) and the offset of 'pc'
// within that module; return-value semantics are not visible in this header
// — see the definition before relying on them.
int GetModuleAndOffsetForPc(uptr pc, char *module_name, uptr module_name_len,
                            uptr *pc_offset);

// When adding a new architecture, don't forget to also update
// script/asan_symbolize.py and sanitizer_symbolizer_libcdep.cpp.
777 1.5 mrg inline const char *ModuleArchToString(ModuleArch arch) { 778 1.5 mrg switch (arch) { 779 1.5 mrg case kModuleArchUnknown: 780 1.5 mrg return ""; 781 1.5 mrg case kModuleArchI386: 782 1.5 mrg return "i386"; 783 1.5 mrg case kModuleArchX86_64: 784 1.5 mrg return "x86_64"; 785 1.5 mrg case kModuleArchX86_64H: 786 1.5 mrg return "x86_64h"; 787 1.5 mrg case kModuleArchARMV6: 788 1.5 mrg return "armv6"; 789 1.5 mrg case kModuleArchARMV7: 790 1.5 mrg return "armv7"; 791 1.5 mrg case kModuleArchARMV7S: 792 1.5 mrg return "armv7s"; 793 1.5 mrg case kModuleArchARMV7K: 794 1.5 mrg return "armv7k"; 795 1.5 mrg case kModuleArchARM64: 796 1.5 mrg return "arm64"; 797 1.10 mrg case kModuleArchLoongArch64: 798 1.10 mrg return "loongarch64"; 799 1.9 mrg case kModuleArchRISCV64: 800 1.9 mrg return "riscv64"; 801 1.9 mrg case kModuleArchHexagon: 802 1.9 mrg return "hexagon"; 803 1.2 christos } 804 1.5 mrg CHECK(0 && "Invalid module arch"); 805 1.5 mrg return ""; 806 1.2 christos } 807 1.2 christos 808 1.10 mrg #if SANITIZER_APPLE 809 1.5 mrg const uptr kModuleUUIDSize = 16; 810 1.10 mrg #else 811 1.10 mrg const uptr kModuleUUIDSize = 32; 812 1.10 mrg #endif 813 1.5 mrg const uptr kMaxSegName = 16; 814 1.5 mrg 815 1.2 christos // Represents a binary loaded into virtual memory (e.g. this can be an 816 1.2 christos // executable or a shared object). 
class LoadedModule {
 public:
  LoadedModule()
      : full_name_(nullptr),
        base_address_(0),
        max_address_(0),
        arch_(kModuleArchUnknown),
        uuid_size_(0),
        instrumented_(false) {
    internal_memset(uuid_, 0, kModuleUUIDSize);
    ranges_.clear();
  }
  void set(const char *module_name, uptr base_address);
  void set(const char *module_name, uptr base_address, ModuleArch arch,
           u8 uuid[kModuleUUIDSize], bool instrumented);
  void setUuid(const char *uuid, uptr size);
  void clear();
  void addAddressRange(uptr beg, uptr end, bool executable, bool writable,
                       const char *name = nullptr);
  bool containsAddress(uptr address) const;

  const char *full_name() const { return full_name_; }
  uptr base_address() const { return base_address_; }
  uptr max_address() const { return max_address_; }
  ModuleArch arch() const { return arch_; }
  const u8 *uuid() const { return uuid_; }
  uptr uuid_size() const { return uuid_size_; }
  bool instrumented() const { return instrumented_; }

  // One mapped segment of the module; linked into an intrusive list.
  struct AddressRange {
    AddressRange *next;
    uptr beg;
    uptr end;
    bool executable;
    bool writable;
    char name[kMaxSegName];

    AddressRange(uptr beg, uptr end, bool executable, bool writable,
                 const char *name)
        : next(nullptr),
          beg(beg),
          end(end),
          executable(executable),
          writable(writable) {
      // NOTE(review): strncpy-style copy; if 'name' is kMaxSegName bytes or
      // longer the buffer is presumably left unterminated — confirm that all
      // readers bound their accesses by kMaxSegName.
      internal_strncpy(this->name, (name ? name : ""), ARRAY_SIZE(this->name));
    }
  };

  const IntrusiveList<AddressRange> &ranges() const { return ranges_; }

 private:
  char *full_name_;  // Owned.
  uptr base_address_;
  uptr max_address_;
  ModuleArch arch_;
  uptr uuid_size_;
  u8 uuid_[kModuleUUIDSize];
  bool instrumented_;
  IntrusiveList<AddressRange> ranges_;
};

// List of LoadedModules. OS-dependent implementation is responsible for
// filling this information.
class ListOfModules {
 public:
  ListOfModules() : initialized(false) {}
  ~ListOfModules() { clear(); }
  void init();
  void fallbackInit();  // Uses fallback init if available, otherwise clears
  const LoadedModule *begin() const { return modules_.begin(); }
  LoadedModule *begin() { return modules_.begin(); }
  const LoadedModule *end() const { return modules_.end(); }
  LoadedModule *end() { return modules_.end(); }
  uptr size() const { return modules_.size(); }
  const LoadedModule &operator[](uptr i) const {
    CHECK_LT(i, modules_.size());
    return modules_[i];
  }

 private:
  // Releases per-module resources, then empties the vector.
  void clear() {
    for (auto &module : modules_) module.clear();
    modules_.clear();
  }
  // First use allocates storage; later uses recycle it via clear().
  void clearOrInit() {
    initialized ? clear() : modules_.Initialize(kInitialCapacity);
    initialized = true;
  }

  InternalMmapVectorNoCtor<LoadedModule> modules_;
  // We rarely have more than 16K loaded modules.
  static const uptr kInitialCapacity = 1 << 14;
  bool initialized;
};

// Callback type for iterating over a set of memory ranges.
typedef void (*RangeIteratorCallback)(uptr begin, uptr end, void *arg);

enum AndroidApiLevel {
  ANDROID_NOT_ANDROID = 0,
  ANDROID_KITKAT = 19,
  ANDROID_LOLLIPOP_MR1 = 22,
  ANDROID_POST_LOLLIPOP = 23
};

void WriteToSyslog(const char *buffer);

// True only for MSVC (not clang-cl) builds targeting Windows.
#if defined(SANITIZER_WINDOWS) && defined(_MSC_VER) && !defined(__clang__)
#define SANITIZER_WIN_TRACE 1
#else
#define SANITIZER_WIN_TRACE 0
#endif

#if SANITIZER_APPLE || SANITIZER_WIN_TRACE
void LogFullErrorReport(const char *buffer);
#else
inline void LogFullErrorReport(const char *buffer) {}
#endif

#if SANITIZER_LINUX || SANITIZER_APPLE
void WriteOneLineToSyslog(const char *s);
void LogMessageOnPrintf(const char *str);
#else
inline void WriteOneLineToSyslog(const char *s) {}
inline void LogMessageOnPrintf(const char *str) {}
#endif

#if SANITIZER_LINUX || SANITIZER_WIN_TRACE
// Initialize Android logging. Any writes before this are silently lost.
void AndroidLogInit();
void SetAbortMessage(const char *);
#else
inline void AndroidLogInit() {}
// FIXME: MacOS implementation could use CRSetCrashLogMessage.
inline void SetAbortMessage(const char *) {}
#endif

#if SANITIZER_ANDROID
void SanitizerInitializeUnwinder();
AndroidApiLevel AndroidGetApiLevel();
#else
// NOTE(review): the Android-side declaration of AndroidLogWrite is not
// visible in this chunk; only the non-Android no-op stub is defined here.
inline void AndroidLogWrite(const char *buffer_unused) {}
inline void SanitizerInitializeUnwinder() {}
inline AndroidApiLevel AndroidGetApiLevel() { return ANDROID_NOT_ANDROID; }
#endif

// Number of rounds of TSD destructor calls to expect at thread exit.
// 4 matches the POSIX minimum for PTHREAD_DESTRUCTOR_ITERATIONS.
inline uptr GetPthreadDestructorIterations() {
#if SANITIZER_ANDROID
  return (AndroidGetApiLevel() == ANDROID_LOLLIPOP_MR1) ? 8 : 4;
#elif SANITIZER_POSIX
  return 4;
#else
  // Unused on Windows.
  return 0;
#endif
}

void *internal_start_thread(void *(*func)(void*), void *arg);
void internal_join_thread(void *th);
void MaybeStartBackgroudThread();

// Make the compiler think that something is going on there.
// Use this inside a loop that looks like memset/memcpy/etc to prevent the
// compiler from recognising it and turning it into an actual call to
// memset/memcpy/etc.
static inline void SanitizerBreakOptimization(void *arg) {
#if defined(_MSC_VER) && !defined(__clang__)
  _ReadWriteBarrier();
#else
  // Empty asm with a "memory" clobber: tells the compiler 'arg' is used and
  // memory may change, without emitting any instructions.
  __asm__ __volatile__("" : : "r" (arg) : "memory");
#endif
}

// Platform-independent view of the state captured at signal delivery time.
struct SignalContext {
  void *siginfo;  // Not owned; see the two-argument constructor.
  void *context;  // Not owned.
  uptr addr;      // Faulting address (may be 0, see is_true_faulting_addr).
  uptr pc;
  uptr sp;
  uptr bp;
  bool is_memory_access;
  enum WriteFlag { Unknown, Read, Write } write_flag;

  // In some cases the kernel cannot provide the true faulting address; `addr`
  // will be zero then. This field allows to distinguish between these cases
  // and dereferences of null.
  bool is_true_faulting_addr;

  // VS2013 doesn't implement unrestricted unions, so we need a trivial default
  // constructor
  SignalContext() = default;

  // Creates signal context in a platform-specific manner.
  // SignalContext is going to keep pointers to siginfo and context without
  // owning them.
  // Members are initialized in declaration order, so siginfo/context are set
  // before GetAddress()/IsMemoryAccess()/etc. read them.
  SignalContext(void *siginfo, void *context)
      : siginfo(siginfo),
        context(context),
        addr(GetAddress()),
        is_memory_access(IsMemoryAccess()),
        write_flag(GetWriteFlag()),
        is_true_faulting_addr(IsTrueFaultingAddress()) {
    InitPcSpBp();
  }

  static void DumpAllRegisters(void *context);

  // Type of signal e.g. SIGSEGV or EXCEPTION_ACCESS_VIOLATION.
  int GetType() const;

  // String description of the signal.
  const char *Describe() const;

  // Returns true if signal is stack overflow.
  bool IsStackOverflow() const;

 private:
  // Platform specific initialization.
  void InitPcSpBp();
  uptr GetAddress() const;
  WriteFlag GetWriteFlag() const;
  bool IsMemoryAccess() const;
  bool IsTrueFaultingAddress() const;
};

void InitializePlatformEarly();

// RAII helper that invokes the stored callable on scope exit.
template <typename Fn>
class RunOnDestruction {
 public:
  explicit RunOnDestruction(Fn fn) : fn_(fn) {}
  ~RunOnDestruction() { fn_(); }

 private:
  Fn fn_;
};

// A simple scope guard. Usage:
// auto cleanup = at_scope_exit([]{ do_cleanup; });
template <typename Fn>
RunOnDestruction<Fn> at_scope_exit(Fn fn) {
  return RunOnDestruction<Fn>(fn);
}

// Linux on 64-bit s390 had a nasty bug that crashes the whole machine
// if a process uses virtual memory over 4TB (as many sanitizers like
// to do). This function will abort the process if running on a kernel
// that looks vulnerable.
#if SANITIZER_LINUX && SANITIZER_S390_64
void AvoidCVE_2016_2143();
#else
inline void AvoidCVE_2016_2143() {}
#endif

// Aggregate statistics reported by the stack depot.
struct StackDepotStats {
  uptr n_uniq_ids;
  uptr allocated;
};

// The default value for allocator_release_to_os_interval_ms common flag to
// indicate that sanitizer allocator should not attempt to release memory to OS.
const s32 kReleaseToOSIntervalNever = -1;

void CheckNoDeepBind(const char *filename, int flag);

// Returns the requested amount of random data (up to 256 bytes) that can then
// be used to seed a PRNG. Defaults to blocking like the underlying syscall.
bool GetRandom(void *buffer, uptr length, bool blocking = true);

// Returns the number of logical processors on the system.
u32 GetNumberOfCPUs();
extern u32 NumberOfCPUsCached;
// Cached wrapper around GetNumberOfCPUs(): queries the OS only on first use.
// NOTE(review): the check-then-set below is unsynchronized; concurrent first
// calls may each invoke GetNumberOfCPUs(), which looks benign as long as the
// result is stable — confirm callers do not rely on stronger guarantees.
inline u32 GetNumberOfCPUsCached() {
  if (!NumberOfCPUsCached)
    NumberOfCPUsCached = GetNumberOfCPUs();
  return NumberOfCPUsCached;
}

}  // namespace __sanitizer

// Placement-style operator new that allocates storage from the given
// LowLevelAllocator instead of the global heap.
inline void *operator new(__sanitizer::operator_new_size_type size,
                          __sanitizer::LowLevelAllocator &alloc) {
  return alloc.Allocate(size);
}

#endif  // SANITIZER_COMMON_H