//===-- asan_report.cc ----------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of AddressSanitizer, an address sanity checker.
//
// This file contains error reporting code.
//===----------------------------------------------------------------------===//

#include "asan_errors.h"
#include "asan_flags.h"
#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_report.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"

namespace __asan {

// -------------------- User-specified callbacks ----------------- {{{1
static void (*error_report_callback)(const char*);
static char *error_message_buffer = nullptr;
static uptr error_message_buffer_pos = 0;
static BlockingMutex error_message_buf_mutex(LINKER_INITIALIZED);
static const unsigned kAsanBuggyPcPoolSize = 25;
static __sanitizer::atomic_uintptr_t AsanBuggyPcPool[kAsanBuggyPcPoolSize];

void AppendToErrorMessageBuffer(const char *buffer) {
  BlockingMutexLock l(&error_message_buf_mutex);
  if (!error_message_buffer) {
    error_message_buffer =
        (char*)MmapOrDieQuietly(kErrorMessageBufferSize, __func__);
    error_message_buffer_pos = 0;
  }
  uptr length = internal_strlen(buffer);
  RAW_CHECK(kErrorMessageBufferSize >= error_message_buffer_pos);
  uptr remaining = kErrorMessageBufferSize - error_message_buffer_pos;
  internal_strncpy(error_message_buffer + error_message_buffer_pos,
                   buffer, remaining);
  error_message_buffer[kErrorMessageBufferSize - 1] = '\0';
  // FIXME: reallocate the buffer instead of truncating the message.
  error_message_buffer_pos += Min(remaining, length);
}

// ---------------------- Helper functions ----------------------- {{{1

void PrintMemoryByte(InternalScopedString *str, const char *before, u8 byte,
                     bool in_shadow, const char *after) {
  Decorator d;
  str->append("%s%s%x%x%s%s", before,
              in_shadow ? d.ShadowByte(byte) : d.MemoryByte(), byte >> 4,
              byte & 15, d.Default(), after);
}

static void PrintZoneForPointer(uptr ptr, uptr zone_ptr,
                                const char *zone_name) {
  if (zone_ptr) {
    if (zone_name) {
      Printf("malloc_zone_from_ptr(%p) = %p, which is %s\n",
             ptr, zone_ptr, zone_name);
    } else {
      Printf("malloc_zone_from_ptr(%p) = %p, which doesn't have a name\n",
             ptr, zone_ptr);
    }
  } else {
    Printf("malloc_zone_from_ptr(%p) = 0\n", ptr);
  }
}

// ---------------------- Address Descriptions ------------------- {{{1

bool ParseFrameDescription(const char *frame_descr,
                           InternalMmapVector<StackVarDescr> *vars) {
  CHECK(frame_descr);
  const char *p;
  // This string is created by the compiler and has the following form:
  //   "n alloc_1 alloc_2 ... alloc_n"
  // where alloc_i looks like "offset size len ObjectName"
  // or "offset size len ObjectName:line".
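  // For example, a frame with an int "x" at frame offset 32 and a 64-byte
  // array "buf" at offset 48 could be described as
  //   "2 32 4 1 x 48 64 3 buf"
  // (illustrative values only; the exact layout is chosen by the compiler's
  // instrumentation pass).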
  uptr n_objects = (uptr)internal_simple_strtoll(frame_descr, &p, 10);
  if (n_objects == 0)
    return false;

  for (uptr i = 0; i < n_objects; i++) {
    uptr beg  = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr size = (uptr)internal_simple_strtoll(p, &p, 10);
    uptr len  = (uptr)internal_simple_strtoll(p, &p, 10);
    if (beg == 0 || size == 0 || *p != ' ') {
      return false;
    }
    p++;
    char *colon_pos = internal_strchr(p, ':');
    uptr line = 0;
    uptr name_len = len;
    if (colon_pos != nullptr && colon_pos < p + len) {
      name_len = colon_pos - p;
      line = (uptr)internal_simple_strtoll(colon_pos + 1, nullptr, 10);
    }
    StackVarDescr var = {beg, size, p, name_len, line};
    vars->push_back(var);
    p += len;
  }

  return true;
}

// -------------------- Different kinds of reports ----------------- {{{1

// Use ScopedInErrorReport to run common actions just before and
// immediately after printing an error report.
class ScopedInErrorReport {
 public:
  explicit ScopedInErrorReport(bool fatal = false)
      : halt_on_error_(fatal || flags()->halt_on_error) {
    // Make sure the registry and sanitizer report mutexes are locked while
    // we're printing an error report.
    // We can lock them only here to avoid self-deadlock in case of
    // recursive reports.
    asanThreadRegistry().Lock();
    Printf(
        "=================================================================\n");
  }

  ~ScopedInErrorReport() {
    if (halt_on_error_ && !__sanitizer_acquire_crash_state()) {
      asanThreadRegistry().Unlock();
      return;
    }
    ASAN_ON_ERROR();
    if (current_error_.IsValid()) current_error_.Print();

    // Make sure the current thread is announced.
    DescribeThread(GetCurrentThread());
    // We may want to grab this lock again when printing stats.
    asanThreadRegistry().Unlock();
    // Print memory stats.
    if (flags()->print_stats)
      __asan_print_accumulated_stats();

    if (common_flags()->print_cmdline)
      PrintCmdline();

    if (common_flags()->print_module_map == 2) PrintModuleMap();

    // Copy the message buffer so that we can start logging without holding a
    // lock that gets acquired during printing.
    InternalMmapVector<char> buffer_copy(kErrorMessageBufferSize);
    {
      BlockingMutexLock l(&error_message_buf_mutex);
      internal_memcpy(buffer_copy.data(),
                      error_message_buffer, kErrorMessageBufferSize);
    }

    LogFullErrorReport(buffer_copy.data());

    if (error_report_callback) {
      error_report_callback(buffer_copy.data());
    }

    if (halt_on_error_ && common_flags()->abort_on_error) {
      // On Android the message is truncated to 512 characters.
      // FIXME: implement "compact" error format, possibly without, or with
      // highly compressed stack traces?
      // FIXME: or just use the summary line as abort message?
      SetAbortMessage(buffer_copy.data());
    }

    // In halt_on_error = false mode, reset the current error object (before
    // unlocking).
    if (!halt_on_error_)
      internal_memset(&current_error_, 0, sizeof(current_error_));

    if (halt_on_error_) {
      Report("ABORTING\n");
      Die();
    }
  }

  void ReportError(const ErrorDescription &description) {
    // Can only report one error per ScopedInErrorReport.
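    // current_error_ is a process-wide static (so that the debugger can
    // locate it, see below); a second call would clobber an error that may
    // still be under inspection.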
    CHECK_EQ(current_error_.kind, kErrorKindInvalid);
    current_error_ = description;
  }

  static ErrorDescription &CurrentError() {
    return current_error_;
  }

 private:
  ScopedErrorReportLock error_report_lock_;
  // Error currently being reported. This enables the destructor to interact
  // with the debugger and point it to an error description.
  static ErrorDescription current_error_;
  bool halt_on_error_;
};

ErrorDescription ScopedInErrorReport::current_error_(LINKER_INITIALIZED);

void ReportDeadlySignal(const SignalContext &sig) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorDeadlySignal error(GetCurrentTidOrInvalid(), sig);
  in_report.ReportError(error);
}

void ReportDoubleFree(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorDoubleFree error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportNewDeleteTypeMismatch(uptr addr, uptr delete_size,
                                 uptr delete_alignment,
                                 BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorNewDeleteTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                                   delete_size, delete_alignment);
  in_report.ReportError(error);
}

void ReportFreeNotMalloced(uptr addr, BufferedStackTrace *free_stack) {
  ScopedInErrorReport in_report;
  ErrorFreeNotMalloced error(GetCurrentTidOrInvalid(), free_stack, addr);
  in_report.ReportError(error);
}

void ReportAllocTypeMismatch(uptr addr, BufferedStackTrace *free_stack,
                             AllocType alloc_type,
                             AllocType dealloc_type) {
  ScopedInErrorReport in_report;
  ErrorAllocTypeMismatch error(GetCurrentTidOrInvalid(), free_stack, addr,
                               alloc_type, dealloc_type);
  in_report.ReportError(error);
}

void ReportMallocUsableSizeNotOwned(uptr addr, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorMallocUsableSizeNotOwned error(GetCurrentTidOrInvalid(), stack, addr);
  in_report.ReportError(error);
}

void ReportSanitizerGetAllocatedSizeNotOwned(uptr addr,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorSanitizerGetAllocatedSizeNotOwned error(GetCurrentTidOrInvalid(), stack,
                                               addr);
  in_report.ReportError(error);
}

void ReportCallocOverflow(uptr count, uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorCallocOverflow error(GetCurrentTidOrInvalid(), stack, count, size);
  in_report.ReportError(error);
}

void ReportPvallocOverflow(uptr size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorPvallocOverflow error(GetCurrentTidOrInvalid(), stack, size);
  in_report.ReportError(error);
}

void ReportInvalidAllocationAlignment(uptr alignment,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAllocationAlignment error(GetCurrentTidOrInvalid(), stack,
                                        alignment);
  in_report.ReportError(error);
}

void ReportInvalidAlignedAllocAlignment(uptr size, uptr alignment,
                                        BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidAlignedAllocAlignment error(GetCurrentTidOrInvalid(), stack,
                                          size, alignment);
  in_report.ReportError(error);
}

void ReportInvalidPosixMemalignAlignment(uptr alignment,
                                         BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorInvalidPosixMemalignAlignment error(GetCurrentTidOrInvalid(), stack,
                                           alignment);
  in_report.ReportError(error);
}

void ReportAllocationSizeTooBig(uptr user_size, uptr total_size, uptr max_size,
                                BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorAllocationSizeTooBig error(GetCurrentTidOrInvalid(), stack, user_size,
                                  total_size, max_size);
  in_report.ReportError(error);
}

void ReportRssLimitExceeded(BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorRssLimitExceeded error(GetCurrentTidOrInvalid(), stack);
  in_report.ReportError(error);
}

void ReportOutOfMemory(uptr requested_size, BufferedStackTrace *stack) {
  ScopedInErrorReport in_report(/*fatal*/ true);
  ErrorOutOfMemory error(GetCurrentTidOrInvalid(), stack, requested_size);
  in_report.ReportError(error);
}

void ReportStringFunctionMemoryRangesOverlap(const char *function,
                                             const char *offset1, uptr length1,
                                             const char *offset2, uptr length2,
                                             BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionMemoryRangesOverlap error(
      GetCurrentTidOrInvalid(), stack, (uptr)offset1, length1, (uptr)offset2,
      length2, function);
  in_report.ReportError(error);
}

void ReportStringFunctionSizeOverflow(uptr offset, uptr size,
                                      BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorStringFunctionSizeOverflow error(GetCurrentTidOrInvalid(), stack, offset,
                                        size);
  in_report.ReportError(error);
}

void ReportBadParamsToAnnotateContiguousContainer(uptr beg, uptr end,
                                                  uptr old_mid, uptr new_mid,
                                                  BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  ErrorBadParamsToAnnotateContiguousContainer error(
      GetCurrentTidOrInvalid(), stack, beg, end, old_mid, new_mid);
  in_report.ReportError(error);
}

void ReportODRViolation(const __asan_global *g1, u32 stack_id1,
                        const __asan_global *g2, u32 stack_id2) {
  ScopedInErrorReport in_report;
  ErrorODRViolation error(GetCurrentTidOrInvalid(), g1, stack_id1, g2,
                          stack_id2);
  in_report.ReportError(error);
}

// ----------------------- CheckForInvalidPointerPair ----------- {{{1
static NOINLINE void ReportInvalidPointerPair(uptr pc, uptr bp, uptr sp,
                                              uptr a1, uptr a2) {
  ScopedInErrorReport in_report;
  ErrorInvalidPointerPair error(GetCurrentTidOrInvalid(), pc, bp, sp, a1, a2);
  in_report.ReportError(error);
}

static bool IsInvalidPointerPair(uptr a1, uptr a2) {
  if (a1 == a2)
    return false;

  // 2048 application bytes map to 256 bytes of shadow, which can be
  // iterated quite fast.
  static const uptr kMaxOffset = 2048;

  uptr left = a1 < a2 ? a1 : a2;
  uptr right = a1 < a2 ? a2 : a1;
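  // Fast path: for nearby pointers, scan the shadow of [left, right); a
  // poisoned byte in between means the two pointers cannot address the same
  // live object.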
  uptr offset = right - left;
  if (offset <= kMaxOffset)
    return __asan_region_is_poisoned(left, offset);

  AsanThread *t = GetCurrentThread();

  // check whether left is a stack memory pointer
  if (uptr shadow_offset1 = t->GetStackVariableShadowStart(left)) {
    uptr shadow_offset2 = t->GetStackVariableShadowStart(right);
    return shadow_offset2 == 0 || shadow_offset1 != shadow_offset2;
  }

  // check whether left is a heap memory address
  HeapAddressDescription hdesc1, hdesc2;
  if (GetHeapAddressInformation(left, 0, &hdesc1) &&
      hdesc1.chunk_access.access_type == kAccessTypeInside)
    return !GetHeapAddressInformation(right, 0, &hdesc2) ||
        hdesc2.chunk_access.access_type != kAccessTypeInside ||
        hdesc1.chunk_access.chunk_begin != hdesc2.chunk_access.chunk_begin;

  // check whether left is an address of a global variable
  GlobalAddressDescription gdesc1, gdesc2;
  if (GetGlobalAddressInformation(left, 0, &gdesc1))
    return !GetGlobalAddressInformation(right - 1, 0, &gdesc2) ||
        !gdesc1.PointsInsideTheSameVariable(gdesc2);

  if (t->GetStackVariableShadowStart(right) ||
      GetHeapAddressInformation(right, 0, &hdesc2) ||
      GetGlobalAddressInformation(right - 1, 0, &gdesc2))
    return true;

  // At this point we know nothing about both a1 and a2 addresses.
  return false;
}

static INLINE void CheckForInvalidPointerPair(void *p1, void *p2) {
  switch (flags()->detect_invalid_pointer_pairs) {
    case 0: return;
    case 1: if (p1 == nullptr || p2 == nullptr) return; break;
  }

  uptr a1 = reinterpret_cast<uptr>(p1);
  uptr a2 = reinterpret_cast<uptr>(p2);

  if (IsInvalidPointerPair(a1, a2)) {
    GET_CALLER_PC_BP_SP;
    ReportInvalidPointerPair(pc, bp, sp, a1, a2);
  }
}
// ----------------------- Mac-specific reports ----------------- {{{1

void ReportMacMzReallocUnknown(uptr addr, uptr zone_ptr, const char *zone_name,
                               BufferedStackTrace *stack) {
  ScopedInErrorReport in_report;
  Printf("mz_realloc(%p) -- attempting to realloc unallocated memory.\n"
         "This is an unrecoverable problem, exiting now.\n",
         addr);
  PrintZoneForPointer(addr, zone_ptr, zone_name);
  stack->Print();
  DescribeAddressIfHeap(addr);
}

// -------------- SuppressErrorReport -------------- {{{1
// Avoid duplicate error reports for the same PC in ASan recovery mode.
static bool SuppressErrorReport(uptr pc) {
  if (!common_flags()->suppress_equal_pcs) return false;
  for (unsigned i = 0; i < kAsanBuggyPcPoolSize; i++) {
    uptr cmp = atomic_load_relaxed(&AsanBuggyPcPool[i]);
    if (cmp == 0 && atomic_compare_exchange_strong(&AsanBuggyPcPool[i], &cmp,
                                                   pc, memory_order_relaxed))
      return false;
    if (cmp == pc) return true;
  }
  // The PC pool is full and does not contain this PC; give up.
  Die();
}

void ReportGenericError(uptr pc, uptr bp, uptr sp, uptr addr, bool is_write,
                        uptr access_size, u32 exp, bool fatal) {
  if (!fatal && SuppressErrorReport(pc)) return;
  ENABLE_FRAME_POINTER;

  // Optimization experiments.
  // The experiments can be used to evaluate potential optimizations that
  // remove instrumentation (assess false negatives). Instead of completely
  // removing some instrumentation, the compiler can emit special calls into
  // the runtime (e.g. __asan_report_exp_load1 instead of __asan_report_load1)
  // and pass a mask of experiments (exp).
  // The reaction to a non-zero value of exp is to be defined.
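  // For now the mask is only touched to silence the unused-parameter warning.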
  (void)exp;

  ScopedInErrorReport in_report(fatal);
  ErrorGeneric error(GetCurrentTidOrInvalid(), pc, bp, sp, addr, is_write,
                     access_size);
  in_report.ReportError(error);
}

}  // namespace __asan

// --------------------------- Interface --------------------- {{{1
using namespace __asan;  // NOLINT

void __asan_report_error(uptr pc, uptr bp, uptr sp, uptr addr, int is_write,
                         uptr access_size, u32 exp) {
  ENABLE_FRAME_POINTER;
  bool fatal = flags()->halt_on_error;
  ReportGenericError(pc, bp, sp, addr, is_write, access_size, exp, fatal);
}

void NOINLINE __asan_set_error_report_callback(void (*callback)(const char*)) {
  BlockingMutexLock l(&error_message_buf_mutex);
  error_report_callback = callback;
}

void __asan_describe_address(uptr addr) {
  // Thread registry must be locked while we're describing an address.
  asanThreadRegistry().Lock();
  PrintAddressDescription(addr, 1, "");
  asanThreadRegistry().Unlock();
}

int __asan_report_present() {
  return ScopedInErrorReport::CurrentError().kind != kErrorKindInvalid;
}

uptr __asan_get_report_pc() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.pc;
  return 0;
}

uptr __asan_get_report_bp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bp;
  return 0;
}

uptr __asan_get_report_sp() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.sp;
  return 0;
}

uptr __asan_get_report_address() {
  ErrorDescription &err = ScopedInErrorReport::CurrentError();
  if (err.kind == kErrorKindGeneric)
    return err.Generic.addr_description.Address();
  else if (err.kind == kErrorKindDoubleFree)
    return err.DoubleFree.addr_description.addr;
  return 0;
}

int __asan_get_report_access_type() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.is_write;
  return 0;
}

uptr __asan_get_report_access_size() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.access_size;
  return 0;
}

const char *__asan_get_report_description() {
  if (ScopedInErrorReport::CurrentError().kind == kErrorKindGeneric)
    return ScopedInErrorReport::CurrentError().Generic.bug_descr;
  return ScopedInErrorReport::CurrentError().Base.scariness.GetDescription();
}

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_sub(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
SANITIZER_INTERFACE_ATTRIBUTE
void __sanitizer_ptr_cmp(void *a, void *b) {
  CheckForInvalidPointerPair(a, b);
}
}  // extern "C"

// Provide a default implementation of __asan_on_error that does nothing
// and may be overridden by the user.
SANITIZER_INTERFACE_WEAK_DEF(void, __asan_on_error, void) {}
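// A minimal sketch of how a client might hook these entry points (assumes
// the declarations from <sanitizer/asan_interface.h>; the client function
// names are hypothetical):
//
//   extern "C" void __asan_on_error() {
//     // Runs before the report is printed; e.g. flush application logs.
//   }
//
//   void InstallAsanHooks() {
//     __asan_set_error_report_callback(+[](const char *report) {
//       // Receives the full report text once printing is complete.
//     });
//   }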