//=-- lsan_common.cc ------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality.
//
//===----------------------------------------------------------------------===//

#include "lsan_common.h"

#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_stoptheworld.h"
#include "sanitizer_common/sanitizer_suppressions.h"

#if CAN_SANITIZE_LEAKS
namespace __lsan {

// This mutex is used to prevent races between DoLeakCheck and IgnoreObject.
BlockingMutex global_mutex(LINKER_INITIALIZED);

THREADLOCAL int disable_counter;
bool DisabledInThisThread() { return disable_counter > 0; }

Flags lsan_flags;

static void InitializeFlags() {
  Flags *f = flags();
  // Default values.
  f->report_objects = false;
  f->resolution = 0;
  f->max_leaks = 0;
  f->exitcode = 23;
  f->suppressions = "";
  f->use_registers = true;
  f->use_globals = true;
  f->use_stacks = true;
  f->use_tls = true;
  f->use_unaligned = false;
  f->verbosity = 0;
  f->log_pointers = false;
  f->log_threads = false;

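  // LSAN_OPTIONS holds flag=value pairs, typically colon- or space-separated,
  // e.g. LSAN_OPTIONS="report_objects=1:max_leaks=3". Flags not mentioned
  // there keep the defaults set above.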
  const char *options = GetEnv("LSAN_OPTIONS");
  if (options) {
    ParseFlag(options, &f->use_registers, "use_registers");
    ParseFlag(options, &f->use_globals, "use_globals");
    ParseFlag(options, &f->use_stacks, "use_stacks");
    ParseFlag(options, &f->use_tls, "use_tls");
    ParseFlag(options, &f->use_unaligned, "use_unaligned");
    ParseFlag(options, &f->report_objects, "report_objects");
    ParseFlag(options, &f->resolution, "resolution");
    CHECK_GE(f->resolution, 0);
    ParseFlag(options, &f->max_leaks, "max_leaks");
    CHECK_GE(f->max_leaks, 0);
    ParseFlag(options, &f->verbosity, "verbosity");
    ParseFlag(options, &f->log_pointers, "log_pointers");
    ParseFlag(options, &f->log_threads, "log_threads");
    ParseFlag(options, &f->exitcode, "exitcode");
    ParseFlag(options, &f->suppressions, "suppressions");
  }
}

SuppressionContext *suppression_ctx;

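// Suppressions are read from the file named by the "suppressions" flag. Each
// line is a rule of the form "leak:<pattern>"; the pattern is matched against
// the function, source file and module names of the frames in a leak's
// allocation stack (see GetSuppressionForAddr below). For example:
//   leak:ExampleThirdPartyFunction
//   leak:libexample.so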
void InitializeSuppressions() {
  CHECK(!suppression_ctx);
  ALIGNED(64) static char placeholder_[sizeof(SuppressionContext)];
  suppression_ctx = new(placeholder_) SuppressionContext;
  char *suppressions_from_file;
  uptr buffer_size;
  if (ReadFileToBuffer(flags()->suppressions, &suppressions_from_file,
                       &buffer_size, 1 << 26 /* max_len */))
    suppression_ctx->Parse(suppressions_from_file);
  if (flags()->suppressions[0] && !buffer_size) {
    Printf("LeakSanitizer: failed to read suppressions file '%s'\n",
           flags()->suppressions);
    Die();
  }
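  // __lsan_default_suppressions() is a weak hook; the address check below is
  // true only if the client actually defined it. A client could supply
  // built-in rules with something like:
  //   extern "C" const char *__lsan_default_suppressions() {
  //     return "leak:libexample.so\n";
  //   }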
  if (&__lsan_default_suppressions)
    suppression_ctx->Parse(__lsan_default_suppressions());
}

void InitCommonLsan() {
  InitializeFlags();
  InitializeSuppressions();
  InitializePlatformSpecificModules();
}

static inline bool CanBeAHeapPointer(uptr p) {
  // Since our heap is located in mmap-ed memory, we can assume a sensible lower
  // bound on heap addresses.
  const uptr kMinAddress = 4 * 4096;
  if (p < kMinAddress) return false;
#ifdef __x86_64__
  // Accept only canonical form user-space addresses.
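  // (Canonical user-space addresses on x86-64 have bits 47-63 clear, so any
  // value with a bit set above bit 46 cannot point into our heap.)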
  return ((p >> 47) == 0);
#else
  return true;
#endif
}

// Scans the memory range, looking for byte patterns that point into allocator
// chunks. Marks those chunks with |tag| and adds them to |frontier|.
// There are two usage modes for this function: finding reachable or ignored
// chunks (|tag| = kReachable or kIgnored) and finding indirectly leaked chunks
// (|tag| = kIndirectlyLeaked). In the second case, there's no flood fill,
// so |frontier| = 0.
void ScanRangeForPointers(uptr begin, uptr end,
                          Frontier *frontier,
                          const char *region_type, ChunkTag tag) {
  const uptr alignment = flags()->pointer_alignment();
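  // pointer_alignment() (see Flags in lsan_common.h) is sizeof(uptr) by
  // default and 1 when the use_unaligned flag is set, in which case every
  // byte offset is probed.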
  if (flags()->log_pointers)
    Report("Scanning %s range %p-%p.\n", region_type, begin, end);
  uptr pp = begin;
  if (pp % alignment)
    pp = pp + alignment - pp % alignment;
  for (; pp + sizeof(void *) <= end; pp += alignment) {  // NOLINT
    void *p = *reinterpret_cast<void**>(pp);
    if (!CanBeAHeapPointer(reinterpret_cast<uptr>(p))) continue;
    uptr chunk = PointsIntoChunk(p);
    if (!chunk) continue;
    LsanMetadata m(chunk);
    // Reachable beats ignored beats leaked.
    if (m.tag() == kReachable) continue;
    if (m.tag() == kIgnored && tag != kReachable) continue;
    m.set_tag(tag);
    if (flags()->log_pointers)
      Report("%p: found %p pointing into chunk %p-%p of size %zu.\n", pp, p,
             chunk, chunk + m.requested_size(), m.requested_size());
    if (frontier)
      frontier->push_back(chunk);
  }
}

// Scans thread data (stacks and TLS) for heap pointers.
static void ProcessThreads(SuspendedThreadsList const &suspended_threads,
                           Frontier *frontier) {
  InternalScopedBuffer<uptr> registers(SuspendedThreadsList::RegisterCount());
  uptr registers_begin = reinterpret_cast<uptr>(registers.data());
  uptr registers_end = registers_begin + registers.size();
  for (uptr i = 0; i < suspended_threads.thread_count(); i++) {
    uptr os_id = static_cast<uptr>(suspended_threads.GetThreadID(i));
    if (flags()->log_threads) Report("Processing thread %d.\n", os_id);
    uptr stack_begin, stack_end, tls_begin, tls_end, cache_begin, cache_end;
    bool thread_found = GetThreadRangesLocked(os_id, &stack_begin, &stack_end,
                                              &tls_begin, &tls_end,
                                              &cache_begin, &cache_end);
    if (!thread_found) {
      // If a thread can't be found in the thread registry, it's probably in the
      // process of destruction. Log this event and move on.
      if (flags()->log_threads)
        Report("Thread %d not found in registry.\n", os_id);
      continue;
    }
    uptr sp;
    bool have_registers =
        (suspended_threads.GetRegistersAndSP(i, registers.data(), &sp) == 0);
    if (!have_registers) {
169 Report("Unable to get registers from thread %d.\n");
      // If unable to get SP, consider the entire stack to be reachable.
      sp = stack_begin;
    }

    if (flags()->use_registers && have_registers)
      ScanRangeForPointers(registers_begin, registers_end, frontier,
                           "REGISTERS", kReachable);

    if (flags()->use_stacks) {
      if (flags()->log_threads)
        Report("Stack at %p-%p, SP = %p.\n", stack_begin, stack_end, sp);
      if (sp < stack_begin || sp >= stack_end) {
        // SP is outside the recorded stack range (e.g. the thread is running a
        // signal handler on alternate stack). Again, consider the entire stack
        // range to be reachable.
        if (flags()->log_threads)
          Report("WARNING: stack pointer not in stack range.\n");
      } else {
        // Shrink the stack range to ignore out-of-scope values.
        stack_begin = sp;
      }
      ScanRangeForPointers(stack_begin, stack_end, frontier, "STACK",
                           kReachable);
    }

    if (flags()->use_tls) {
      if (flags()->log_threads) Report("TLS at %p-%p.\n", tls_begin, tls_end);
      if (cache_begin == cache_end) {
        ScanRangeForPointers(tls_begin, tls_end, frontier, "TLS", kReachable);
      } else {
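        // The [cache_begin, cache_end) range (the per-thread allocator cache)
        // is deliberately skipped below, presumably so that pointers retained
        // only by the allocator's internal free lists do not keep chunks
        // reachable.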
        // Because LSan should not be loaded with dlopen(), we can assume
        // that allocator cache will be part of static TLS image.
        CHECK_LE(tls_begin, cache_begin);
        CHECK_GE(tls_end, cache_end);
        if (tls_begin < cache_begin)
          ScanRangeForPointers(tls_begin, cache_begin, frontier, "TLS",
                               kReachable);
        if (tls_end > cache_end)
          ScanRangeForPointers(cache_end, tls_end, frontier, "TLS", kReachable);
      }
    }
  }
}

static void FloodFillTag(Frontier *frontier, ChunkTag tag) {
  while (frontier->size()) {
    uptr next_chunk = frontier->back();
    frontier->pop_back();
    LsanMetadata m(next_chunk);
    ScanRangeForPointers(next_chunk, next_chunk + m.requested_size(), frontier,
                         "HEAP", tag);
  }
}

// ForEachChunk callback. If the chunk is marked as leaked, marks all chunks
// which are reachable from it as indirectly leaked.
static void MarkIndirectlyLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() != kReachable) {
    ScanRangeForPointers(chunk, chunk + m.requested_size(),
                         /* frontier */ 0, "HEAP", kIndirectlyLeaked);
  }
}

// ForEachChunk callback. If chunk is marked as ignored, adds its address to
// frontier.
static void CollectIgnoredCb(uptr chunk, void *arg) {
  CHECK(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (m.allocated() && m.tag() == kIgnored)
    reinterpret_cast<Frontier *>(arg)->push_back(chunk);
}

// Sets the appropriate tag on each chunk.
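// The passes run in order: scan globals and thread data and flood-fill
// everything reachable from them as kReachable; do the same for
// platform-specific root regions; flood-fill from ignored chunks with
// kIgnored; finally, mark chunks reachable only from leaked chunks as
// kIndirectlyLeaked. Chunks never reached keep their initial kDirectlyLeaked
// tag (or kIgnored, if allocated while leak checking was disabled).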
static void ClassifyAllChunks(SuspendedThreadsList const &suspended_threads) {
  // Holds the flood fill frontier.
  Frontier frontier(GetPageSizeCached());

  if (flags()->use_globals)
    ProcessGlobalRegions(&frontier);
  ProcessThreads(suspended_threads, &frontier);
  FloodFillTag(&frontier, kReachable);
  // The check here is relatively expensive, so we do this in a separate flood
  // fill. That way we can skip the check for chunks that are reachable
  // otherwise.
  ProcessPlatformSpecificAllocations(&frontier);
  FloodFillTag(&frontier, kReachable);

  if (flags()->log_pointers)
    Report("Scanning ignored chunks.\n");
  CHECK_EQ(0, frontier.size());
  ForEachChunk(CollectIgnoredCb, &frontier);
  FloodFillTag(&frontier, kIgnored);

  // Iterate over leaked chunks and mark those that are reachable from other
  // leaked chunks.
  if (flags()->log_pointers)
    Report("Scanning leaked chunks.\n");
  ForEachChunk(MarkIndirectlyLeakedCb, 0 /* arg */);
}

static void PrintStackTraceById(u32 stack_trace_id) {
  CHECK(stack_trace_id);
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  StackTrace::PrintStack(trace, size, common_flags()->symbolize,
                         common_flags()->strip_path_prefix, 0);
}

// ForEachChunk callback. Aggregates unreachable chunks into a LeakReport.
static void CollectLeaksCb(uptr chunk, void *arg) {
  CHECK(arg);
  LeakReport *leak_report = reinterpret_cast<LeakReport *>(arg);
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
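    // A non-zero "resolution" truncates the stack trace to its top
    // |resolution| frames and re-interns it, so leaks whose traces agree on
    // those frames are merged into a single report entry.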
    uptr resolution = flags()->resolution;
    if (resolution > 0) {
      uptr size = 0;
      const uptr *trace = StackDepotGet(m.stack_trace_id(), &size);
      size = Min(size, resolution);
      leak_report->Add(StackDepotPut(trace, size), m.requested_size(), m.tag());
    } else {
      leak_report->Add(m.stack_trace_id(), m.requested_size(), m.tag());
    }
  }
}

// ForEachChunk callback. Prints addresses of unreachable chunks.
static void PrintLeakedCb(uptr chunk, void *arg) {
  chunk = GetUserBegin(chunk);
  LsanMetadata m(chunk);
  if (!m.allocated()) return;
  if (m.tag() == kDirectlyLeaked || m.tag() == kIndirectlyLeaked) {
    Printf("%s leaked %zu byte object at %p.\n",
           m.tag() == kDirectlyLeaked ? "Directly" : "Indirectly",
           m.requested_size(), chunk);
  }
}

static void PrintMatchedSuppressions() {
  InternalMmapVector<Suppression *> matched(1);
  suppression_ctx->GetMatched(&matched);
  if (!matched.size())
    return;
  const char *line = "-----------------------------------------------------";
  Printf("%s\n", line);
  Printf("Suppressions used:\n");
  Printf("  count      bytes template\n");
  for (uptr i = 0; i < matched.size(); i++)
    Printf("%7zu %10zu %s\n", static_cast<uptr>(matched[i]->hit_count),
           matched[i]->weight, matched[i]->templ);
  Printf("%s\n\n", line);
}

static void PrintLeaked() {
  Printf("\n");
  Printf("Reporting individual objects:\n");
  ForEachChunk(PrintLeakedCb, 0 /* arg */);
}

struct DoLeakCheckParam {
  bool success;
  LeakReport leak_report;
};

static void DoLeakCheckCallback(const SuspendedThreadsList &suspended_threads,
                                void *arg) {
  DoLeakCheckParam *param = reinterpret_cast<DoLeakCheckParam *>(arg);
  CHECK(param);
  CHECK(!param->success);
  CHECK(param->leak_report.IsEmpty());
  ClassifyAllChunks(suspended_threads);
  ForEachChunk(CollectLeaksCb, &param->leak_report);
  if (!param->leak_report.IsEmpty() && flags()->report_objects)
    PrintLeaked();
  param->success = true;
}

void DoLeakCheck() {
  BlockingMutexLock l(&global_mutex);
  static bool already_done;
  CHECK(!already_done);
  already_done = true;
  if (&__lsan_is_turned_off && __lsan_is_turned_off())
    return;

  DoLeakCheckParam param;
  param.success = false;
  LockThreadRegistry();
  LockAllocator();
  StopTheWorld(DoLeakCheckCallback, &param);
  UnlockAllocator();
  UnlockThreadRegistry();

  if (!param.success) {
    Report("LeakSanitizer has encountered a fatal error.\n");
    Die();
  }
  uptr have_unsuppressed = param.leak_report.ApplySuppressions();
  if (have_unsuppressed) {
    Printf("\n"
           "================================================================="
           "\n");
    Report("ERROR: LeakSanitizer: detected memory leaks\n");
    param.leak_report.PrintLargest(flags()->max_leaks);
  }
  if (have_unsuppressed || (flags()->verbosity >= 1)) {
    PrintMatchedSuppressions();
    param.leak_report.PrintSummary();
  }
  if (have_unsuppressed && flags()->exitcode)
    internal__exit(flags()->exitcode);
}

static Suppression *GetSuppressionForAddr(uptr addr) {
  static const uptr kMaxAddrFrames = 16;
  InternalScopedBuffer<AddressInfo> addr_frames(kMaxAddrFrames);
  for (uptr i = 0; i < kMaxAddrFrames; i++) new (&addr_frames[i]) AddressInfo();
  uptr addr_frames_num = __sanitizer::SymbolizeCode(addr, addr_frames.data(),
                                                    kMaxAddrFrames);
  for (uptr i = 0; i < addr_frames_num; i++) {
    Suppression* s;
    if (suppression_ctx->Match(addr_frames[i].function, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].file, SuppressionLeak, &s) ||
        suppression_ctx->Match(addr_frames[i].module, SuppressionLeak, &s))
      return s;
  }
  return 0;
}

static Suppression *GetSuppressionForStack(u32 stack_trace_id) {
  uptr size = 0;
  const uptr *trace = StackDepotGet(stack_trace_id, &size);
  for (uptr i = 0; i < size; i++) {
    Suppression *s =
        GetSuppressionForAddr(StackTrace::GetPreviousInstructionPc(trace[i]));
    if (s) return s;
  }
  return 0;
}

///// LeakReport implementation. /////

// A hard limit on the number of distinct leaks, to avoid quadratic complexity
// in LeakReport::Add(). We don't expect to ever see this many leaks in
// real-world applications.
// FIXME: Get rid of this limit by changing the implementation of LeakReport to
// use a hash table.
const uptr kMaxLeaksConsidered = 1000;

void LeakReport::Add(u32 stack_trace_id, uptr leaked_size, ChunkTag tag) {
  CHECK(tag == kDirectlyLeaked || tag == kIndirectlyLeaked);
  bool is_directly_leaked = (tag == kDirectlyLeaked);
  for (uptr i = 0; i < leaks_.size(); i++)
    if (leaks_[i].stack_trace_id == stack_trace_id &&
        leaks_[i].is_directly_leaked == is_directly_leaked) {
      leaks_[i].hit_count++;
      leaks_[i].total_size += leaked_size;
      return;
    }
  if (leaks_.size() == kMaxLeaksConsidered) return;
  Leak leak = { /* hit_count */ 1, leaked_size, stack_trace_id,
                is_directly_leaked, /* is_suppressed */ false };
  leaks_.push_back(leak);
}

static bool IsLarger(const Leak &leak1, const Leak &leak2) {
  return leak1.total_size > leak2.total_size;
}

void LeakReport::PrintLargest(uptr num_leaks_to_print) {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  Printf("\n");
  if (leaks_.size() == kMaxLeaksConsidered)
    Printf("Too many leaks! Only the first %zu leaks encountered will be "
           "reported.\n",
           kMaxLeaksConsidered);

  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++)
    if (!leaks_[i].is_suppressed) unsuppressed_count++;
  if (num_leaks_to_print > 0 && num_leaks_to_print < unsuppressed_count)
    Printf("The %zu largest leak(s):\n", num_leaks_to_print);
  InternalSort(&leaks_, leaks_.size(), IsLarger);
  uptr leaks_printed = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    Printf("%s leak of %zu byte(s) in %zu object(s) allocated from:\n",
           leaks_[i].is_directly_leaked ? "Direct" : "Indirect",
           leaks_[i].total_size, leaks_[i].hit_count);
    PrintStackTraceById(leaks_[i].stack_trace_id);
    Printf("\n");
    leaks_printed++;
    if (leaks_printed == num_leaks_to_print) break;
  }
  if (leaks_printed < unsuppressed_count) {
    uptr remaining = unsuppressed_count - leaks_printed;
    Printf("Omitting %zu more leak(s).\n", remaining);
  }
}

void LeakReport::PrintSummary() {
  CHECK(leaks_.size() <= kMaxLeaksConsidered);
  uptr bytes = 0, allocations = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    if (leaks_[i].is_suppressed) continue;
    bytes += leaks_[i].total_size;
    allocations += leaks_[i].hit_count;
  }
  Printf(
      "SUMMARY: LeakSanitizer: %zu byte(s) leaked in %zu allocation(s).\n\n",
      bytes, allocations);
}

uptr LeakReport::ApplySuppressions() {
  uptr unsuppressed_count = 0;
  for (uptr i = 0; i < leaks_.size(); i++) {
    Suppression *s = GetSuppressionForStack(leaks_[i].stack_trace_id);
    if (s) {
      s->weight += leaks_[i].total_size;
      s->hit_count += leaks_[i].hit_count;
      leaks_[i].is_suppressed = true;
    } else {
      unsuppressed_count++;
    }
  }
  return unsuppressed_count;
}
}  // namespace __lsan
#endif  // CAN_SANITIZE_LEAKS

using namespace __lsan;  // NOLINT

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_ignore_object(const void *p) {
#if CAN_SANITIZE_LEAKS
  // Cannot use PointsIntoChunk or LsanMetadata here, since the allocator is not
  // locked.
  BlockingMutexLock l(&global_mutex);
  IgnoreObjectResult res = IgnoreObjectLocked(p);
  if (res == kIgnoreObjectInvalid && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): no heap object found at %p\n", p);
  if (res == kIgnoreObjectAlreadyIgnored && flags()->verbosity >= 2)
    Report("__lsan_ignore_object(): "
           "heap object at %p is already being ignored\n", p);
  if (res == kIgnoreObjectSuccess && flags()->verbosity >= 3)
    Report("__lsan_ignore_object(): ignoring heap object at %p\n", p);
#endif  // CAN_SANITIZE_LEAKS
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_disable() {
#if CAN_SANITIZE_LEAKS
  __lsan::disable_counter++;
#endif
}

SANITIZER_INTERFACE_ATTRIBUTE
void __lsan_enable() {
#if CAN_SANITIZE_LEAKS
  if (!__lsan::disable_counter) {
    Report("Unmatched call to __lsan_enable().\n");
    Die();
  }
  __lsan::disable_counter--;
#endif
}

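// __lsan_is_turned_off() is a weak hook checked at the top of DoLeakCheck().
// When weak hooks are unsupported, the stub below is used; otherwise a client
// may override it to skip leak checking entirely, e.g. (sketch):
//   extern "C" int __lsan_is_turned_off() { return 1; }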
#if !SANITIZER_SUPPORTS_WEAK_HOOKS
SANITIZER_WEAK_ATTRIBUTE SANITIZER_INTERFACE_ATTRIBUTE
int __lsan_is_turned_off() {
  return 0;
}
#endif
}  // extern "C"