//=-- lsan_allocator.cc ---------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// See lsan_allocator.h for details.
//
//===----------------------------------------------------------------------===//

#include "lsan_allocator.h"

#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_checks.h"
#include "sanitizer_common/sanitizer_allocator_interface.h"
#include "sanitizer_common/sanitizer_allocator_report.h"
#include "sanitizer_common/sanitizer_errno.h"
#include "sanitizer_common/sanitizer_internal_defs.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "lsan_common.h"

extern "C" void *memset(void *ptr, int value, uptr num);

namespace __lsan {
#if defined(__i386__) || defined(__arm__)
static const uptr kMaxAllowedMallocSize = 1UL << 30;
#elif defined(__mips64) || defined(__aarch64__)
static const uptr kMaxAllowedMallocSize = 4UL << 30;
#else
static const uptr kMaxAllowedMallocSize = 8UL << 30;
#endif

static Allocator allocator;

void InitializeAllocator() {
  SetAllocatorMayReturnNull(common_flags()->allocator_may_return_null);
  allocator.InitLinkerInitialized(
      common_flags()->allocator_release_to_os_interval_ms);
}

void AllocatorThreadFinish() {
  allocator.SwallowCache(GetAllocatorCache());
}

static ChunkMetadata *Metadata(const void *p) {
  return reinterpret_cast<ChunkMetadata *>(allocator.GetMetaData(p));
}

static void RegisterAllocation(const StackTrace &stack, void *p, uptr size) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  m->tag = DisabledInThisThread() ? kIgnored : kDirectlyLeaked;
  m->stack_trace_id = StackDepotPut(stack);
  m->requested_size = size;
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 1, memory_order_relaxed);
}

static void RegisterDeallocation(void *p) {
  if (!p) return;
  ChunkMetadata *m = Metadata(p);
  CHECK(m);
  atomic_store(reinterpret_cast<atomic_uint8_t *>(m), 0, memory_order_relaxed);
}

static void *ReportAllocationSizeTooBig(uptr size, const StackTrace &stack) {
  if (AllocatorMayReturnNull()) {
    Report("WARNING: LeakSanitizer failed to allocate 0x%zx bytes\n", size);
    return nullptr;
  }
  ReportAllocationSizeTooBig(size, kMaxAllowedMallocSize, &stack);
}

void *Allocate(const StackTrace &stack, uptr size, uptr alignment,
               bool cleared) {
  if (size == 0)
    size = 1;
  if (size > kMaxAllowedMallocSize)
    return ReportAllocationSizeTooBig(size, stack);
  void *p = allocator.Allocate(GetAllocatorCache(), size, alignment);
  if (UNLIKELY(!p)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, &stack);
  }
  // Do not rely on the allocator to clear the memory (it's slow).
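  // Note (assumed rationale, not stated in the original source): secondary
  // chunks come straight from mmap, and fresh anonymous mappings are already
  // zero-filled by the kernel, so only primary-allocator chunks need an
  // explicit memset.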
  if (cleared && allocator.FromPrimary(p))
    memset(p, 0, size);
  RegisterAllocation(stack, p, size);
  if (&__sanitizer_malloc_hook) __sanitizer_malloc_hook(p, size);
  RunMallocHooks(p, size);
  return p;
}

static void *Calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  if (UNLIKELY(CheckForCallocOverflow(size, nmemb))) {
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportCallocOverflow(nmemb, size, &stack);
  }
  size *= nmemb;
  return Allocate(stack, size, 1, true);
}

void Deallocate(void *p) {
  if (&__sanitizer_free_hook) __sanitizer_free_hook(p);
  RunFreeHooks(p);
  RegisterDeallocation(p);
  allocator.Deallocate(GetAllocatorCache(), p);
}

void *Reallocate(const StackTrace &stack, void *p, uptr new_size,
                 uptr alignment) {
  RegisterDeallocation(p);
  if (new_size > kMaxAllowedMallocSize) {
    allocator.Deallocate(GetAllocatorCache(), p);
    return ReportAllocationSizeTooBig(new_size, stack);
  }
  p = allocator.Reallocate(GetAllocatorCache(), p, new_size, alignment);
  RegisterAllocation(stack, p, new_size);
  return p;
}

void GetAllocatorCacheRange(uptr *begin, uptr *end) {
  *begin = (uptr)GetAllocatorCache();
  *end = *begin + sizeof(AllocatorCache);
}

uptr GetMallocUsableSize(const void *p) {
  ChunkMetadata *m = Metadata(p);
  if (!m) return 0;
  return m->requested_size;
}

int lsan_posix_memalign(void **memptr, uptr alignment, uptr size,
                        const StackTrace &stack) {
  if (UNLIKELY(!CheckPosixMemalignAlignment(alignment))) {
    if (AllocatorMayReturnNull())
      return errno_EINVAL;
    ReportInvalidPosixMemalignAlignment(alignment, &stack);
  }
  void *ptr = Allocate(stack, size, alignment, kAlwaysClearMemory);
  if (UNLIKELY(!ptr))
    // OOM error is already taken care of by Allocate.
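    // posix_memalign reports failure via its return value rather than errno,
    // which is why the error code is returned directly instead of going
    // through SetErrnoOnNull.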
    return errno_ENOMEM;
  CHECK(IsAligned((uptr)ptr, alignment));
  *memptr = ptr;
  return 0;
}

void *lsan_aligned_alloc(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!CheckAlignedAllocAlignmentAndSize(alignment, size))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAlignedAllocAlignment(size, alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_memalign(uptr alignment, uptr size, const StackTrace &stack) {
  if (UNLIKELY(!IsPowerOfTwo(alignment))) {
    errno = errno_EINVAL;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportInvalidAllocationAlignment(alignment, &stack);
  }
  return SetErrnoOnNull(Allocate(stack, size, alignment, kAlwaysClearMemory));
}

void *lsan_malloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Allocate(stack, size, 1, kAlwaysClearMemory));
}

void lsan_free(void *p) {
  Deallocate(p);
}

void *lsan_realloc(void *p, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Reallocate(stack, p, size, 1));
}

void *lsan_calloc(uptr nmemb, uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(Calloc(nmemb, size, stack));
}

void *lsan_valloc(uptr size, const StackTrace &stack) {
  return SetErrnoOnNull(
      Allocate(stack, size, GetPageSizeCached(), kAlwaysClearMemory));
}

void *lsan_pvalloc(uptr size, const StackTrace &stack) {
  uptr PageSize = GetPageSizeCached();
  if (UNLIKELY(CheckForPvallocOverflow(size, PageSize))) {
    errno = errno_ENOMEM;
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportPvallocOverflow(size, &stack);
  }
  // pvalloc(0) should allocate one page.
  size = size ? RoundUpTo(size, PageSize) : PageSize;
  return SetErrnoOnNull(Allocate(stack, size, PageSize, kAlwaysClearMemory));
}

uptr lsan_mz_size(const void *p) {
  return GetMallocUsableSize(p);
}

///// Interface to the common LSan module. /////

void LockAllocator() {
  allocator.ForceLock();
}

void UnlockAllocator() {
  allocator.ForceUnlock();
}

void GetAllocatorGlobalRange(uptr *begin, uptr *end) {
  *begin = (uptr)&allocator;
  *end = *begin + sizeof(allocator);
}

uptr PointsIntoChunk(void* p) {
  uptr addr = reinterpret_cast<uptr>(p);
  uptr chunk = reinterpret_cast<uptr>(allocator.GetBlockBeginFastLocked(p));
  if (!chunk) return 0;
  // LargeMmapAllocator considers pointers to the meta-region of a chunk to be
  // valid, but we don't want that.
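  // The meta-region precedes the user portion of the chunk, so an address
  // below the chunk start can only point into metadata; reject it here.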
  if (addr < chunk) return 0;
  ChunkMetadata *m = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(m);
  if (!m->allocated)
    return 0;
  if (addr < chunk + m->requested_size)
    return chunk;
  if (IsSpecialCaseOfOperatorNew0(chunk, m->requested_size, addr))
    return chunk;
  return 0;
}

uptr GetUserBegin(uptr chunk) {
  return chunk;
}

LsanMetadata::LsanMetadata(uptr chunk) {
  metadata_ = Metadata(reinterpret_cast<void *>(chunk));
  CHECK(metadata_);
}

bool LsanMetadata::allocated() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->allocated;
}

ChunkTag LsanMetadata::tag() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->tag;
}

void LsanMetadata::set_tag(ChunkTag value) {
  reinterpret_cast<ChunkMetadata *>(metadata_)->tag = value;
}

uptr LsanMetadata::requested_size() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->requested_size;
}

u32 LsanMetadata::stack_trace_id() const {
  return reinterpret_cast<ChunkMetadata *>(metadata_)->stack_trace_id;
}

void ForEachChunk(ForEachChunkCallback callback, void *arg) {
  allocator.ForEachChunk(callback, arg);
}

IgnoreObjectResult IgnoreObjectLocked(const void *p) {
  void *chunk = allocator.GetBlockBegin(p);
  if (!chunk || p < chunk) return kIgnoreObjectInvalid;
  ChunkMetadata *m = Metadata(chunk);
  CHECK(m);
  if (m->allocated && (uptr)p < (uptr)chunk + m->requested_size) {
    if (m->tag == kIgnored)
      return kIgnoreObjectAlreadyIgnored;
    m->tag = kIgnored;
    return kIgnoreObjectSuccess;
  } else {
    return kIgnoreObjectInvalid;
  }
}
} // namespace __lsan

using namespace __lsan;

extern "C" {
SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_current_allocated_bytes() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatAllocated];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_heap_size() {
  uptr stats[AllocatorStatCount];
  allocator.GetStats(stats);
  return stats[AllocatorStatMapped];
}

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_free_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_unmapped_bytes() { return 0; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_estimated_allocated_size(uptr size) { return size; }

SANITIZER_INTERFACE_ATTRIBUTE
int __sanitizer_get_ownership(const void *p) { return Metadata(p) != nullptr; }

SANITIZER_INTERFACE_ATTRIBUTE
uptr __sanitizer_get_allocated_size(const void *p) {
  return GetMallocUsableSize(p);
}

#if !SANITIZER_SUPPORTS_WEAK_HOOKS
// Provide default (no-op) implementation of malloc hooks.
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_malloc_hook(void *ptr, uptr size) {
  (void)ptr;
  (void)size;
}
SANITIZER_INTERFACE_ATTRIBUTE SANITIZER_WEAK_ATTRIBUTE
void __sanitizer_free_hook(void *ptr) {
  (void)ptr;
}
#endif
} // extern "C"