//===-- sanitizer_allocator_combined.h --------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

// This class implements a complete memory allocator by using two
// internal allocators:
// PrimaryAllocator is efficient, but may not allocate some sizes (alignments).
//   When allocating 2^x bytes it should return 2^x aligned chunk.
// PrimaryAllocator is used via a local AllocatorCache.
// SecondaryAllocator can allocate anything, but is not efficient.
template <class PrimaryAllocator, class AllocatorCache,
          class SecondaryAllocator,
          typename AddressSpaceViewTy = LocalAddressSpaceView>  // NOLINT
class CombinedAllocator {
 public:
  using AddressSpaceView = AddressSpaceViewTy;
  static_assert(is_same<AddressSpaceView,
                        typename PrimaryAllocator::AddressSpaceView>::value,
                "PrimaryAllocator is using wrong AddressSpaceView");
  static_assert(is_same<AddressSpaceView,
                        typename SecondaryAllocator::AddressSpaceView>::value,
                "SecondaryAllocator is using wrong AddressSpaceView");

  void InitLinkerInitialized(s32 release_to_os_interval_ms) {
    stats_.InitLinkerInitialized();
    primary_.Init(release_to_os_interval_ms);
    secondary_.InitLinkerInitialized();
  }

  void Init(s32 release_to_os_interval_ms) {
    stats_.Init();
    primary_.Init(release_to_os_interval_ms);
    secondary_.Init();
  }

  void *Allocate(AllocatorCache *cache, uptr size, uptr alignment) {
    // Returning 0 on malloc(0) may break a lot of code.
    if (size == 0)
      size = 1;
    if (size + alignment < size) {
      Report("WARNING: %s: CombinedAllocator allocation overflow: "
             "0x%zx bytes with 0x%zx alignment requested\n",
             SanitizerToolName, size, alignment);
      return nullptr;
    }
    uptr original_size = size;
    // If alignment requirements are to be fulfilled by the frontend allocator
    // rather than by the primary or secondary, passing an alignment lower than
    // or equal to 8 will prevent any further rounding up, as well as the later
    // alignment check.
    if (alignment > 8)
      size = RoundUpTo(size, alignment);
    // The primary allocator should return a 2^x aligned allocation when
    // requested 2^x bytes, hence using the rounded up 'size' when being
    // serviced by the primary (this is no longer true when the primary is
    // using a non-fixed base address). The secondary takes care of the
    // alignment without such requirement, and allocating 'size' would use
    // extraneous memory, so we employ 'original_size'.
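    // Illustrative example (numbers chosen for this comment only): a request
    // for 24 bytes with 32-byte alignment is rounded up to size = 32, and the
    // primary's 32-byte size class hands back a 32-byte aligned chunk. Were
    // the request too large for the primary, the secondary would instead map
    // original_size (24) bytes with the requested alignment.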
    void *res;
    if (primary_.CanAllocate(size, alignment))
      res = cache->Allocate(&primary_, primary_.ClassID(size));
    else
      res = secondary_.Allocate(&stats_, original_size, alignment);
    if (alignment > 8)
      CHECK_EQ(reinterpret_cast<uptr>(res) & (alignment - 1), 0);
    return res;
  }

  s32 ReleaseToOSIntervalMs() const {
    return primary_.ReleaseToOSIntervalMs();
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    primary_.SetReleaseToOSIntervalMs(release_to_os_interval_ms);
  }

  void ForceReleaseToOS() {
    primary_.ForceReleaseToOS();
  }

  void Deallocate(AllocatorCache *cache, void *p) {
    if (!p) return;
    if (primary_.PointerIsMine(p))
      cache->Deallocate(&primary_, primary_.GetSizeClass(p), p);
    else
      secondary_.Deallocate(&stats_, p);
  }

  void *Reallocate(AllocatorCache *cache, void *p, uptr new_size,
                   uptr alignment) {
    if (!p)
      return Allocate(cache, new_size, alignment);
    if (!new_size) {
      Deallocate(cache, p);
      return nullptr;
    }
    CHECK(PointerIsMine(p));
    uptr old_size = GetActuallyAllocatedSize(p);
    uptr memcpy_size = Min(new_size, old_size);
    void *new_p = Allocate(cache, new_size, alignment);
    if (new_p)
      internal_memcpy(new_p, p, memcpy_size);
    Deallocate(cache, p);
    return new_p;
  }

  bool PointerIsMine(void *p) {
    if (primary_.PointerIsMine(p))
      return true;
    return secondary_.PointerIsMine(p);
  }

  bool FromPrimary(void *p) {
    return primary_.PointerIsMine(p);
  }

  void *GetMetaData(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetMetaData(p);
    return secondary_.GetMetaData(p);
  }

  void *GetBlockBegin(const void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBegin(p);
  }

  // This function does the same as GetBlockBegin, but is much faster.
  // Must be called with the allocator locked.
  void *GetBlockBeginFastLocked(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetBlockBegin(p);
    return secondary_.GetBlockBeginFastLocked(p);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    if (primary_.PointerIsMine(p))
      return primary_.GetActuallyAllocatedSize(p);
    return secondary_.GetActuallyAllocatedSize(p);
  }

  uptr TotalMemoryUsed() {
    return primary_.TotalMemoryUsed() + secondary_.TotalMemoryUsed();
  }

  void TestOnlyUnmap() { primary_.TestOnlyUnmap(); }

  void InitCache(AllocatorCache *cache) {
    cache->Init(&stats_);
  }

  void DestroyCache(AllocatorCache *cache) {
    cache->Destroy(&primary_, &stats_);
  }

  void SwallowCache(AllocatorCache *cache) {
    cache->Drain(&primary_);
  }

  void GetStats(AllocatorStatCounters s) const {
    stats_.Get(s);
  }

  void PrintStats() {
    primary_.PrintStats();
    secondary_.PrintStats();
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    primary_.ForceLock();
    secondary_.ForceLock();
  }

  void ForceUnlock() {
    secondary_.ForceUnlock();
    primary_.ForceUnlock();
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    primary_.ForEachChunk(callback, arg);
    secondary_.ForEachChunk(callback, arg);
  }

 private:
  PrimaryAllocator primary_;
  SecondaryAllocator secondary_;
  AllocatorGlobalStats stats_;
};
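// Example wiring (illustrative sketch only; PrimaryAllocator, AllocatorCache
// and SecondaryAllocator here stand for whatever concrete typedefs a given
// sanitizer defines for itself, and the call sequence is the typical one
// rather than a requirement of this header):
//
//   typedef CombinedAllocator<PrimaryAllocator, AllocatorCache,
//                             SecondaryAllocator> Allocator;
//   static Allocator allocator;
//   static AllocatorCache cache;
//
//   allocator.Init(kReleaseToOSIntervalNever);  // or a flag-provided interval
//   allocator.InitCache(&cache);
//   void *p = allocator.Allocate(&cache, /*size=*/24, /*alignment=*/32);
//   // ... use the chunk ...
//   allocator.Deallocate(&cache, p);
//   allocator.DestroyCache(&cache);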