//===-- sanitizer_allocator_primary32.h -------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Part of the Sanitizer Allocator.
//
//===----------------------------------------------------------------------===//
#ifndef SANITIZER_ALLOCATOR_H
#error This file must be included inside sanitizer_allocator.h
#endif

template<class SizeClassAllocator> struct SizeClassAllocator32LocalCache;

// SizeClassAllocator32 -- allocator for 32-bit address space.
// This allocator can theoretically be used on 64-bit arch, but there it is less
// efficient than SizeClassAllocator64.
//
// [kSpaceBeg, kSpaceBeg + kSpaceSize) is the range of addresses which can
// be returned by MmapOrDie().
//
// Region:
//   a result of a single call to MmapAlignedOrDieOnFatalError(kRegionSize,
//                                                             kRegionSize).
// Since the regions are aligned by kRegionSize, there are exactly
// kNumPossibleRegions possible regions in the address space and so we keep
// a ByteMap possible_regions to store the size classes of each Region.
// 0 size class means the region is not used by the allocator.
//
// One Region is used to allocate chunks of a single size class.
// A Region looks like this:
// UserChunk1 .. UserChunkN <gap> MetaChunkN .. MetaChunk1
//
// In order to avoid false sharing, the objects of this class should be
// cache-line aligned.

struct SizeClassAllocator32FlagMasks {  //  Bit masks.
  enum {
    kRandomShuffleChunks = 1,
    kUseSeparateSizeClassForBatch = 2,
  };
};

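// The allocator is configured through a Params class that supplies the
// constants and types referenced below (kSpaceBeg, kSpaceSize, kMetadataSize,
// SizeClassMap, kRegionSizeLog, ByteMap, MapUnmapCallback, AddressSpaceView,
// kFlags). A rough sketch of such a class is shown here; the name AP32 and
// the concrete values are only illustrative, real configurations live in the
// tool-specific allocator headers:
//
//   struct AP32 {
//     static const uptr kSpaceBeg = 0;
//     static const u64 kSpaceSize = SANITIZER_MMAP_RANGE_SIZE;
//     static const uptr kMetadataSize = 16;
//     typedef DefaultSizeClassMap SizeClassMap;
//     static const uptr kRegionSizeLog = 20;
//     using AddressSpaceView = LocalAddressSpaceView;
//     using ByteMap = FlatByteMap<kSpaceSize / (1 << kRegionSizeLog),
//                                 AddressSpaceView>;
//     typedef NoOpMapUnmapCallback MapUnmapCallback;
//     static const uptr kFlags = 0;
//   };
//   typedef SizeClassAllocator32<AP32> PrimaryAllocator;
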
template <class Params>
class SizeClassAllocator32 {
 public:
  using AddressSpaceView = typename Params::AddressSpaceView;
  static const uptr kSpaceBeg = Params::kSpaceBeg;
  static const u64 kSpaceSize = Params::kSpaceSize;
  static const uptr kMetadataSize = Params::kMetadataSize;
  typedef typename Params::SizeClassMap SizeClassMap;
  static const uptr kRegionSizeLog = Params::kRegionSizeLog;
  typedef typename Params::ByteMap ByteMap;
  typedef typename Params::MapUnmapCallback MapUnmapCallback;

  static_assert(
      is_same<typename ByteMap::AddressSpaceView, AddressSpaceView>::value,
      "AddressSpaceView type mismatch");

  static const bool kRandomShuffleChunks = Params::kFlags &
      SizeClassAllocator32FlagMasks::kRandomShuffleChunks;
  static const bool kUseSeparateSizeClassForBatch = Params::kFlags &
      SizeClassAllocator32FlagMasks::kUseSeparateSizeClassForBatch;

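  // A TransferBatch is a fixed-size array of chunk pointers, used to move
  // chunks between the thread-local cache and the central free list of a
  // size class. kMaxNumCached is kMaxNumCachedHint - 2 so that, together with
  // the `next` and `count_` fields, sizeof(TransferBatch) stays equal to
  // SizeClassMap::kMaxNumCachedHint * sizeof(uptr) (see the COMPILER_CHECKs
  // below).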
  struct TransferBatch {
    static const uptr kMaxNumCached = SizeClassMap::kMaxNumCachedHint - 2;
    void SetFromArray(void *batch[], uptr count) {
      DCHECK_LE(count, kMaxNumCached);
      count_ = count;
      for (uptr i = 0; i < count; i++)
        batch_[i] = batch[i];
    }
    uptr Count() const { return count_; }
    void Clear() { count_ = 0; }
    void Add(void *ptr) {
      batch_[count_++] = ptr;
      DCHECK_LE(count_, kMaxNumCached);
    }
    void CopyToArray(void *to_batch[]) const {
      for (uptr i = 0, n = Count(); i < n; i++)
        to_batch[i] = batch_[i];
    }

    // How much memory do we need for a batch containing n elements?
    static uptr AllocationSizeRequiredForNElements(uptr n) {
      return sizeof(uptr) * 2 + sizeof(void *) * n;
    }
    static uptr MaxCached(uptr size) {
      return Min(kMaxNumCached, SizeClassMap::MaxCachedHint(size));
    }

    TransferBatch *next;

   private:
    uptr count_;
    void *batch_[kMaxNumCached];
  };

  static const uptr kBatchSize = sizeof(TransferBatch);
  COMPILER_CHECK((kBatchSize & (kBatchSize - 1)) == 0);
  COMPILER_CHECK(kBatchSize == SizeClassMap::kMaxNumCachedHint * sizeof(uptr));

  static uptr ClassIdToSize(uptr class_id) {
    return (class_id == SizeClassMap::kBatchClassID) ?
        kBatchSize : SizeClassMap::Size(class_id);
  }

  typedef SizeClassAllocator32<Params> ThisT;
  typedef SizeClassAllocator32LocalCache<ThisT> AllocatorCache;

  void Init(s32 release_to_os_interval_ms) {
    possible_regions.Init();
    internal_memset(size_class_info_array, 0, sizeof(size_class_info_array));
  }

  s32 ReleaseToOSIntervalMs() const {
    return kReleaseToOSIntervalNever;
  }

  void SetReleaseToOSIntervalMs(s32 release_to_os_interval_ms) {
    // This is empty here. Currently only implemented in 64-bit allocator.
  }

  void ForceReleaseToOS() {
    // Currently implemented in 64-bit allocator only.
  }

  void *MapWithCallback(uptr size) {
    void *res = MmapOrDie(size, PrimaryAllocatorName);
    MapUnmapCallback().OnMap((uptr)res, size);
    return res;
  }

  void UnmapWithCallback(uptr beg, uptr size) {
    MapUnmapCallback().OnUnmap(beg, size);
    UnmapOrDie(reinterpret_cast<void *>(beg), size);
  }

  static bool CanAllocate(uptr size, uptr alignment) {
    return size <= SizeClassMap::kMaxSize &&
      alignment <= SizeClassMap::kMaxSize;
  }

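  // Metadata records are laid out from the end of the region towards its
  // beginning, mirroring the chunk layout described at the top of this file:
  // the metadata of the n-th chunk lives at
  // (beg + kRegionSize) - (n + 1) * kMetadataSize, where beg is the region
  // start and n is the chunk index within the region.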
  void *GetMetaData(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    uptr n = offset / (u32)size;  // 32-bit division
    uptr meta = (beg + kRegionSize) - (n + 1) * kMetadataSize;
    return reinterpret_cast<void*>(meta);
  }

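  // Pops one TransferBatch from the central free list of `class_id`,
  // refilling the list from a newly mapped region when it is empty.
  // Returns nullptr if the refill fails (out of memory).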
  NOINLINE TransferBatch *AllocateBatch(AllocatorStats *stat, AllocatorCache *c,
                                        uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    if (sci->free_list.empty()) {
      if (UNLIKELY(!PopulateFreeList(stat, c, sci, class_id)))
        return nullptr;
      DCHECK(!sci->free_list.empty());
    }
    TransferBatch *b = sci->free_list.front();
    sci->free_list.pop_front();
    return b;
  }

  NOINLINE void DeallocateBatch(AllocatorStats *stat, uptr class_id,
                                TransferBatch *b) {
    DCHECK_LT(class_id, kNumClasses);
    CHECK_GT(b->Count(), 0);
    SizeClassInfo *sci = GetSizeClassInfo(class_id);
    SpinMutexLock l(&sci->mutex);
    sci->free_list.push_front(b);
  }

  bool PointerIsMine(const void *p) {
    uptr mem = reinterpret_cast<uptr>(p);
    if (mem < kSpaceBeg || mem >= kSpaceBeg + kSpaceSize)
      return false;
    return GetSizeClass(p) != 0;
  }

  uptr GetSizeClass(const void *p) {
    return possible_regions[ComputeRegionId(reinterpret_cast<uptr>(p))];
  }

  void *GetBlockBegin(const void *p) {
    CHECK(PointerIsMine(p));
    uptr mem = reinterpret_cast<uptr>(p);
    uptr beg = ComputeRegionBeg(mem);
    uptr size = ClassIdToSize(GetSizeClass(p));
    u32 offset = mem - beg;
    u32 n = offset / (u32)size;  // 32-bit division
    uptr res = beg + (n * (u32)size);
    return reinterpret_cast<void*>(res);
  }

  uptr GetActuallyAllocatedSize(void *p) {
    CHECK(PointerIsMine(p));
    return ClassIdToSize(GetSizeClass(p));
  }

  uptr ClassID(uptr size) { return SizeClassMap::ClassID(size); }

  uptr TotalMemoryUsed() {
    // No need to lock here.
    uptr res = 0;
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        res += kRegionSize;
    return res;
  }

  void TestOnlyUnmap() {
    for (uptr i = 0; i < kNumPossibleRegions; i++)
      if (possible_regions[i])
        UnmapWithCallback((i * kRegionSize), kRegionSize);
  }

  // ForceLock() and ForceUnlock() are needed to implement Darwin malloc zone
  // introspection API.
  void ForceLock() {
    for (uptr i = 0; i < kNumClasses; i++) {
      GetSizeClassInfo(i)->mutex.Lock();
    }
  }

  void ForceUnlock() {
    for (int i = kNumClasses - 1; i >= 0; i--) {
      GetSizeClassInfo(i)->mutex.Unlock();
    }
  }

  // Iterate over all existing chunks.
  // The allocator must be locked when calling this function.
  void ForEachChunk(ForEachChunkCallback callback, void *arg) {
    for (uptr region = 0; region < kNumPossibleRegions; region++)
      if (possible_regions[region]) {
        uptr chunk_size = ClassIdToSize(possible_regions[region]);
        uptr max_chunks_in_region = kRegionSize / (chunk_size + kMetadataSize);
        uptr region_beg = region * kRegionSize;
        for (uptr chunk = region_beg;
             chunk < region_beg + max_chunks_in_region * chunk_size;
             chunk += chunk_size) {
          // Too slow: CHECK_EQ((void *)chunk, GetBlockBegin((void *)chunk));
          callback(chunk, arg);
        }
      }
  }

  void PrintStats() {}

  static uptr AdditionalSize() { return 0; }

  typedef SizeClassMap SizeClassMapT;
  static const uptr kNumClasses = SizeClassMap::kNumClasses;

 private:
  static const uptr kRegionSize = 1 << kRegionSizeLog;
  static const uptr kNumPossibleRegions = kSpaceSize / kRegionSize;

  struct ALIGNED(SANITIZER_CACHE_LINE_SIZE) SizeClassInfo {
    StaticSpinMutex mutex;
    IntrusiveList<TransferBatch> free_list;
    u32 rand_state;
  };
  COMPILER_CHECK(sizeof(SizeClassInfo) % kCacheLineSize == 0);

  uptr ComputeRegionId(uptr mem) {
    const uptr res = mem >> kRegionSizeLog;
    CHECK_LT(res, kNumPossibleRegions);
    return res;
  }

  uptr ComputeRegionBeg(uptr mem) {
    return mem & ~(kRegionSize - 1);
  }

  uptr AllocateRegion(AllocatorStats *stat, uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    const uptr res = reinterpret_cast<uptr>(MmapAlignedOrDieOnFatalError(
        kRegionSize, kRegionSize, PrimaryAllocatorName));
    if (UNLIKELY(!res))
      return 0;
    MapUnmapCallback().OnMap(res, kRegionSize);
    stat->Add(AllocatorStatMapped, kRegionSize);
    CHECK(IsAligned(res, kRegionSize));
    possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
    return res;
  }

  SizeClassInfo *GetSizeClassInfo(uptr class_id) {
    DCHECK_LT(class_id, kNumClasses);
    return &size_class_info_array[class_id];
  }

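  // Packs `count` chunk addresses from pointers_array into TransferBatches of
  // up to max_count chunks each, pushing every full batch onto the free list
  // and leaving the last, partially filled batch in *current_batch. Batch
  // objects themselves are obtained via AllocatorCache::CreateBatch().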
  bool PopulateBatches(AllocatorCache *c, SizeClassInfo *sci, uptr class_id,
                       TransferBatch **current_batch, uptr max_count,
                       uptr *pointers_array, uptr count) {
    // If using a separate class for batches, we do not need to shuffle it.
    if (kRandomShuffleChunks && (!kUseSeparateSizeClassForBatch ||
        class_id != SizeClassMap::kBatchClassID))
      RandomShuffle(pointers_array, count, &sci->rand_state);
    TransferBatch *b = *current_batch;
    for (uptr i = 0; i < count; i++) {
      if (!b) {
        b = c->CreateBatch(class_id, this, (TransferBatch*)pointers_array[i]);
        if (UNLIKELY(!b))
          return false;
        b->Clear();
      }
      b->Add((void*)pointers_array[i]);
      if (b->Count() == max_count) {
        sci->free_list.push_back(b);
        b = nullptr;
      }
    }
    *current_batch = b;
    return true;
  }

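  // Maps a fresh region for `class_id`, carves it into chunks of the class
  // size and feeds their addresses to PopulateBatches() in groups of
  // kShuffleArraySize, so that shuffling only ever touches a small stack
  // buffer. Returns false if mapping the region or creating a batch fails.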
  bool PopulateFreeList(AllocatorStats *stat, AllocatorCache *c,
                        SizeClassInfo *sci, uptr class_id) {
    const uptr region = AllocateRegion(stat, class_id);
    if (UNLIKELY(!region))
      return false;
    if (kRandomShuffleChunks)
      if (UNLIKELY(sci->rand_state == 0))
        // The random state is initialized from ASLR (PIE) and time.
        sci->rand_state = reinterpret_cast<uptr>(sci) ^ NanoTime();
    const uptr size = ClassIdToSize(class_id);
    const uptr n_chunks = kRegionSize / (size + kMetadataSize);
    const uptr max_count = TransferBatch::MaxCached(size);
    DCHECK_GT(max_count, 0);
    TransferBatch *b = nullptr;
    constexpr uptr kShuffleArraySize = 48;
    uptr shuffle_array[kShuffleArraySize];
    uptr count = 0;
    for (uptr i = region; i < region + n_chunks * size; i += size) {
      shuffle_array[count++] = i;
      if (count == kShuffleArraySize) {
        if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                      shuffle_array, count)))
          return false;
        count = 0;
      }
    }
    if (count) {
      if (UNLIKELY(!PopulateBatches(c, sci, class_id, &b, max_count,
                                    shuffle_array, count)))
        return false;
    }
    if (b) {
      CHECK_GT(b->Count(), 0);
      sci->free_list.push_back(b);
    }
    return true;
  }

  ByteMap possible_regions;
  SizeClassInfo size_class_info_array[kNumClasses];
};