//===-- tsan_dense_alloc.h --------------------------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer (TSan), a race detector.
//
// A DenseSlabAlloc is a freelist-based allocator of fixed-size objects.
// DenseSlabAllocCache is a thread-local cache for DenseSlabAlloc.
// The only difference from traditional slab allocators is that DenseSlabAlloc
// allocates/frees indices of objects and provides functionality to map
// an index onto the real pointer. The index is u32, that is, half the size
// of uptr (hence the Dense prefix).
//===----------------------------------------------------------------------===//
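//
// Example usage (an illustrative sketch only; the Node type and the template
// arguments below are hypothetical, not part of TSan):
//
//   struct Node { u64 data; };  // sizeof(T) must be >= sizeof(u32)
//   typedef DenseSlabAlloc<Node, /*kL1Size=*/128, /*kL2Size=*/1024> NodeAlloc;
//
//   NodeAlloc alloc("node_alloc");
//   DenseSlabAllocCache cache;
//   alloc.InitCache(&cache);
//   u32 idx = alloc.Alloc(&cache);  // dense index, never 0
//   Node *n = alloc.Map(idx);       // map the index to the real pointer
//   n->data = 42;
//   alloc.Free(&cache, idx);        // return the index via the local cache
//   alloc.FlushCache(&cache);       // push cached indices back when done
//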
#ifndef TSAN_DENSE_ALLOC_H
#define TSAN_DENSE_ALLOC_H

#include "sanitizer_common/sanitizer_common.h"
#include "tsan_defs.h"
#include "tsan_mutex.h"

namespace __tsan {

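// Per-thread cache of free object indices. It buffers up to kSize indices
// so that most Alloc/Free calls do not touch the shared allocator mutex.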
class DenseSlabAllocCache {
  static const uptr kSize = 128;
  typedef u32 IndexT;
  uptr pos;
  IndexT cache[kSize];
  template<typename T, uptr kL1Size, uptr kL2Size> friend class DenseSlabAlloc;
};

template<typename T, uptr kL1Size, uptr kL2Size>
class DenseSlabAlloc {
 public:
  typedef DenseSlabAllocCache Cache;
  typedef typename Cache::IndexT IndexT;

  explicit DenseSlabAlloc(const char *name) {
    // Check that kL1Size and kL2Size are powers of two and that the total
    // index space fits into IndexT.
    CHECK_EQ(kL1Size & (kL1Size - 1), 0);
    CHECK_EQ(kL2Size & (kL2Size - 1), 0);
    CHECK_GE(1ull << (sizeof(IndexT) * 8), kL1Size * kL2Size);
    // Check that it makes sense to use the dense alloc: the freelist stores
    // the next-free index in the first sizeof(IndexT) bytes of each object.
    CHECK_GE(sizeof(T), sizeof(IndexT));
    internal_memset(map_, 0, sizeof(map_));
    freelist_ = 0;
    fillpos_ = 0;
    name_ = name;
  }

  ~DenseSlabAlloc() {
    for (uptr i = 0; i < kL1Size; i++) {
      if (map_[i] != 0)
        UnmapOrDie(map_[i], kL2Size * sizeof(T));
    }
  }

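  // Returns the index of a free object. Never returns 0 (reserved as the
  // invalid index). Refills the local cache from the shared freelist when
  // the cache is empty.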
  IndexT Alloc(Cache *c) {
    if (c->pos == 0)
      Refill(c);
    return c->cache[--c->pos];
  }

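  // Returns an index to the local cache, spilling half of the cache to the
  // shared freelist when the cache is full.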
  void Free(Cache *c, IndexT idx) {
    DCHECK_NE(idx, 0);
    if (c->pos == Cache::kSize)
      Drain(c);
    c->cache[c->pos++] = idx;
  }

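  // Maps an index to the real object pointer.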
  T *Map(IndexT idx) {
    DCHECK_NE(idx, 0);
    DCHECK_LE(idx, kL1Size * kL2Size);
    return &map_[idx / kL2Size][idx % kL2Size];
  }

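  // Pushes all cached indices back onto the shared freelist
  // (e.g. on thread destruction).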
  void FlushCache(Cache *c) {
    SpinMutexLock lock(&mtx_);
    while (c->pos) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }

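  // Initializes an empty cache; call before the cache's first use.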
  void InitCache(Cache *c) {
    c->pos = 0;
    internal_memset(c->cache, 0, sizeof(c->cache));
  }

 private:
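  // Two-level table: map_[i] points to a lazily-mmapped slab of kL2Size
  // objects, so index idx lives at map_[idx / kL2Size][idx % kL2Size].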
  T *map_[kL1Size];
  SpinMutex mtx_;
  IndexT freelist_;
  uptr fillpos_;
  const char *name_;

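  // Moves up to half of the cache capacity from the shared freelist into the
  // local cache, mapping a new slab first if the freelist is empty.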
  void Refill(Cache *c) {
    SpinMutexLock lock(&mtx_);
    if (freelist_ == 0) {
      if (fillpos_ == kL1Size) {
        Printf("ThreadSanitizer: %s overflow (%zu*%zu). Dying.\n",
            name_, kL1Size, kL2Size);
        Die();
      }
      VPrintf(2, "ThreadSanitizer: growing %s: %zu out of %zu*%zu\n",
          name_, fillpos_, kL1Size, kL2Size);
      T *batch = (T*)MmapOrDie(kL2Size * sizeof(T), name_);
      // Reserve 0 as invalid index.
      IndexT start = fillpos_ == 0 ? 1 : 0;
      // Thread the new objects into a freelist: the first word of each free
      // object stores the index of the next free object.
      for (IndexT i = start; i < kL2Size; i++) {
        new(batch + i) T;
        *(IndexT*)(batch + i) = i + 1 + fillpos_ * kL2Size;
      }
      // The last object in the slab terminates the freelist.
      *(IndexT*)(batch + kL2Size - 1) = 0;
      freelist_ = fillpos_ * kL2Size + start;
      map_[fillpos_++] = batch;
    }
    // Move a batch of free indices from the shared freelist to the cache.
    for (uptr i = 0; i < Cache::kSize / 2 && freelist_ != 0; i++) {
      IndexT idx = freelist_;
      c->cache[c->pos++] = idx;
      freelist_ = *(IndexT*)Map(idx);
    }
  }

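  // Spills half of the local cache back to the shared freelist.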
  void Drain(Cache *c) {
    SpinMutexLock lock(&mtx_);
    for (uptr i = 0; i < Cache::kSize / 2; i++) {
      IndexT idx = c->cache[--c->pos];
      *(IndexT*)Map(idx) = freelist_;
      freelist_ = idx;
    }
  }
};

}  // namespace __tsan

#endif  // TSAN_DENSE_ALLOC_H