//===-- sanitizer_addrhashmap.h ---------------------------------*- C++ -*-===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Concurrent uptr->T hashmap.
//
//===----------------------------------------------------------------------===//

#ifndef SANITIZER_ADDRHASHMAP_H
#define SANITIZER_ADDRHASHMAP_H

#include "sanitizer_common.h"
#include "sanitizer_mutex.h"
#include "sanitizer_atomic.h"
#include "sanitizer_allocator_internal.h"

namespace __sanitizer {

// Concurrent uptr->T hashmap.
// T must be a POD type, kSize is preferably a prime but can be any number.
// Usage example:
//
// typedef AddrHashMap<uptr, 11> Map;
// Map m;
// {
//   Map::Handle h(&m, addr);
//   use h.operator->() to access the data
//   if h.created() then the element was just created, and the current thread
//     has exclusive access to it
//   otherwise the current thread has only read access to the data
// }
// {
//   Map::Handle h(&m, addr, true);
//   this will remove the data from the map in Handle dtor
//   the current thread has exclusive access to the data
//   if !h.exists() then the element never existed
// }
template<typename T, uptr kSize>
class AddrHashMap {
 private:
  // One key/value slot. addr == 0 means the slot is free. A non-zero addr
  // stored with memory_order_release (see release()) publishes val to
  // lock-free readers that load addr with memory_order_acquire.
  struct Cell {
    atomic_uintptr_t addr;
    T val;
  };

  // Overflow storage for a bucket, used once the embedded cells are full.
  // Allocated with InternalAlloc, grown by doubling; accessed only with the
  // bucket mutex held (read or write).
  struct AddBucket {
    uptr cap;       // number of cells the current allocation can hold
    uptr size;      // cells in use; live cells occupy indices [0, size)
    Cell cells[1];  // variable len
  };

  // Number of cells embedded directly in each bucket; only these are
  // searchable without taking the mutex.
  static const uptr kBucketSize = 3;

  struct Bucket {
    RWMutex mtx;           // guards the add-array and all structural changes
    atomic_uintptr_t add;  // AddBucket* (or 0), checked lock-free before locking
    Cell cells[kBucketSize];
  };

 public:
  AddrHashMap();

  // RAII accessor: the constructor looks up (and by default creates) the
  // element for addr and takes whatever lock the access mode requires; the
  // destructor publishes a created element, performs a pending removal, or
  // simply drops the lock. See the usage example above.
  class Handle {
   public:
    Handle(AddrHashMap<T, kSize> *map, uptr addr);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove);
    Handle(AddrHashMap<T, kSize> *map, uptr addr, bool remove, bool create);

    ~Handle();
    T *operator->();
    T &operator*();
    const T &operator*() const;
    bool created() const;  // true iff this handle inserted the element
    bool exists() const;   // true iff an element is attached to this handle

   private:
    friend AddrHashMap<T, kSize>;
    AddrHashMap<T, kSize> *map_;
    Bucket *bucket_;
    Cell *cell_;   // attached element, or nullptr if absent and not created
    uptr addr_;    // the key
    uptr addidx_;  // index in the add-array, or -1U when the element lives in
                   // (or was found lock-free in) the embed cells; also tells
                   // release() whether a read lock is held
    bool created_;
    bool remove_;
    bool create_;
  };

 private:
  friend class Handle;
  Bucket *table_;  // array of kSize buckets

  void acquire(Handle *h);
  void release(Handle *h);
  uptr calcHash(uptr addr);
};

// Find-or-create handle: equivalent to Handle(map, addr, false, true).
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr) {
  map_ = map;
  addr_ = addr;
  remove_ = false;
  create_ = true;
  map_->acquire(this);
}

// As above, but if remove is true the element is attached exclusively and
// will be erased from the map in the destructor.
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = true;
  map_->acquire(this);
}

// Fully-general handle: create=false performs a pure lookup that never
// inserts (exists() reports whether the element was found).
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::Handle(AddrHashMap<T, kSize> *map, uptr addr,
    bool remove, bool create) {
  map_ = map;
  addr_ = addr;
  remove_ = remove;
  create_ = create;
  map_->acquire(this);
}

// Finalizes the operation started in the constructor (publish / remove /
// unlock) — see AddrHashMap::release().
template<typename T, uptr kSize>
AddrHashMap<T, kSize>::Handle::~Handle() {
  map_->release(this);
}

// Accessors to the attached element's value; valid only if exists().
template <typename T, uptr kSize>
T *AddrHashMap<T, kSize>::Handle::operator->() {
  return &cell_->val;
}

template <typename T, uptr kSize>
const T &AddrHashMap<T, kSize>::Handle::operator*() const {
  return cell_->val;
}

template <typename T, uptr kSize>
T &AddrHashMap<T, kSize>::Handle::operator*() {
  return cell_->val;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::created() const {
  return created_;
}

template<typename T, uptr kSize>
bool AddrHashMap<T, kSize>::Handle::exists() const {
  return cell_ != nullptr;
}

template<typename T, uptr kSize>
AddrHashMap<T, kSize>::AddrHashMap() {
  // NOTE(review): relies on MmapOrDie returning zero-initialized memory —
  // cells start free (addr == 0), add pointers start null, and the RWMutex
  // objects are never explicitly constructed. Presumably guaranteed by the
  // anonymous-mmap semantics of MmapOrDie; confirm against its definition.
  table_ = (Bucket*)MmapOrDie(kSize * sizeof(table_[0]), "AddrHashMap");
}

// Attaches h to the element for h->addr_. Lock state on return, which
// release() must (and does) match exactly:
//  - found lock-free in the embed cells: no lock held, h->addidx_ == -1U;
//  - found in the add-array (h->addidx_ != -1U, !h->remove_): read lock held;
//  - created (h->created_) or attached for removal (h->remove_): write lock
//    held;
//  - absent and not created: no lock held, h->cell_ == nullptr.
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::acquire(Handle *h) {
  uptr addr = h->addr_;
  uptr hash = calcHash(addr);
  Bucket *b = &table_[hash];

  h->created_ = false;
  h->addidx_ = -1U;
  h->bucket_ = b;
  h->cell_ = nullptr;

  // If we want to remove the element, we need exclusive access to the bucket,
  // so skip the lock-free phase.
  if (h->remove_)
    goto locked;

 retry:
  // First try to find an existing element w/o read mutex.
  CHECK(!h->remove_);
  // Check the embed cells.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    // Acquire pairs with the release store of addr in release(), making the
    // already-written c->val visible to this thread.
    uptr addr1 = atomic_load(&c->addr, memory_order_acquire);
    if (addr1 == addr) {
      h->cell_ = c;
      return;
    }
  }

  // Check the add cells with read lock.
  if (atomic_load(&b->add, memory_order_relaxed)) {
    b->mtx.ReadLock();
    // Re-load under the lock: the add-array may have been replaced (grown)
    // since the lock-free check above.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        h->addidx_ = i;
        h->cell_ = c;
        // Deliberately keep the read lock; release() drops it (addidx_ != -1U
        // is the marker).
        return;
      }
    }
    b->mtx.ReadUnlock();
  }

 locked:
  // Re-check existence under write lock.
  // Embed cells.
  b->mtx.Lock();
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == addr) {
      if (h->remove_) {
        h->cell_ = c;
        // Keep the write lock; release() clears the cell and unlocks.
        return;
      }
      // Element appeared concurrently; re-find it via the cheap lock-free
      // path so we end up holding the lock state that path establishes.
      b->mtx.Unlock();
      goto retry;
    }
  }

  // Add cells.
  AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
  if (add) {
    for (uptr i = 0; i < add->size; i++) {
      Cell *c = &add->cells[i];
      uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
      if (addr1 == addr) {
        if (h->remove_) {
          h->addidx_ = i;
          h->cell_ = c;
          return;
        }
        b->mtx.Unlock();
        goto retry;
      }
    }
  }

  // The element does not exist, no need to create it if we want to remove.
  if (h->remove_ || !h->create_) {
    b->mtx.Unlock();
    return;
  }

  // Now try to create it under the mutex.
  h->created_ = true;
  // See if we have a free embed cell.
  for (uptr i = 0; i < kBucketSize; i++) {
    Cell *c = &b->cells[i];
    uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
    if (addr1 == 0) {
      // c->addr stays 0 for now, so lock-free readers skip this cell until
      // release() publishes the addr with a release store.
      h->cell_ = c;
      return;
    }
  }

  // Store in the add cells.
  if (!add) {
    // Allocate a new add array.
    const uptr kInitSize = 64;
    add = (AddBucket*)InternalAlloc(kInitSize);
    internal_memset(add, 0, kInitSize);
    // +1 accounts for the cells[1] element already counted in sizeof(*add).
    add->cap = (kInitSize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add->size = 0;
    atomic_store(&b->add, (uptr)add, memory_order_relaxed);
  }
  if (add->size == add->cap) {
    // Grow existing add array.
    uptr oldsize = sizeof(*add) + (add->cap - 1) * sizeof(add->cells[0]);
    uptr newsize = oldsize * 2;
    AddBucket *add1 = (AddBucket*)InternalAlloc(newsize);
    internal_memset(add1, 0, newsize);
    add1->cap = (newsize - sizeof(*add)) / sizeof(add->cells[0]) + 1;
    add1->size = add->size;
    internal_memcpy(add1->cells, add->cells, add->size * sizeof(add->cells[0]));
    InternalFree(add);
    // The add-array is only dereferenced with the mutex held, so a relaxed
    // store of the new pointer suffices.
    atomic_store(&b->add, (uptr)add1, memory_order_relaxed);
    add = add1;
  }
  // Store.
  uptr i = add->size++;
  Cell *c = &add->cells[i];
  CHECK_EQ(atomic_load(&c->addr, memory_order_relaxed), 0);
  h->addidx_ = i;
  h->cell_ = c;
}

// Counterpart of acquire(), run from the Handle destructor: publishes a
// newly created element, performs a pending removal (compacting the bucket),
// or drops the read lock taken by the add-array lookup path. No-op when no
// element is attached.
template<typename T, uptr kSize>
void AddrHashMap<T, kSize>::release(Handle *h) {
  if (!h->cell_)
    return;
  Bucket *b = h->bucket_;
  Cell *c = h->cell_;
  uptr addr1 = atomic_load(&c->addr, memory_order_relaxed);
  if (h->created_) {
    // Denote completion of insertion.
    CHECK_EQ(addr1, 0);
    // After the following store, the element becomes available
    // for lock-free reads.
    atomic_store(&c->addr, h->addr_, memory_order_release);
    b->mtx.Unlock();
  } else if (h->remove_) {
    // Denote that the cell is empty now.
    CHECK_EQ(addr1, h->addr_);
    atomic_store(&c->addr, 0, memory_order_release);
    // See if we need to compact the bucket.
    AddBucket *add = (AddBucket*)atomic_load(&b->add, memory_order_relaxed);
    if (h->addidx_ == -1U) {
      // Removed from embed array, move an add element into the freed cell.
      if (add && add->size != 0) {
        uptr last = --add->size;
        Cell *c1 = &add->cells[last];
        // Copy the value before the release store of the address, so a
        // lock-free reader that observes the addr also sees the value.
        c->val = c1->val;
        uptr addr1 = atomic_load(&c1->addr, memory_order_relaxed);
        atomic_store(&c->addr, addr1, memory_order_release);
        atomic_store(&c1->addr, 0, memory_order_release);
      }
    } else {
      // Removed from add array, compact it.
      uptr last = --add->size;
      Cell *c1 = &add->cells[last];
      if (c != c1) {
        // Plain copy + relaxed store is fine: add cells are only read with
        // the mutex held, and we hold the write lock here.
        *c = *c1;
        atomic_store(&c1->addr, 0, memory_order_relaxed);
      }
    }
    if (add && add->size == 0) {
      // FIXME(dvyukov): free add?
    }
    b->mtx.Unlock();
  } else {
    CHECK_EQ(addr1, h->addr_);
    // addidx_ != -1U means acquire() found the element in the add-array and
    // still holds the read lock; the lock-free embed path holds no lock.
    if (h->addidx_ != -1U)
      b->mtx.ReadUnlock();
  }
}

// Cheap hash: shift-add and xor-shift mixing of the address bits, reduced
// modulo the table size.
template<typename T, uptr kSize>
uptr AddrHashMap<T, kSize>::calcHash(uptr addr) {
  addr += addr << 10;
  addr ^= addr >> 6;
  return addr % kSize;
}

}  // namespace __sanitizer

#endif  // SANITIZER_ADDRHASHMAP_H