/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "slab.h"
#include "macros.h"
#include "u_atomic.h"
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifndef NDEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1. (Both kinds of pointer are at least
    *   word-aligned, so bit 0 is free to serve as the tag; see the assert
    *   in slab_add_new_page.)
    */
   intptr_t owner;

#ifndef NDEBUG
   intptr_t magic;
#endif
};

/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;
   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};


static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header*)
          ((uint8_t*)&page[1] + (parent->element_size * index));
}
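
/* For illustration, a worked example of the layout arithmetic above (an
 * assumption for concreteness: a 64-bit release build, so the magic field
 * is compiled out and sizeof(intptr_t) == 8):
 *
 *    item_size    = 24
 *    sizeof(struct slab_element_header) = 16   (next + owner)
 *    element_size = ALIGN_POT(16 + 24, 8) = 40
 *
 * Element i then starts at (uint8_t*)&page[1] + 40 * i, and the pointer
 * returned to the user by slab_alloc() is &elt[1], i.e. 16 bytes past the
 * start of the element header.
 */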

/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}

/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   simple_mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN_POT(sizeof(struct slab_element_header) + item_size,
                                    sizeof(intptr_t));
   parent->num_elements = num_items;
}

void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   simple_mtx_destroy(&parent->mutex);
}

/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}
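
/* A minimal usage sketch (not part of this file; "struct job" and the thread
 * function are hypothetical): one parent pool shared by several threads,
 * each thread owning its own child pool.
 *
 *    static struct slab_parent_pool parent;
 *
 *    void init_pools(void)
 *    {
 *       slab_create_parent(&parent, sizeof(struct job), 64);
 *    }
 *
 *    int worker_thread(void *arg)
 *    {
 *       struct slab_child_pool pool;
 *       slab_create_child(&pool, &parent);
 *
 *       struct job *job = slab_alloc(&pool);
 *       // ... use the job ...
 *       slab_free(&pool, job);
 *
 *       slab_destroy_child(&pool);
 *       return 0;
 *    }
 */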

/**
 * Destroy the child pool.
 *
 * Pages associated with the pool will be orphaned. They are eventually freed
 * when all objects in them are freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   if (!pool->parent)
      return; /* the slab probably wasn't even created */

   simple_mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   simple_mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}

static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}
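
/* Continuing the worked example above (64-bit release build, item_size = 24,
 * hence element_size = 40): with num_items = 64, each page is a single
 * malloc of sizeof(struct slab_page_header) + 64 * 40 = 8 + 2560 = 2568
 * bytes, carved into 64 elements that are pushed onto the free list.
 */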

/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      simple_mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      simple_mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}

/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as the pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header*)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: The caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   if (pool->parent)
      simple_mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);
   } else {
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}
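
/* A minimal sketch of the cross-pool free path (pool_a, pool_b and
 * "struct job" are hypothetical): an object allocated from thread A's child
 * pool may be freed by thread B. The element is pushed onto pool_a's
 * "migrated" list under the parent mutex, and thread A reclaims it on a
 * later slab_alloc() once its own free list runs empty.
 *
 *    // Thread A:
 *    struct job *job = slab_alloc(&pool_a);
 *    // ... hand the job over to thread B ...
 *
 *    // Thread B:
 *    slab_free(&pool_b, job);   // migrates the element back to pool_a
 */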

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *mempool)
{
   return slab_alloc(&mempool->child);
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *mempool, void *ptr)
{
   slab_free(&mempool->child, ptr);
}

void
slab_destroy(struct slab_mempool *mempool)
{
   slab_destroy_child(&mempool->child);
   slab_destroy_parent(&mempool->parent);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *mempool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&mempool->parent, item_size, num_items);
   slab_create_child(&mempool->child, &mempool->parent);
}
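
/* A minimal single-threaded usage sketch ("struct node" is hypothetical):
 *
 *    struct slab_mempool mempool;
 *    slab_create(&mempool, sizeof(struct node), 64);
 *
 *    struct node *n = slab_alloc_st(&mempool);
 *    // ... use the node ...
 *    slab_free_st(&mempool, n);
 *
 *    slab_destroy(&mempool);
 */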
3211.1Schopps