/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "slab.h"
#include "macros.h"
#include "u_atomic.h"
#include <stdint.h>
#include <stdbool.h>
#include <stdlib.h> /* malloc, free */
#include <string.h>

#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifndef NDEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifndef NDEBUG
   intptr_t magic;
#endif
};
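
/* A minimal sketch of the 'owner' encoding used throughout this file: pages
 * come from malloc() and element_size is rounded up to sizeof(intptr_t), so
 * bit 0 of a real pointer is always clear, which leaves it free to serve as
 * a tag (see slab_add_new_page(), slab_destroy_child() and
 * slab_free_orphaned() below):
 *
 *    elt->owner = (intptr_t)pool;       // element owned by a live child pool
 *    elt->owner = (intptr_t)page | 1;   // element on an orphaned page
 *
 *    page = (struct slab_page_header *)(owner & ~(intptr_t)1);   // decode
 */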

/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;
   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};


static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header*)
          ((uint8_t*)&page[1] + (parent->element_size * index));
}

/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}

/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   simple_mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN_POT(sizeof(struct slab_element_header) + item_size,
                                    sizeof(intptr_t));
   parent->num_elements = num_items;
}
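
/* Worked sizing example (assuming a 64-bit build with NDEBUG defined, so the
 * element header is just 'next' + 'owner' = 16 bytes): item_size = 24 gives
 *
 *    element_size = ALIGN_POT(16 + 24, 8) = 40 bytes per element,
 *
 * and slab_add_new_page() below then allocates each page as one malloc of
 * sizeof(struct slab_page_header) + num_items * 40 bytes.
 */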

void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   simple_mtx_destroy(&parent->mutex);
}

/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}

/**
 * Destroy the child pool.
 *
 * Pages associated with the pool will be orphaned. They are eventually freed
 * when all objects in them are freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   if (!pool->parent)
      return; /* the slab probably wasn't even created */

   simple_mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   simple_mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}

static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}

/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      simple_mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      simple_mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}

/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as the pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header*)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: The caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   if (pool->parent)
      simple_mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);
   } else {
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}
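
/* Illustrative sketch of the cross-pool pattern described above (the
 * struct my_object type is hypothetical; everything else is defined in this
 * file): each thread owns one child pool and all child pools share a parent.
 *
 *    struct slab_parent_pool parent;
 *    struct slab_child_pool child_a, child_b;   // one per thread
 *
 *    slab_create_parent(&parent, sizeof(struct my_object), 32);
 *    slab_create_child(&child_a, &parent);
 *    slab_create_child(&child_b, &parent);
 *
 *    struct my_object *obj = slab_alloc(&child_a);   // on thread A
 *    slab_free(&child_b, obj);   // on thread B: the element lands on
 *                                // child_a's 'migrated' list under the
 *                                // parent mutex, and thread A reclaims it
 *                                // during its next slab_alloc() refill
 */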

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *mempool)
{
   return slab_alloc(&mempool->child);
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *mempool, void *ptr)
{
   slab_free(&mempool->child, ptr);
}

void
slab_destroy(struct slab_mempool *mempool)
{
   slab_destroy_child(&mempool->child);
   slab_destroy_parent(&mempool->parent);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *mempool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&mempool->parent, item_size, num_items);
   slab_create_child(&mempool->child, &mempool->parent);
}
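
/* Illustrative single-threaded lifecycle using the slab_mempool wrappers
 * above (struct my_object is hypothetical):
 *
 *    struct slab_mempool pool;
 *
 *    slab_create(&pool, sizeof(struct my_object), 64);
 *    struct my_object *obj = slab_alloc_st(&pool);
 *    ...
 *    slab_free_st(&pool, obj);
 *    slab_destroy(&pool);
 */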