/*
 * Copyright 2010 Marek Olšák <maraeo@gmail.com>
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE. */

#include "slab.h"
#include "macros.h"
#include "u_atomic.h"
#include <stdint.h>
#include <stdbool.h>
#include <string.h>

#define SLAB_MAGIC_ALLOCATED 0xcafe4321
#define SLAB_MAGIC_FREE 0x7ee01234

#ifndef NDEBUG
#define SET_MAGIC(element, value)   (element)->magic = (value)
#define CHECK_MAGIC(element, value) assert((element)->magic == (value))
#else
#define SET_MAGIC(element, value)
#define CHECK_MAGIC(element, value)
#endif

/* One array element within a big buffer. */
struct slab_element_header {
   /* The next element in the free or migrated list. */
   struct slab_element_header *next;

   /* This is either
    * - a pointer to the child pool to which this element belongs, or
    * - a pointer to the orphaned page of the element, with the least
    *   significant bit set to 1.
    */
   intptr_t owner;

#ifndef NDEBUG
   intptr_t magic;
#endif
};
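/* Worked example of the owner encoding above (addresses are illustrative and
 * not taken from this file): an element whose child pool at 0x7f00 is still
 * alive stores owner == 0x7f00 (bit 0 clear); once that pool is destroyed,
 * owner is rewritten to the element's page address with bit 0 set, e.g.
 * 0x9000 | 1 == 0x9001. slab_free() re-reads this field under the parent
 * mutex and uses the bit to decide between migrating the element back to its
 * owner and releasing it as part of an orphaned page.
 */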

/* The page is an array of allocations in one block. */
struct slab_page_header {
   union {
      /* Next page in the same child pool. */
      struct slab_page_header *next;

      /* Number of remaining, non-freed elements (for orphaned pages). */
      unsigned num_remaining;
   } u;
   /* Memory after the last member is dedicated to the page itself.
    * The allocated size is always larger than this structure.
    */
};
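/* Illustrative layout of one page as allocated by slab_add_new_page() below
 * (element_size already includes the element header plus padding to
 * sizeof(intptr_t)):
 *
 *   +------------------+-----------+-----------+-----+---------------+
 *   | slab_page_header | element 0 | element 1 | ... | element N - 1 |
 *   +------------------+-----------+-----------+-----+---------------+
 *                       \________ element_size bytes each ________/
 *
 * slab_get_element() resolves element i to
 * (uint8_t *)&page[1] + parent->element_size * i.
 */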


static struct slab_element_header *
slab_get_element(struct slab_parent_pool *parent,
                 struct slab_page_header *page, unsigned index)
{
   return (struct slab_element_header*)
          ((uint8_t*)&page[1] + (parent->element_size * index));
}

/* The given object/element belongs to an orphaned page (i.e. the owning child
 * pool has been destroyed). Mark the element as freed and free the whole page
 * when no elements are left in it.
 */
static void
slab_free_orphaned(struct slab_element_header *elt)
{
   struct slab_page_header *page;

   assert(elt->owner & 1);

   page = (struct slab_page_header *)(elt->owner & ~(intptr_t)1);
   if (!p_atomic_dec_return(&page->u.num_remaining))
      free(page);
}

/**
 * Create a parent pool for the allocation of same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create_parent(struct slab_parent_pool *parent,
                   unsigned item_size,
                   unsigned num_items)
{
   simple_mtx_init(&parent->mutex, mtx_plain);
   parent->element_size = ALIGN_POT(sizeof(struct slab_element_header) + item_size,
                                    sizeof(intptr_t));
   parent->num_elements = num_items;
}

void
slab_destroy_parent(struct slab_parent_pool *parent)
{
   simple_mtx_destroy(&parent->mutex);
}

/**
 * Create a child pool linked to the given parent.
 */
void slab_create_child(struct slab_child_pool *pool,
                       struct slab_parent_pool *parent)
{
   pool->parent = parent;
   pool->pages = NULL;
   pool->free = NULL;
   pool->migrated = NULL;
}
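/* Example: one parent pool shared by several per-thread child pools. This is
 * only a usage sketch; "struct my_transfer", the batch size and the thread
 * setup are hypothetical.
 *
 *    struct slab_parent_pool parent;
 *    slab_create_parent(&parent, sizeof(struct my_transfer), 32);
 *
 *    // In each thread (or per context owned by one thread):
 *    struct slab_child_pool child;
 *    slab_create_child(&child, &parent);
 *
 *    struct my_transfer *t = slab_alloc(&child);
 *    ...
 *    slab_free(&child, t);
 *    slab_destroy_child(&child);
 *
 *    // Once every child pool has been destroyed:
 *    slab_destroy_parent(&parent);
 */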

/**
 * Destroy the child pool.
 *
 * Pages associated with the pool will be orphaned. They are eventually freed
 * when all objects in them are freed.
 */
void slab_destroy_child(struct slab_child_pool *pool)
{
   if (!pool->parent)
      return; /* the slab probably wasn't even created */

   simple_mtx_lock(&pool->parent->mutex);

   while (pool->pages) {
      struct slab_page_header *page = pool->pages;
      pool->pages = page->u.next;
      p_atomic_set(&page->u.num_remaining, pool->parent->num_elements);

      for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
         struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
         p_atomic_set(&elt->owner, (intptr_t)page | 1);
      }
   }

   while (pool->migrated) {
      struct slab_element_header *elt = pool->migrated;
      pool->migrated = elt->next;
      slab_free_orphaned(elt);
   }

   simple_mtx_unlock(&pool->parent->mutex);

   while (pool->free) {
      struct slab_element_header *elt = pool->free;
      pool->free = elt->next;
      slab_free_orphaned(elt);
   }

   /* Guard against use-after-free. */
   pool->parent = NULL;
}
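/* Sketch of the orphaning behaviour handled above (child_a and child_b are
 * hypothetical child pools of the same parent):
 *
 *    void *obj = slab_alloc(&child_a);
 *    slab_destroy_child(&child_a);   // pages of child_a become orphaned
 *    slab_free(&child_b, obj);       // still valid: the element is counted
 *                                    // off its orphaned page, and the page
 *                                    // is released with its last element
 */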

static bool
slab_add_new_page(struct slab_child_pool *pool)
{
   struct slab_page_header *page = malloc(sizeof(struct slab_page_header) +
      pool->parent->num_elements * pool->parent->element_size);

   if (!page)
      return false;

   for (unsigned i = 0; i < pool->parent->num_elements; ++i) {
      struct slab_element_header *elt = slab_get_element(pool->parent, page, i);
      elt->owner = (intptr_t)pool;
      assert(!(elt->owner & 1));

      elt->next = pool->free;
      pool->free = elt;
      SET_MAGIC(elt, SLAB_MAGIC_FREE);
   }

   page->u.next = pool->pages;
   pool->pages = page;

   return true;
}

/**
 * Allocate an object from the child pool. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 */
void *
slab_alloc(struct slab_child_pool *pool)
{
   struct slab_element_header *elt;

   if (!pool->free) {
      /* First, collect elements that belong to us but were freed from a
       * different child pool.
       */
      simple_mtx_lock(&pool->parent->mutex);
      pool->free = pool->migrated;
      pool->migrated = NULL;
      simple_mtx_unlock(&pool->parent->mutex);

      /* Now allocate a new page. */
      if (!pool->free && !slab_add_new_page(pool))
         return NULL;
   }

   elt = pool->free;
   pool->free = elt->next;

   CHECK_MAGIC(elt, SLAB_MAGIC_FREE);
   SET_MAGIC(elt, SLAB_MAGIC_ALLOCATED);

   return &elt[1];
}
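/* Example call (illustrative; "struct my_item" is hypothetical and must not
 * be larger than the item_size given to slab_create_parent()):
 *
 *    struct my_item *item = slab_alloc(&child);
 *    if (!item)
 *       return false;   // allocating a fresh page failed
 */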

/**
 * Free an object allocated from the slab. Single-threaded (i.e. the caller
 * must ensure that no operation happens on the same child pool in another
 * thread).
 *
 * Freeing an object in a different child pool from the one where it was
 * allocated is allowed, as long as both pools belong to the same parent. No
 * additional locking is required in this case.
 */
void slab_free(struct slab_child_pool *pool, void *ptr)
{
   struct slab_element_header *elt = ((struct slab_element_header*)ptr - 1);
   intptr_t owner_int;

   CHECK_MAGIC(elt, SLAB_MAGIC_ALLOCATED);
   SET_MAGIC(elt, SLAB_MAGIC_FREE);

   if (p_atomic_read(&elt->owner) == (intptr_t)pool) {
      /* This is the simple case: The caller guarantees that we can safely
       * access the free list.
       */
      elt->next = pool->free;
      pool->free = elt;
      return;
   }

   /* The slow case: migration or an orphaned page. */
   if (pool->parent)
      simple_mtx_lock(&pool->parent->mutex);

   /* Note: we _must_ re-read elt->owner here because the owning child pool
    * may have been destroyed by another thread in the meantime.
    */
   owner_int = p_atomic_read(&elt->owner);

   if (!(owner_int & 1)) {
      struct slab_child_pool *owner = (struct slab_child_pool *)owner_int;
      elt->next = owner->migrated;
      owner->migrated = elt;
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);
   } else {
      if (pool->parent)
         simple_mtx_unlock(&pool->parent->mutex);

      slab_free_orphaned(elt);
   }
}
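/* Example of a cross-pool free (illustrative): an object allocated from one
 * child pool may be handed to another thread and freed through that thread's
 * child pool, as long as both pools share the same parent:
 *
 *    // Thread A
 *    struct my_item *item = slab_alloc(&child_a);
 *
 *    // Thread B, later
 *    slab_free(&child_b, item);   // migrates the element back to child_a
 *                                 // (or frees its orphaned page) under the
 *                                 // parent mutex
 */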

/**
 * Allocate an object from the slab. Single-threaded (no mutex).
 */
void *
slab_alloc_st(struct slab_mempool *mempool)
{
   return slab_alloc(&mempool->child);
}

/**
 * Free an object allocated from the slab. Single-threaded (no mutex).
 */
void
slab_free_st(struct slab_mempool *mempool, void *ptr)
{
   slab_free(&mempool->child, ptr);
}

void
slab_destroy(struct slab_mempool *mempool)
{
   slab_destroy_child(&mempool->child);
   slab_destroy_parent(&mempool->parent);
}

/**
 * Create an allocator for same-sized objects.
 *
 * \param item_size     Size of one object.
 * \param num_items     Number of objects to allocate at once.
 */
void
slab_create(struct slab_mempool *mempool,
            unsigned item_size,
            unsigned num_items)
{
   slab_create_parent(&mempool->parent, item_size, num_items);
   slab_create_child(&mempool->child, &mempool->parent);
}
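/* Example: the slab_mempool convenience API above (slab_create,
 * slab_alloc_st, slab_free_st, slab_destroy). A minimal sketch; the element
 * type and batch size are illustrative.
 *
 *    struct slab_mempool pool;
 *    slab_create(&pool, sizeof(struct my_item), 64);
 *
 *    struct my_item *item = slab_alloc_st(&pool);
 *    ...
 *    slab_free_st(&pool, item);
 *
 *    slab_destroy(&pool);
 */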