1b8e80941Smrg/*
2b8e80941Smrg * Copyright 2016 Advanced Micro Devices, Inc.
3b8e80941Smrg * All Rights Reserved.
4b8e80941Smrg *
5b8e80941Smrg * Permission is hereby granted, free of charge, to any person obtaining
6b8e80941Smrg * a copy of this software and associated documentation files (the
7b8e80941Smrg * "Software"), to deal in the Software without restriction, including
8b8e80941Smrg * without limitation the rights to use, copy, modify, merge, publish,
9b8e80941Smrg * distribute, sub license, and/or sell copies of the Software, and to
10b8e80941Smrg * permit persons to whom the Software is furnished to do so, subject to
11b8e80941Smrg * the following conditions:
12b8e80941Smrg *
13b8e80941Smrg * The above copyright notice and this permission notice (including the
14b8e80941Smrg * next paragraph) shall be included in all copies or substantial portions
15b8e80941Smrg * of the Software.
16b8e80941Smrg *
17b8e80941Smrg * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18b8e80941Smrg * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
19b8e80941Smrg * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20b8e80941Smrg * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
21b8e80941Smrg * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
22b8e80941Smrg * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
23b8e80941Smrg * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24b8e80941Smrg * USE OR OTHER DEALINGS IN THE SOFTWARE.
25b8e80941Smrg *
26b8e80941Smrg */
27b8e80941Smrg
28b8e80941Smrg#include "pb_slab.h"
29b8e80941Smrg
30b8e80941Smrg#include "util/u_math.h"
31b8e80941Smrg#include "util/u_memory.h"
32b8e80941Smrg
/* All slab allocations from the same heap and with the same size belong
 * to the same group.
 */
struct pb_slab_group
{
   /* Slabs with allocation candidates. Typically, slabs in this list should
    * have some free entries.
    *
    * However, when the head becomes full we purposefully keep it around
    * until the next allocation attempt, at which time we try a reclaim.
    * The intention is to keep serving allocations from the same slab as long
    * as possible for better locality.
    *
    * Due to a race in new slab allocation, additional slabs in this list
    * can be fully allocated as well.
    */
   struct list_head slabs;
};
51b8e80941Smrg
52b8e80941Smrg
53b8e80941Smrgstatic void
54b8e80941Smrgpb_slab_reclaim(struct pb_slabs *slabs, struct pb_slab_entry *entry)
55b8e80941Smrg{
56b8e80941Smrg   struct pb_slab *slab = entry->slab;
57b8e80941Smrg
58b8e80941Smrg   LIST_DEL(&entry->head); /* remove from reclaim list */
59b8e80941Smrg   LIST_ADD(&entry->head, &slab->free);
60b8e80941Smrg   slab->num_free++;
61b8e80941Smrg
62b8e80941Smrg   /* Add slab to the group's list if it isn't already linked. */
63b8e80941Smrg   if (!slab->head.next) {
64b8e80941Smrg      struct pb_slab_group *group = &slabs->groups[entry->group_index];
65b8e80941Smrg      LIST_ADDTAIL(&slab->head, &group->slabs);
66b8e80941Smrg   }
67b8e80941Smrg
68b8e80941Smrg   if (slab->num_free >= slab->num_entries) {
69b8e80941Smrg      LIST_DEL(&slab->head);
70b8e80941Smrg      slabs->slab_free(slabs->priv, slab);
71b8e80941Smrg   }
72b8e80941Smrg}
73b8e80941Smrg
74b8e80941Smrgstatic void
75b8e80941Smrgpb_slabs_reclaim_locked(struct pb_slabs *slabs)
76b8e80941Smrg{
77b8e80941Smrg   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
78b8e80941Smrg      struct pb_slab_entry *entry =
79b8e80941Smrg         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
80b8e80941Smrg
81b8e80941Smrg      if (!slabs->can_reclaim(slabs->priv, entry))
82b8e80941Smrg         break;
83b8e80941Smrg
84b8e80941Smrg      pb_slab_reclaim(slabs, entry);
85b8e80941Smrg   }
86b8e80941Smrg}
87b8e80941Smrg
/* Allocate a slab entry of the given size from the given heap.
 *
 * This will try to re-use entries that have previously been freed. However,
 * if no entries are free (or all free entries are still "in flight" as
 * determined by the can_reclaim callback function), a new slab will be
 * requested via the slab_alloc callback.
 *
 * Note that slab_free can also be called by this function.
 *
 * Returns NULL only when slab_alloc fails to provide a new slab.
 */
struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap)
{
   /* Round the requested size up to the next power of two; requests below
    * the minimum order all share the smallest bucket. */
   unsigned order = MAX2(slabs->min_order, util_logbase2_ceil(size));
   unsigned group_index;
   struct pb_slab_group *group;
   struct pb_slab *slab;
   struct pb_slab_entry *entry;

   assert(order < slabs->min_order + slabs->num_orders);
   assert(heap < slabs->num_heaps);

   /* groups is a flattened 2D [heap][order - min_order] array. */
   group_index = heap * slabs->num_orders + (order - slabs->min_order);
   group = &slabs->groups[group_index];

   mtx_lock(&slabs->mutex);

   /* If there is no candidate slab at all, or the first slab has no free
    * entries, try reclaiming entries.
    */
   if (LIST_IS_EMPTY(&group->slabs) ||
       LIST_IS_EMPTY(&LIST_ENTRY(struct pb_slab, group->slabs.next, head)->free))
      pb_slabs_reclaim_locked(slabs);

   /* Remove slabs without free entries. (pb_slab_reclaim relinks a removed
    * slab once one of its entries becomes free again.) */
   while (!LIST_IS_EMPTY(&group->slabs)) {
      slab = LIST_ENTRY(struct pb_slab, group->slabs.next, head);
      if (!LIST_IS_EMPTY(&slab->free))
         break;

      LIST_DEL(&slab->head);
   }

   if (LIST_IS_EMPTY(&group->slabs)) {
      /* Drop the mutex temporarily to prevent a deadlock where the allocation
       * calls back into slab functions (most likely to happen for
       * pb_slab_reclaim if memory is low).
       *
       * There's a chance that racing threads will end up allocating multiple
       * slabs for the same group, but that doesn't hurt correctness.
       */
      mtx_unlock(&slabs->mutex);
      slab = slabs->slab_alloc(slabs->priv, heap, 1 << order, group_index);
      if (!slab)
         return NULL;
      mtx_lock(&slabs->mutex);

      LIST_ADD(&slab->head, &group->slabs);
   }

   /* Hand out the first free entry of the first candidate slab. */
   entry = LIST_ENTRY(struct pb_slab_entry, slab->free.next, head);
   LIST_DEL(&entry->head);
   slab->num_free--;

   mtx_unlock(&slabs->mutex);

   return entry;
}
155b8e80941Smrg
156b8e80941Smrg/* Free the given slab entry.
157b8e80941Smrg *
158b8e80941Smrg * The entry may still be in use e.g. by in-flight command submissions. The
159b8e80941Smrg * can_reclaim callback function will be called to determine whether the entry
160b8e80941Smrg * can be handed out again by pb_slab_alloc.
161b8e80941Smrg */
162b8e80941Smrgvoid
163b8e80941Smrgpb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry)
164b8e80941Smrg{
165b8e80941Smrg   mtx_lock(&slabs->mutex);
166b8e80941Smrg   LIST_ADDTAIL(&entry->head, &slabs->reclaim);
167b8e80941Smrg   mtx_unlock(&slabs->mutex);
168b8e80941Smrg}
169b8e80941Smrg
170b8e80941Smrg/* Check if any of the entries handed to pb_slab_free are ready to be re-used.
171b8e80941Smrg *
172b8e80941Smrg * This may end up freeing some slabs and is therefore useful to try to reclaim
173b8e80941Smrg * some no longer used memory. However, calling this function is not strictly
174b8e80941Smrg * required since pb_slab_alloc will eventually do the same thing.
175b8e80941Smrg */
176b8e80941Smrgvoid
177b8e80941Smrgpb_slabs_reclaim(struct pb_slabs *slabs)
178b8e80941Smrg{
179b8e80941Smrg   mtx_lock(&slabs->mutex);
180b8e80941Smrg   pb_slabs_reclaim_locked(slabs);
181b8e80941Smrg   mtx_unlock(&slabs->mutex);
182b8e80941Smrg}
183b8e80941Smrg
184b8e80941Smrg/* Initialize the slabs manager.
185b8e80941Smrg *
186b8e80941Smrg * The minimum and maximum size of slab entries are 2^min_order and
187b8e80941Smrg * 2^max_order, respectively.
188b8e80941Smrg *
189b8e80941Smrg * priv will be passed to the given callback functions.
190b8e80941Smrg */
191b8e80941Smrgbool
192b8e80941Smrgpb_slabs_init(struct pb_slabs *slabs,
193b8e80941Smrg              unsigned min_order, unsigned max_order,
194b8e80941Smrg              unsigned num_heaps,
195b8e80941Smrg              void *priv,
196b8e80941Smrg              slab_can_reclaim_fn *can_reclaim,
197b8e80941Smrg              slab_alloc_fn *slab_alloc,
198b8e80941Smrg              slab_free_fn *slab_free)
199b8e80941Smrg{
200b8e80941Smrg   unsigned num_groups;
201b8e80941Smrg   unsigned i;
202b8e80941Smrg
203b8e80941Smrg   assert(min_order <= max_order);
204b8e80941Smrg   assert(max_order < sizeof(unsigned) * 8 - 1);
205b8e80941Smrg
206b8e80941Smrg   slabs->min_order = min_order;
207b8e80941Smrg   slabs->num_orders = max_order - min_order + 1;
208b8e80941Smrg   slabs->num_heaps = num_heaps;
209b8e80941Smrg
210b8e80941Smrg   slabs->priv = priv;
211b8e80941Smrg   slabs->can_reclaim = can_reclaim;
212b8e80941Smrg   slabs->slab_alloc = slab_alloc;
213b8e80941Smrg   slabs->slab_free = slab_free;
214b8e80941Smrg
215b8e80941Smrg   LIST_INITHEAD(&slabs->reclaim);
216b8e80941Smrg
217b8e80941Smrg   num_groups = slabs->num_orders * slabs->num_heaps;
218b8e80941Smrg   slabs->groups = CALLOC(num_groups, sizeof(*slabs->groups));
219b8e80941Smrg   if (!slabs->groups)
220b8e80941Smrg      return false;
221b8e80941Smrg
222b8e80941Smrg   for (i = 0; i < num_groups; ++i) {
223b8e80941Smrg      struct pb_slab_group *group = &slabs->groups[i];
224b8e80941Smrg      LIST_INITHEAD(&group->slabs);
225b8e80941Smrg   }
226b8e80941Smrg
227b8e80941Smrg   (void) mtx_init(&slabs->mutex, mtx_plain);
228b8e80941Smrg
229b8e80941Smrg   return true;
230b8e80941Smrg}
231b8e80941Smrg
232b8e80941Smrg/* Shutdown the slab manager.
233b8e80941Smrg *
234b8e80941Smrg * This will free all allocated slabs and internal structures, even if some
235b8e80941Smrg * of the slab entries are still in flight (i.e. if can_reclaim would return
236b8e80941Smrg * false).
237b8e80941Smrg */
238b8e80941Smrgvoid
239b8e80941Smrgpb_slabs_deinit(struct pb_slabs *slabs)
240b8e80941Smrg{
241b8e80941Smrg   /* Reclaim all slab entries (even those that are still in flight). This
242b8e80941Smrg    * implicitly calls slab_free for everything.
243b8e80941Smrg    */
244b8e80941Smrg   while (!LIST_IS_EMPTY(&slabs->reclaim)) {
245b8e80941Smrg      struct pb_slab_entry *entry =
246b8e80941Smrg         LIST_ENTRY(struct pb_slab_entry, slabs->reclaim.next, head);
247b8e80941Smrg      pb_slab_reclaim(slabs, entry);
248b8e80941Smrg   }
249b8e80941Smrg
250b8e80941Smrg   FREE(slabs->groups);
251b8e80941Smrg   mtx_destroy(&slabs->mutex);
252b8e80941Smrg}
253