/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
27b8e80941Smrg
/**
 * \file
 *
 * Helper library for carving out smaller allocations (called "(slab) entries")
 * from larger buffers (called "slabs").
 *
 * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
 * meaning of each heap is treated as opaque by this library.
 *
 * The library allows delaying the re-use of an entry, i.e. an entry may be
 * freed by calling \ref pb_slab_free even while the corresponding buffer
 * region is still in use by the GPU. A callback function is called to
 * determine when it is safe to allocate the entry again; the user of this
 * library is expected to maintain the required fences or similar.
 */
43b8e80941Smrg
44b8e80941Smrg#ifndef PB_SLAB_H
45b8e80941Smrg#define PB_SLAB_H
46b8e80941Smrg
47b8e80941Smrg#include "pb_buffer.h"
48b8e80941Smrg#include "util/list.h"
49b8e80941Smrg#include "os/os_thread.h"
50b8e80941Smrg
51b8e80941Smrgstruct pb_slab;
52b8e80941Smrgstruct pb_slabs;
53b8e80941Smrgstruct pb_slab_group;
54b8e80941Smrg
55b8e80941Smrg/* Descriptor of a slab entry.
56b8e80941Smrg *
57b8e80941Smrg * The user of this utility library is expected to embed this in a larger
58b8e80941Smrg * structure that describes a buffer object.
59b8e80941Smrg */
60b8e80941Smrgstruct pb_slab_entry
61b8e80941Smrg{
62b8e80941Smrg   struct list_head head;
63b8e80941Smrg   struct pb_slab *slab; /* the slab that contains this buffer */
64b8e80941Smrg   unsigned group_index; /* index into pb_slabs::groups */
65b8e80941Smrg};
66b8e80941Smrg
67b8e80941Smrg/* Descriptor of a slab from which many entries are carved out.
68b8e80941Smrg *
69b8e80941Smrg * The user of this utility library is expected to embed this in a larger
70b8e80941Smrg * structure that describes a buffer object.
71b8e80941Smrg */
72b8e80941Smrgstruct pb_slab
73b8e80941Smrg{
74b8e80941Smrg   struct list_head head;
75b8e80941Smrg
76b8e80941Smrg   struct list_head free; /* list of free pb_slab_entry structures */
77b8e80941Smrg   unsigned num_free; /* number of entries in free list */
78b8e80941Smrg   unsigned num_entries; /* total number of entries */
79b8e80941Smrg};
80b8e80941Smrg
81b8e80941Smrg/* Callback function that is called when a new slab needs to be allocated
82b8e80941Smrg * for fulfilling allocation requests of the given size from the given heap.
83b8e80941Smrg *
84b8e80941Smrg * The callback must allocate a pb_slab structure and the desired number
85b8e80941Smrg * of entries. All entries that belong to the slab must be added to the free
86b8e80941Smrg * list. Entries' pb_slab_entry structures must be initialized with the given
87b8e80941Smrg * group_index.
88b8e80941Smrg *
89b8e80941Smrg * The callback may call pb_slab functions.
90b8e80941Smrg */
91b8e80941Smrgtypedef struct pb_slab *(slab_alloc_fn)(void *priv,
92b8e80941Smrg                                        unsigned heap,
93b8e80941Smrg                                        unsigned entry_size,
94b8e80941Smrg                                        unsigned group_index);
95b8e80941Smrg
96b8e80941Smrg/* Callback function that is called when all entries of a slab have been freed.
97b8e80941Smrg *
98b8e80941Smrg * The callback must free the slab and all its entries. It must not call any of
99b8e80941Smrg * the pb_slab functions, or a deadlock (recursive mutex lock) may occur.
100b8e80941Smrg */
101b8e80941Smrgtypedef void (slab_free_fn)(void *priv, struct pb_slab *);
102b8e80941Smrg
103b8e80941Smrg/* Callback function to determine whether a given entry can already be reused.
104b8e80941Smrg */
105b8e80941Smrgtypedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);
106b8e80941Smrg
107b8e80941Smrg/* Manager of slab allocations. The user of this utility library should embed
108b8e80941Smrg * this in a structure somewhere and call pb_slab_init/deinit at init/shutdown
109b8e80941Smrg * time.
110b8e80941Smrg */
111b8e80941Smrgstruct pb_slabs
112b8e80941Smrg{
113b8e80941Smrg   mtx_t mutex;
114b8e80941Smrg
115b8e80941Smrg   unsigned min_order;
116b8e80941Smrg   unsigned num_orders;
117b8e80941Smrg   unsigned num_heaps;
118b8e80941Smrg
119b8e80941Smrg   /* One group per (heap, order) pair. */
120b8e80941Smrg   struct pb_slab_group *groups;
121b8e80941Smrg
122b8e80941Smrg   /* List of entries waiting to be reclaimed, i.e. they have been passed to
123b8e80941Smrg    * pb_slab_free, but may not be safe for re-use yet. The tail points at
124b8e80941Smrg    * the most-recently freed entry.
125b8e80941Smrg    */
126b8e80941Smrg   struct list_head reclaim;
127b8e80941Smrg
128b8e80941Smrg   void *priv;
129b8e80941Smrg   slab_can_reclaim_fn *can_reclaim;
130b8e80941Smrg   slab_alloc_fn *slab_alloc;
131b8e80941Smrg   slab_free_fn *slab_free;
132b8e80941Smrg};
133b8e80941Smrg
134b8e80941Smrgstruct pb_slab_entry *
135b8e80941Smrgpb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
136b8e80941Smrg
137b8e80941Smrgvoid
138b8e80941Smrgpb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry);
139b8e80941Smrg
140b8e80941Smrgvoid
141b8e80941Smrgpb_slabs_reclaim(struct pb_slabs *slabs);
142b8e80941Smrg
143b8e80941Smrgbool
144b8e80941Smrgpb_slabs_init(struct pb_slabs *slabs,
145b8e80941Smrg              unsigned min_order, unsigned max_order,
146b8e80941Smrg              unsigned num_heaps,
147b8e80941Smrg              void *priv,
148b8e80941Smrg              slab_can_reclaim_fn *can_reclaim,
149b8e80941Smrg              slab_alloc_fn *slab_alloc,
150b8e80941Smrg              slab_free_fn *slab_free);
151b8e80941Smrg
152b8e80941Smrgvoid
153b8e80941Smrgpb_slabs_deinit(struct pb_slabs *slabs);
154b8e80941Smrg
155b8e80941Smrg#endif
156