/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
 * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NON-INFRINGEMENT. IN NO EVENT SHALL THE COPYRIGHT HOLDERS, AUTHORS
 * AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */
2701e04c3fSmrg
/**
 * \file
 *
 * Helper library for carving out smaller allocations (called "(slab) entries")
 * from larger buffers (called "slabs").
 *
 * The library supports maintaining separate heaps (e.g. VRAM vs. GTT). The
 * meaning of each heap is treated as opaque by this library.
 *
 * The library allows delaying the re-use of an entry, i.e. an entry may be
 * freed by calling \ref pb_slab_free even while the corresponding buffer
 * region is still in use by the GPU. A callback function is called to
 * determine when it is safe to allocate the entry again; the user of this
 * library is expected to maintain the required fences or similar.
 */
4301e04c3fSmrg
4401e04c3fSmrg#ifndef PB_SLAB_H
4501e04c3fSmrg#define PB_SLAB_H
4601e04c3fSmrg
4701e04c3fSmrg#include "pb_buffer.h"
487ec681f3Smrg#include "util/simple_mtx.h"
4901e04c3fSmrg#include "util/list.h"
5001e04c3fSmrg#include "os/os_thread.h"
5101e04c3fSmrg
5201e04c3fSmrgstruct pb_slab;
5301e04c3fSmrgstruct pb_slabs;
5401e04c3fSmrgstruct pb_slab_group;
5501e04c3fSmrg
5601e04c3fSmrg/* Descriptor of a slab entry.
5701e04c3fSmrg *
5801e04c3fSmrg * The user of this utility library is expected to embed this in a larger
5901e04c3fSmrg * structure that describes a buffer object.
6001e04c3fSmrg */
6101e04c3fSmrgstruct pb_slab_entry
6201e04c3fSmrg{
6301e04c3fSmrg   struct list_head head;
6401e04c3fSmrg   struct pb_slab *slab; /* the slab that contains this buffer */
6501e04c3fSmrg   unsigned group_index; /* index into pb_slabs::groups */
667ec681f3Smrg   unsigned entry_size;
6701e04c3fSmrg};
6801e04c3fSmrg
6901e04c3fSmrg/* Descriptor of a slab from which many entries are carved out.
7001e04c3fSmrg *
7101e04c3fSmrg * The user of this utility library is expected to embed this in a larger
7201e04c3fSmrg * structure that describes a buffer object.
7301e04c3fSmrg */
7401e04c3fSmrgstruct pb_slab
7501e04c3fSmrg{
7601e04c3fSmrg   struct list_head head;
7701e04c3fSmrg
7801e04c3fSmrg   struct list_head free; /* list of free pb_slab_entry structures */
7901e04c3fSmrg   unsigned num_free; /* number of entries in free list */
8001e04c3fSmrg   unsigned num_entries; /* total number of entries */
8101e04c3fSmrg};
8201e04c3fSmrg
/* Callback function that is called when a new slab needs to be allocated
 * for fulfilling allocation requests of the given size from the given heap.
 *
 * The callback must allocate a pb_slab structure and the desired number
 * of entries. All entries that belong to the slab must be added to the free
 * list. Entries' pb_slab_entry structures must be initialized with the given
 * group_index.
 *
 * The callback may call pb_slab functions.
 *
 * \param priv        the priv pointer given to pb_slabs_init
 * \param heap        heap index of the requested allocation
 * \param entry_size  size of each entry to carve out of the new slab
 * \param group_index value to store in each entry's group_index field
 */
typedef struct pb_slab *(slab_alloc_fn)(void *priv,
                                        unsigned heap,
                                        unsigned entry_size,
                                        unsigned group_index);
9701e04c3fSmrg
/* Callback function that is called when all entries of a slab have been freed.
 *
 * The callback must free the slab and all its entries. It must not call any of
 * the pb_slab functions, or a deadlock (recursive mutex lock) may occur.
 */
typedef void (slab_free_fn)(void *priv, struct pb_slab *);
10401e04c3fSmrg
10501e04c3fSmrg/* Callback function to determine whether a given entry can already be reused.
10601e04c3fSmrg */
10701e04c3fSmrgtypedef bool (slab_can_reclaim_fn)(void *priv, struct pb_slab_entry *);
10801e04c3fSmrg
10901e04c3fSmrg/* Manager of slab allocations. The user of this utility library should embed
11001e04c3fSmrg * this in a structure somewhere and call pb_slab_init/deinit at init/shutdown
11101e04c3fSmrg * time.
11201e04c3fSmrg */
11301e04c3fSmrgstruct pb_slabs
11401e04c3fSmrg{
1157ec681f3Smrg   simple_mtx_t mutex;
11601e04c3fSmrg
11701e04c3fSmrg   unsigned min_order;
11801e04c3fSmrg   unsigned num_orders;
11901e04c3fSmrg   unsigned num_heaps;
1207ec681f3Smrg   bool allow_three_fourths_allocations;
12101e04c3fSmrg
1227ec681f3Smrg   /* One group per (heap, order, three_fourth_allocations). */
12301e04c3fSmrg   struct pb_slab_group *groups;
12401e04c3fSmrg
12501e04c3fSmrg   /* List of entries waiting to be reclaimed, i.e. they have been passed to
12601e04c3fSmrg    * pb_slab_free, but may not be safe for re-use yet. The tail points at
12701e04c3fSmrg    * the most-recently freed entry.
12801e04c3fSmrg    */
12901e04c3fSmrg   struct list_head reclaim;
13001e04c3fSmrg
13101e04c3fSmrg   void *priv;
13201e04c3fSmrg   slab_can_reclaim_fn *can_reclaim;
13301e04c3fSmrg   slab_alloc_fn *slab_alloc;
13401e04c3fSmrg   slab_free_fn *slab_free;
13501e04c3fSmrg};
13601e04c3fSmrg
/* Request an entry of at least the given size from the given heap.
 *
 * May reclaim previously freed entries and/or call slab_alloc to create a
 * new slab. Returns NULL on failure.
 */
struct pb_slab_entry *
pb_slab_alloc(struct pb_slabs *slabs, unsigned size, unsigned heap);
13901e04c3fSmrg
/* Free the given entry.
 *
 * The entry is only placed on the reclaim list; it becomes available for
 * re-use once can_reclaim reports that it is safe (the buffer region may
 * still be in use by the GPU at the time of this call).
 */
void
pb_slab_free(struct pb_slabs* slabs, struct pb_slab_entry *entry);
14201e04c3fSmrg
/* Check the reclaim list and return entries whose can_reclaim callback
 * reports them safe back to their slabs' free lists.
 *
 * Useful periodically to avoid the reclaim list growing without bound.
 */
void
pb_slabs_reclaim(struct pb_slabs *slabs);
14501e04c3fSmrg
14601e04c3fSmrgbool
14701e04c3fSmrgpb_slabs_init(struct pb_slabs *slabs,
14801e04c3fSmrg              unsigned min_order, unsigned max_order,
1497ec681f3Smrg              unsigned num_heaps, bool allow_three_fourth_allocations,
15001e04c3fSmrg              void *priv,
15101e04c3fSmrg              slab_can_reclaim_fn *can_reclaim,
15201e04c3fSmrg              slab_alloc_fn *slab_alloc,
15301e04c3fSmrg              slab_free_fn *slab_free);
15401e04c3fSmrg
15501e04c3fSmrgvoid
15601e04c3fSmrgpb_slabs_deinit(struct pb_slabs *slabs);
15701e04c3fSmrg
15801e04c3fSmrg#endif
159