/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_PRIV_H_
#define FREEDRENO_PRIV_H_

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <xf86drm.h>

#include "util/hash_table.h"
#include "util/list.h"
#include "util/log.h"
#include "util/simple_mtx.h"
#include "util/u_atomic.h"
#include "util/u_debug.h"
#include "util/u_math.h"

#include "freedreno_dev_info.h"
#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"

extern simple_mtx_t table_lock;

/*
 * Stupid/simple growable array implementation:
 */

#define MAX_ARRAY_SIZE ((unsigned short)~0)

static inline void
grow(void **ptr, uint16_t nr, uint16_t *max, uint16_t sz)
{
   assert((nr + 1) < MAX_ARRAY_SIZE);
   if ((nr + 1) > *max) {
      if (*max > MAX_ARRAY_SIZE / 2)
         *max = MAX_ARRAY_SIZE;
      else if ((*max * 2) < (nr + 1))
         *max = nr + 5;
      else
         *max = *max * 2;
      *ptr = realloc(*ptr, *max * sz);
   }
}

#define DECLARE_ARRAY(type, name)                                              \
   unsigned short nr_##name, max_##name;                                       \
   type *name;

#define APPEND(x, name, ...)                                                   \
   ({                                                                          \
      grow((void **)&(x)->name, (x)->nr_##name, &(x)->max_##name,              \
           sizeof((x)->name[0]));                                              \
      (x)->name[(x)->nr_##name] = __VA_ARGS__;                                 \
      (x)->nr_##name++;                                                        \
   })
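
/* A minimal usage sketch ("struct foo"/"struct bar" are hypothetical; see
 * fd_bo::fences below for a real user):
 *
 *    struct foo {
 *       DECLARE_ARRAY(struct bar, bars);   // declares nr_bars, max_bars, bars
 *    };
 *
 *    // Grows f->bars as needed; evaluates to the index of the new element:
 *    unsigned idx = APPEND(f, bars, (struct bar){ .baz = 42 });
 */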
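
/* Read x exactly once, as a volatile access, so the compiler cannot cache
 * or re-load the value; used by lock-free readers.
 */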
#define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
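
/* Per-device function table, implemented by the kernel-specific backend
 * (e.g. msm):
 */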
struct fd_device_funcs {
   int (*bo_new_handle)(struct fd_device *dev, uint32_t size, uint32_t flags,
                        uint32_t *handle);
   struct fd_bo *(*bo_from_handle)(struct fd_device *dev, uint32_t size,
                                   uint32_t handle);
   struct fd_pipe *(*pipe_new)(struct fd_device *dev, enum fd_pipe_id id,
                               unsigned prio);
   void (*destroy)(struct fd_device *dev);
};

struct fd_bo_bucket {
   uint32_t size;
   struct list_head list;
};

struct fd_bo_cache {
   struct fd_bo_bucket cache_bucket[14 * 4];
   int num_buckets;
   time_t time;
};

struct fd_device {
   int fd;
   enum fd_version version;
   int32_t refcnt;

   /* Tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
    *
    *   handle_table: maps handle to fd_bo
    *   name_table: maps flink name to fd_bo
    *
    * We end up needing two tables because DRM_IOCTL_GEM_OPEN always
    * returns a new handle, so we need to check whether the bo is already
    * open in the process before calling gem-open.
    */
   struct hash_table *handle_table, *name_table;

   const struct fd_device_funcs *funcs;

   struct fd_bo_cache bo_cache;
   struct fd_bo_cache ring_cache;

   bool has_cached_coherent;

   bool closefd; /* call close(fd) upon destruction */

   /* just for valgrind: */
   int bo_size;

   /**
    * List of deferred submits, protected by submit_lock.  The deferred
    * submits are tracked globally per-device, so that the order in which
    * they are passed off to the kernel is preserved even if they execute
    * in a different order on the kernel side (i.e. due to different
    * priority submitqueues, etc).  Once the kernel has them, it is the
    * fences' job to preserve correct order of execution.
    */
   struct list_head deferred_submits;
   unsigned deferred_cmds;
   simple_mtx_t submit_lock;
};
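
/* Iterators for submit lists (in practice fd_device::deferred_submits,
 * which requires holding fd_device::submit_lock):
 */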
#define foreach_submit(name, list) \
   list_for_each_entry(struct fd_submit, name, list, node)
#define foreach_submit_safe(name, list) \
   list_for_each_entry_safe(struct fd_submit, name, list, node)
#define last_submit(list) \
   list_last_entry(list, struct fd_submit, node)
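
/* Bucket cache of idle buffers, so freed allocations can be recycled
 * without a round-trip to the kernel:
 */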
void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
struct fd_bo *fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size,
                                uint32_t flags);
int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);

/* for where @table_lock is already held: */
void fd_bo_del_locked(struct fd_bo *bo);
void fd_device_del_locked(struct fd_device *dev);
void fd_pipe_del_locked(struct fd_pipe *pipe);

struct fd_pipe_funcs {
   struct fd_ringbuffer *(*ringbuffer_new_object)(struct fd_pipe *pipe,
                                                  uint32_t size);
   struct fd_submit *(*submit_new)(struct fd_pipe *pipe);

   /**
    * Flush any deferred submits (if deferred submits are supported by
    * the pipe implementation)
    */
   void (*flush)(struct fd_pipe *pipe, uint32_t fence);

   int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param,
                    uint64_t *value);
   int (*wait)(struct fd_pipe *pipe, const struct fd_fence *fence,
               uint64_t timeout);
   void (*destroy)(struct fd_pipe *pipe);
};

struct fd_pipe_control {
   uint32_t fence;
};
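
/* Expands to the (bo, offset, orval, shift) argument group expected by the
 * OUT_RELOC()-style ringbuffer helpers, pointing at a member of the pipe's
 * control buffer:
 */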
#define control_ptr(pipe, member) \
   (pipe)->control_mem, offsetof(struct fd_pipe_control, member), 0, 0

struct fd_pipe {
   struct fd_device *dev;
   enum fd_pipe_id id;
   struct fd_dev_id dev_id;

   /**
    * Note refcnt is *not* atomic, but protected by table_lock, since the
    * table_lock is held in fd_bo_add_fence(), which is the hotpath.
    */
   int32_t refcnt;

   /**
    * Previous fence seqno allocated for this pipe.  The fd_pipe represents
    * a single timeline; fences allocated by this pipe can be compared to
    * each other, but fences from different pipes are not comparable (as
    * there could be preemption between submitqueues at different priority
    * levels).
    */
   uint32_t last_fence;

   struct fd_bo *control_mem;
   volatile struct fd_pipe_control *control;

   const struct fd_pipe_funcs *funcs;
};
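
/* Allocate the next fence seqno on the pipe's timeline (see
 * fd_pipe::last_fence) and emit a write of that seqno into @ring (the
 * write lands in fd_pipe_control::fence); returns the new seqno.
 */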
uint32_t fd_pipe_emit_fence(struct fd_pipe *pipe, struct fd_ringbuffer *ring);

static inline void
fd_pipe_flush(struct fd_pipe *pipe, uint32_t fence)
{
   if (!pipe->funcs->flush)
      return;
   pipe->funcs->flush(pipe, fence);
}

struct fd_submit_funcs {
   struct fd_ringbuffer *(*new_ringbuffer)(struct fd_submit *submit,
                                           uint32_t size,
                                           enum fd_ringbuffer_flags flags);
   int (*flush)(struct fd_submit *submit, int in_fence_fd,
                struct fd_submit_fence *out_fence);
   void (*destroy)(struct fd_submit *submit);
};

struct fd_submit {
   int32_t refcnt;
   struct fd_pipe *pipe;
   const struct fd_submit_funcs *funcs;

   struct fd_ringbuffer *primary;
   uint32_t fence;
   struct list_head node;  /* node in fd_device::deferred_submits */
};

static inline unsigned
fd_dev_count_deferred_cmds(struct fd_device *dev)
{
   unsigned nr = 0;

   simple_mtx_assert_locked(&dev->submit_lock);

   list_for_each_entry (struct fd_submit, submit, &dev->deferred_submits, node) {
      nr += fd_ringbuffer_cmd_count(submit->primary);
   }

   return nr;
}

struct fd_bo_funcs {
   int (*offset)(struct fd_bo *bo, uint64_t *offset);
   int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
   void (*cpu_fini)(struct fd_bo *bo);
   int (*madvise)(struct fd_bo *bo, int willneed);
   uint64_t (*iova)(struct fd_bo *bo);
   void (*set_name)(struct fd_bo *bo, const char *fmt, va_list ap);
   void (*destroy)(struct fd_bo *bo);
};

struct fd_bo_fence {
   /* For non-shared buffers, track the last pipe the buffer was active
    * on, and the per-pipe fence value that indicates when the buffer is
    * idle:
    */
   uint32_t fence;
   struct fd_pipe *pipe;
};

struct fd_bo {
   struct fd_device *dev;
   uint32_t size;
   uint32_t handle;
   uint32_t name;
   int32_t refcnt;
   uint32_t reloc_flags; /* flags like FD_RELOC_DUMP to use for relocs to this BO */
   uint32_t alloc_flags; /* flags that control allocation/mapping, i.e. FD_BO_x */
   uint64_t iova;
   void *map;
   const struct fd_bo_funcs *funcs;

   enum {
      NO_CACHE = 0,
      BO_CACHE = 1,
      RING_CACHE = 2,
   } bo_reuse : 2;

   /* Buffers that are shared (imported or exported) may be used in
    * other processes, so we need to fall back to the kernel to determine
    * busyness.
    */
   bool shared : 1;

   /* We need to be able to disable userspace fence synchronization for
    * special internal buffers, namely the pipe->control buffer, to avoid
    * a circular reference loop.
    */
   bool nosync : 1;

   struct list_head list; /* bucket-list entry */
   time_t free_time;      /* time when added to bucket-list */

   DECLARE_ARRAY(struct fd_bo_fence, fences);

   /* In the common case, there is no more than one fence attached.
    * This provides storage for the fences table until it grows to
    * be larger than a single element.
    */
   struct fd_bo_fence _inline_fence;
};

void fd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence);
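
/* Busy-state of a bo as determined from userspace fence tracking:
 * IDLE/BUSY when the state is known from the bo's fences, UNKNOWN when we
 * must ask the kernel (e.g. for shared buffers; see fd_bo::shared):
 */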
enum fd_bo_state {
   FD_BO_STATE_IDLE,
   FD_BO_STATE_BUSY,
   FD_BO_STATE_UNKNOWN,
};
enum fd_bo_state fd_bo_state(struct fd_bo *bo);

struct fd_bo *fd_bo_new_ring(struct fd_device *dev, uint32_t size);

#define enable_debug 0 /* TODO make dynamic */

bool fd_dbg(void);

#define INFO_MSG(fmt, ...)                                                     \
   do {                                                                        \
      if (fd_dbg())                                                            \
         mesa_logi("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__);          \
   } while (0)
#define DEBUG_MSG(fmt, ...)                                                    \
   do {                                                                        \
      if (enable_debug)                                                        \
         mesa_logd("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__);          \
   } while (0)
#define WARN_MSG(fmt, ...)                                                     \
   do {                                                                        \
      mesa_logw("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__);             \
   } while (0)
#define ERROR_MSG(fmt, ...)                                                    \
   do {                                                                        \
      mesa_loge("%s:%d: " fmt, __func__, __LINE__, ##__VA_ARGS__);             \
   } while (0)
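
/* Convert between pointers and the u64 fields used in ioctl payloads: */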
#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))

#if HAVE_VALGRIND
#include <memcheck.h>

/*
 * For tracking the backing memory (if valgrind is enabled, we force a mmap
 * for the purposes of tracking)
 */
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
   if (bo && RUNNING_ON_VALGRIND) {
      VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
   }
}

static inline void
VG_BO_FREE(struct fd_bo *bo)
{
   VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}

/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in fd_bo is used to track the buffers in cache,
 * so disable error reporting on the range while they are in cache so
 * valgrind doesn't squawk about list traversal.
 */
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
   if (RUNNING_ON_VALGRIND) {
      VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
      VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
      VALGRIND_FREELIKE_BLOCK(bo->map, 0);
   }
}
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
   if (RUNNING_ON_VALGRIND) {
      VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
      VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
      VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
   }
}
#else
static inline void
VG_BO_ALLOC(struct fd_bo *bo)
{
}
static inline void
VG_BO_FREE(struct fd_bo *bo)
{
}
static inline void
VG_BO_RELEASE(struct fd_bo *bo)
{
}
static inline void
VG_BO_OBTAIN(struct fd_bo *bo)
{
}
#endif

#define FD_DEFINE_CAST(parent, child)                                          \
   static inline struct child *to_##child(struct parent *x)                    \
   {                                                                           \
      return (struct child *)x;                                                \
   }
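
/* Usage sketch (backends do the equivalent for their subclassed types,
 * e.g. the msm backend):
 *
 *    FD_DEFINE_CAST(fd_bo, msm_bo);   // defines to_msm_bo(struct fd_bo *)
 */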

#endif /* FREEDRENO_PRIV_H_ */