freedreno_priv.h revision d8807b2f
/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2012 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifndef FREEDRENO_PRIV_H_
#define FREEDRENO_PRIV_H_

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <pthread.h>
#include <stdio.h>
#include <assert.h>

#include "libdrm_macros.h"
#include "xf86drm.h"
#include "xf86atomic.h"

#include "util_double_list.h"

#include "freedreno_drmif.h"
#include "freedreno_ringbuffer.h"
#include "drm.h"

#ifndef TRUE
#  define TRUE 1
#endif
#ifndef FALSE
#  define FALSE 0
#endif

struct fd_device_funcs {
	int (*bo_new_handle)(struct fd_device *dev, uint32_t size,
			uint32_t flags, uint32_t *handle);
	struct fd_bo * (*bo_from_handle)(struct fd_device *dev,
			uint32_t size, uint32_t handle);
	struct fd_pipe * (*pipe_new)(struct fd_device *dev, enum fd_pipe_id id);
	void (*destroy)(struct fd_device *dev);
};

struct fd_bo_bucket {
	uint32_t size;
	struct list_head list;
};

struct fd_bo_cache {
	struct fd_bo_bucket cache_bucket[14 * 4];
	int num_buckets;
	time_t time;
};

struct fd_device {
	int fd;
	enum fd_version version;
	atomic_t refcnt;

	/* tables to keep track of bo's, to avoid "evil-twin" fd_bo objects:
	 *
	 *   handle_table: maps handle to fd_bo
	 *   name_table: maps flink name to fd_bo
	 *
	 * We end up needing two tables, because DRM_IOCTL_GEM_OPEN always
	 * returns a new handle.  So we need to figure out if the bo is already
	 * open in the process first, before calling gem-open (see the
	 * illustrative sketch below this struct).
	 */
	void *handle_table, *name_table;

	const struct fd_device_funcs *funcs;

	struct fd_bo_cache bo_cache;

	int closefd;        /* call close(fd) upon destruction */

	/* just for valgrind: */
	int bo_size;
};
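
/*
 * Illustrative sketch (not part of this header): the lookup-before-open
 * flow that the evil-twin comment in struct fd_device describes.  The
 * helper names lookup_bo() and bo_from_handle() are assumptions here,
 * and locking via table_lock is elided; the real logic lives in the
 * freedreno .c files:
 *
 *    struct fd_bo * fd_bo_from_name(struct fd_device *dev, uint32_t name)
 *    {
 *        struct drm_gem_open req = { .name = name };
 *        struct fd_bo *bo = lookup_bo(dev->name_table, name);
 *        if (bo)
 *            return bo;                // already open: reuse, no new handle
 *        if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req))
 *            return NULL;              // flink name unknown to the kernel
 *        bo = lookup_bo(dev->handle_table, req.handle);  // known by handle?
 *        if (!bo)
 *            bo = bo_from_handle(dev, req.size, req.handle);
 *        return bo;
 *    }
 */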

drm_private void fd_bo_cache_init(struct fd_bo_cache *cache, int coarse);
drm_private void fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time);
drm_private struct fd_bo * fd_bo_cache_alloc(struct fd_bo_cache *cache,
		uint32_t *size, uint32_t flags);
drm_private int fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo);

/* for where @table_lock is already held: */
drm_private void fd_device_del_locked(struct fd_device *dev);

struct fd_pipe_funcs {
	struct fd_ringbuffer * (*ringbuffer_new)(struct fd_pipe *pipe, uint32_t size);
	int (*get_param)(struct fd_pipe *pipe, enum fd_param_id param, uint64_t *value);
	int (*wait)(struct fd_pipe *pipe, uint32_t timestamp, uint64_t timeout);
	void (*destroy)(struct fd_pipe *pipe);
};

struct fd_pipe {
	struct fd_device *dev;
	enum fd_pipe_id id;
	uint32_t gpu_id;
	const struct fd_pipe_funcs *funcs;
};

struct fd_ringmarker {
	struct fd_ringbuffer *ring;
	uint32_t *cur;
};

struct fd_ringbuffer_funcs {
	void * (*hostptr)(struct fd_ringbuffer *ring);
	int (*flush)(struct fd_ringbuffer *ring, uint32_t *last_start,
			int in_fence_fd, int *out_fence_fd);
	void (*grow)(struct fd_ringbuffer *ring, uint32_t size);
	void (*reset)(struct fd_ringbuffer *ring);
	void (*emit_reloc)(struct fd_ringbuffer *ring,
			const struct fd_reloc *reloc);
	uint32_t (*emit_reloc_ring)(struct fd_ringbuffer *ring,
			struct fd_ringbuffer *target, uint32_t cmd_idx,
			uint32_t submit_offset, uint32_t size);
	uint32_t (*cmd_count)(struct fd_ringbuffer *ring);
	void (*destroy)(struct fd_ringbuffer *ring);
};

struct fd_bo_funcs {
	int (*offset)(struct fd_bo *bo, uint64_t *offset);
	int (*cpu_prep)(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op);
	void (*cpu_fini)(struct fd_bo *bo);
	int (*madvise)(struct fd_bo *bo, int willneed);
	void (*destroy)(struct fd_bo *bo);
};

struct fd_bo {
	struct fd_device *dev;
	uint32_t size;
	uint32_t handle;
	uint32_t name;
	void *map;
	atomic_t refcnt;
	const struct fd_bo_funcs *funcs;

	int bo_reuse;
	struct list_head list;   /* bucket-list entry */
	time_t free_time;        /* time when added to bucket-list */
};

#define ALIGN(v,a) (((v) + (a) - 1) & ~((a) - 1))
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

#define enable_debug 0  /* TODO make dynamic */

#define INFO_MSG(fmt, ...) \
		do { drmMsg("[I] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define DEBUG_MSG(fmt, ...) \
		do if (enable_debug) { drmMsg("[D] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define WARN_MSG(fmt, ...) \
		do { drmMsg("[W] "fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
#define ERROR_MSG(fmt, ...) \
		do { drmMsg("[E] " fmt " (%s:%d)\n", \
				##__VA_ARGS__, __FUNCTION__, __LINE__); } while (0)
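
/*
 * For illustration, typical use of the message macros above; the calling
 * function and line number are appended automatically, so a call like
 *
 *    ERROR_MSG("submit failed: %d (%s)", ret, strerror(errno));
 *
 * prints e.g. "[E] submit failed: -12 (Cannot allocate memory) (flush_impl:42)"
 * (function name hypothetical).  With enable_debug at 0, the DEBUG_MSG()
 * body is dead code and compiles away, though its arguments must still
 * type-check.
 */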

#define U642VOID(x) ((void *)(unsigned long)(x))
#define VOID2U64(x) ((uint64_t)(unsigned long)(x))

static inline uint32_t
offset_bytes(void *end, void *start)
{
	return ((char *)end) - ((char *)start);
}

#ifdef HAVE_VALGRIND
#  include <memcheck.h>

/*
 * For tracking the backing memory (if valgrind is enabled, we force a
 * mmap for the purposes of tracking)
 */
static inline void VG_BO_ALLOC(struct fd_bo *bo)
{
	if (bo && RUNNING_ON_VALGRIND) {
		VALGRIND_MALLOCLIKE_BLOCK(fd_bo_map(bo), bo->size, 0, 1);
	}
}

static inline void VG_BO_FREE(struct fd_bo *bo)
{
	VALGRIND_FREELIKE_BLOCK(bo->map, 0);
}

/*
 * For tracking bo structs that are in the buffer-cache, so that valgrind
 * doesn't attribute ownership to the first one to allocate the recycled
 * bo.
 *
 * Note that the list_head in fd_bo is used to track the buffers in cache,
 * so error reporting is disabled on that range while they sit in the
 * cache; otherwise valgrind would squawk about the list traversal.
 */
static inline void VG_BO_RELEASE(struct fd_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_DISABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
		VALGRIND_MAKE_MEM_NOACCESS(bo, bo->dev->bo_size);
		VALGRIND_FREELIKE_BLOCK(bo->map, 0);
	}
}
static inline void VG_BO_OBTAIN(struct fd_bo *bo)
{
	if (RUNNING_ON_VALGRIND) {
		VALGRIND_MAKE_MEM_DEFINED(bo, bo->dev->bo_size);
		VALGRIND_ENABLE_ADDR_ERROR_REPORTING_IN_RANGE(bo, bo->dev->bo_size);
		VALGRIND_MALLOCLIKE_BLOCK(bo->map, bo->size, 0, 1);
	}
}
#else
static inline void VG_BO_ALLOC(struct fd_bo *bo) {}
static inline void VG_BO_FREE(struct fd_bo *bo) {}
static inline void VG_BO_RELEASE(struct fd_bo *bo) {}
static inline void VG_BO_OBTAIN(struct fd_bo *bo) {}
#endif


#endif /* FREEDRENO_PRIV_H_ */
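
/*
 * Editorial note, for illustration only: the VG_BO_* hooks above are
 * expected to pair with the bo lifecycle roughly as follows (a sketch,
 * assuming the bucket-cache flow of fd_bo_cache_alloc()/fd_bo_cache_free()):
 *
 *    new bo created          -> VG_BO_ALLOC()    track the backing mmap
 *    bo parked in bo_cache   -> VG_BO_RELEASE()  hide struct + map from valgrind
 *    bo recycled from cache  -> VG_BO_OBTAIN()   attribute to the new owner
 *    bo finally destroyed    -> VG_BO_FREE()     untrack the backing memory
 */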