/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
227e102996Smaya * 237e102996Smaya * Authors: 247e102996Smaya * Rob Clark <robclark@freedesktop.org> 257e102996Smaya */ 267e102996Smaya 277e102996Smaya#include "os/os_mman.h" 287e102996Smaya 297e102996Smaya#include "freedreno_drmif.h" 307e102996Smaya#include "freedreno_priv.h" 317e102996Smaya 327ec681f3Smrgsimple_mtx_t table_lock = _SIMPLE_MTX_INITIALIZER_NP; 337e102996Smayavoid bo_del(struct fd_bo *bo); 347e102996Smaya 357e102996Smaya/* set buffer name, and add to table, call w/ table_lock held: */ 367ec681f3Smrgstatic void 377ec681f3Smrgset_name(struct fd_bo *bo, uint32_t name) 387e102996Smaya{ 397ec681f3Smrg bo->name = name; 407ec681f3Smrg /* add ourself into the handle table: */ 417ec681f3Smrg _mesa_hash_table_insert(bo->dev->name_table, &bo->name, bo); 427e102996Smaya} 437e102996Smaya 447e102996Smaya/* lookup a buffer, call w/ table_lock held: */ 457ec681f3Smrgstatic struct fd_bo * 467ec681f3Smrglookup_bo(struct hash_table *tbl, uint32_t key) 477e102996Smaya{ 487ec681f3Smrg struct fd_bo *bo = NULL; 497ec681f3Smrg struct hash_entry *entry = _mesa_hash_table_search(tbl, &key); 507ec681f3Smrg if (entry) { 517ec681f3Smrg /* found, incr refcnt and return: */ 527ec681f3Smrg bo = fd_bo_ref(entry->data); 537ec681f3Smrg 547ec681f3Smrg /* don't break the bucket if this bo was found in one */ 557ec681f3Smrg list_delinit(&bo->list); 567ec681f3Smrg } 577ec681f3Smrg return bo; 587e102996Smaya} 597e102996Smaya 607e102996Smaya/* allocate a new buffer object, call w/ table_lock held */ 617ec681f3Smrgstatic struct fd_bo * 627ec681f3Smrgbo_from_handle(struct fd_device *dev, uint32_t size, uint32_t handle) 637e102996Smaya{ 647ec681f3Smrg struct fd_bo *bo; 657ec681f3Smrg 667ec681f3Smrg simple_mtx_assert_locked(&table_lock); 677ec681f3Smrg 687ec681f3Smrg bo = dev->funcs->bo_from_handle(dev, size, handle); 697ec681f3Smrg if (!bo) { 707ec681f3Smrg struct drm_gem_close req = { 717ec681f3Smrg .handle = handle, 727ec681f3Smrg }; 737ec681f3Smrg drmIoctl(dev->fd, DRM_IOCTL_GEM_CLOSE, 
&req); 747ec681f3Smrg return NULL; 757ec681f3Smrg } 767ec681f3Smrg bo->dev = dev; 777ec681f3Smrg bo->size = size; 787ec681f3Smrg bo->handle = handle; 797ec681f3Smrg bo->iova = bo->funcs->iova(bo); 807ec681f3Smrg bo->reloc_flags = FD_RELOC_FLAGS_INIT; 817ec681f3Smrg 827ec681f3Smrg p_atomic_set(&bo->refcnt, 1); 837ec681f3Smrg list_inithead(&bo->list); 847ec681f3Smrg /* add ourself into the handle table: */ 857ec681f3Smrg _mesa_hash_table_insert(dev->handle_table, &bo->handle, bo); 867ec681f3Smrg return bo; 877e102996Smaya} 887e102996Smaya 897e102996Smayastatic struct fd_bo * 907e102996Smayabo_new(struct fd_device *dev, uint32_t size, uint32_t flags, 917ec681f3Smrg struct fd_bo_cache *cache) 927e102996Smaya{ 937ec681f3Smrg struct fd_bo *bo = NULL; 947ec681f3Smrg uint32_t handle; 957ec681f3Smrg int ret; 967ec681f3Smrg 977ec681f3Smrg /* demote cached-coherent to WC if not supported: */ 987ec681f3Smrg if ((flags & FD_BO_CACHED_COHERENT) && !dev->has_cached_coherent) 997ec681f3Smrg flags &= ~FD_BO_CACHED_COHERENT; 1007ec681f3Smrg 1017ec681f3Smrg bo = fd_bo_cache_alloc(cache, &size, flags); 1027ec681f3Smrg if (bo) 1037ec681f3Smrg return bo; 1047e102996Smaya 1057ec681f3Smrg ret = dev->funcs->bo_new_handle(dev, size, flags, &handle); 1067ec681f3Smrg if (ret) 1077ec681f3Smrg return NULL; 1087e102996Smaya 1097ec681f3Smrg simple_mtx_lock(&table_lock); 1107ec681f3Smrg bo = bo_from_handle(dev, size, handle); 1117ec681f3Smrg simple_mtx_unlock(&table_lock); 1127e102996Smaya 1137ec681f3Smrg bo->alloc_flags = flags; 1147ec681f3Smrg bo->max_fences = 1; 1157ec681f3Smrg bo->fences = &bo->_inline_fence; 1167e102996Smaya 1177ec681f3Smrg VG_BO_ALLOC(bo); 1187e102996Smaya 1197ec681f3Smrg return bo; 1207e102996Smaya} 1217e102996Smaya 1227e102996Smayastruct fd_bo * 1237e102996Smaya_fd_bo_new(struct fd_device *dev, uint32_t size, uint32_t flags) 1247e102996Smaya{ 1257ec681f3Smrg struct fd_bo *bo = bo_new(dev, size, flags, &dev->bo_cache); 1267ec681f3Smrg if (bo) 1277ec681f3Smrg bo->bo_reuse = 
BO_CACHE; 1287ec681f3Smrg return bo; 1297e102996Smaya} 1307e102996Smaya 1317e102996Smayavoid 1327e102996Smaya_fd_bo_set_name(struct fd_bo *bo, const char *fmt, va_list ap) 1337e102996Smaya{ 1347ec681f3Smrg bo->funcs->set_name(bo, fmt, ap); 1357e102996Smaya} 1367e102996Smaya 1377e102996Smaya/* internal function to allocate bo's that use the ringbuffer cache 1387e102996Smaya * instead of the normal bo_cache. The purpose is, because cmdstream 1397e102996Smaya * bo's get vmap'd on the kernel side, and that is expensive, we want 1407e102996Smaya * to re-use cmdstream bo's for cmdstream and not unrelated purposes. 1417e102996Smaya */ 1427e102996Smayastruct fd_bo * 1437ec681f3Smrgfd_bo_new_ring(struct fd_device *dev, uint32_t size) 1447e102996Smaya{ 1457ec681f3Smrg uint32_t flags = FD_BO_GPUREADONLY | FD_BO_CACHED_COHERENT; 1467ec681f3Smrg struct fd_bo *bo = bo_new(dev, size, flags, &dev->ring_cache); 1477ec681f3Smrg if (bo) { 1487ec681f3Smrg bo->bo_reuse = RING_CACHE; 1497ec681f3Smrg bo->reloc_flags |= FD_RELOC_DUMP; 1507ec681f3Smrg fd_bo_set_name(bo, "cmdstream"); 1517ec681f3Smrg } 1527ec681f3Smrg return bo; 1537e102996Smaya} 1547e102996Smaya 1557e102996Smayastruct fd_bo * 1567e102996Smayafd_bo_from_handle(struct fd_device *dev, uint32_t handle, uint32_t size) 1577e102996Smaya{ 1587ec681f3Smrg struct fd_bo *bo = NULL; 1597e102996Smaya 1607ec681f3Smrg simple_mtx_lock(&table_lock); 1617e102996Smaya 1627ec681f3Smrg bo = lookup_bo(dev->handle_table, handle); 1637ec681f3Smrg if (bo) 1647ec681f3Smrg goto out_unlock; 1657e102996Smaya 1667ec681f3Smrg bo = bo_from_handle(dev, size, handle); 1677e102996Smaya 1687ec681f3Smrg VG_BO_ALLOC(bo); 1697e102996Smaya 1707e102996Smayaout_unlock: 1717ec681f3Smrg simple_mtx_unlock(&table_lock); 1727e102996Smaya 1737ec681f3Smrg return bo; 1747e102996Smaya} 1757e102996Smaya 1767e102996Smayastruct fd_bo * 1777e102996Smayafd_bo_from_dmabuf(struct fd_device *dev, int fd) 1787e102996Smaya{ 1797ec681f3Smrg int ret, size; 1807ec681f3Smrg uint32_t 
handle; 1817ec681f3Smrg struct fd_bo *bo; 1827e102996Smaya 1837ec681f3Smrg simple_mtx_lock(&table_lock); 1847ec681f3Smrg ret = drmPrimeFDToHandle(dev->fd, fd, &handle); 1857ec681f3Smrg if (ret) { 1867ec681f3Smrg simple_mtx_unlock(&table_lock); 1877ec681f3Smrg return NULL; 1887ec681f3Smrg } 1897e102996Smaya 1907ec681f3Smrg bo = lookup_bo(dev->handle_table, handle); 1917ec681f3Smrg if (bo) 1927ec681f3Smrg goto out_unlock; 1937e102996Smaya 1947ec681f3Smrg /* lseek() to get bo size */ 1957ec681f3Smrg size = lseek(fd, 0, SEEK_END); 1967ec681f3Smrg lseek(fd, 0, SEEK_CUR); 1977e102996Smaya 1987ec681f3Smrg bo = bo_from_handle(dev, size, handle); 1997e102996Smaya 2007ec681f3Smrg VG_BO_ALLOC(bo); 2017e102996Smaya 2027e102996Smayaout_unlock: 2037ec681f3Smrg simple_mtx_unlock(&table_lock); 2047e102996Smaya 2057ec681f3Smrg return bo; 2067e102996Smaya} 2077e102996Smaya 2087ec681f3Smrgstruct fd_bo * 2097ec681f3Smrgfd_bo_from_name(struct fd_device *dev, uint32_t name) 2107e102996Smaya{ 2117ec681f3Smrg struct drm_gem_open req = { 2127ec681f3Smrg .name = name, 2137ec681f3Smrg }; 2147ec681f3Smrg struct fd_bo *bo; 2157e102996Smaya 2167ec681f3Smrg simple_mtx_lock(&table_lock); 2177e102996Smaya 2187ec681f3Smrg /* check name table first, to see if bo is already open: */ 2197ec681f3Smrg bo = lookup_bo(dev->name_table, name); 2207ec681f3Smrg if (bo) 2217ec681f3Smrg goto out_unlock; 2227e102996Smaya 2237ec681f3Smrg if (drmIoctl(dev->fd, DRM_IOCTL_GEM_OPEN, &req)) { 2247ec681f3Smrg ERROR_MSG("gem-open failed: %s", strerror(errno)); 2257ec681f3Smrg goto out_unlock; 2267ec681f3Smrg } 2277e102996Smaya 2287ec681f3Smrg bo = lookup_bo(dev->handle_table, req.handle); 2297ec681f3Smrg if (bo) 2307ec681f3Smrg goto out_unlock; 2317e102996Smaya 2327ec681f3Smrg bo = bo_from_handle(dev, req.size, req.handle); 2337ec681f3Smrg if (bo) { 2347ec681f3Smrg set_name(bo, name); 2357ec681f3Smrg VG_BO_ALLOC(bo); 2367ec681f3Smrg } 2377e102996Smaya 2387e102996Smayaout_unlock: 2397ec681f3Smrg 
simple_mtx_unlock(&table_lock); 2407ec681f3Smrg 2417ec681f3Smrg return bo; 2427ec681f3Smrg} 2437e102996Smaya 2447ec681f3Smrgvoid 2457ec681f3Smrgfd_bo_mark_for_dump(struct fd_bo *bo) 2467ec681f3Smrg{ 2477ec681f3Smrg bo->reloc_flags |= FD_RELOC_DUMP; 2487e102996Smaya} 2497e102996Smaya 2507ec681f3Smrguint64_t 2517ec681f3Smrgfd_bo_get_iova(struct fd_bo *bo) 2527e102996Smaya{ 2537ec681f3Smrg /* ancient kernels did not support this */ 2547ec681f3Smrg assert(bo->iova != 0); 2557ec681f3Smrg return bo->iova; 2567e102996Smaya} 2577e102996Smaya 2587ec681f3Smrgstruct fd_bo * 2597ec681f3Smrgfd_bo_ref(struct fd_bo *bo) 2607e102996Smaya{ 2617ec681f3Smrg p_atomic_inc(&bo->refcnt); 2627ec681f3Smrg return bo; 2637e102996Smaya} 2647e102996Smaya 2657ec681f3Smrgstatic void 2667ec681f3Smrgbo_del_or_recycle(struct fd_bo *bo) 2677e102996Smaya{ 2687ec681f3Smrg struct fd_device *dev = bo->dev; 2697ec681f3Smrg 2707ec681f3Smrg simple_mtx_assert_locked(&table_lock); 2717ec681f3Smrg 2727ec681f3Smrg if ((bo->bo_reuse == BO_CACHE) && 2737ec681f3Smrg (fd_bo_cache_free(&dev->bo_cache, bo) == 0)) 2747ec681f3Smrg return; 2757ec681f3Smrg 2767ec681f3Smrg if ((bo->bo_reuse == RING_CACHE) && 2777ec681f3Smrg (fd_bo_cache_free(&dev->ring_cache, bo) == 0)) 2787ec681f3Smrg return; 2797ec681f3Smrg 2807ec681f3Smrg bo_del(bo); 2817e102996Smaya} 2827e102996Smaya 2837ec681f3Smrgvoid 2847ec681f3Smrgfd_bo_del_locked(struct fd_bo *bo) 2857e102996Smaya{ 2867ec681f3Smrg simple_mtx_assert_locked(&table_lock); 2877e102996Smaya 2887ec681f3Smrg if (!p_atomic_dec_zero(&bo->refcnt)) 2897ec681f3Smrg return; 2907e102996Smaya 2917ec681f3Smrg bo_del_or_recycle(bo); 2927ec681f3Smrg} 2937e102996Smaya 2947ec681f3Smrgvoid 2957ec681f3Smrgfd_bo_del(struct fd_bo *bo) 2967ec681f3Smrg{ 2977ec681f3Smrg if (!p_atomic_dec_zero(&bo->refcnt)) 2987ec681f3Smrg return; 2997e102996Smaya 3007ec681f3Smrg simple_mtx_lock(&table_lock); 3017ec681f3Smrg bo_del_or_recycle(bo); 3027ec681f3Smrg simple_mtx_unlock(&table_lock); 3037ec681f3Smrg} 
3047ec681f3Smrg 3057ec681f3Smrg/** 3067ec681f3Smrg * Cleanup fences, dropping pipe references. If 'expired' is true, only 3077ec681f3Smrg * cleanup expired fences. 3087ec681f3Smrg * 3097ec681f3Smrg * Normally we expect at most a single fence, the exception being bo's 3107ec681f3Smrg * shared between contexts 3117ec681f3Smrg */ 3127ec681f3Smrgstatic void 3137ec681f3Smrgcleanup_fences(struct fd_bo *bo, bool expired) 3147ec681f3Smrg{ 3157ec681f3Smrg simple_mtx_assert_locked(&table_lock); 3167ec681f3Smrg 3177ec681f3Smrg for (int i = 0; i < bo->nr_fences; i++) { 3187ec681f3Smrg struct fd_bo_fence *f = &bo->fences[i]; 3197ec681f3Smrg 3207ec681f3Smrg if (expired && fd_fence_before(f->pipe->control->fence, f->fence)) 3217ec681f3Smrg continue; 3227ec681f3Smrg 3237ec681f3Smrg struct fd_pipe *pipe = f->pipe; 3247ec681f3Smrg 3257ec681f3Smrg bo->nr_fences--; 3267ec681f3Smrg 3277ec681f3Smrg if (bo->nr_fences > 0) { 3287ec681f3Smrg /* Shuffle up the last entry to replace the current slot: */ 3297ec681f3Smrg bo->fences[i] = bo->fences[bo->nr_fences]; 3307ec681f3Smrg i--; 3317ec681f3Smrg } 3327ec681f3Smrg 3337ec681f3Smrg fd_pipe_del_locked(pipe); 3347ec681f3Smrg } 3357e102996Smaya} 3367e102996Smaya 3377e102996Smaya/* Called under table_lock */ 3387ec681f3Smrgvoid 3397ec681f3Smrgbo_del(struct fd_bo *bo) 3407ec681f3Smrg{ 3417ec681f3Smrg VG_BO_FREE(bo); 3427ec681f3Smrg 3437ec681f3Smrg simple_mtx_assert_locked(&table_lock); 3447ec681f3Smrg 3457ec681f3Smrg cleanup_fences(bo, false); 3467ec681f3Smrg if (bo->fences != &bo->_inline_fence) 3477ec681f3Smrg free(bo->fences); 3487ec681f3Smrg 3497ec681f3Smrg if (bo->map) 3507ec681f3Smrg os_munmap(bo->map, bo->size); 3517ec681f3Smrg 3527ec681f3Smrg /* TODO probably bo's in bucket list get removed from 3537ec681f3Smrg * handle table?? 
3547ec681f3Smrg */ 3557ec681f3Smrg 3567ec681f3Smrg if (bo->handle) { 3577ec681f3Smrg struct drm_gem_close req = { 3587ec681f3Smrg .handle = bo->handle, 3597ec681f3Smrg }; 3607ec681f3Smrg _mesa_hash_table_remove_key(bo->dev->handle_table, &bo->handle); 3617ec681f3Smrg if (bo->name) 3627ec681f3Smrg _mesa_hash_table_remove_key(bo->dev->name_table, &bo->name); 3637ec681f3Smrg drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_CLOSE, &req); 3647ec681f3Smrg } 3657ec681f3Smrg 3667ec681f3Smrg bo->funcs->destroy(bo); 3677ec681f3Smrg} 3687ec681f3Smrg 3697ec681f3Smrgstatic void 3707ec681f3Smrgbo_flush(struct fd_bo *bo) 3717e102996Smaya{ 3727ec681f3Smrg for (int i = 0; i < bo->nr_fences; i++) { 3737ec681f3Smrg struct fd_bo_fence *f = &bo->fences[i]; 3747ec681f3Smrg fd_pipe_flush(f->pipe, f->fence); 3757ec681f3Smrg } 3767e102996Smaya} 3777e102996Smaya 3787ec681f3Smrgint 3797ec681f3Smrgfd_bo_get_name(struct fd_bo *bo, uint32_t *name) 3807e102996Smaya{ 3817ec681f3Smrg if (!bo->name) { 3827ec681f3Smrg struct drm_gem_flink req = { 3837ec681f3Smrg .handle = bo->handle, 3847ec681f3Smrg }; 3857ec681f3Smrg int ret; 3867ec681f3Smrg 3877ec681f3Smrg ret = drmIoctl(bo->dev->fd, DRM_IOCTL_GEM_FLINK, &req); 3887ec681f3Smrg if (ret) { 3897ec681f3Smrg return ret; 3907ec681f3Smrg } 3917ec681f3Smrg 3927ec681f3Smrg simple_mtx_lock(&table_lock); 3937ec681f3Smrg set_name(bo, req.name); 3947ec681f3Smrg simple_mtx_unlock(&table_lock); 3957ec681f3Smrg bo->bo_reuse = NO_CACHE; 3967ec681f3Smrg bo->shared = true; 3977ec681f3Smrg bo_flush(bo); 3987ec681f3Smrg } 3997ec681f3Smrg 4007ec681f3Smrg *name = bo->name; 4017ec681f3Smrg 4027ec681f3Smrg return 0; 4037e102996Smaya} 4047e102996Smaya 4057ec681f3Smrguint32_t 4067ec681f3Smrgfd_bo_handle(struct fd_bo *bo) 4077e102996Smaya{ 4087ec681f3Smrg bo->bo_reuse = NO_CACHE; 4097ec681f3Smrg bo->shared = true; 4107ec681f3Smrg bo_flush(bo); 4117ec681f3Smrg return bo->handle; 4127e102996Smaya} 4137e102996Smaya 4147ec681f3Smrgint 4157ec681f3Smrgfd_bo_dmabuf(struct fd_bo *bo) 
4167e102996Smaya{ 4177ec681f3Smrg int ret, prime_fd; 4187e102996Smaya 4197ec681f3Smrg ret = drmPrimeHandleToFD(bo->dev->fd, bo->handle, DRM_CLOEXEC, &prime_fd); 4207ec681f3Smrg if (ret) { 4217ec681f3Smrg ERROR_MSG("failed to get dmabuf fd: %d", ret); 4227ec681f3Smrg return ret; 4237ec681f3Smrg } 4247e102996Smaya 4257ec681f3Smrg bo->bo_reuse = NO_CACHE; 4267ec681f3Smrg bo->shared = true; 4277ec681f3Smrg bo_flush(bo); 4287e102996Smaya 4297ec681f3Smrg return prime_fd; 4307ec681f3Smrg} 4317ec681f3Smrg 4327ec681f3Smrguint32_t 4337ec681f3Smrgfd_bo_size(struct fd_bo *bo) 4347ec681f3Smrg{ 4357ec681f3Smrg return bo->size; 4367e102996Smaya} 4377e102996Smaya 4387ec681f3Smrgbool 4397ec681f3Smrgfd_bo_is_cached(struct fd_bo *bo) 4407e102996Smaya{ 4417ec681f3Smrg return !!(bo->alloc_flags & FD_BO_CACHED_COHERENT); 4427e102996Smaya} 4437e102996Smaya 4447ec681f3Smrgvoid * 4457ec681f3Smrgfd_bo_map(struct fd_bo *bo) 4467e102996Smaya{ 4477ec681f3Smrg if (!bo->map) { 4487ec681f3Smrg uint64_t offset; 4497ec681f3Smrg int ret; 4507ec681f3Smrg 4517ec681f3Smrg ret = bo->funcs->offset(bo, &offset); 4527ec681f3Smrg if (ret) { 4537ec681f3Smrg return NULL; 4547ec681f3Smrg } 4557ec681f3Smrg 4567ec681f3Smrg bo->map = os_mmap(0, bo->size, PROT_READ | PROT_WRITE, MAP_SHARED, 4577ec681f3Smrg bo->dev->fd, offset); 4587ec681f3Smrg if (bo->map == MAP_FAILED) { 4597ec681f3Smrg ERROR_MSG("mmap failed: %s", strerror(errno)); 4607ec681f3Smrg bo->map = NULL; 4617ec681f3Smrg } 4627ec681f3Smrg } 4637ec681f3Smrg return bo->map; 4647e102996Smaya} 4657e102996Smaya 4667e102996Smaya/* a bit odd to take the pipe as an arg, but it's a, umm, quirk of kgsl.. 
*/ 4677ec681f3Smrgint 4687ec681f3Smrgfd_bo_cpu_prep(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t op) 4697e102996Smaya{ 4707ec681f3Smrg if (op & (FD_BO_PREP_NOSYNC | FD_BO_PREP_FLUSH)) { 4717ec681f3Smrg simple_mtx_lock(&table_lock); 4727ec681f3Smrg enum fd_bo_state state = fd_bo_state(bo); 4737ec681f3Smrg simple_mtx_unlock(&table_lock); 4747ec681f3Smrg 4757ec681f3Smrg if (state == FD_BO_STATE_IDLE) 4767ec681f3Smrg return 0; 4777ec681f3Smrg 4787ec681f3Smrg if (op & FD_BO_PREP_FLUSH) 4797ec681f3Smrg bo_flush(bo); 4807ec681f3Smrg 4817ec681f3Smrg /* If we have *only* been asked to flush, then we aren't really 4827ec681f3Smrg * interested about whether shared buffers are busy, so avoid 4837ec681f3Smrg * the kernel ioctl. 4847ec681f3Smrg */ 4857ec681f3Smrg if ((state == FD_BO_STATE_BUSY) || 4867ec681f3Smrg (op == FD_BO_PREP_FLUSH)) 4877ec681f3Smrg return -EBUSY; 4887ec681f3Smrg } 4897ec681f3Smrg 4907ec681f3Smrg /* In case the bo is referenced by a deferred submit, flush up to the 4917ec681f3Smrg * required fence now: 4927ec681f3Smrg */ 4937ec681f3Smrg bo_flush(bo); 4947ec681f3Smrg 4957ec681f3Smrg /* FD_BO_PREP_FLUSH is purely a frontend flag, and is not seen/handled 4967ec681f3Smrg * by backend or kernel: 4977ec681f3Smrg */ 4987ec681f3Smrg return bo->funcs->cpu_prep(bo, pipe, op & ~FD_BO_PREP_FLUSH); 4997e102996Smaya} 5007e102996Smaya 5017ec681f3Smrgvoid 5027ec681f3Smrgfd_bo_cpu_fini(struct fd_bo *bo) 5037e102996Smaya{ 5047ec681f3Smrg// TODO until we have cached buffers, the kernel side ioctl does nothing, 5057ec681f3Smrg// so just skip it. 
When we have cached buffers, we can make the 5067ec681f3Smrg// ioctl conditional 5077ec681f3Smrg// bo->funcs->cpu_fini(bo); 5087e102996Smaya} 5097ec681f3Smrg 5107ec681f3Smrgvoid 5117ec681f3Smrgfd_bo_add_fence(struct fd_bo *bo, struct fd_pipe *pipe, uint32_t fence) 5127ec681f3Smrg{ 5137ec681f3Smrg simple_mtx_assert_locked(&table_lock); 5147ec681f3Smrg 5157ec681f3Smrg if (bo->nosync) 5167ec681f3Smrg return; 5177ec681f3Smrg 5187ec681f3Smrg /* The common case is bo re-used on the same pipe it had previously 5197ec681f3Smrg * been used on: 5207ec681f3Smrg */ 5217ec681f3Smrg for (int i = 0; i < bo->nr_fences; i++) { 5227ec681f3Smrg struct fd_bo_fence *f = &bo->fences[i]; 5237ec681f3Smrg if (f->pipe == pipe) { 5247ec681f3Smrg assert(fd_fence_before(f->fence, fence)); 5257ec681f3Smrg f->fence = fence; 5267ec681f3Smrg return; 5277ec681f3Smrg } 5287ec681f3Smrg } 5297ec681f3Smrg 5307ec681f3Smrg cleanup_fences(bo, true); 5317ec681f3Smrg 5327ec681f3Smrg /* The first time we grow past a single fence, we need some special 5337ec681f3Smrg * handling, as we've been using the embedded _inline_fence to avoid 5347ec681f3Smrg * a separate allocation: 5357ec681f3Smrg */ 5367ec681f3Smrg if (unlikely((bo->nr_fences == 1) && 5377ec681f3Smrg (bo->fences == &bo->_inline_fence))) { 5387ec681f3Smrg bo->nr_fences = bo->max_fences = 0; 5397ec681f3Smrg bo->fences = NULL; 5407ec681f3Smrg APPEND(bo, fences, bo->_inline_fence); 5417ec681f3Smrg } 5427ec681f3Smrg 5437ec681f3Smrg APPEND(bo, fences, (struct fd_bo_fence){ 5447ec681f3Smrg .pipe = fd_pipe_ref_locked(pipe), 5457ec681f3Smrg .fence = fence, 5467ec681f3Smrg }); 5477ec681f3Smrg} 5487ec681f3Smrg 5497ec681f3Smrgenum fd_bo_state 5507ec681f3Smrgfd_bo_state(struct fd_bo *bo) 5517ec681f3Smrg{ 5527ec681f3Smrg simple_mtx_assert_locked(&table_lock); 5537ec681f3Smrg 5547ec681f3Smrg cleanup_fences(bo, true); 5557ec681f3Smrg 5567ec681f3Smrg if (bo->shared || bo->nosync) 5577ec681f3Smrg return FD_BO_STATE_UNKNOWN; 5587ec681f3Smrg 5597ec681f3Smrg if 
(!bo->nr_fences) 5607ec681f3Smrg return FD_BO_STATE_IDLE; 5617ec681f3Smrg 5627ec681f3Smrg return FD_BO_STATE_BUSY; 5637ec681f3Smrg} 5647ec681f3Smrg 565