/* -*- mode: C; c-file-style: "k&r"; tab-width 4; indent-tabs-mode: t; -*- */

/*
 * Copyright (C) 2016 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#ifdef HAVE_CONFIG_H
# include <config.h>
#endif

#include "freedreno_drmif.h"
#include "freedreno_priv.h"


drm_private void bo_del(struct fd_bo *bo);
drm_private extern pthread_mutex_t table_lock;

static void
add_bucket(struct fd_bo_cache *cache, int size)
{
	unsigned int i = cache->num_buckets;

	assert(i < ARRAY_SIZE(cache->cache_bucket));

	list_inithead(&cache->cache_bucket[i].list);
	cache->cache_bucket[i].size = size;
	cache->num_buckets++;
}

/**
 * @coarse: if true, only power-of-two bucket sizes, otherwise
 *    fill in for a bit smoother size curve..
 */
drm_private void
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
	add_bucket(cache, 4096);
	add_bucket(cache, 4096 * 2);
	if (!coarse)
		add_bucket(cache, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(cache, size);
		if (!coarse) {
			add_bucket(cache, size + size * 1 / 4);
			add_bucket(cache, size + size * 2 / 4);
			add_bucket(cache, size + size * 3 / 4);
		}
	}
}
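
/* For illustration, with coarse=0 the bucket sizes above work out to
 * (in KiB):
 *
 *    4, 8, 12, 16, 20, 24, 28, 32, 40, 48, 56, 64, 80, 96, 112, 128, ...
 *
 * i.e. three evenly spaced buckets between each power of two, continuing
 * up through cache_max_size.  So for example a 17KiB request, once
 * page-aligned up to 20KiB by fd_bo_cache_alloc() below, lands exactly
 * in the 20KiB bucket.
 */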

/* Frees older cached buffers.  Called under table_lock */
drm_private void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
	int i;

	if (cache->time == time)
		return;

	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			list_del(&bo->list);
			bo_del(bo);
		}
	}

	cache->time = time;
}

static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}

static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
	pthread_mutex_lock(&table_lock);
	if (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
		} else {
			bo = NULL;
		}
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}
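
/* A note on ordering, implied by the code but worth making explicit:
 * fd_bo_cache_free() below appends to the bucket tail, so the head
 * checked in find_in_bucket() is always the least-recently-freed
 * buffer and the one most likely to pass is_idle() -- which, per the
 * DRM_FREEDRENO_PREP_NOSYNC flag, polls instead of blocking.  If even
 * the head is still busy, the newer entries behind it almost certainly
 * are too, so the caller is expected to fall back to allocating a
 * fresh bo rather than stall.
 */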
1483f012e29Smrg */ 1493f012e29Smrg pthread_mutex_lock(&table_lock); 1503f012e29Smrg if (!LIST_IS_EMPTY(&bucket->list)) { 1513f012e29Smrg bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list); 1523f012e29Smrg /* TODO check for compatible flags? */ 1533f012e29Smrg if (is_idle(bo)) { 1543f012e29Smrg list_del(&bo->list); 1553f012e29Smrg } else { 1563f012e29Smrg bo = NULL; 1573f012e29Smrg } 1583f012e29Smrg } 1593f012e29Smrg pthread_mutex_unlock(&table_lock); 1603f012e29Smrg 1613f012e29Smrg return bo; 1623f012e29Smrg} 1633f012e29Smrg 1643f012e29Smrg/* NOTE: size is potentially rounded up to bucket size: */ 1653f012e29Smrgdrm_private struct fd_bo * 1663f012e29Smrgfd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags) 1673f012e29Smrg{ 1683f012e29Smrg struct fd_bo *bo = NULL; 1693f012e29Smrg struct fd_bo_bucket *bucket; 1703f012e29Smrg 1713f012e29Smrg *size = ALIGN(*size, 4096); 1723f012e29Smrg bucket = get_bucket(cache, *size); 1733f012e29Smrg 1743f012e29Smrg /* see if we can be green and recycle: */ 1753f012e29Smrgretry: 1763f012e29Smrg if (bucket) { 1773f012e29Smrg *size = bucket->size; 1783f012e29Smrg bo = find_in_bucket(bucket, flags); 1793f012e29Smrg if (bo) { 1803f012e29Smrg if (bo->funcs->madvise(bo, TRUE) <= 0) { 1813f012e29Smrg /* we've lost the backing pages, delete and try again: */ 1823f012e29Smrg pthread_mutex_lock(&table_lock); 1833f012e29Smrg bo_del(bo); 1843f012e29Smrg pthread_mutex_unlock(&table_lock); 1853f012e29Smrg goto retry; 1863f012e29Smrg } 1873f012e29Smrg atomic_set(&bo->refcnt, 1); 1883f012e29Smrg fd_device_ref(bo->dev); 1893f012e29Smrg return bo; 1903f012e29Smrg } 1913f012e29Smrg } 1923f012e29Smrg 1933f012e29Smrg return NULL; 1943f012e29Smrg} 1953f012e29Smrg 1963f012e29Smrgdrm_private int 1973f012e29Smrgfd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo) 1983f012e29Smrg{ 1993f012e29Smrg struct fd_bo_bucket *bucket = get_bucket(cache, bo->size); 2003f012e29Smrg 2013f012e29Smrg /* see if we can be green and recycle: */ 2023f012e29Smrg if (bucket) { 2033f012e29Smrg struct timespec time; 2043f012e29Smrg 2053f012e29Smrg bo->funcs->madvise(bo, FALSE); 2063f012e29Smrg 2073f012e29Smrg clock_gettime(CLOCK_MONOTONIC, &time); 2083f012e29Smrg 2093f012e29Smrg bo->free_time = time.tv_sec; 2103f012e29Smrg list_addtail(&bo->list, &bucket->list); 2113f012e29Smrg fd_bo_cache_cleanup(cache, time.tv_sec); 2123f012e29Smrg 2133f012e29Smrg /* bo's in the bucket cache don't have a ref and 2143f012e29Smrg * don't hold a ref to the dev: 2153f012e29Smrg */ 2163f012e29Smrg fd_device_del_locked(bo->dev); 2173f012e29Smrg 2183f012e29Smrg return 0; 2193f012e29Smrg } 2203f012e29Smrg 2213f012e29Smrg return -1; 2223f012e29Smrg} 223