/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

void bo_del(struct fd_bo *bo);
extern simple_mtx_t table_lock;

static void
add_bucket(struct fd_bo_cache *cache, int size)
{
   unsigned int i = cache->num_buckets;

   assert(i < ARRAY_SIZE(cache->cache_bucket));

   list_inithead(&cache->cache_bucket[i].list);
   cache->cache_bucket[i].size = size;
   cache->num_buckets++;
}

/**
 * @coarse: if true, only power-of-two bucket sizes, otherwise
 *    fill in intermediate sizes for a smoother size curve.
 */
void
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
{
   unsigned long size, cache_max_size = 64 * 1024 * 1024;

   /* OK, so power of two buckets was too wasteful of memory.
    * Give 3 other sizes between each power of two, to hopefully
    * cover things accurately enough.  (The alternative is
    * probably to just go for exact matching of sizes, and assume
    * that for things like composited window resize the tiled
    * width/height alignment and rounding of sizes to pages will
    * get us useful cache hit rates anyway)
    */
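   /* For reference, that gives the following bucket sizes in the
    * !coarse case:
    *
    *    4KB, 8KB, 12KB, 16KB, 20KB, 24KB, 28KB, 32KB, 40KB, 48KB,
    *    56KB, 64KB, 80KB, ...
    *
    * ie. each power of two plus the three quarter steps up to the
    * next one, while coarse mode keeps just the powers of two.
    */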
   add_bucket(cache, 4096);
   add_bucket(cache, 4096 * 2);
   if (!coarse)
      add_bucket(cache, 4096 * 3);

   /* Fill in the remaining buckets for the BO reuse cache: */
   for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
      add_bucket(cache, size);
      if (!coarse) {
         add_bucket(cache, size + size * 1 / 4);
         add_bucket(cache, size + size * 2 / 4);
         add_bucket(cache, size + size * 3 / 4);
      }
   }
}

/* Frees cached buffers that have been idle for more than a second.  A
 * time of zero bypasses the age check and drains every bucket.  Called
 * under table_lock.
 */
void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
   int i;

   if (cache->time == time)
      return;

   for (i = 0; i < cache->num_buckets; i++) {
      struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
      struct fd_bo *bo;

      while (!list_is_empty(&bucket->list)) {
         bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

         /* keep things in cache for at least 1 second: */
         if (time && ((time - bo->free_time) <= 1))
            break;

         VG_BO_OBTAIN(bo);
         list_del(&bo->list);
         bo_del(bo);
      }
   }

   cache->time = time;
}

static struct fd_bo_bucket *
get_bucket(struct fd_bo_cache *cache, uint32_t size)
{
   int i;

   /* hmm, this is what intel does, but I suppose we could calculate our
    * way to the correct bucket size rather than looping.. (see the
    * sketch below)
    */
   for (i = 0; i < cache->num_buckets; i++) {
      struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
      if (bucket->size >= size) {
         return bucket;
      }
   }

   return NULL;
}
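/* For the record, a direct computation is possible because the !coarse
 * bucket sizes are each power of two plus its quarter steps.  A rough
 * (untested) sketch of replacing the scan:
 *
 *    size = align(size, 4096);
 *    if (size > 16384) {
 *       uint32_t pot = 1u << (31 - __builtin_clz(size));
 *       size = align(size, pot / 4);   // round up to next quarter step
 *    }
 *
 * which lands exactly on a bucket size, leaving just a size->index
 * mapping.  With only a few dozen buckets the linear scan is cheap
 * enough in practice.
 */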

static struct fd_bo *
find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
{
   struct fd_bo *bo = NULL;

   /* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
    * skip the busy check.. if it is only going to be a render target
    * then we probably don't need to stall..
    *
    * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
    * (MRU, since likely to be in GPU cache), rather than head (LRU)..
    */
   simple_mtx_lock(&table_lock);
   list_for_each_entry (struct fd_bo, entry, &bucket->list, list) {
      /* the bucket is LRU ordered (frees go to the tail), so if the
       * oldest entry is still busy, the newer ones behind it will
       * be too:
       */
      if (fd_bo_state(entry) != FD_BO_STATE_IDLE)
         break;
      if (entry->alloc_flags == flags) {
         bo = entry;
         list_del(&bo->list);
         break;
      }
   }
   simple_mtx_unlock(&table_lock);

   return bo;
}

/* NOTE: size is potentially rounded up to bucket size: */
struct fd_bo *
fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
{
   struct fd_bo *bo = NULL;
   struct fd_bo_bucket *bucket;

   *size = align(*size, 4096);
   bucket = get_bucket(cache, *size);

   /* see if we can be green and recycle: */
retry:
   if (bucket) {
      *size = bucket->size;
      bo = find_in_bucket(bucket, flags);
      if (bo) {
         VG_BO_OBTAIN(bo);
         if (bo->funcs->madvise(bo, true) <= 0) {
            /* we've lost the backing pages, delete and try again: */
            simple_mtx_lock(&table_lock);
            bo_del(bo);
            simple_mtx_unlock(&table_lock);
            goto retry;
         }
         p_atomic_set(&bo->refcnt, 1);
         bo->reloc_flags = FD_RELOC_FLAGS_INIT;
         return bo;
      }
   }

   return NULL;
}
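
/* A sketch of the expected caller pattern (allocate_new_bo() is a
 * hypothetical fallback, not part of this file):
 *
 *    uint32_t size = requested_size;
 *    struct fd_bo *bo = fd_bo_cache_alloc(cache, &size, flags);
 *    if (!bo)
 *       bo = allocate_new_bo(dev, size, flags);
 *
 * Note that on a cache hit the size may have been rounded up to the
 * bucket size, so the caller should use the updated value.
 */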

int
fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
{
   if (bo->nosync || bo->shared)
      return -1;

   struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);

   /* see if we can be green and recycle: */
   if (bucket) {
      struct timespec time;

      bo->funcs->madvise(bo, false);

      clock_gettime(CLOCK_MONOTONIC, &time);

      bo->free_time = time.tv_sec;
      VG_BO_RELEASE(bo);
      list_addtail(&bo->list, &bucket->list);
      fd_bo_cache_cleanup(cache, time.tv_sec);

      return 0;
   }

   return -1;
}
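
/* And the release side, roughly (sketch):
 *
 *    if (fd_bo_cache_free(cache, bo) == 0)
 *       return;     // cache took ownership, bo parked for reuse
 *    bo_del(bo);    // not cacheable (shared/nosync or larger than max bucket)
 *
 * ie. a return of zero means the cache now owns the buffer.
 */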