/*
 * Copyright (C) 2012-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#include "freedreno_drmif.h"
#include "freedreno_priv.h"

void bo_del(struct fd_bo *bo);
extern pthread_mutex_t table_lock;

static void
add_bucket(struct fd_bo_cache *cache, int size)
{
	unsigned int i = cache->num_buckets;

	assert(i < ARRAY_SIZE(cache->cache_bucket));

	list_inithead(&cache->cache_bucket[i].list);
	cache->cache_bucket[i].size = size;
	cache->num_buckets++;
}

/**
 * @coarse: if true, only power-of-two bucket sizes, otherwise
 *    fill in intermediate sizes for a bit smoother size curve..
 */
void
fd_bo_cache_init(struct fd_bo_cache *cache, int coarse)
{
	unsigned long size, cache_max_size = 64 * 1024 * 1024;

	/* OK, so power of two buckets was too wasteful of memory.
	 * Give 3 other sizes between each power of two, to hopefully
	 * cover things accurately enough.  (The alternative is
	 * probably to just go for exact matching of sizes, and assume
	 * that for things like composited window resize the tiled
	 * width/height alignment and rounding of sizes to pages will
	 * get us useful cache hit rates anyway)
	 */
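	/* For illustration, with coarse=0 this produces buckets of 4k, 8k,
	 * 12k, 16k, 20k, 24k, 28k, 32k, 40k, 48k, 56k, 64k, 80k, ... i.e.
	 * each power of two plus three intermediate steps.  With coarse=1
	 * only the power-of-two sizes are used.
	 */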
	add_bucket(cache, 4096);
	add_bucket(cache, 4096 * 2);
	if (!coarse)
		add_bucket(cache, 4096 * 3);

	/* Initialize the linked lists for BO reuse cache. */
	for (size = 4 * 4096; size <= cache_max_size; size *= 2) {
		add_bucket(cache, size);
		if (!coarse) {
			add_bucket(cache, size + size * 1 / 4);
			add_bucket(cache, size + size * 2 / 4);
			add_bucket(cache, size + size * 3 / 4);
		}
	}
}

/* Frees older cached buffers.  Called under table_lock */
void
fd_bo_cache_cleanup(struct fd_bo_cache *cache, time_t time)
{
	int i;

	if (cache->time == time)
		return;

	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		struct fd_bo *bo;

		while (!LIST_IS_EMPTY(&bucket->list)) {
			bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);

			/* keep things in cache for at least 1 second: */
			if (time && ((time - bo->free_time) <= 1))
				break;

			VG_BO_OBTAIN(bo);
			list_del(&bo->list);
			bo_del(bo);
		}
	}

	cache->time = time;
}

static struct fd_bo_bucket * get_bucket(struct fd_bo_cache *cache, uint32_t size)
{
	int i;

	/* hmm, this is what intel does, but I suppose we could calculate our
	 * way to the correct bucket size rather than looping..
	 */
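	/* A sketch of what that direct calculation could look like for the
	 * !coarse layout (1, 2, 3 pages, then four buckets per power of
	 * two), assuming DIV_ROUND_UP() and a floor-log2 helper along the
	 * lines of util_logbase2():
	 *
	 *    unsigned p = DIV_ROUND_UP(size, 4096);
	 *    if (p <= 3)
	 *       i = p - 1;
	 *    else {
	 *       unsigned grp  = util_logbase2(p) - 2;
	 *       unsigned base = 4 << grp;   // group's first size, in pages
	 *       unsigned step = 1 << grp;   // page increment within group
	 *       i = 3 + 4 * grp + DIV_ROUND_UP(p - base, step);
	 *    }
	 *
	 * For now the linear scan over a few dozen buckets is cheap enough.
	 */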
	for (i = 0; i < cache->num_buckets; i++) {
		struct fd_bo_bucket *bucket = &cache->cache_bucket[i];
		if (bucket->size >= size) {
			return bucket;
		}
	}

	return NULL;
}

static int is_idle(struct fd_bo *bo)
{
	return fd_bo_cpu_prep(bo, NULL,
			DRM_FREEDRENO_PREP_READ |
			DRM_FREEDRENO_PREP_WRITE |
			DRM_FREEDRENO_PREP_NOSYNC) == 0;
}

static struct fd_bo *find_in_bucket(struct fd_bo_bucket *bucket, uint32_t flags)
{
	struct fd_bo *bo = NULL;

	/* TODO .. if we had an ALLOC_FOR_RENDER flag like intel, we could
	 * skip the busy check.. if it is only going to be a render target
	 * then we probably don't need to stall..
	 *
	 * NOTE that intel takes ALLOC_FOR_RENDER bo's from the list tail
	 * (MRU, since likely to be in GPU cache), rather than head (LRU)..
	 */
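	/* (Frees go to the tail of the bucket list via list_addtail(), so
	 * the MRU variant would take bucket->list.prev instead of
	 * bucket->list.next below.)
	 */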
	pthread_mutex_lock(&table_lock);
	if (!LIST_IS_EMPTY(&bucket->list)) {
		bo = LIST_ENTRY(struct fd_bo, bucket->list.next, list);
		/* TODO check for compatible flags? */
		if (is_idle(bo)) {
			list_del(&bo->list);
		} else {
			bo = NULL;
		}
	}
	pthread_mutex_unlock(&table_lock);

	return bo;
}

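/* A sketch of the intended usage (hypothetical caller, along the lines of
 * what fd_bo_new() does): try the cache first and fall back to a fresh
 * allocation on a miss, then hand the buffer back via fd_bo_cache_free()
 * when the last reference is dropped:
 *
 *    uint32_t size = req_size;
 *    struct fd_bo *bo = fd_bo_cache_alloc(&dev->bo_cache, &size, flags);
 *    if (!bo)
 *       bo = ...allocate a new bo of 'size' bytes...
 *
 * Since size may be rounded up to the bucket size, the caller must use
 * the updated value for any fallback allocation.
 */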
/* NOTE: size is potentially rounded up to bucket size: */
struct fd_bo *
fd_bo_cache_alloc(struct fd_bo_cache *cache, uint32_t *size, uint32_t flags)
{
	struct fd_bo *bo = NULL;
	struct fd_bo_bucket *bucket;

	*size = align(*size, 4096);
	bucket = get_bucket(cache, *size);

	/* see if we can be green and recycle: */
retry:
	if (bucket) {
		*size = bucket->size;
		bo = find_in_bucket(bucket, flags);
		if (bo) {
			VG_BO_OBTAIN(bo);
			if (bo->funcs->madvise(bo, TRUE) <= 0) {
				/* we've lost the backing pages, delete and try again: */
				pthread_mutex_lock(&table_lock);
				bo_del(bo);
				pthread_mutex_unlock(&table_lock);
				goto retry;
			}
			p_atomic_set(&bo->refcnt, 1);
			fd_device_ref(bo->dev);
			return bo;
		}
	}

	return NULL;
}

int
fd_bo_cache_free(struct fd_bo_cache *cache, struct fd_bo *bo)
{
	struct fd_bo_bucket *bucket = get_bucket(cache, bo->size);

	/* see if we can be green and recycle: */
	if (bucket) {
		struct timespec time;

		bo->funcs->madvise(bo, FALSE);

		clock_gettime(CLOCK_MONOTONIC, &time);

		bo->free_time = time.tv_sec;
		VG_BO_RELEASE(bo);
		list_addtail(&bo->list, &bucket->list);
		fd_bo_cache_cleanup(cache, time.tv_sec);

		/* bo's in the bucket cache don't have a ref and
		 * don't hold a ref to the dev:
		 */
		fd_device_del_locked(bo->dev);

		return 0;
	}

	return -1;
}