/*	$NetBSD: radeon_sa.c,v 1.1.1.3 2021/12/18 20:15:51 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU ring
 * progression, what is after last is the oldest bo we allocated and thus
 * the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo if such a one exists.  If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
 */
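/*
 * Illustrative sketch of that bookkeeping (the numbers are made up): with
 * a 512 KiB manager holding allocations A [0, 64K), B [64K, 128K) and
 * C [128K, 192K), "hole" points at C and a new request is first tried in
 * [192K, 512K).  If it does not fit there, the hole wraps back to the
 * start of the buffer; otherwise we skip forward to the closest freed
 * allocation whose fence has already signaled.
 */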

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_sa.c,v 1.1.1.3 2021/12/18 20:15:51 riastradh Exp $");

#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

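/**
 * radeon_sa_bo_manager_init - set up a sub-allocation manager
 *
 * @rdev: radeon device
 * @sa_manager: manager to initialize
 * @size: size in bytes of the backing buffer object
 * @align: alignment of the backing buffer object
 * @domain: memory domain to place the backing buffer object in
 * @flags: buffer object creation flags
 *
 * Initializes the wait queue and the allocation/free lists and creates the
 * single buffer object that all sub-allocations are carved out of.
 * Returns 0 on success, negative error code on failure.
 */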
int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain, u32 flags)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, flags, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

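/**
 * radeon_sa_bo_manager_fini - tear down a sub-allocation manager
 *
 * @rdev: radeon device
 * @sa_manager: manager to tear down
 *
 * Frees any sub-allocations still tracked by the manager (warning if some
 * were never released) and drops the reference on the backing buffer object.
 */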
void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

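/**
 * radeon_sa_bo_manager_start - pin and map the backing buffer object
 *
 * @rdev: radeon device
 * @sa_manager: manager to start
 *
 * Pins the backing buffer object into its memory domain and maps it, so
 * sub-allocations have both a GPU and a CPU address.
 * Returns 0 on success, negative error code on failure.
 */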
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

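/**
 * radeon_sa_bo_manager_suspend - unmap and unpin the backing buffer object
 *
 * @rdev: radeon device
 * @sa_manager: manager to suspend
 *
 * Unmaps and unpins the backing buffer object so it can be moved or
 * evicted while the manager is idle.
 * Returns 0 on success, negative error code on failure.
 */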
int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

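/**
 * radeon_sa_bo_remove_locked - drop a sub-allocation from the manager
 *
 * @sa_bo: sub-allocation to remove
 *
 * Unlink the sub-allocation from the allocation list and its per-ring free
 * list, drop its fence reference and free it.  If the manager's hole points
 * at this entry, move the hole back to the previous entry first.
 */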
static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

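/**
 * radeon_sa_bo_try_free - reclaim finished allocations after the hole
 *
 * @sa_manager: manager to reclaim from
 *
 * Walk the allocations that follow the current hole and remove every one
 * whose fence has signaled, stopping at the first one that is still
 * allocated (no fence yet) or whose fence has not signaled.
 */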
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

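/**
 * radeon_sa_bo_hole_soffset - start offset of the current hole
 *
 * @sa_manager: manager to inspect
 *
 * Returns the end offset of the allocation the hole points at, or 0 when
 * the hole sits at the list head (the start of the buffer).
 */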
static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

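/**
 * radeon_sa_bo_hole_eoffset - end offset of the current hole
 *
 * @sa_manager: manager to inspect
 *
 * Returns the start offset of the allocation that follows the hole, or the
 * manager size when nothing follows (the hole extends to the end of the
 * buffer).
 */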
static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

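/**
 * radeon_sa_bo_try_alloc - try to place an allocation in the current hole
 *
 * @sa_manager: manager to allocate from
 * @sa_bo: sub-allocation to fill in
 * @size: number of bytes to allocate
 * @align: required alignment
 *
 * If the current hole is large enough once alignment padding is accounted
 * for, record the offsets in @sa_bo, link it in after the hole and make it
 * the new hole.  Returns true on success, false if the hole is too small.
 */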
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

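/**
 * radeon_sa_bo_next_hole - move the hole past finished allocations
 *
 * @sa_manager: manager to scan
 * @fences: per-ring array filled with the oldest fence that has not signaled
 * @tries: per-ring count of how often that ring's allocations were skipped
 *
 * If the hole is at the end of the buffer, wrap it back to the beginning.
 * Otherwise look at the oldest freed allocation of each ring: remember the
 * fences that have not signaled yet and, among the signaled ones, pick the
 * allocation closest after the hole, move the hole just before it and
 * remove it.  Returns true if the hole moved and allocation should be
 * retried, false otherwise.
 */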
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the closest sa_bo
	 * of the current last
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		   so it's safe to remove it */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

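/**
 * radeon_sa_bo_new - allocate a new sub-allocation
 *
 * @rdev: radeon device
 * @sa_manager: manager to allocate from
 * @sa_bo: filled with the new sub-allocation on success
 * @size: number of bytes to allocate
 * @align: required alignment
 *
 * Try to fit the request into the current hole, reclaiming finished
 * allocations and skipping forward as needed.  If nothing fits, wait for
 * the oldest fences (or for space to be freed) and retry.
 * Returns 0 on success, negative error code on failure.
 */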
int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_fence_ref(fences[i]);

		spin_unlock(&sa_manager->wq.lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_fence_unref(&fences[i]);
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				radeon_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

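/**
 * radeon_sa_bo_free - free a sub-allocation
 *
 * @rdev: radeon device
 * @sa_bo: sub-allocation to free, set to NULL on return
 * @fence: fence that must signal before the space may be reused, or NULL
 *
 * If the fence has not signaled yet, park the sub-allocation on its ring's
 * free list so the space is reclaimed once the fence signals; otherwise
 * remove it immediately.  Wakes up anyone waiting for space.
 */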
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
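/**
 * radeon_sa_bo_dump_debug_info - dump the allocation list to a seq_file
 *
 * @sa_manager: manager to dump
 * @m: seq_file to print into
 *
 * Prints one line per sub-allocation with its GPU address range and size,
 * marking the current hole with '>' and noting the protecting fence, if any.
 */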
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif