/*	$NetBSD: amdgpu_sa.c,v 1.1.1.2 2021/12/18 20:11:10 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole", and we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU ring
 * progression, what comes after the last bo is the oldest bo we allocated,
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that is not the case, we skip over the bo after last to the closest
 * done bo, if such a one exists. If none exists and we are not asked to
 * block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring and
 * return as soon as any one of those fences completes.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_sa.c,v 1.1.1.2 2021/12/18 20:11:10 riastradh Exp $");

#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

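/**
 * amdgpu_sa_bo_manager_init - initialize a suballocation manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: sa_manager to initialize
 * @size: size of the backing buffer object in bytes
 * @align: alignment granted to suballocations
 * @domain: memory domain to place the backing buffer object in
 *
 * Allocates one kernel bo that all suballocations are carved out of and
 * initializes the hole pointer and the per-ring fence lists.
 * Returns 0 on success or a negative error code on failure.
 */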
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		INIT_LIST_HEAD(&sa_manager->flist[i]);

	r = amdgpu_bo_create_kernel(adev, size, align, domain, &sa_manager->bo,
				    &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	memset(sa_manager->cpu_ptr, 0, sa_manager->size);
	return r;
}

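/**
 * amdgpu_sa_bo_manager_fini - tear down a suballocation manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: sa_manager to tear down
 *
 * Frees any suballocations that are still tracked (complaining if some
 * are not yet idle) and releases the backing buffer object.
 */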
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return;
	}

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}

	amdgpu_bo_free_kernel(&sa_manager->bo, &sa_manager->gpu_addr, &sa_manager->cpu_ptr);
	sa_manager->size = 0;
}

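/**
 * amdgpu_sa_bo_remove_locked - free a single suballocation
 *
 * @sa_bo: suballocation to free
 *
 * Unlinks @sa_bo from the offset and fence lists, moves the manager's
 * hole pointer back if it pointed at @sa_bo, drops the fence reference
 * and frees the structure. Caller must hold the manager's wq lock.
 */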
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;

	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	dma_fence_put(sa_bo->fence);
	kfree(sa_bo);
}

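/**
 * amdgpu_sa_bo_try_free - reclaim signaled suballocations after the hole
 *
 * @sa_manager: sa_manager to reclaim from
 *
 * Walks forward from the hole pointer and frees every suballocation whose
 * fence has already signaled, stopping at the first one that is still busy.
 * Caller must hold the manager's wq lock.
 */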
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !dma_fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

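/**
 * amdgpu_sa_bo_hole_soffset - start offset of the current hole
 *
 * @sa_manager: sa_manager to query
 *
 * The hole starts where the bo pointed at by the hole pointer ends,
 * or at offset 0 when the hole pointer is the list head itself.
 */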
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

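/**
 * amdgpu_sa_bo_hole_eoffset - end offset of the current hole
 *
 * @sa_manager: sa_manager to query
 *
 * The hole ends where the next bo in the offset list starts, or at the
 * end of the backing buffer when the hole is the last entry.
 */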
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

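/**
 * amdgpu_sa_bo_try_alloc - try to fit an allocation into the current hole
 *
 * @sa_manager: sa_manager to allocate from
 * @sa_bo: preallocated suballocation to fill in
 * @size: number of bytes to allocate
 * @align: alignment the allocation must satisfy
 *
 * Rounds the hole's start offset up to @align and, if @size bytes still
 * fit before the hole's end, links @sa_bo in after the hole pointer and
 * advances the hole. Returns true on success, false if the hole is too
 * small.
 */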
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * there is enough free memory to satisfy the allocation directly.
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
		if (!list_empty(&sa_manager->flist[i]))
			return true;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

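/**
 * amdgpu_sa_bo_next_hole - advance the hole past finished allocations
 *
 * @sa_manager: sa_manager to advance
 * @fences: array of AMDGPU_SA_NUM_FENCE_LISTS fence pointers, filled with
 *	the fences that still need to signal (or NULL per slot)
 * @tries: per-ring counters limiting how often each ring may supply a hole
 *
 * If the hole is at the end of the buffer, wrap around to the beginning.
 * Otherwise look at the oldest suballocation of every ring, free the
 * signaled one closest after the hole and move the hole there. Returns
 * true if the hole moved and the allocation should be retried, false if
 * the caller has to wait on the collected fences.
 */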
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct dma_fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		fences[i] = NULL;

		if (list_empty(&sa_manager->flist[i]))
			continue;

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!dma_fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = best_bo->fence->context;

		idx %= AMDGPU_SA_NUM_FENCE_LISTS;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it
		 */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

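/**
 * amdgpu_sa_bo_new - allocate from a suballocation manager
 *
 * @sa_manager: sa_manager to allocate from
 * @sa_bo: pointer filled with the new suballocation on success
 * @size: number of bytes to allocate
 * @align: alignment the allocation must satisfy
 *
 * Tries to carve @size bytes out of the backing bo, first by reclaiming
 * signaled suballocations and skipping over busy ones, then by sleeping
 * on the oldest fences until space frees up. Returns 0 on success or a
 * negative error code on failure.
 */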
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned tries[AMDGPU_SA_NUM_FENCE_LISTS];
	unsigned count;
	int i, r;
	signed long t;

	if (WARN_ON_ONCE(align > sa_manager->align))
		return -EINVAL;

	if (WARN_ON_ONCE(size > sa_manager->size))
		return -EINVAL;

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if (!(*sa_bo))
		return -ENOMEM;
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			tries[i] = 0;

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_SA_NUM_FENCE_LISTS; ++i)
			if (fences[i])
				fences[count++] = dma_fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq.lock);
			t = dma_fence_wait_any_timeout(fences, count, false,
						       MAX_SCHEDULE_TIMEOUT,
						       NULL);
			for (i = 0; i < count; ++i)
				dma_fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for, block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

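/**
 * amdgpu_sa_bo_free - free a suballocation
 *
 * @adev: amdgpu device pointer
 * @sa_bo: suballocation to free, set to NULL on return
 * @fence: fence after which the memory may be reused, or NULL
 *
 * If @fence has not signaled yet, the suballocation is queued on the
 * fence list of the fence's ring and reclaimed later; otherwise it is
 * freed immediately. Waiters on the manager are woken up either way.
 */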
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct dma_fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !dma_fence_is_signaled(fence)) {
		uint32_t idx;

		(*sa_bo)->fence = dma_fence_get(fence);
		idx = fence->context % AMDGPU_SA_NUM_FENCE_LISTS;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

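/**
 * amdgpu_sa_bo_dump_debug_info - dump the suballocator state to debugfs
 *
 * @sa_manager: sa_manager to dump
 * @m: seq_file to print into
 *
 * Prints one line per suballocation with its GPU address range and size,
 * marks the current hole with '>' and, where present, shows the fence
 * that still protects the allocation.
 */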
void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;

		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);

		if (i->fence)
			seq_printf(m, " protected by 0x%016llx on context %llu",
				   i->fence->seqno, i->fence->context);

		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif