/*	$NetBSD: amdgpu_sa.c,v 1.1.1.1 2018/08/27 01:34:44 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse (at) freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo. The principle is that in a linear GPU
 * ring progression, what is after last is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the closest
 * done bo, if one exists. If none exists and we are not asked to block,
 * we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring.
 * We just wait for any of those fences to complete.
 */
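/*
 * Bookkeeping sketch (illustrative):
 *
 *   olist:        every live sub-allocation, ordered by offset
 *                     head -> [bo0][bo1][bo2] ... [boN] -> head
 *                                        ^
 *                                        hole: new allocations go in the
 *                                        gap after this entry
 *
 *   flist[ring]:  sub-allocations freed on that ring but still protected
 *                 by an unsignaled fence, oldest first; these are the
 *                 candidates for reclaiming in amdgpu_sa_bo_next_hole().
 */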
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_sa.c,v 1.1.1.1 2018/08/27 01:34:44 riastradh Exp $");

#include <drm/drmP.h>
#include "amdgpu.h"

static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo);
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager);

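/**
 * amdgpu_sa_bo_manager_init - initialize the sub-allocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: pointer to the sa_manager
 * @size: size of the backing buffer object in bytes
 * @align: maximum alignment the allocator will have to satisfy
 * @domain: memory domain for the backing buffer object
 *
 * Set up the wait queue, the offset and fence lists, and create the
 * backing buffer object. Returns 0 on success or a negative error code.
 */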
int amdgpu_sa_bo_manager_init(struct amdgpu_device *adev,
			      struct amdgpu_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain)
{
	int i, r;

	init_waitqueue_head(&sa_manager->wq);
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = amdgpu_bo_create(adev, size, align, true, domain,
			     0, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(adev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

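/**
 * amdgpu_sa_bo_manager_fini - tear down the sub-allocator manager
 *
 * @adev: amdgpu device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Free whatever can still be freed, warn if allocations remain in use,
 * then release every remaining sub-allocation and the backing buffer
 * object.
 */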
void amdgpu_sa_bo_manager_fini(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		amdgpu_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(adev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
	amdgpu_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
}

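/**
 * amdgpu_sa_bo_manager_start - pin and map the backing buffer object
 *
 * @adev: amdgpu device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Pin the backing buffer object into its memory domain and map it into
 * the kernel address space so sub-allocations can be filled by the CPU.
 */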
int amdgpu_sa_bo_manager_start(struct amdgpu_device *adev,
			       struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(adev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		amdgpu_bo_unreserve(sa_manager->bo);
		dev_err(adev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = amdgpu_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	amdgpu_bo_unreserve(sa_manager->bo);
	return r;
}

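/**
 * amdgpu_sa_bo_manager_suspend - unmap and unpin the backing buffer object
 *
 * @adev: amdgpu device pointer
 * @sa_manager: pointer to the sa_manager
 *
 * Undo amdgpu_sa_bo_manager_start() so the backing buffer object can be
 * moved or evicted while the device is suspended.
 */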
int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
				 struct amdgpu_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(adev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = amdgpu_bo_reserve(sa_manager->bo, false);
	if (!r) {
		amdgpu_bo_kunmap(sa_manager->bo);
		amdgpu_bo_unpin(sa_manager->bo);
		amdgpu_bo_unreserve(sa_manager->bo);
	}
	return r;
}

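/*
 * Unlink a sub-allocation from the offset and fence lists and free it,
 * moving the hole back to the previous entry if it pointed here.
 * Called with sa_manager->wq.lock held.
 */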
static void amdgpu_sa_bo_remove_locked(struct amdgpu_sa_bo *sa_bo)
{
	struct amdgpu_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	fence_put(sa_bo->fence);
	kfree(sa_bo);
}

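/*
 * Starting right after the hole, free consecutive sub-allocations whose
 * fences have signaled; stop at the first one that is still in flight.
 * Called with sa_manager->wq.lock held.
 */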
static void amdgpu_sa_bo_try_free(struct amdgpu_sa_manager *sa_manager)
{
	struct amdgpu_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct amdgpu_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL ||
		    !fence_is_signaled(sa_bo->fence)) {
			return;
		}
		amdgpu_sa_bo_remove_locked(sa_bo);
	}
}

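/*
 * Start offset of the hole: the end offset of the entry the hole sits
 * behind, or 0 when the hole is at the head of the list.
 */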
static inline unsigned amdgpu_sa_bo_hole_soffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct amdgpu_sa_bo, olist)->eoffset;
	}
	return 0;
}

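/*
 * End offset of the hole: the start offset of the next entry, or the end
 * of the buffer when the hole is the last entry.
 */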
static inline unsigned amdgpu_sa_bo_hole_eoffset(struct amdgpu_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct amdgpu_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}

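/*
 * Try to place an allocation of @size bytes with @align alignment into
 * the current hole. On success the sa_bo is linked in right after the
 * hole and becomes the new hole itself.
 */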
static bool amdgpu_sa_bo_try_alloc(struct amdgpu_sa_manager *sa_manager,
				   struct amdgpu_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * amdgpu_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool amdgpu_sa_event(struct amdgpu_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	eoffset = amdgpu_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}

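/*
 * Try to move the hole forward by reclaiming freed allocations. Scan the
 * per-ring fence lists for the signaled sa_bo closest after the current
 * hole (offsets compared modulo the buffer size to handle wrap-around)
 * and remove it. Unsignaled fences are collected in @fences for the
 * caller to wait on, and @tries caps how often each ring is skipped
 * over. Returns true when the hole moved and allocation can be retried.
 */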
static bool amdgpu_sa_bo_next_hole(struct amdgpu_sa_manager *sa_manager,
				   struct fence **fences,
				   unsigned *tries)
{
	struct amdgpu_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = amdgpu_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
		struct amdgpu_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct amdgpu_sa_bo, flist);

		if (!fence_is_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		uint32_t idx = amdgpu_ring_from_fence(best_bo->fence)->idx;
		++tries[idx];
		sa_manager->hole = best_bo->olist.prev;

		/* we know that this one is signaled,
		 * so it's safe to remove it */
		amdgpu_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

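/*
 * Allocate @size bytes with @align alignment from the manager. Loop
 * freeing signaled allocations and trying the hole; when nothing more
 * can be reclaimed, wait either on the collected fences or, failing
 * that, on the wait queue until space shows up or the wait is
 * interrupted.
 */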
int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
		     struct amdgpu_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct fence *fences[AMDGPU_MAX_RINGS];
	unsigned tries[AMDGPU_MAX_RINGS];
	unsigned count;
	int i, r;
	signed long t;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct amdgpu_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

	spin_lock(&sa_manager->wq.lock);
	do {
		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			amdgpu_sa_bo_try_free(sa_manager);

			if (amdgpu_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
				spin_unlock(&sa_manager->wq.lock);
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
			if (fences[i])
				fences[count++] = fence_get(fences[i]);

		if (count) {
			spin_unlock(&sa_manager->wq.lock);
			t = fence_wait_any_timeout(fences, count, false,
						   MAX_SCHEDULE_TIMEOUT);
			for (i = 0; i < count; ++i)
				fence_put(fences[i]);

			r = (t > 0) ? 0 : t;
			spin_lock(&sa_manager->wq.lock);
		} else {
			/* if we have nothing to wait for, block */
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				amdgpu_sa_event(sa_manager, size, align)
			);
		}

	} while (!r);

	spin_unlock(&sa_manager->wq.lock);
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}

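/*
 * Free a sub-allocation. If @fence has not signaled yet the sa_bo is
 * parked on its ring's fence list and reclaimed later; otherwise it is
 * removed immediately. Waiters are woken up in both cases.
 */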
void amdgpu_sa_bo_free(struct amdgpu_device *adev, struct amdgpu_sa_bo **sa_bo,
		       struct fence *fence)
{
	struct amdgpu_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
	spin_lock(&sa_manager->wq.lock);
	if (fence && !fence_is_signaled(fence)) {
		uint32_t idx;
		(*sa_bo)->fence = fence_get(fence);
		idx = amdgpu_ring_from_fence(fence)->idx;
		list_add_tail(&(*sa_bo)->flist, &sa_manager->flist[idx]);
	} else {
		amdgpu_sa_bo_remove_locked(*sa_bo);
	}
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)

static void amdgpu_sa_bo_dump_fence(struct fence *fence, struct seq_file *m)
{
	struct amdgpu_fence *a_fence = to_amdgpu_fence(fence);
	struct amd_sched_fence *s_fence = to_amd_sched_fence(fence);

	if (a_fence)
		seq_printf(m, " protected by 0x%016llx on ring %d",
			   a_fence->seq, a_fence->ring->idx);

	if (s_fence) {
		struct amdgpu_ring *ring;

		ring = container_of(s_fence->sched, struct amdgpu_ring, sched);
		seq_printf(m, " protected by 0x%016x on ring %d",
			   s_fence->base.seqno, ring->idx);
	}
}

void amdgpu_sa_bo_dump_debug_info(struct amdgpu_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct amdgpu_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence)
			amdgpu_sa_bo_dump_fence(i->fence, m);
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif