/*	$NetBSD: radeon_sa.c,v 1.4 2021/12/18 23:45:43 riastradh Exp $	*/

/*
 * Copyright 2011 Red Hat Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */
/* Algorithm:
 *
 * We store the last allocated bo in "hole"; we always try to allocate
 * after the last allocated bo.  The principle is that in a linear GPU
 * ring progression, what is after last is the oldest bo we allocated
 * and thus the first one that should no longer be in use by the GPU.
 *
 * If that's not the case, we skip over the bo after last to the
 * closest done bo, if one exists.  If none exists and we are not asked
 * to block, we report failure to allocate.
 *
 * If we are asked to block, we wait on the oldest fence of each ring,
 * and we just wait for any of those fences to complete.
 */
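/*
 * Worked example of the idea above (illustrative numbers only, not
 * from the driver): in a 1024-byte manager, suppose we allocated A at
 * [0,256), B at [256,512) and C at [512,768) in that order, so "hole"
 * sits after C.  The next allocation is attempted in [768,1024).  If
 * it does not fit there, the bo that follows the hole in ring order
 * is A, the oldest allocation; once A's fence has signaled we remove
 * it and retry from the beginning of the buffer.
 */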
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: radeon_sa.c,v 1.4 2021/12/18 23:45:43 riastradh Exp $");

#include "radeon.h"

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo);
static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager);

int radeon_sa_bo_manager_init(struct radeon_device *rdev,
			      struct radeon_sa_manager *sa_manager,
			      unsigned size, u32 align, u32 domain, u32 flags)
{
	int i, r;

#ifdef __NetBSD__
	spin_lock_init(&sa_manager->wq_lock);
	DRM_INIT_WAITQUEUE(&sa_manager->wq, "radsabom");
#else
	init_waitqueue_head(&sa_manager->wq);
#endif
	sa_manager->bo = NULL;
	sa_manager->size = size;
	sa_manager->domain = domain;
	sa_manager->align = align;
	sa_manager->hole = &sa_manager->olist;
	INIT_LIST_HEAD(&sa_manager->olist);
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		INIT_LIST_HEAD(&sa_manager->flist[i]);
	}

	r = radeon_bo_create(rdev, size, align, true,
			     domain, flags, NULL, NULL, &sa_manager->bo);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to allocate bo for manager\n", r);
		return r;
	}

	return r;
}

void radeon_sa_bo_manager_fini(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (!list_empty(&sa_manager->olist)) {
		sa_manager->hole = &sa_manager->olist;
		radeon_sa_bo_try_free(sa_manager);
		if (!list_empty(&sa_manager->olist)) {
			dev_err(rdev->dev, "sa_manager is not empty, clearing anyway\n");
		}
	}
	list_for_each_entry_safe(sa_bo, tmp, &sa_manager->olist, olist) {
		radeon_sa_bo_remove_locked(sa_bo);
	}
	radeon_bo_unref(&sa_manager->bo);
	sa_manager->size = 0;
#ifdef __NetBSD__
	DRM_DESTROY_WAITQUEUE(&sa_manager->wq);
	spin_lock_destroy(&sa_manager->wq_lock);
#endif
}
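/*
 * Expected lifecycle, as implied by the functions in this file:
 * radeon_sa_bo_manager_init() creates the backing bo,
 * radeon_sa_bo_manager_start() pins and kmaps it, then any number of
 * radeon_sa_bo_new()/radeon_sa_bo_free() pairs sub-allocate out of
 * it, and radeon_sa_bo_manager_suspend()/radeon_sa_bo_manager_fini()
 * undo the mapping and destroy the bo on teardown.
 */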
int radeon_sa_bo_manager_start(struct radeon_device *rdev,
			       struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	/* map the buffer */
	r = radeon_bo_reserve(sa_manager->bo, false);
	if (r) {
		dev_err(rdev->dev, "(%d) failed to reserve manager bo\n", r);
		return r;
	}
	r = radeon_bo_pin(sa_manager->bo, sa_manager->domain, &sa_manager->gpu_addr);
	if (r) {
		radeon_bo_unreserve(sa_manager->bo);
		dev_err(rdev->dev, "(%d) failed to pin manager bo\n", r);
		return r;
	}
	r = radeon_bo_kmap(sa_manager->bo, &sa_manager->cpu_ptr);
	radeon_bo_unreserve(sa_manager->bo);
	return r;
}

int radeon_sa_bo_manager_suspend(struct radeon_device *rdev,
				 struct radeon_sa_manager *sa_manager)
{
	int r;

	if (sa_manager->bo == NULL) {
		dev_err(rdev->dev, "no bo for sa manager\n");
		return -EINVAL;
	}

	r = radeon_bo_reserve(sa_manager->bo, false);
	if (!r) {
		radeon_bo_kunmap(sa_manager->bo);
		radeon_bo_unpin(sa_manager->bo);
		radeon_bo_unreserve(sa_manager->bo);
	}
	return r;
}

static void radeon_sa_bo_remove_locked(struct radeon_sa_bo *sa_bo)
{
	struct radeon_sa_manager *sa_manager = sa_bo->manager;
	if (sa_manager->hole == &sa_bo->olist) {
		sa_manager->hole = sa_bo->olist.prev;
	}
	list_del_init(&sa_bo->olist);
	list_del_init(&sa_bo->flist);
	radeon_fence_unref(&sa_bo->fence);
	kfree(sa_bo);
}

static void radeon_sa_bo_try_free(struct radeon_sa_manager *sa_manager)
{
	struct radeon_sa_bo *sa_bo, *tmp;

	if (sa_manager->hole->next == &sa_manager->olist)
		return;

	sa_bo = list_entry(sa_manager->hole->next, struct radeon_sa_bo, olist);
	list_for_each_entry_safe_from(sa_bo, tmp, &sa_manager->olist, olist) {
		if (sa_bo->fence == NULL || !radeon_fence_signaled(sa_bo->fence)) {
			return;
		}
		radeon_sa_bo_remove_locked(sa_bo);
	}
}

static inline unsigned radeon_sa_bo_hole_soffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole != &sa_manager->olist) {
		return list_entry(hole, struct radeon_sa_bo, olist)->eoffset;
	}
	return 0;
}

static inline unsigned radeon_sa_bo_hole_eoffset(struct radeon_sa_manager *sa_manager)
{
	struct list_head *hole = sa_manager->hole;

	if (hole->next != &sa_manager->olist) {
		return list_entry(hole->next, struct radeon_sa_bo, olist)->soffset;
	}
	return sa_manager->size;
}
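/*
 * The free hole runs from hole_soffset() to hole_eoffset().  A
 * concrete example of the alignment arithmetic used below (example
 * numbers only): with soffset = 300 and align = 256,
 * wasted = (256 - (300 % 256)) % 256 = 212, so the allocation starts
 * at the next aligned offset, 512.  When soffset is already aligned,
 * wasted is 0 and no bytes are skipped.
 */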
static bool radeon_sa_bo_try_alloc(struct radeon_sa_manager *sa_manager,
				   struct radeon_sa_bo *sa_bo,
				   unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		soffset += wasted;

		sa_bo->manager = sa_manager;
		sa_bo->soffset = soffset;
		sa_bo->eoffset = soffset + size;
		list_add(&sa_bo->olist, sa_manager->hole);
		INIT_LIST_HEAD(&sa_bo->flist);
		sa_manager->hole = &sa_bo->olist;
		return true;
	}
	return false;
}

/**
 * radeon_sa_event - Check if we can stop waiting
 *
 * @sa_manager: pointer to the sa_manager
 * @size: number of bytes we want to allocate
 * @align: alignment we need to match
 *
 * Check if either there is a fence we can wait for or
 * enough free memory to satisfy the allocation directly
 */
static bool radeon_sa_event(struct radeon_sa_manager *sa_manager,
			    unsigned size, unsigned align)
{
	unsigned soffset, eoffset, wasted;
	int i;

	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		if (!list_empty(&sa_manager->flist[i])) {
			return true;
		}
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	eoffset = radeon_sa_bo_hole_eoffset(sa_manager);
	wasted = (align - (soffset % align)) % align;

	if ((eoffset - soffset) >= (size + wasted)) {
		return true;
	}

	return false;
}
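/*
 * Distance computation used in the function below, worked through
 * with example numbers: offsets live in [0, size).  If the hole
 * starts at soffset = 768 in a 1024-byte manager and a candidate bo
 * starts at tmp = 128, then tmp < soffset, so we pretend the bo is at
 * 128 + 1024 = 1152; its ring distance from the hole is
 * 1152 - 768 = 384.  The candidate with the smallest such distance is
 * the next one the GPU should finish with, which is why "best" starts
 * at size * 2, an upper bound on any distance.
 */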
static bool radeon_sa_bo_next_hole(struct radeon_sa_manager *sa_manager,
				   struct radeon_fence **fences,
				   unsigned *tries)
{
	struct radeon_sa_bo *best_bo = NULL;
	unsigned i, soffset, best, tmp;

	/* if hole points to the end of the buffer */
	if (sa_manager->hole->next == &sa_manager->olist) {
		/* try again with its beginning */
		sa_manager->hole = &sa_manager->olist;
		return true;
	}

	soffset = radeon_sa_bo_hole_soffset(sa_manager);
	/* to handle wrap around we add sa_manager->size */
	best = sa_manager->size * 2;
	/* go over all the fence lists and try to find the sa_bo
	 * closest to the current hole
	 */
	for (i = 0; i < RADEON_NUM_RINGS; ++i) {
		struct radeon_sa_bo *sa_bo;

		if (list_empty(&sa_manager->flist[i])) {
			continue;
		}

		sa_bo = list_first_entry(&sa_manager->flist[i],
					 struct radeon_sa_bo, flist);

		if (!radeon_fence_signaled(sa_bo->fence)) {
			fences[i] = sa_bo->fence;
			continue;
		}

		/* limit the number of tries each ring gets */
		if (tries[i] > 2) {
			continue;
		}

		tmp = sa_bo->soffset;
		if (tmp < soffset) {
			/* wrap around, pretend it's after */
			tmp += sa_manager->size;
		}
		tmp -= soffset;
		if (tmp < best) {
			/* this sa bo is the closest one */
			best = tmp;
			best_bo = sa_bo;
		}
	}

	if (best_bo) {
		++tries[best_bo->fence->ring];
		sa_manager->hole = best_bo->olist.prev;

		/* we knew that this one is signaled,
		   so it's safe to remove it */
		radeon_sa_bo_remove_locked(best_bo);
		return true;
	}
	return false;
}

int radeon_sa_bo_new(struct radeon_device *rdev,
		     struct radeon_sa_manager *sa_manager,
		     struct radeon_sa_bo **sa_bo,
		     unsigned size, unsigned align)
{
	struct radeon_fence *fences[RADEON_NUM_RINGS];
	unsigned tries[RADEON_NUM_RINGS];
	int i, r;

	BUG_ON(align > sa_manager->align);
	BUG_ON(size > sa_manager->size);

	*sa_bo = kmalloc(sizeof(struct radeon_sa_bo), GFP_KERNEL);
	if ((*sa_bo) == NULL) {
		return -ENOMEM;
	}
	(*sa_bo)->manager = sa_manager;
	(*sa_bo)->fence = NULL;
	INIT_LIST_HEAD(&(*sa_bo)->olist);
	INIT_LIST_HEAD(&(*sa_bo)->flist);

#ifdef __NetBSD__
	spin_lock(&sa_manager->wq_lock);
#else
	spin_lock(&sa_manager->wq.lock);
#endif
	do {
		for (i = 0; i < RADEON_NUM_RINGS; ++i) {
			fences[i] = NULL;
			tries[i] = 0;
		}

		do {
			radeon_sa_bo_try_free(sa_manager);

			if (radeon_sa_bo_try_alloc(sa_manager, *sa_bo,
						   size, align)) {
#ifdef __NetBSD__
				spin_unlock(&sa_manager->wq_lock);
#else
				spin_unlock(&sa_manager->wq.lock);
#endif
				return 0;
			}

			/* see if we can skip over some allocations */
		} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));

		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_fence_ref(fences[i]);

#ifdef __NetBSD__
		spin_unlock(&sa_manager->wq_lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_fence_unref(&fences[i]);
		spin_lock(&sa_manager->wq_lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT)
			DRM_SPIN_WAIT_UNTIL(r, &sa_manager->wq,
			    &sa_manager->wq_lock,
			    radeon_sa_event(sa_manager, size, align));
#else
		spin_unlock(&sa_manager->wq.lock);
		r = radeon_fence_wait_any(rdev, fences, false);
		for (i = 0; i < RADEON_NUM_RINGS; ++i)
			radeon_fence_unref(&fences[i]);
		spin_lock(&sa_manager->wq.lock);
		/* if we have nothing to wait for, block */
		if (r == -ENOENT) {
			r = wait_event_interruptible_locked(
				sa_manager->wq,
				radeon_sa_event(sa_manager, size, align)
			);
		}
#endif

	} while (!r);

#ifdef __NetBSD__
	spin_unlock(&sa_manager->wq_lock);
#else
	spin_unlock(&sa_manager->wq.lock);
#endif
	kfree(*sa_bo);
	*sa_bo = NULL;
	return r;
}
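/*
 * Note on the free path below: a buffer still protected by an
 * unsignaled fence is not reclaimed immediately.  It is queued on the
 * per-ring fence list (flist) and only actually removed later, by
 * radeon_sa_bo_try_free() or radeon_sa_bo_next_hole(), once the fence
 * has signaled; only fence-less or already-signaled buffers are freed
 * on the spot.
 */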
void radeon_sa_bo_free(struct radeon_device *rdev, struct radeon_sa_bo **sa_bo,
		       struct radeon_fence *fence)
{
	struct radeon_sa_manager *sa_manager;

	if (sa_bo == NULL || *sa_bo == NULL) {
		return;
	}

	sa_manager = (*sa_bo)->manager;
#ifdef __NetBSD__
	spin_lock(&sa_manager->wq_lock);
#else
	spin_lock(&sa_manager->wq.lock);
#endif
	if (fence && !radeon_fence_signaled(fence)) {
		(*sa_bo)->fence = radeon_fence_ref(fence);
		list_add_tail(&(*sa_bo)->flist,
			      &sa_manager->flist[fence->ring]);
	} else {
		radeon_sa_bo_remove_locked(*sa_bo);
	}
#ifdef __NetBSD__
	DRM_SPIN_WAKEUP_ALL(&sa_manager->wq, &sa_manager->wq_lock);
	spin_unlock(&sa_manager->wq_lock);
#else
	wake_up_all_locked(&sa_manager->wq);
	spin_unlock(&sa_manager->wq.lock);
#endif
	*sa_bo = NULL;
}

#if defined(CONFIG_DEBUG_FS)
void radeon_sa_bo_dump_debug_info(struct radeon_sa_manager *sa_manager,
				  struct seq_file *m)
{
	struct radeon_sa_bo *i;

	spin_lock(&sa_manager->wq.lock);
	list_for_each_entry(i, &sa_manager->olist, olist) {
		uint64_t soffset = i->soffset + sa_manager->gpu_addr;
		uint64_t eoffset = i->eoffset + sa_manager->gpu_addr;
		if (&i->olist == sa_manager->hole) {
			seq_printf(m, ">");
		} else {
			seq_printf(m, " ");
		}
		seq_printf(m, "[0x%010llx 0x%010llx] size %8lld",
			   soffset, eoffset, eoffset - soffset);
		if (i->fence) {
			seq_printf(m, " protected by 0x%016llx on ring %d",
				   i->fence->seq, i->fence->ring);
		}
		seq_printf(m, "\n");
	}
	spin_unlock(&sa_manager->wq.lock);
}
#endif