Home | History | Annotate | Line # | Download | only in i915
      1 /*	$NetBSD: i915_sw_fence.c,v 1.6 2021/12/19 12:11:46 riastradh Exp $	*/
      2 
      3 /*
      4  * SPDX-License-Identifier: MIT
      5  *
      6  * (C) Copyright 2016 Intel Corporation
      7  */
      8 
      9 #include <sys/cdefs.h>
     10 __KERNEL_RCSID(0, "$NetBSD: i915_sw_fence.c,v 1.6 2021/12/19 12:11:46 riastradh Exp $");
     11 
     12 #include <linux/slab.h>
     13 #include <linux/dma-fence.h>
     14 #include <linux/irq_work.h>
     15 #include <linux/dma-resv.h>
     16 
     17 #include "i915_sw_fence.h"
     18 #include "i915_selftest.h"
     19 
     20 #include <drm/drm_wait_netbsd.h>
     21 
     22 #include <linux/nbsd-namespace.h>
     23 
/*
 * I915_SW_FENCE_BUG_ON(): a real BUG_ON() only on debug builds;
 * otherwise the expression is syntax-checked but never evaluated.
 */
#if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
#define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
#else
#define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
#endif

/* Wait-queue entry flag: marks entries this file kmalloc'd and must kfree
 * in i915_sw_fence_wake(). */
#define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
     31 
/*
 * Global lock serializing the dependency-graph walk in
 * i915_sw_fence_check_if_after().  NetBSD lacks DEFINE_SPINLOCK, so
 * the lock is a plain global there — presumably initialized during
 * module setup elsewhere; not visible in this file.
 */
#ifdef __NetBSD__		/* XXX */
spinlock_t i915_sw_fence_lock;
#else
static DEFINE_SPINLOCK(i915_sw_fence_lock);
#endif
     37 
/* Debug-object states passed to debug_fence_set_state(). */
enum {
	DEBUG_FENCE_IDLE = 0,	/* no notify callback in flight */
	DEBUG_FENCE_NOTIFY,	/* FENCE_COMPLETE notify in progress */
};
     42 
     43 static void *i915_sw_fence_debug_hint(void *addr)
     44 {
     45 	return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
     46 }
     47 
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS

/*
 * debugobjects integration: each hook below mirrors one stage of a
 * fence's life cycle (init, activate, state change, deactivate,
 * destroy, free) so the debugobjects core can catch misuse such as
 * double init or use-after-free.
 */

static struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
	.debug_hint = i915_sw_fence_debug_hint,
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

/* On-stack fences need the dedicated init so debugobjects accepts them. */
static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
	debug_object_activate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
	debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
	debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
	debug_object_free(fence, &i915_sw_fence_debug_descr);
	smp_wmb(); /* flush the change in state before reallocation */
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
	debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
}

#else

/* debugobjects disabled: all tracking hooks compile to nothing. */

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
}

#endif
    133 
/*
 * Invoke the notify callback packed into the low bits of fence->flags
 * (masked by I915_SW_FENCE_MASK) with the given lifecycle @state.
 */
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	i915_sw_fence_notify_t fn;

	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
	return fn(fence, state);
}
    142 
    143 void i915_sw_fence_fini(struct i915_sw_fence *fence)
    144 {
    145 #ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
    146 	debug_fence_free(fence);
    147 #endif
    148 	spin_lock_destroy(&fence->wait.lock);
    149 	BUG_ON(!list_empty(&fence->wait.head));
    150 }
    151 
#ifdef __NetBSD__

/* XXX whattakludge */

/*
 * Map Linux waitqueue vocabulary onto the fence's own wait list so
 * the common code below compiles unchanged on NetBSD.
 */
typedef struct i915_sw_fence_queue wait_queue_head_t;
typedef struct i915_sw_fence_waiter wait_queue_entry_t;

#define	TASK_NORMAL	0

/* Bridge from a waiter entry to a NetBSD drm waitqueue. */
struct i915_sw_fence_wq {
	struct i915_sw_fence *fence;	/* fence being waited upon */
	drm_waitqueue_t wq;		/* sleepers in i915_sw_fence_wait() */
};
    165 
/*
 * Wake function installed by i915_sw_fence_wait(): broadcast on the
 * bridge waitqueue so the sleeping thread re-checks the fence.  Named
 * after the Linux helper so __i915_sw_fence_wake_up_all() can single
 * it out by address.  The last argument must not be used (it is the
 * continuation list owned by the waker).
 */
static int
autoremove_wake_function(struct i915_sw_fence_waiter *waiter, unsigned mode,
    int flags, void *donottouch_no_really)
{
	struct i915_sw_fence_wq *sfw = waiter->private;

	/* Caller presumably already completed the fence.  */
	DRM_SPIN_WAKEUP_ALL(&sfw->wq, &sfw->fence->wait.lock);

	return 0;
}
    177 
/*
 * Sleep (uninterruptibly) until @fence is done.  NetBSD-only path:
 * queue a stack waiter whose wake function broadcasts on a private
 * drm waitqueue, then block on that queue under the fence's wait
 * lock until i915_sw_fence_done() holds.
 */
void
i915_sw_fence_wait(struct i915_sw_fence *fence)
{
	struct i915_sw_fence_waiter waiter;
	struct i915_sw_fence_wq sfw;
	int ret;

	waiter.flags = 0;
	waiter.func = autoremove_wake_function;
	waiter.private = &sfw;

	sfw.fence = fence;
	DRM_INIT_WAITQUEUE(&sfw.wq, "i915swf");

	spin_lock(&fence->wait.lock);
	list_add_tail(&waiter.entry, &fence->wait.head);
	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &sfw.wq, &fence->wait.lock,
	    i915_sw_fence_done(fence));
	/* Dequeue ourselves whichever way the wait ended. */
	list_del(&waiter.entry);
	spin_unlock(&fence->wait.lock);

	DRM_DESTROY_WAITQUEUE(&sfw.wq);
}
    201 
    202 #endif
    203 
/*
 * Mark @fence done (pending: 0 -> -1) and wake every waiter queued on
 * fence->wait.  When @continuation is non-NULL we are already inside
 * an ancestor fence's wake-up: chained waiters are deferred onto that
 * list instead of being woken recursively.
 */
static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_entry_t *pos, *next;
	unsigned long flags;

	debug_fence_deactivate(fence);

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the entry list from this, the next ready
	 * fence, to the tail of the original fence's entry list
	 * (and so added to the list to be woken).
	 */

	/* Nested lock class: a chained wake-up already holds a wait lock. */
	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			/* Real sleepers are woken now; fence waiters are deferred. */
			if (pos->func == autoremove_wake_function)
				pos->func(pos, TASK_NORMAL, 0, continuation);
			else
				list_move_tail(&pos->entry, continuation);
		}
	} else {
		LIST_HEAD(extra);

		/* Drain waiters; chained fences append to 'extra', which we
		 * splice back and keep draining until nothing new appears. */
		do {
			list_for_each_entry_safe(pos, next, &x->head, entry) {
				pos->func(pos,
					  TASK_NORMAL, fence->error,
					  &extra);
			}

			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->head);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);

	debug_fence_assert(fence);
}
    249 
/*
 * Drop one pending count; when it hits zero, run the FENCE_COMPLETE
 * notify, wake all waiters, then offer the fence for freeing with
 * FENCE_FREE.  A notify result other than NOTIFY_DONE stops the
 * sequence (the callback has taken ownership).
 */
static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	debug_fence_assert(fence);

	if (!atomic_dec_and_test(&fence->pending))
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);

	if (__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);

	__i915_sw_fence_wake_up_all(fence, continuation);

	debug_fence_destroy(fence);
	/* FENCE_FREE may free the fence: do not touch it afterwards. */
	__i915_sw_fence_notify(fence, FENCE_FREE);
}
    270 
/* Public completion entry point: drop one pending reference on @fence. */
void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);

	/* Completing an already-done fence is a caller bug. */
	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}
    280 
/*
 * Add one pending count: one more event must complete before the
 * fence signals.  Only valid while the fence is still pending, hence
 * the WARN_ON if the count was not already >= 1.
 */
void i915_sw_fence_await(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);
	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
    286 
/*
 * Initialize @fence.  @fn is the notify callback; it must fit within
 * I915_SW_FENCE_MASK in fence->flags (remaining bits are bookkeeping),
 * hence the alignment BUG_ON.  @name/@key feed waitqueue/lockdep setup
 * on Linux and are unused on NetBSD.
 */
void __i915_sw_fence_init(struct i915_sw_fence *fence,
			  i915_sw_fence_notify_t fn,
			  const char *name,
			  struct lock_class_key *key)
{
	BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);

#ifdef __NetBSD__
	spin_lock_init(&fence->wait.lock);
	INIT_LIST_HEAD(&fence->wait.head);
#else
	__init_waitqueue_head(&fence->wait, name, key);
#endif
	fence->flags = (unsigned long)fn;

	i915_sw_fence_reinit(fence);
}
    304 
/* Reset an initialized fence for reuse: one pending event, no error,
 * and (debug builds) no leftover waiters or cleared callback. */
void i915_sw_fence_reinit(struct i915_sw_fence *fence)
{
	debug_fence_init(fence);

	atomic_set(&fence->pending, 1);
	fence->error = 0;

	I915_SW_FENCE_BUG_ON(!fence->flags);
	I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}
    315 
/* Arm the fence and drop the initial pending count taken at init time;
 * the fence signals once all awaited events have also completed. */
void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	debug_fence_activate(fence);
	i915_sw_fence_complete(fence);
}
    321 
/*
 * Wait-queue callback run when a signaler fence completes: propagate
 * the signaler's error (passed in @flags), detach from the signaler's
 * wait list and complete our own fence.  @key is the continuation
 * list threaded through __i915_sw_fence_wake_up_all() to bound
 * recursion.
 */
static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
	i915_sw_fence_set_error_once(wq->private, flags);

	list_del(&wq->entry);
	__i915_sw_fence_complete(wq->private, key);

	/* Entries allocated by __i915_sw_fence_await_sw_fence() die here. */
	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
		kfree(wq);
	return 0;
}
    333 
/*
 * Depth-first search for @signaler among the fences transitively
 * waiting on @fence.  CHECKED_BIT marks visited nodes so the walk
 * terminates on shared subgraphs; the caller must clear the marks
 * afterwards and hold i915_sw_fence_lock throughout.
 */
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
				    const struct i915_sw_fence * const signaler)
{
	wait_queue_entry_t *wq;

	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		/* Only fence-waiter entries carry another fence in ->private. */
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}
    355 
/* Undo the CHECKED_BIT marks left by __i915_sw_fence_check_if_after(),
 * recursing through the same fence-waiter edges. */
static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_entry_t *wq;

	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}
    370 
    371 static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
    372 				  const struct i915_sw_fence * const signaler)
    373 {
    374 	unsigned long flags;
    375 	bool err;
    376 
    377 	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
    378 		return false;
    379 
    380 	spin_lock_irqsave(&i915_sw_fence_lock, flags);
    381 	err = __i915_sw_fence_check_if_after(fence, signaler);
    382 	__i915_sw_fence_clear_checked_bit(fence);
    383 	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);
    384 
    385 	return err;
    386 }
    387 
/*
 * Make @fence wait upon @signaler.  If @wq is NULL a wait-queue entry
 * is allocated with @gfp; on allocation failure a blocking @gfp falls
 * back to waiting synchronously.  Returns 1 if the wait was queued,
 * 0 if @signaler was already complete (or was waited on inline), or a
 * negative error (-EINVAL on a dependency cycle, -ENOMEM).
 */
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
					  struct i915_sw_fence *signaler,
					  wait_queue_entry_t *wq, gfp_t gfp)
{
	unsigned long flags;
	int pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (i915_sw_fence_done(signaler)) {
		i915_sw_fence_set_error_once(fence, signaler->error);
		return 0;
	}

	debug_fence_assert(signaler);

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	pending = 0;
	if (!wq) {
		wq = kmalloc(sizeof(*wq), gfp);
		if (!wq) {
			if (!gfpflags_allow_blocking(gfp))
				return -ENOMEM;

			/* No memory but we may block: wait inline instead. */
			i915_sw_fence_wait(signaler);
			i915_sw_fence_set_error_once(fence, signaler->error);
			return 0;
		}

		/* Tell i915_sw_fence_wake() to kfree this entry. */
		pending |= I915_SW_FENCE_FLAG_ALLOC;
	}

	INIT_LIST_HEAD(&wq->entry);
	wq->flags = pending;
	wq->func = i915_sw_fence_wake;
	wq->private = fence;

	i915_sw_fence_await(fence);

	/* Re-check under the lock: the signaler may have just completed. */
	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
#ifdef __NetBSD__
		list_add(&wq->entry, &signaler->wait.head);
#else
		__add_wait_queue_entry_tail(&signaler->wait, wq);
#endif
		pending = 1;
	} else {
		/* Lost the race: run the wake callback ourselves. */
		i915_sw_fence_wake(wq, 0, signaler->error, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}
    447 
/* Wait on @signaler using caller-provided wait-queue storage @wq
 * (no allocation; gfp of 0 is never used since @wq is non-NULL-intended). */
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_entry_t *wq)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}
    454 
/* Wait on @signaler, allocating the wait-queue entry with @gfp. */
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
				     struct i915_sw_fence *signaler,
				     gfp_t gfp)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
    461 
/*
 * Extended dma-fence callback used when a timeout is requested:
 * carries the watchdog timer plus the irq_work/rcu machinery needed
 * to tear the timer down safely from callback context.
 */
struct i915_sw_dma_fence_cb_timer {
	struct i915_sw_dma_fence_cb base;
	struct dma_fence *dma;		/* extra reference held for the watchdog */
	struct timer_list timer;	/* fires when the timeout elapses */
	struct irq_work work;		/* deferred teardown (irq_i915_sw_fence_work) */
	struct rcu_head rcu;		/* grace period before kfree */
};
    469 
    470 static void dma_i915_sw_fence_wake(struct dma_fence *dma,
    471 				   struct dma_fence_cb *data)
    472 {
    473 	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
    474 
    475 	i915_sw_fence_set_error_once(cb->fence, dma->error);
    476 	i915_sw_fence_complete(cb->fence);
    477 	kfree(cb);
    478 }
    479 
/*
 * Watchdog timer callback: the dma-fence did not signal within the
 * timeout, so complete the software fence with -ETIMEDOUT.  The xchg
 * on ->fence races against dma_i915_sw_fence_wake_timer(); whoever
 * swaps the pointer to NULL first gets to complete the fence.
 */
static void timer_i915_sw_fence_wake(struct timer_list *t)
{
	struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (!fence)
		return;

	pr_notice("Asynchronous wait on fence %s:%s:%"PRIx64" timed out (hint:%p)\n",
		  cb->dma->ops->get_driver_name(cb->dma),
		  cb->dma->ops->get_timeline_name(cb->dma),
		  (uint64_t)cb->dma->seqno,
		  i915_sw_fence_debug_hint(fence));

	i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
	i915_sw_fence_complete(fence);
}
    498 
/*
 * dma-fence callback for the timed path: complete the fence unless
 * the watchdog already did (xchg race, see timer_i915_sw_fence_wake),
 * then queue irq_work for teardown — del_timer_sync() cannot be
 * called from this (possibly irq) context.
 */
static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
					 struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(data, typeof(*cb), base.base);
	struct i915_sw_fence *fence;

	fence = xchg(&cb->base.fence, NULL);
	if (fence) {
		i915_sw_fence_set_error_once(fence, dma->error);
		i915_sw_fence_complete(fence);
	}

	irq_work_queue(&cb->work);
}
    514 
/*
 * Deferred teardown for the timed callback: stop the watchdog, drop
 * the extra dma-fence reference taken for it, and free the callback
 * after an RCU grace period.
 */
static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(wrk, typeof(*cb), work);

	del_timer_sync(&cb->timer);
	dma_fence_put(cb->dma);

	kfree_rcu(cb, rcu);
}
    525 
/*
 * Make @fence wait upon the dma-fence @dma.  A non-zero @timeout (in
 * jiffies) arms a watchdog that completes the fence with -ETIMEDOUT
 * if @dma never signals.  Returns 1 if a callback was installed, 0 if
 * @dma was already signaled (or was waited on synchronously after an
 * allocation failure), or a negative error.
 */
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	dma_fence_func_t func;
	int ret;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	/* The timed variant needs the larger struct carrying the watchdog. */
	cb = kmalloc(timeout ?
		     sizeof(struct i915_sw_dma_fence_cb_timer) :
		     sizeof(struct i915_sw_dma_fence_cb),
		     gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		/* No memory but we may block: wait synchronously instead. */
		ret = dma_fence_wait(dma, false);
		if (ret)
			return ret;

		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	func = dma_i915_sw_fence_wake;
	if (timeout) {
		struct i915_sw_dma_fence_cb_timer *timer =
			container_of(cb, typeof(*timer), base);

		/* Extra reference kept until irq_i915_sw_fence_work(). */
		timer->dma = dma_fence_get(dma);
		init_irq_work(&timer->work, irq_i915_sw_fence_work);

		timer_setup(&timer->timer,
			    timer_i915_sw_fence_wake, TIMER_IRQSAFE);
		mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));

		func = dma_i915_sw_fence_wake_timer;
	}

	ret = dma_fence_add_callback(dma, &cb->base, func);
	if (ret == 0) {
		ret = 1;
	} else {
		/* Could not attach (already signaled): run the callback now. */
		func(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}
    588 
    589 static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
    590 				     struct dma_fence_cb *data)
    591 {
    592 	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);
    593 
    594 	i915_sw_fence_set_error_once(cb->fence, dma->error);
    595 	i915_sw_fence_complete(cb->fence);
    596 }
    597 
/*
 * As i915_sw_fence_await_dma_fence(), but using caller-provided
 * callback storage @cb: no allocation, no timeout.  Returns 1 if the
 * callback was installed, 0 if @dma was already signaled, or a
 * negative error from dma_fence_add_callback().
 */
int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				    struct dma_fence *dma,
				    struct i915_sw_dma_fence_cb *cb)
{
	int ret;

	debug_fence_assert(fence);

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
	if (ret == 0) {
		ret = 1;
	} else {
		/* Signaled between the check and the add: complete inline. */
		__dma_i915_sw_fence_wake(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}
    625 
/*
 * Make @fence wait upon the fences in reservation object @resv.  With
 * @write set, all shared fences plus any exclusive fence are awaited;
 * otherwise only the exclusive fence.  Fences whose ops match
 * @exclude are skipped (typically our own driver's).  Returns the OR
 * of the per-fence results (>0 if any wait was queued, 0 if all were
 * already signaled) or a negative error.
 */
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct dma_resv *resv,
				    const struct dma_fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct dma_fence *excl;
	int ret = 0, pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		/* Snapshot the exclusive fence and the shared-fence array. */
		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			if (shared[i]->ops == exclude)
				continue;

			pending = i915_sw_fence_await_dma_fence(fence,
								shared[i],
								timeout,
								gfp);
			if (pending < 0) {
				ret = pending;
				break;
			}

			ret |= pending;
		}

		/* Drop the snapshot's references even on early error. */
		for (i = 0; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(resv);
	}

	if (ret >= 0 && excl && excl->ops != exclude) {
		pending = i915_sw_fence_await_dma_fence(fence,
							excl,
							timeout,
							gfp);
		if (pending < 0)
			ret = pending;
		else
			ret |= pending;
	}

	dma_fence_put(excl);

	return ret;
}
    685 
    686 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
    687 #include "selftests/lib_sw_fence.c"
    688 #include "selftests/i915_sw_fence.c"
    689 #endif
    690