/*
 * (CVS web-export navigation header retained as a comment so the file
 * remains valid C:  Home | History | Annotate | Line # | Download |
 * only in i915 -- i915_sw_fence.c revision 1.4)
 */
      1 /*	$NetBSD: i915_sw_fence.c,v 1.4 2021/12/19 11:54:57 riastradh Exp $	*/
      2 
      3 /*
      4  * SPDX-License-Identifier: MIT
      5  *
      6  * (C) Copyright 2016 Intel Corporation
      7  */
      8 
      9 #include <sys/cdefs.h>
     10 __KERNEL_RCSID(0, "$NetBSD: i915_sw_fence.c,v 1.4 2021/12/19 11:54:57 riastradh Exp $");
     11 
     12 #include <linux/slab.h>
     13 #include <linux/dma-fence.h>
     14 #include <linux/irq_work.h>
     15 #include <linux/dma-resv.h>
     16 
     17 #include "i915_sw_fence.h"
     18 #include "i915_selftest.h"
     19 
     20 #include <drm/drm_wait_netbsd.h>
     21 
     22 #include <linux/nbsd-namespace.h>
     23 
     24 #if IS_ENABLED(CONFIG_DRM_I915_DEBUG)
     25 #define I915_SW_FENCE_BUG_ON(expr) BUG_ON(expr)
     26 #else
     27 #define I915_SW_FENCE_BUG_ON(expr) BUILD_BUG_ON_INVALID(expr)
     28 #endif
     29 
     30 #define I915_SW_FENCE_FLAG_ALLOC BIT(3) /* after WQ_FLAG_* for safety */
     31 
#ifdef __NetBSD__		/* XXX */
/*
 * Global lock serializing the dependency-graph cycle check
 * (i915_sw_fence_check_if_after).  Non-static on NetBSD because there
 * is no static spinlock initializer here; presumably initialized
 * elsewhere during attach -- XXX confirm.
 */
spinlock_t i915_sw_fence_lock;
#else
static DEFINE_SPINLOCK(i915_sw_fence_lock);
#endif
     37 
/*
 * debugobjects active-states for a fence: idle, or currently running
 * its notify callback (see debug_fence_set_state calls below).
 */
enum {
	DEBUG_FENCE_IDLE = 0,
	DEBUG_FENCE_NOTIFY,
};
     42 
     43 static void *i915_sw_fence_debug_hint(void *addr)
     44 {
     45 	return (void *)(((struct i915_sw_fence *)addr)->flags & I915_SW_FENCE_MASK);
     46 }
     47 
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS

/*
 * debugobjects tracking: each wrapper below records a lifecycle
 * transition (init/activate/deactivate/destroy/free) for a fence so
 * that misuse (e.g. use-after-free, double activation) is caught when
 * this config option is enabled.
 */
static struct debug_obj_descr i915_sw_fence_debug_descr = {
	.name = "i915_sw_fence",
	.debug_hint = i915_sw_fence_debug_hint,
};

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
	debug_object_init(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
	debug_object_init_on_stack(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
	debug_object_activate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
	debug_object_active_state(fence, &i915_sw_fence_debug_descr, old, new);
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
	debug_object_deactivate(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
	debug_object_destroy(fence, &i915_sw_fence_debug_descr);
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
	debug_object_free(fence, &i915_sw_fence_debug_descr);
	smp_wmb(); /* flush the change in state before reallocation */
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
	debug_object_assert_init(fence, &i915_sw_fence_debug_descr);
}

#else

/* Debug objects disabled: all tracking hooks compile away to nothing. */

static inline void debug_fence_init(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_init_onstack(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_activate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_set_state(struct i915_sw_fence *fence,
					 int old, int new)
{
}

static inline void debug_fence_deactivate(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_destroy(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_free(struct i915_sw_fence *fence)
{
}

static inline void debug_fence_assert(struct i915_sw_fence *fence)
{
}

#endif
    133 
/*
 * Invoke the fence's notify callback with the given state.  The
 * callback pointer lives in the flag-tagged fence->flags word, set up
 * by __i915_sw_fence_init().
 */
static int __i915_sw_fence_notify(struct i915_sw_fence *fence,
				  enum i915_sw_fence_notify state)
{
	i915_sw_fence_notify_t fn;

	/* Mask off the tag bits to recover the function pointer. */
	fn = (i915_sw_fence_notify_t)(fence->flags & I915_SW_FENCE_MASK);
	return fn(fence, state);
}
    142 
/*
 * Tear down a fence.  The fence must already be complete with no
 * waiters left queued on it (asserted below).
 */
void i915_sw_fence_fini(struct i915_sw_fence *fence)
{
#ifdef CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS
	debug_fence_free(fence);
#endif
	spin_lock_destroy(&fence->wait.lock);
	BUG_ON(!list_empty(&fence->wait.head));
}
    151 
    152 #ifdef __NetBSD__
    153 
/* XXX whattakludge */

/*
 * Map the Linux waitqueue type names onto the fence's own wait-list
 * types so the shared code below compiles unchanged on NetBSD.
 */
typedef struct i915_sw_fence_queue wait_queue_head_t;
typedef struct i915_sw_fence_waiter wait_queue_entry_t;

#define	TASK_NORMAL	0

/* Bridge from a fence's waiter list to a NetBSD drm waitqueue. */
struct i915_sw_fence_wq {
	struct i915_sw_fence *fence;
	drm_waitqueue_t wq;
};
    165 
    166 static int
    167 autoremove_wake_function(struct i915_sw_fence_waiter *waiter, unsigned mode,
    168     int flags, void *cookie)
    169 {
    170 	struct i915_sw_fence_wq *sfw = cookie;
    171 
    172 	/* Caller presumably already completed the fence.  */
    173 	DRM_SPIN_WAKEUP_ALL(&sfw->wq, &sfw->fence->wait.lock);
    174 
    175 	list_del_init(&waiter->entry);
    176 
    177 	return 0;
    178 }
    179 
    180 void
    181 i915_sw_fence_wait(struct i915_sw_fence *fence)
    182 {
    183 	struct i915_sw_fence_waiter waiter;
    184 	struct i915_sw_fence_wq sfw;
    185 	int ret;
    186 
    187 	waiter.flags = 0;
    188 	waiter.func = autoremove_wake_function;
    189 	waiter.private = &sfw;
    190 
    191 	sfw.fence = fence;
    192 	DRM_INIT_WAITQUEUE(&sfw.wq, "i915swf");
    193 
    194 	spin_lock(&fence->wait.lock);
    195 	list_add_tail(&waiter.entry, &fence->wait.head);
    196 	DRM_SPIN_WAIT_NOINTR_UNTIL(ret, &sfw.wq, &fence->wait.lock,
    197 	    i915_sw_fence_done(fence));
    198 	spin_unlock(&fence->wait.lock);
    199 
    200 	DRM_DESTROY_WAITQUEUE(&sfw.wq);
    201 }
    202 
    203 #endif
    204 
/*
 * Wake every waiter queued on a now-complete fence.
 *
 * When called recursively from another fence's wake-up (continuation
 * != NULL), dependent-fence waiters are moved onto the continuation
 * list instead of being invoked here, bounding recursion depth;
 * synchronous waiters (autoremove_wake_function) are invoked
 * immediately.  At the top level (continuation == NULL) callbacks run
 * in a loop, splicing back any entries they spill onto the local list.
 */
static void __i915_sw_fence_wake_up_all(struct i915_sw_fence *fence,
					struct list_head *continuation)
{
	wait_queue_head_t *x = &fence->wait;
	wait_queue_entry_t *pos, *next;
	unsigned long flags;

	debug_fence_deactivate(fence);

	/*
	 * To prevent unbounded recursion as we traverse the graph of
	 * i915_sw_fences, we move the entry list from this, the next ready
	 * fence, to the tail of the original fence's entry list
	 * (and so added to the list to be woken).
	 */

	/* Nested class: may be taken while an outer fence's lock is held. */
	spin_lock_irqsave_nested(&x->lock, flags, 1 + !!continuation);
	atomic_set_release(&fence->pending, -1); /* 0 -> -1 [done] */
	if (continuation) {
		list_for_each_entry_safe(pos, next, &x->head, entry) {
			if (pos->func == autoremove_wake_function)
				pos->func(pos, TASK_NORMAL, 0, continuation);
			else
				list_move_tail(&pos->entry, continuation);
		}
	} else {
		LIST_HEAD(extra);

		do {
			list_for_each_entry_safe(pos, next, &x->head, entry) {
				pos->func(pos,
					  TASK_NORMAL, fence->error,
					  &extra);
			}

			/* Callbacks deferred no further work: done. */
			if (list_empty(&extra))
				break;

			list_splice_tail_init(&extra, &x->head);
		} while (1);
	}
	spin_unlock_irqrestore(&x->lock, flags);

	debug_fence_assert(fence);
}
    250 
/*
 * Drop one pending count on the fence.  When the count reaches zero
 * the fence is complete: notify FENCE_COMPLETE, and unless the
 * callback claims the fence (returns != NOTIFY_DONE), wake all
 * waiters and send the final FENCE_FREE notification.
 */
static void __i915_sw_fence_complete(struct i915_sw_fence *fence,
				     struct list_head *continuation)
{
	debug_fence_assert(fence);

	/* Not the last reference to completion yet?  Nothing to do. */
	if (!atomic_dec_and_test(&fence->pending))
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_IDLE, DEBUG_FENCE_NOTIFY);

	/* Callback took ownership of completion; stop here. */
	if (__i915_sw_fence_notify(fence, FENCE_COMPLETE) != NOTIFY_DONE)
		return;

	debug_fence_set_state(fence, DEBUG_FENCE_NOTIFY, DEBUG_FENCE_IDLE);

	__i915_sw_fence_wake_up_all(fence, continuation);

	debug_fence_destroy(fence);
	__i915_sw_fence_notify(fence, FENCE_FREE);
}
    271 
/*
 * Public completion entry point: drop one pending count.  Completing
 * an already-done fence is a caller bug (warned and ignored).
 */
void i915_sw_fence_complete(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);

	if (WARN_ON(i915_sw_fence_done(fence)))
		return;

	__i915_sw_fence_complete(fence, NULL);
}
    281 
/*
 * Add one pending count (an outstanding dependency) to the fence.
 * Must be called while the fence is still pending (count >= 1), hence
 * the warning if the result is not > 1.
 */
void i915_sw_fence_await(struct i915_sw_fence *fence)
{
	debug_fence_assert(fence);
	WARN_ON(atomic_inc_return(&fence->pending) <= 1);
}
    287 
/*
 * Initialize a fence with its notify callback.  The callback pointer
 * is stored directly in fence->flags, so it must be aligned such that
 * no bits outside I915_SW_FENCE_MASK are set (asserted below).
 */
void __i915_sw_fence_init(struct i915_sw_fence *fence,
			  i915_sw_fence_notify_t fn,
			  const char *name,
			  struct lock_class_key *key)
{
	BUG_ON(!fn || (unsigned long)fn & ~I915_SW_FENCE_MASK);

#ifdef __NetBSD__
	/* name/key are only used by Linux lockdep; unused here. */
	spin_lock_init(&fence->wait.lock);
	INIT_LIST_HEAD(&fence->wait.head);
#else
	__init_waitqueue_head(&fence->wait, name, key);
#endif
	fence->flags = (unsigned long)fn;

	i915_sw_fence_reinit(fence);
}
    305 
/*
 * Reset an initialized fence for reuse: one pending count (the
 * "commit" reference), no error, waiter list expected empty.
 */
void i915_sw_fence_reinit(struct i915_sw_fence *fence)
{
	debug_fence_init(fence);

	atomic_set(&fence->pending, 1);
	fence->error = 0;

	I915_SW_FENCE_BUG_ON(!fence->flags);
	I915_SW_FENCE_BUG_ON(!list_empty(&fence->wait.head));
}
    316 
/*
 * Commit the fence: drop the initial pending reference taken at
 * init/reinit, allowing the fence to signal once all awaited
 * dependencies have completed.
 */
void i915_sw_fence_commit(struct i915_sw_fence *fence)
{
	debug_fence_activate(fence);
	i915_sw_fence_complete(fence);
}
    322 
/*
 * Waiter callback linking fence-to-fence dependencies: wq->private is
 * the dependent fence; flags carries the signaler's error and key the
 * continuation list (see __i915_sw_fence_wake_up_all).  Frees the
 * entry if it was heap-allocated by __i915_sw_fence_await_sw_fence.
 */
static int i915_sw_fence_wake(wait_queue_entry_t *wq, unsigned mode, int flags, void *key)
{
	i915_sw_fence_set_error_once(wq->private, flags);

	list_del(&wq->entry);
	__i915_sw_fence_complete(wq->private, key);

	if (wq->flags & I915_SW_FENCE_FLAG_ALLOC)
		kfree(wq);
	return 0;
}
    334 
/*
 * Recursively search the waiter graph rooted at @fence for @signaler;
 * returns true if found (i.e. adding fence->signaler would create a
 * cycle).  Marks visited fences with I915_SW_FENCE_CHECKED_BIT, which
 * the caller must clear via __i915_sw_fence_clear_checked_bit().
 * Called under i915_sw_fence_lock.
 */
static bool __i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
				    const struct i915_sw_fence * const signaler)
{
	wait_queue_entry_t *wq;

	/* Already visited: prune this branch. */
	if (__test_and_set_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return false;

	if (fence == signaler)
		return true;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		/* Only fence-to-fence links form graph edges. */
		if (wq->func != i915_sw_fence_wake)
			continue;

		if (__i915_sw_fence_check_if_after(wq->private, signaler))
			return true;
	}

	return false;
}
    356 
/*
 * Undo the visited marks set by __i915_sw_fence_check_if_after(),
 * walking the same fence-to-fence edges.  Called under
 * i915_sw_fence_lock.
 */
static void __i915_sw_fence_clear_checked_bit(struct i915_sw_fence *fence)
{
	wait_queue_entry_t *wq;

	/* Not marked: this subtree was never visited (or already cleared). */
	if (!__test_and_clear_bit(I915_SW_FENCE_CHECKED_BIT, &fence->flags))
		return;

	list_for_each_entry(wq, &fence->wait.head, entry) {
		if (wq->func != i915_sw_fence_wake)
			continue;

		__i915_sw_fence_clear_checked_bit(wq->private);
	}
}
    371 
/*
 * Cycle check: would making @fence wait on @signaler create a loop in
 * the dependency DAG?  Compiled out unless
 * CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is enabled.  Serialized globally
 * by i915_sw_fence_lock since the mark bits are per-fence state.
 */
static bool i915_sw_fence_check_if_after(struct i915_sw_fence *fence,
				  const struct i915_sw_fence * const signaler)
{
	unsigned long flags;
	bool err;

	if (!IS_ENABLED(CONFIG_DRM_I915_SW_FENCE_CHECK_DAG))
		return false;

	spin_lock_irqsave(&i915_sw_fence_lock, flags);
	err = __i915_sw_fence_check_if_after(fence, signaler);
	__i915_sw_fence_clear_checked_bit(fence);
	spin_unlock_irqrestore(&i915_sw_fence_lock, flags);

	return err;
}
    388 
/*
 * Make @fence wait upon @signaler.
 *
 * @wq may be a caller-provided waiter entry; if NULL, one is allocated
 * with @gfp (falling back to a blocking synchronous wait if allocation
 * fails and @gfp allows blocking).
 *
 * Returns 1 if the wait was queued, 0 if @signaler was already
 * complete (error propagated), -EINVAL if the wait would create a
 * dependency cycle, or -ENOMEM on non-blocking allocation failure.
 */
static int __i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
					  struct i915_sw_fence *signaler,
					  wait_queue_entry_t *wq, gfp_t gfp)
{
	unsigned long flags;
	int pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (i915_sw_fence_done(signaler)) {
		i915_sw_fence_set_error_once(fence, signaler->error);
		return 0;
	}

	debug_fence_assert(signaler);

	/* The dependency graph must be acyclic. */
	if (unlikely(i915_sw_fence_check_if_after(fence, signaler)))
		return -EINVAL;

	pending = 0;
	if (!wq) {
		wq = kmalloc(sizeof(*wq), gfp);
		if (!wq) {
			if (!gfpflags_allow_blocking(gfp))
				return -ENOMEM;

			/* No memory: just wait synchronously instead. */
			i915_sw_fence_wait(signaler);
			i915_sw_fence_set_error_once(fence, signaler->error);
			return 0;
		}

		/* Mark the entry so i915_sw_fence_wake frees it. */
		pending |= I915_SW_FENCE_FLAG_ALLOC;
	}

	INIT_LIST_HEAD(&wq->entry);
	wq->flags = pending;
	wq->func = i915_sw_fence_wake;
	wq->private = fence;

	i915_sw_fence_await(fence);

	spin_lock_irqsave(&signaler->wait.lock, flags);
	if (likely(!i915_sw_fence_done(signaler))) {
#ifdef __NetBSD__
		list_add(&wq->entry, &signaler->wait.head);
#else
		__add_wait_queue_entry_tail(&signaler->wait, wq);
#endif
		pending = 1;
	} else {
		/* Lost the race: signaler completed; fire the waiter now. */
		i915_sw_fence_wake(wq, 0, signaler->error, NULL);
		pending = 0;
	}
	spin_unlock_irqrestore(&signaler->wait.lock, flags);

	return pending;
}
    448 
/*
 * Wait upon @signaler using the caller-provided waiter entry @wq
 * (no allocation; gfp of 0 forbids blocking fallbacks).
 */
int i915_sw_fence_await_sw_fence(struct i915_sw_fence *fence,
				 struct i915_sw_fence *signaler,
				 wait_queue_entry_t *wq)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, wq, 0);
}
    455 
/*
 * Wait upon @signaler, allocating the waiter entry with @gfp.
 */
int i915_sw_fence_await_sw_fence_gfp(struct i915_sw_fence *fence,
				     struct i915_sw_fence *signaler,
				     gfp_t gfp)
{
	return __i915_sw_fence_await_sw_fence(fence, signaler, NULL, gfp);
}
    462 
/*
 * Extended dma-fence callback used when a timeout is requested:
 * carries the timer that fires -ETIMEDOUT, the irq_work that tears the
 * timer down from a safe context, and an rcu head for deferred free.
 */
struct i915_sw_dma_fence_cb_timer {
	struct i915_sw_dma_fence_cb base;
	struct dma_fence *dma;
	struct timer_list timer;
	struct irq_work work;
	struct rcu_head rcu;
};
    470 
/*
 * dma-fence callback (no-timeout case): propagate the dma-fence error,
 * complete the sw fence, and free the callback node.
 */
static void dma_i915_sw_fence_wake(struct dma_fence *dma,
				   struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
	kfree(cb);
}
    480 
/*
 * Timeout timer callback: if we win the xchg race against the
 * dma-fence callback (dma_i915_sw_fence_wake_timer), log the stuck
 * wait and complete the sw fence with -ETIMEDOUT.
 */
static void timer_i915_sw_fence_wake(struct timer_list *t)
{
	struct i915_sw_dma_fence_cb_timer *cb = from_timer(cb, t, timer);
	struct i915_sw_fence *fence;

	/* Claim the fence; NULL means the dma-fence callback beat us. */
	fence = xchg(&cb->base.fence, NULL);
	if (!fence)
		return;

	pr_notice("Asynchronous wait on fence %s:%s:%"PRIx64" timed out (hint:%p)\n",
		  cb->dma->ops->get_driver_name(cb->dma),
		  cb->dma->ops->get_timeline_name(cb->dma),
		  (uint64_t)cb->dma->seqno,
		  i915_sw_fence_debug_hint(fence));

	i915_sw_fence_set_error_once(fence, -ETIMEDOUT);
	i915_sw_fence_complete(fence);
}
    499 
/*
 * dma-fence callback (timeout case): complete the sw fence if the
 * timeout has not already claimed it, then defer timer teardown and
 * freeing to irq_work (we may be in the timer's own context here).
 */
static void dma_i915_sw_fence_wake_timer(struct dma_fence *dma,
					 struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(data, typeof(*cb), base.base);
	struct i915_sw_fence *fence;

	/* Claim the fence; NULL means the timeout already fired. */
	fence = xchg(&cb->base.fence, NULL);
	if (fence) {
		i915_sw_fence_set_error_once(fence, dma->error);
		i915_sw_fence_complete(fence);
	}

	irq_work_queue(&cb->work);
}
    515 
/*
 * irq_work: stop the timeout timer, drop the dma-fence reference and
 * free the callback node (RCU-deferred) from a context where
 * del_timer_sync() is safe.
 */
static void irq_i915_sw_fence_work(struct irq_work *wrk)
{
	struct i915_sw_dma_fence_cb_timer *cb =
		container_of(wrk, typeof(*cb), work);

	del_timer_sync(&cb->timer);
	dma_fence_put(cb->dma);

	kfree_rcu(cb, rcu);
}
    526 
/*
 * Make @fence wait upon the dma-fence @dma.
 *
 * If @timeout (jiffies) is non-zero, a timer completes the wait with
 * -ETIMEDOUT should @dma fail to signal in time.  On allocation
 * failure with a blocking @gfp, waits synchronously instead.
 *
 * Returns 1 if a callback was installed, 0 if @dma was already
 * signaled (error propagated), or a negative error code.
 */
int i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				  struct dma_fence *dma,
				  unsigned long timeout,
				  gfp_t gfp)
{
	struct i915_sw_dma_fence_cb *cb;
	dma_fence_func_t func;
	int ret;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	/* The timeout variant needs the larger callback structure. */
	cb = kmalloc(timeout ?
		     sizeof(struct i915_sw_dma_fence_cb_timer) :
		     sizeof(struct i915_sw_dma_fence_cb),
		     gfp);
	if (!cb) {
		if (!gfpflags_allow_blocking(gfp))
			return -ENOMEM;

		/* No memory: wait synchronously instead. */
		ret = dma_fence_wait(dma, false);
		if (ret)
			return ret;

		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	func = dma_i915_sw_fence_wake;
	if (timeout) {
		struct i915_sw_dma_fence_cb_timer *timer =
			container_of(cb, typeof(*timer), base);

		/* Hold a reference to @dma for the timeout message. */
		timer->dma = dma_fence_get(dma);
		init_irq_work(&timer->work, irq_i915_sw_fence_work);

		timer_setup(&timer->timer,
			    timer_i915_sw_fence_wake, TIMER_IRQSAFE);
		mod_timer(&timer->timer, round_jiffies_up(jiffies + timeout));

		func = dma_i915_sw_fence_wake_timer;
	}

	ret = dma_fence_add_callback(dma, &cb->base, func);
	if (ret == 0) {
		ret = 1;
	} else {
		/* Fence signaled (or error): run the callback ourselves. */
		func(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}
    589 
/*
 * dma-fence callback for caller-embedded cb nodes: propagate the error
 * and complete the sw fence.  No kfree -- the node is owned by the
 * caller of __i915_sw_fence_await_dma_fence().
 */
static void __dma_i915_sw_fence_wake(struct dma_fence *dma,
				     struct dma_fence_cb *data)
{
	struct i915_sw_dma_fence_cb *cb = container_of(data, typeof(*cb), base);

	i915_sw_fence_set_error_once(cb->fence, dma->error);
	i915_sw_fence_complete(cb->fence);
}
    598 
/*
 * Make @fence wait upon @dma using a caller-provided callback node
 * @cb (no allocation, no timeout).
 *
 * Returns 1 if the callback was installed, 0 if @dma was already
 * signaled (error propagated), or a negative error code from
 * dma_fence_add_callback().
 */
int __i915_sw_fence_await_dma_fence(struct i915_sw_fence *fence,
				    struct dma_fence *dma,
				    struct i915_sw_dma_fence_cb *cb)
{
	int ret;

	debug_fence_assert(fence);

	if (dma_fence_is_signaled(dma)) {
		i915_sw_fence_set_error_once(fence, dma->error);
		return 0;
	}

	cb->fence = fence;
	i915_sw_fence_await(fence);

	ret = dma_fence_add_callback(dma, &cb->base, __dma_i915_sw_fence_wake);
	if (ret == 0) {
		ret = 1;
	} else {
		/* Fence signaled (or error): run the callback ourselves. */
		__dma_i915_sw_fence_wake(dma, &cb->base);
		if (ret == -ENOENT) /* fence already signaled */
			ret = 0;
	}

	return ret;
}
    626 
/*
 * Make @fence wait upon the fences held in the reservation object
 * @resv: the exclusive fence always (unless its ops match @exclude),
 * and additionally all shared fences when @write is set.
 *
 * Returns a positive value if any waits were queued, 0 if everything
 * was already signaled, or a negative error code.
 */
int i915_sw_fence_await_reservation(struct i915_sw_fence *fence,
				    struct dma_resv *resv,
				    const struct dma_fence_ops *exclude,
				    bool write,
				    unsigned long timeout,
				    gfp_t gfp)
{
	struct dma_fence *excl;
	int ret = 0, pending;

	debug_fence_assert(fence);
	might_sleep_if(gfpflags_allow_blocking(gfp));

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		/* Snapshot both the exclusive and all shared fences. */
		ret = dma_resv_get_fences_rcu(resv, &excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			/* Skip fences from the excluded provider. */
			if (shared[i]->ops == exclude)
				continue;

			pending = i915_sw_fence_await_dma_fence(fence,
								shared[i],
								timeout,
								gfp);
			if (pending < 0) {
				ret = pending;
				break;
			}

			/* Accumulate "any wait queued" into ret. */
			ret |= pending;
		}

		for (i = 0; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = dma_resv_get_excl_rcu(resv);
	}

	if (ret >= 0 && excl && excl->ops != exclude) {
		pending = i915_sw_fence_await_dma_fence(fence,
							excl,
							timeout,
							gfp);
		if (pending < 0)
			ret = pending;
		else
			ret |= pending;
	}

	dma_fence_put(excl);

	return ret;
}
    686 
    687 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
    688 #include "selftests/lib_sw_fence.c"
    689 #include "selftests/i915_sw_fence.c"
    690 #endif
    691