/*	$NetBSD: linux_dma_fence.c,v 1.1 2021/12/19 00:27:01 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.1 2021/12/19 00:27:01 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/fence.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * linux_fence_trace
 *
 *	True if we print FENCE_TRACE messages, false if not.  These are
 *	extremely noisy, too much even for AB_VERBOSE and AB_DEBUG in
 *	boothowto.
 */
int	linux_fence_trace = 0;

/*
 * fence_referenced_p(fence)
 *
 *	True if fence has a positive reference count.  True after
 *	fence_init; after the last fence_put, this becomes false.
 */
static inline bool __diagused
fence_referenced_p(struct fence *fence)
{

	return kref_referenced_p(&fence->refcount);
}

/*
 * fence_init(fence, ops, lock, context, seqno)
 *
 *	Initialize fence.  Caller should call fence_destroy when done,
 *	after all references have been released.
 */
void
fence_init(struct fence *fence, const struct fence_ops *ops, spinlock_t *lock,
    unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "fence");
}

/*
 * fence_destroy(fence)
 *
 *	Clean up memory initialized with fence_init.  This is meant to
 *	be used after a fence release callback.
 */
void
fence_destroy(struct fence *fence)
{

	KASSERT(!fence_referenced_p(fence));

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
fence_free_cb(struct rcu_head *rcu)
{
	struct fence *fence = container_of(rcu, struct fence, f_rcu);

	KASSERT(!fence_referenced_p(fence));

	fence_destroy(fence);
	kfree(fence);
}

/*
 * fence_free(fence)
 *
 *	Schedule fence to be destroyed and then freed with kfree after
 *	any pending RCU read sections on all CPUs have completed.
 *	Caller must guarantee all references have been released.  This
 *	is meant to be used after a fence release callback.
 *
 *	NOTE: The fence is freed with kfree, but this code never
 *	allocates fences itself -- the caller is expected to have
 *	allocated the memory with kmalloc before initializing it with
 *	fence_init.
 */
void
fence_free(struct fence *fence)
{

	KASSERT(!fence_referenced_p(fence));

	call_rcu(&fence->f_rcu, &fence_free_cb);
}

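/*
 * Illustrative sketch (editor's example, not part of the original
 * file): a driver whose fence is embedded in a larger kmalloc'ed
 * object cannot rely on the fence_free fallback, because kfree would
 * be applied to the fence member rather than to the containing
 * object.  Such a driver supplies its own release callback along
 * these lines, deferring the free past RCU readers just as fence_free
 * does.  The example_job names are hypothetical.
 */
struct example_job {
	struct fence	ej_fence;	/* initialized with fence_init */
	int		ej_id;
};

static void
example_job_free_cb(struct rcu_head *rcu)
{
	struct fence *fence = container_of(rcu, struct fence, f_rcu);
	struct example_job *ej = container_of(fence, struct example_job,
	    ej_fence);

	/* Tear down what fence_init set up, then free the whole job.  */
	fence_destroy(fence);
	kfree(ej);
}

static void __unused
example_job_fence_release(struct fence *fence)
{

	/* Defer destruction past any concurrent fence_get_rcu users.  */
	call_rcu(&fence->f_rcu, &example_job_free_cb);
}
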
/*
 * fence_context_alloc(n)
 *
 *	Return the first of a contiguous sequence of unique
 *	identifiers, at least until the system wraps around.
 */
unsigned
fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}

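/*
 * Illustrative sketch (editor's example): a hypothetical driver
 * typically allocates one context at attach time and then numbers its
 * fences sequentially within that context, all sharing one driver
 * lock.  The example_softc layout is made up, sc_fence_lock is
 * assumed to be initialized elsewhere by the driver, and ops would
 * point at a driver-supplied fence_ops table (see the sketch after
 * fence_default_wait at the end of this file).
 */
struct example_softc {
	spinlock_t	sc_fence_lock;	/* assumed initialized elsewhere */
	unsigned	sc_fence_context;
	unsigned	sc_next_seqno;
};

static void __unused
example_attach_fences(struct example_softc *sc)
{

	sc->sc_fence_context = fence_context_alloc(1);
	sc->sc_next_seqno = 0;
}

static __unused struct fence *
example_submit_fence(struct example_softc *sc, const struct fence_ops *ops)
{
	struct fence *fence;

	/* Assumes kmalloc is available here, as kcalloc/kfree are.  */
	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (fence == NULL)
		return NULL;
	fence_init(fence, ops, &sc->sc_fence_lock, sc->sc_fence_context,
	    ++sc->sc_next_seqno);
	return fence;
}
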
/*
 * fence_is_later(a, b)
 *
 *	True if the sequence number of fence a is later than the
 *	sequence number of fence b.  Since sequence numbers wrap
 *	around, we define this to mean that the sequence number of
 *	fence a is less than INT_MAX past the sequence number of
 *	fence b.
 *
 *	The two fences must have the same context.
 */
bool
fence_is_later(struct fence *a, struct fence *b)
{

	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}

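/*
 * Worked example (editor's note), assuming 32-bit unsigned sequence
 * numbers as used here: if b->seqno == UINT_MAX - 1 and a->seqno == 2,
 * then a->seqno - b->seqno wraps around to 4, which is less than
 * INT_MAX, so fence_is_later(a, b) is true -- a counts as newer even
 * though its raw sequence number is smaller.  Conversely,
 * fence_is_later(b, a) computes UINT_MAX - 3, which is not less than
 * INT_MAX, so it is false.
 */
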
/*
 * fence_get(fence)
 *
 *	Acquire a reference to fence.  The fence must not be in the
 *	process of being destroyed.  Return the fence.
 */
struct fence *
fence_get(struct fence *fence)
{

	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/*
 * fence_get_rcu(fence)
 *
 *	Attempt to acquire a reference to a fence that may be about to
 *	be destroyed, during an RCU read section.  Return the fence on
 *	success, or NULL on failure.
 */
struct fence *
fence_get_rcu(struct fence *fence)
{

	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

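/*
 * Illustrative sketch (editor's example): the usual pattern for
 * fence_get_rcu is a lookup that races with the last fence_put.  The
 * caller is assumed to be inside an RCU read section and fencep is
 * assumed to point at a fence pointer published under RCU rules;
 * example_try_get_fence is a hypothetical name.
 */
static __unused struct fence *
example_try_get_fence(struct fence *const *fencep)
{
	struct fence *fence = *fencep;

	if (fence == NULL)
		return NULL;

	/*
	 * If the reference count has already dropped to zero, the
	 * fence is being released and must not be resurrected; report
	 * failure so the caller can retry the lookup or give up.
	 */
	return fence_get_rcu(fence);
}
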
static void
fence_release(struct kref *refcount)
{
	struct fence *fence = container_of(refcount, struct fence, refcount);

	KASSERT(!fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		fence_free(fence);
}

/*
 * fence_put(fence)
 *
 *	Release a reference to fence.  If this was the last one, call
 *	the fence's release callback.
 */
void
fence_put(struct fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(fence_referenced_p(fence));
	kref_put(&fence->refcount, &fence_release);
}

/*
 * fence_ensure_signal_enabled(fence)
 *
 *	Internal subroutine.  If the fence was already signalled,
 *	return -ENOENT.  Otherwise, if the enable signalling callback
 *	has not been called yet, call it.  If it fails, signal the
 *	fence and return -ENOENT.  If it succeeds, or if it had already
 *	been called, return zero to indicate success.
 *
 *	Caller must hold the fence's lock.
 */
static int
fence_ensure_signal_enabled(struct fence *fence)
{

	KASSERT(fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If the fence was already signalled, fail with -ENOENT.  */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Mark the enable-signalling callback as called.  If it had
	 * already been called, we're done.
	 */
	if (test_and_set_bit(FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags))
		return 0;

	/* This is the first time: call the callback.  */
	if (!(*fence->ops->enable_signaling)(fence)) {
		/* If it failed, signal and return -ENOENT.  */
		fence_signal_locked(fence);
		return -ENOENT;
	}

	/* Success!  */
	return 0;
}

/*
 * fence_add_callback(fence, fcb, fn)
 *
 *	If fence has been signalled, return -ENOENT.  If the enable
 *	signalling callback hasn't been called yet, call it; if it
 *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
 *	fcb) when it is signalled, and return 0.
 *
 *	The fence uses the memory allocated by the caller in fcb from
 *	the call to fence_add_callback until either fence_remove_callback
 *	removes it or fn has been called.
 */
int
fence_add_callback(struct fence *fence, struct fence_cb *fcb, fence_func_t fn)
{
	int ret;

	KASSERT(fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't.  */
	ret = fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback.  */
	fcb->fcb_func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;

	/* Release the lock and we're done.  */
out1:	spin_unlock(fence->lock);
out0:	return ret;
}

/*
 * fence_remove_callback(fence, fcb)
 *
 *	Remove the callback fcb from fence.  Return true if it was
 *	removed from the list, or false if it had already run and so
 *	was no longer queued anyway.  Caller must have already called
 *	fence_add_callback(fence, fcb).
 */
bool
fence_remove_callback(struct fence *fence, struct fence_cb *fcb)
{
	bool onqueue;

	KASSERT(fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}

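/*
 * Illustrative sketch (editor's example): a typical caller treats
 * -ENOENT from fence_add_callback as "already signalled" and proceeds
 * at once, and uses fence_remove_callback on teardown paths where the
 * callback may or may not have run.  The example_wait_ctx names are
 * hypothetical; ewc_lock and ewc_cv are assumed to have been set up
 * by the caller with mutex_init and cv_init.  The wait_any machinery
 * further below follows the same shape for multiple fences.
 */
struct example_wait_ctx {
	struct fence_cb	ewc_fcb;
	kmutex_t	ewc_lock;
	kcondvar_t	ewc_cv;
	bool		ewc_done;
};

static void
example_notify(struct fence *fence, struct fence_cb *fcb)
{
	struct example_wait_ctx *ewc = container_of(fcb,
	    struct example_wait_ctx, ewc_fcb);

	KASSERT(fence_referenced_p(fence));

	mutex_enter(&ewc->ewc_lock);
	ewc->ewc_done = true;
	cv_broadcast(&ewc->ewc_cv);
	mutex_exit(&ewc->ewc_lock);
}

static int __unused
example_arm(struct fence *fence, struct example_wait_ctx *ewc)
{
	int error;

	error = fence_add_callback(fence, &ewc->ewc_fcb, &example_notify);
	if (error == -ENOENT) {
		/*
		 * Already signalled: example_notify will never run, so
		 * mark the wait as done ourselves.
		 */
		mutex_enter(&ewc->ewc_lock);
		ewc->ewc_done = true;
		mutex_exit(&ewc->ewc_lock);
		error = 0;
	}
	return error;
}
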
/*
 * fence_enable_sw_signaling(fence)
 *
 *	If the fence hasn't been signalled yet and the enable
 *	signalling callback hasn't been called yet, call it.  If the
 *	callback indicates failure by returning false, signal the
 *	fence.
 */
void
fence_enable_sw_signaling(struct fence *fence)
{

	KASSERT(fence_referenced_p(fence));

	spin_lock(fence->lock);
	(void)fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * fence_is_signaled(fence)
 *
 *	Test whether the fence has been signalled.  If it has been
 *	signalled by fence_signal(_locked), return true.  If the
 *	signalled callback returns true indicating that some implicit
 *	external condition has changed, call the callbacks as if with
 *	fence_signal.
 */
bool
fence_is_signaled(struct fence *fence)
{
	bool signaled;

	KASSERT(fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * fence_is_signaled_locked(fence)
 *
 *	Test whether the fence has been signalled.  Like
 *	fence_is_signaled, but caller already holds the fence's lock.
 */
bool
fence_is_signaled_locked(struct fence *fence)
{

	KASSERT(fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit.  */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it.  */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon.  Act as though someone
			 * has called fence_signal.
			 */
			fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * fence_signal(fence)
 *
 *	Signal the fence.  If it has already been signalled, return
 *	-EINVAL.  Otherwise, set the signalled bit, wake any waiters,
 *	remove each registered callback from the queue and call it,
 *	and then return 0.
 */
int
fence_signal(struct fence *fence)
{
	int ret;

	KASSERT(fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * fence_signal_locked(fence)
 *
 *	Signal the fence.  Like fence_signal, but caller already holds
 *	the fence's lock.
 */
int
fence_signal_locked(struct fence *fence)
{
	struct fence_cb *fcb, *next;

	KASSERT(fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit.  */
	if (test_and_set_bit(FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Wake waiters.  */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks.  */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->fcb_func)(fence, fcb);
	}

	/* Success! */
	return 0;
}

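/*
 * Illustrative sketch (editor's example): a driver that keeps its
 * outstanding fences on a list protected by the same spin lock it
 * passed to fence_init can signal completed work directly from its
 * interrupt or completion path with fence_signal_locked, since that
 * lock is already held for the list walk.  The example_engine and
 * example_pending layouts are hypothetical; each ep_fence is assumed
 * to have been initialized with &ee->ee_lock as its lock.
 */
struct example_pending {
	TAILQ_ENTRY(example_pending)	ep_entry;
	struct fence			ep_fence;
};

struct example_engine {
	spinlock_t			ee_lock;
	TAILQ_HEAD(, example_pending)	ee_pending;
};

static void __unused
example_complete_intr(struct example_engine *ee, unsigned completed_seqno)
{
	struct example_pending *ep, *next;

	spin_lock(&ee->ee_lock);
	TAILQ_FOREACH_SAFE(ep, &ee->ee_pending, ep_entry, next) {
		/* Same wraparound rule as fence_is_later.  */
		if (completed_seqno - ep->ep_fence.seqno >= INT_MAX)
			continue;	/* not completed yet */
		TAILQ_REMOVE(&ee->ee_pending, ep, ep_entry);
		(void)fence_signal_locked(&ep->ep_fence);
		/*
		 * A real driver would also drop the reference it held
		 * for the pending list, e.g. with fence_put, once it
		 * is safe to do so.
		 */
	}
	spin_unlock(&ee->ee_lock);
}
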
struct wait_any {
	struct fence_cb	fcb;
	struct wait_any1 {
		kmutex_t	lock;
		kcondvar_t	cv;
		bool		done;
	}		*common;
};

static void
wait_any_cb(struct fence *fence, struct fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * fence_wait_any_timeout(fences, nfences, intr, timeout)
 *
 *	Wait for any of fences[0], fences[1], fences[2], ...,
 *	fences[nfences-1] to be signaled.
 */
long
fence_wait_any_timeout(struct fence **fences, uint32_t nfences, bool intr,
    long timeout)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records.  */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait.  */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;

	/* Add a callback to each of the fences, or stop here if we can't.  */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(fence_referenced_p(fences[i]));
		ret = fence_add_callback(fences[i], &cb[i].fcb, &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled.  If they
	 * have, stop here.  If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags))
			goto out1;
	}

	/*
	 * None of them was ready immediately.  Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

out1:	while (i --> 0)
		(void)fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}

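/*
 * Illustrative sketch (editor's example): a thin caller of
 * fence_wait_any_timeout.  example_wait_for_either is a hypothetical
 * name, and the comment on the return value is the editor's reading
 * of the implementation above rather than a documented contract.
 */
static long __unused
example_wait_for_either(struct fence *a, struct fence *b, long timeout_ticks)
{
	struct fence *fences[2] = { a, b };

	/*
	 * Negative: error (-ENOMEM if the callback array could not be
	 * allocated, -ERESTARTSYS if interrupted, -ENOENT if a fence
	 * was found already signalled while the callbacks were being
	 * registered).  Zero: the timeout expired.  Positive: number
	 * of ticks remaining when one of the fences was signalled.
	 */
	return fence_wait_any_timeout(fences, 2, true, timeout_ticks);
}
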
/*
 * fence_wait_timeout(fence, intr, timeout)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true; or until timeout, if positive.  Return -ERESTARTSYS if
 *	interrupted, negative error code on any other error, zero on
 *	timeout, or positive number of ticks remaining if the fence is
 *	signalled before the timeout.  Works by calling the fence wait
 *	callback.
 *
 *	The timeout must be nonnegative and less than
 *	MAX_SCHEDULE_TIMEOUT.
 */
long
fence_wait_timeout(struct fence *fence, bool intr, long timeout)
{

	KASSERT(fence_referenced_p(fence));
	KASSERT(timeout >= 0);
	KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

	return (*fence->ops->wait)(fence, intr, timeout);
}

/*
 * fence_wait(fence, intr)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true.  Return -ERESTARTSYS if interrupted, negative error code
 *	on any other error, zero on success.  Works by calling the
 *	fence wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
fence_wait(struct fence *fence, bool intr)
{
	long ret;

	KASSERT(fence_referenced_p(fence));

	ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}

/*
 * fence_default_wait(fence, intr, timeout)
 *
 *	Default implementation of fence wait callback using a condition
 *	variable.  If the fence is already signalled, return timeout,
 *	or 1 if no timeout.  If the enable signalling callback hasn't
 *	been called, call it, and if it fails, act as if the fence had
 *	been signalled.  Otherwise, wait on the internal condvar.  If
 *	timeout is MAX_SCHEDULE_TIMEOUT, treat it as no timeout.
 */
long
fence_default_wait(struct fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))
		return (timeout < MAX_SCHEDULE_TIMEOUT ? timeout : 1);

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't.  */
	ret = fence_ensure_signal_enabled(fence);
	if (ret)
		goto out;

	/* Find out what our deadline is so we can handle spurious wakeup.  */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set.  */
	while (!(fence->flags & (1u << FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up.  */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

out:
	/* All done.  Release the lock.  */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout.  */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this.  */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0.  */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS.  */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail.  */
	if (ret)
		return ret;

	/*
	 * Success!  Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}
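
/*
 * Illustrative sketch (editor's example): a minimal fence_ops table
 * for a fence that is signalled explicitly by driver code.  Leaving
 * .signaled unset means fence_is_signaled trusts the signalled bit
 * alone; .wait can simply reuse fence_default_wait; and the release
 * callback here assumes the fence was kmalloc'ed on its own, so
 * deferring to fence_free is enough.  All example_* names are
 * hypothetical.
 */
static bool
example_fence_enable_signaling(struct fence *fence __unused)
{

	/*
	 * Nothing extra to arm: this driver signals its fences
	 * unconditionally from its completion path, so report success.
	 */
	return true;
}

static void
example_fence_release(struct fence *fence)
{

	/* Standalone kmalloc'ed fence: fence_free destroys and kfrees.  */
	fence_free(fence);
}

static const struct fence_ops example_fence_ops __unused = {
	.enable_signaling = example_fence_enable_signaling,
	.wait = fence_default_wait,
	.release = example_fence_release,
};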