/*	$NetBSD: linux_dma_fence.c,v 1.6 2021/12/19 01:40:48 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.6 2021/12/19 01:40:48 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * linux_dma_fence_trace
 *
 * True if we print DMA_FENCE_TRACE messages, false if not.  These
 * are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 * in boothowto.
 */
int	linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 * True if fence has a positive reference count.  True after
 * dma_fence_init; after the last dma_fence_put, this becomes
 * false.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 * Initialize fence.  Caller should call dma_fence_destroy when
 * done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");
}
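
/*
 * Illustrative sketch of typical use: a driver usually embeds the
 * struct dma_fence in its own request object and initializes it with a
 * context from dma_fence_context_alloc and a per-context sequence
 * number, e.g.
 *
 *	ctx = dma_fence_context_alloc(1);
 *	dma_fence_init(&req->base, &my_driver_fence_ops, &sc->sc_lock,
 *	    ctx, ++sc->sc_seqno);
 *
 * Here req, sc, and my_driver_fence_ops are hypothetical driver
 * structures, not names defined by this file.
 */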

/*
 * dma_fence_destroy(fence)
 *
 * Clean up memory initialized with dma_fence_init.  This is meant
 * to be used after a fence release callback.
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, f_rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 * Schedule fence to be destroyed and then freed with kfree after
 * any pending RCU read sections on all CPUs have completed.
 * Caller must guarantee all references have been released.  This
 * is meant to be used after a fence release callback.
 *
 * NOTE: Callers assume kfree will be used.  We don't even use
 * kmalloc to allocate these -- caller is expected to allocate
 * memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->f_rcu, &dma_fence_free_cb);
}

/*
 * dma_fence_context_alloc(n)
 *
 * Return the first of a contiguous sequence of unique
 * identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}
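
/*
 * Illustrative note: a driver that exposes, say, N independent rings
 * might call dma_fence_context_alloc(N) once at attach time and use
 * context + i for ring i.  The ring layout is hypothetical; the point
 * is that each independent timeline gets its own context value.
 */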

/*
 * dma_fence_is_later(a, b)
 *
 * True if the sequence number of fence a is later than the
 * sequence number of fence b.  Since sequence numbers wrap
 * around, we define this to mean that the sequence number of
 * fence a is no more than INT_MAX past the sequence number of
 * fence b.
 *
 * The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
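
/*
 * Worked example of the wraparound arithmetic: if a->seqno == 2 and
 * b->seqno == UINT_MAX, then a->seqno - b->seqno == 3 in unsigned
 * arithmetic, which is less than INT_MAX, so a is considered later
 * than b even though its sequence number has wrapped past zero.
 */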

/*
 * dma_fence_get(fence)
 *
 * Acquire a reference to fence.  The fence must not be being
 * destroyed.  Return the fence.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 * Attempt to acquire a reference to a fence that may be about to
 * be destroyed, during a read section.  Return the fence on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 * Attempt to acquire a reference to the fence *fencep, which may
 * be about to be destroyed, during a read section.  If the value
 * of *fencep changes after we read *fencep but before we
 * increment its reference count, retry.  Return *fencep on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence **fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once. */
	__insn_barrier();

	/* If there's nothing there, give up. */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts. */
	membar_datadep_consumer();

	/* Try to acquire a reference.  If we can't, try again. */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence.  If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success! */
	return fence;
}

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 * Release a reference to fence.  If this was the last one, call
 * the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 * Internal subroutine.  If the fence was already signalled,
 * return -ENOENT.  Otherwise, if the enable signalling callback
 * has not been called yet, call it.  If it fails, signal the
 * fence and return -ENOENT.  If it succeeds, or if it had already
 * been called, return zero to indicate success.
 *
 * Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If the fence was already signalled, fail with -ENOENT. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * If the enable signalling callback has already been called,
	 * success.  Otherwise, set the bit indicating that we are
	 * about to call it.
	 */
	if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags))
		return 0;

	/* Call it.  If it fails, signal the fence and return -ENOENT. */
	if (!(*fence->ops->enable_signaling)(fence)) {
		dma_fence_signal_locked(fence);
		return -ENOENT;
	}

	/* Success! */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 * If fence has been signalled, return -ENOENT.  If the enable
 * signalling callback hasn't been called yet, call it; if it
 * fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
 * fcb) when it is signalled, and return 0.
 *
 * The fence uses memory allocated by the caller in fcb from the
 * time of dma_fence_add_callback either to the time of
 * dma_fence_remove_callback, or just before calling fn.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback. */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;

	/* Release the lock and we're done. */
out1:	spin_unlock(fence->lock);
out0:	return ret;
}
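
/*
 * Illustrative sketch of the usual calling pattern (the names job,
 * j_fcb, and my_done_cb are hypothetical): the callback memory must
 * stay valid until the callback runs or is removed, so it is typically
 * embedded in the waiting object.
 *
 *	error = dma_fence_add_callback(fence, &job->j_fcb, my_done_cb);
 *	if (error)	// -ENOENT: already signalled, do the work now
 *		my_done_cb(fence, &job->j_fcb);
 */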

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 * Remove the callback fcb from fence.  Return true if it was
 * removed from the list, or false if it had already run and so
 * was no longer queued anyway.  Caller must have already called
 * dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 * If the enable signalling callback hasn't been called yet and
 * the fence hasn't been signalled yet, call the fence's
 * enable_signaling callback.  If that callback indicates failure
 * by returning false, signal the fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 * Test whether the fence has been signalled.  If it has been
 * signalled by dma_fence_signal(_locked), return true.  If the
 * signalled callback returns true indicating that some implicit
 * external condition has changed, call the callbacks as if with
 * dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 * Test whether the fence has been signalled.  Like
 * dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it. */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon.  Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 * Set an error code prior to dma_fence_signal for use by a
 * waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}
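
/*
 * Typical use (illustrative): the error is latched before signalling,
 * so a waiter that observes the signalled bit can then read
 * fence->error to learn how the operation ended, e.g.
 *
 *	dma_fence_set_error(fence, -EIO);
 *	dma_fence_signal(fence);
 */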

/*
 * dma_fence_signal(fence)
 *
 * Signal the fence.  If it has already been signalled, return
 * -EINVAL.  If it has not been signalled, set the signalled bit,
 * wake any waiters, remove each registered callback from the
 * queue and call it, and return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 * Signal the fence.  Like dma_fence_signal, but caller already
 * holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success! */
	return 0;
}

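/*
 * Bookkeeping for dma_fence_wait_any_timeout: one struct wait_any is
 * registered as a callback on each fence, and all of them point at a
 * single struct wait_any1 holding the mutex, condvar, and done flag
 * that the waiter sleeps on.
 */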
struct wait_any {
	struct dma_fence_cb	fcb;
	struct wait_any1 {
		kmutex_t	lock;
		kcondvar_t	cv;
		bool		done;
	}			*common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fences, nfences, intr, timeout)
 *
 * Wait for any of fences[0], fences[1], fences[2], ...,
 * fences[nfences-1] to be signalled.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records. */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait. */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;

	/* Add a callback to each of the fences, or stop here if we can't. */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled.  If they
	 * have, stop here.  If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags))
			goto out1;
	}

	/*
	 * None of them was ready immediately.  Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

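	/*
	 * Unwind: i is the number of fences whose callbacks were
	 * successfully added above, so remove exactly those; a
	 * callback that has already run is harmlessly skipped by
	 * dma_fence_remove_callback.
	 */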
out1:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true; or until timeout, if positive.  Return -ERESTARTSYS if
 * interrupted, negative error code on any other error, zero on
 * timeout, or positive number of ticks remaining if the fence is
 * signalled before the timeout.  Works by calling the fence wait
 * callback.
 *
 * The timeout must be nonnegative and less than
 * MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(timeout >= 0);
	KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

	return (*fence->ops->wait)(fence, intr, timeout);
}

/*
 * dma_fence_wait(fence, intr)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true.  Return -ERESTARTSYS if interrupted, negative error code
 * on any other error, zero on success.  Works by calling the fence
 * wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 * Default implementation of fence wait callback using a condition
 * variable.  If the fence is already signalled, return timeout,
 * or 1 if no timeout.  If the enable signalling callback hasn't
 * been called, call it, and if it fails, act as if the fence had
 * been signalled.  Otherwise, wait on the internal condvar.  If
 * timeout is MAX_SCHEDULE_TIMEOUT, treat it as no timeout.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return (timeout < MAX_SCHEDULE_TIMEOUT ? timeout : 1);

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out;

	/* Find out what our deadline is so we can handle spurious wakeup. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set. */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up. */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

out:
	/* All done.  Release the lock. */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout. */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this. */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS. */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail. */
	if (ret)
		return ret;

	/*
	 * Success!  Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}