/*	$NetBSD: linux_dma_fence.c,v 1.10 2021/12/19 10:50:03 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.10 2021/12/19 10:50:03 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * linux_dma_fence_trace
 *
 * True if we print DMA_FENCE_TRACE messages, false if not.  These
 * are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 * in boothowto.
 */
int linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 * True if fence has a positive reference count.  True after
 * dma_fence_init; after the last dma_fence_put, this becomes
 * false.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

        return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 * Initialize fence.  Caller should call dma_fence_destroy when
 * done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

        kref_init(&fence->refcount);
        fence->lock = lock;
        fence->flags = 0;
        fence->context = context;
        fence->seqno = seqno;
        fence->ops = ops;
        TAILQ_INIT(&fence->f_callbacks);
        cv_init(&fence->f_cv, "dmafence");
}

/*
 * dma_fence_destroy(fence)
 *
 * Clean up memory initialized with dma_fence_init.  This is meant
 * to be used after a fence release callback.
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

        KASSERT(!dma_fence_referenced_p(fence));

        KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
        cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
        struct dma_fence *fence = container_of(rcu, struct dma_fence, f_rcu);

        KASSERT(!dma_fence_referenced_p(fence));

        dma_fence_destroy(fence);
        kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 * Schedule fence to be destroyed and then freed with kfree after
 * any pending RCU read sections on all CPUs have completed.
 * Caller must guarantee all references have been released.  This
 * is meant to be used after a fence release callback.
 *
 * NOTE: Callers assume kfree will be used.  We never allocate
 * fences here ourselves -- the caller is expected to have allocated
 * the memory with kmalloc before initializing it with
 * dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

        KASSERT(!dma_fence_referenced_p(fence));

        call_rcu(&fence->f_rcu, &dma_fence_free_cb);
}
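
/*
 * Illustrative sketch (hypothetical mydrv_* driver code, not taken
 * from a real driver): a driver typically embeds the fence in its
 * own request structure, allocated with kmalloc so that the
 * kfree-based free path above is correct.  Keeping the fence as the
 * first member means kfree(fence) in dma_fence_free frees the whole
 * request.  The final dma_fence_put runs the release callback.
 *
 *	struct mydrv_request {
 *		struct dma_fence base;		-- first member, see above
 *		void *mydrv_buf;
 *	};
 *
 *	static void
 *	mydrv_release(struct dma_fence *fence)
 *	{
 *		struct mydrv_request *req =
 *		    container_of(fence, struct mydrv_request, base);
 *
 *		kfree(req->mydrv_buf);
 *		dma_fence_free(fence);	-- kfree of req after RCU grace period
 *	}
 *
 *	struct mydrv_request *req = kmalloc(sizeof(*req), GFP_KERNEL);
 *
 *	dma_fence_init(&req->base, &mydrv_fence_ops, &mydrv_lock,
 *	    mydrv_context, ++mydrv_seqno);
 */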

/*
 * dma_fence_context_alloc(n)
 *
 * Return the first of a contiguous sequence of unique
 * identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
        static volatile unsigned next_context = 0;

        return atomic_add_int_nv(&next_context, n) - n;
}

/*
 * dma_fence_is_later(a, b)
 *
 * True if the sequence number of fence a is later than the
 * sequence number of fence b.  Since sequence numbers wrap
 * around, we define this to mean that the sequence number of
 * fence a is no more than INT_MAX past the sequence number of
 * fence b.
 *
 * The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

        KASSERTMSG(a->context == b->context, "incommensurate fences"
            ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

        return a->seqno - b->seqno < INT_MAX;
}
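
/*
 * Illustrative sketch (hypothetical mydrv_* driver code): a driver
 * usually allocates one context per timeline at attach time and then
 * numbers its fences monotonically within that context, so that
 * dma_fence_is_later can order them even across seqno wraparound.
 *
 *	static unsigned mydrv_context;
 *	static unsigned mydrv_seqno;
 *
 *	mydrv_context = dma_fence_context_alloc(1);
 *
 *	dma_fence_init(&req->base, &mydrv_fence_ops, &mydrv_lock,
 *	    mydrv_context, ++mydrv_seqno);
 *
 * With 32-bit sequence numbers, a fence with seqno 0x00000001 is
 * later than one with seqno 0xfffffffe in the same context, because
 * 0x00000001 - 0xfffffffe wraps around to 3, which is less than
 * INT_MAX.
 */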

/*
 * dma_fence_get_stub()
 *
 * Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
        /*
         * XXX This probably isn't good enough -- caller may try
         * operations on this that require the lock, which will
         * require us to create and destroy the lock on module
         * load/unload.
         */
        static struct dma_fence fence = {
                .refcount = {1}, /* always referenced */
                .flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
        };

        return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 * Acquire a reference to fence.  The fence must not already be in
 * the process of being destroyed.  Return the fence.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

        if (fence)
                kref_get(&fence->refcount);
        return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 * Attempt to acquire a reference to a fence that may be about to
 * be destroyed, during a read section.  Return the fence on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

        __insn_barrier();
        if (!kref_get_unless_zero(&fence->refcount))
                return NULL;
        return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 * Attempt to acquire a reference to the fence *fencep, which may
 * be about to be destroyed, during a read section.  If the value
 * of *fencep changes after we read *fencep but before we
 * increment its reference count, retry.  Return *fencep on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
        struct dma_fence *fence, *fence0;

retry:
        fence = *fencep;

        /* Load fence only once.  */
        __insn_barrier();

        /* If there's nothing there, give up.  */
        if (fence == NULL)
                return NULL;

        /* Make sure we don't load stale fence guts.  */
        membar_datadep_consumer();

        /* Try to acquire a reference.  If we can't, try again.  */
        if (!dma_fence_get_rcu(fence))
                goto retry;

        /*
         * Confirm that it's still the same fence.  If not, release it
         * and retry.
         */
        fence0 = *fencep;
        __insn_barrier();
        if (fence != fence0) {
                dma_fence_put(fence);
                goto retry;
        }

        /* Success!  */
        return fence;
}
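
/*
 * Illustrative sketch (hypothetical mydrv_current_fence pointer,
 * assuming the usual rcu_read_lock/rcu_read_unlock bracketing is
 * available from the Linux compatibility headers): the pattern for
 * safely taking a reference to a fence published through a shared
 * pointer that a writer may replace and free concurrently.
 *
 *	struct dma_fence *volatile mydrv_current_fence;
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&mydrv_current_fence);
 *	rcu_read_unlock();
 *
 *	if (fence != NULL) {
 *		(void)dma_fence_wait(fence, false);
 *		dma_fence_put(fence);
 *	}
 */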

static void
dma_fence_release(struct kref *refcount)
{
        struct dma_fence *fence = container_of(refcount, struct dma_fence,
            refcount);

        KASSERT(!dma_fence_referenced_p(fence));

        if (fence->ops->release)
                (*fence->ops->release)(fence);
        else
                dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 * Release a reference to fence.  If this was the last one, call
 * the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

        if (fence == NULL)
                return;
        KASSERT(dma_fence_referenced_p(fence));
        kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 * Internal subroutine.  If the fence was already signalled,
 * return -ENOENT.  Otherwise, if the enable signalling callback
 * has not been called yet, call it.  If it fails, signal the
 * fence and return -ENOENT.  If it succeeds, or if it had already
 * been called, return zero to indicate success.
 *
 * Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{

        KASSERT(dma_fence_referenced_p(fence));
        KASSERT(spin_is_locked(fence->lock));

        /* If the fence was already signalled, fail with -ENOENT.  */
        if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
                return -ENOENT;

        /*
         * If the enable signalling callback has already been called,
         * success -- nothing more to do.  Otherwise, atomically set
         * the bit to note that we are about to call it.
         */
        if (test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &fence->flags))
                return 0;

        /* Call it.  If it failed, signal the fence and return -ENOENT.  */
        if (!(*fence->ops->enable_signaling)(fence)) {
                dma_fence_signal_locked(fence);
                return -ENOENT;
        }

        /* Success!  */
        return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 * If fence has been signalled, return -ENOENT.  If the enable
 * signalling callback hasn't been called yet, call it; if it
 * fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
 * fcb) when it is signalled, and return 0.
 *
 * The fence uses the memory allocated by the caller in fcb from
 * the time of dma_fence_add_callback until either
 * dma_fence_remove_callback removes it or it is dequeued just
 * before fn is called.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
        int ret;

        KASSERT(dma_fence_referenced_p(fence));

        /* Optimistically try to skip the lock if it's already signalled.  */
        if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
                ret = -ENOENT;
                goto out0;
        }

        /* Acquire the lock.  */
        spin_lock(fence->lock);

        /* Ensure signalling is enabled, or fail if we can't.  */
        ret = dma_fence_ensure_signal_enabled(fence);
        if (ret)
                goto out1;

        /* Insert the callback.  */
        fcb->func = fn;
        TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
        fcb->fcb_onqueue = true;

        /* Release the lock and we're done.  */
out1:   spin_unlock(fence->lock);
out0:   return ret;
}

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 * Remove the callback fcb from fence.  Return true if it was
 * removed from the list, or false if it had already run and so
 * was no longer queued anyway.  Caller must have already called
 * dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
        bool onqueue;

        KASSERT(dma_fence_referenced_p(fence));

        spin_lock(fence->lock);
        onqueue = fcb->fcb_onqueue;
        if (onqueue) {
                TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
                fcb->fcb_onqueue = false;
        }
        spin_unlock(fence->lock);

        return onqueue;
}
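
/*
 * Illustrative sketch (hypothetical mydrv_* driver code): the caller
 * owns the dma_fence_cb storage and must keep it valid until the
 * callback has run or has been removed.  The callback runs with the
 * fence's lock held.
 *
 *	struct mydrv_request {
 *		struct dma_fence_cb cb;
 *		bool done;
 *	};
 *
 *	static void
 *	mydrv_fence_done(struct dma_fence *fence, struct dma_fence_cb *fcb)
 *	{
 *		struct mydrv_request *req =
 *		    container_of(fcb, struct mydrv_request, cb);
 *
 *		req->done = true;
 *	}
 *
 *	int error = dma_fence_add_callback(fence, &req->cb,
 *	    mydrv_fence_done);
 *	if (error == -ENOENT)
 *		-- already signalled: mydrv_fence_done will never run
 *
 *	-- later, on cancellation:
 *	if (!dma_fence_remove_callback(fence, &req->cb))
 *		-- too late: mydrv_fence_done has already run
 */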

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 * If the fence hasn't been signalled yet and its enable signalling
 * callback hasn't been called yet, call the callback.  If, when
 * that happens, the callback indicates failure by returning false,
 * signal the fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

        KASSERT(dma_fence_referenced_p(fence));

        spin_lock(fence->lock);
        (void)dma_fence_ensure_signal_enabled(fence);
        spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 * Test whether the fence has been signalled.  If it has been
 * signalled by dma_fence_signal(_locked), return true.  If the
 * signalled callback returns true indicating that some implicit
 * external condition has changed, call the callbacks as if with
 * dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
        bool signaled;

        KASSERT(dma_fence_referenced_p(fence));

        spin_lock(fence->lock);
        signaled = dma_fence_is_signaled_locked(fence);
        spin_unlock(fence->lock);

        return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 * Test whether the fence has been signalled.  Like
 * dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

        KASSERT(dma_fence_referenced_p(fence));
        KASSERT(spin_is_locked(fence->lock));

        /* Check whether we already set the signalled bit.  */
        if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
                return true;

        /* If there's a signalled callback, test it.  */
        if (fence->ops->signaled) {
                if ((*fence->ops->signaled)(fence)) {
                        /*
                         * It's been signalled implicitly by some
                         * external phenomenon.  Act as though someone
                         * has called dma_fence_signal.
                         */
                        dma_fence_signal_locked(fence);
                        return true;
                }
        }

        return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 * Set an error code prior to dma_fence_signal for use by a
 * waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

        KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
        KASSERTMSG(error >= -ELAST, "%d", error);
        KASSERTMSG(error < 0, "%d", error);

        fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 * Return 0 if fence has yet to be signalled, 1 if it has been
 * signalled without error, or negative error code if
 * dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
        int ret;

        spin_lock(fence->lock);
        if (!dma_fence_is_signaled_locked(fence)) {
                ret = 0;
        } else if (fence->error) {
                ret = fence->error;
                KASSERTMSG(ret < 0, "%d", ret);
        } else {
                ret = 1;
        }
        spin_unlock(fence->lock);

        return ret;
}
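
/*
 * Illustrative sketch (hypothetical call sites): the error must be
 * latched before the fence is signalled; consumers then distinguish
 * pending, success, and failure through dma_fence_get_status.
 *
 *	-- producer, e.g. after detecting a device error
 *	dma_fence_set_error(fence, -EIO);
 *	dma_fence_signal(fence);
 *
 *	-- consumer
 *	int status = dma_fence_get_status(fence);
 *	if (status == 0)
 *		-- not signalled yet
 *	else if (status == 1)
 *		-- signalled, completed successfully
 *	else
 *		-- signalled with error; status is the negative errno
 */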

/*
 * dma_fence_signal(fence)
 *
 * Signal the fence.  If it has already been signalled, return
 * -EINVAL.  Otherwise, set the signalled bit, wake any waiters,
 * remove each registered callback from the queue and call it, and
 * return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
        int ret;

        KASSERT(dma_fence_referenced_p(fence));

        spin_lock(fence->lock);
        ret = dma_fence_signal_locked(fence);
        spin_unlock(fence->lock);

        return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 * Signal the fence.  Like dma_fence_signal, but caller already
 * holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
        struct dma_fence_cb *fcb, *next;

        KASSERT(dma_fence_referenced_p(fence));
        KASSERT(spin_is_locked(fence->lock));

        /* If it's been signalled, fail; otherwise set the signalled bit.  */
        if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
                return -EINVAL;

        /* Wake waiters.  */
        cv_broadcast(&fence->f_cv);

        /* Remove and call the callbacks.  */
        TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
                TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
                fcb->fcb_onqueue = false;
                (*fcb->func)(fence, fcb);
        }

        /* Success!  */
        return 0;
}
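
/*
 * Illustrative sketch (hypothetical mydrv_* driver code): the
 * completion path signals the fence.  If the caller already holds
 * the spin lock that was passed to dma_fence_init, it must use the
 * _locked variant; dma_fence_signal would try to take that lock
 * again.
 *
 *	struct mydrv_softc {
 *		spinlock_t sc_fence_lock;	-- lock given to dma_fence_init
 *		struct dma_fence *sc_active_fence;
 *	};
 *
 *	static void
 *	mydrv_complete_intr(struct mydrv_softc *sc)
 *	{
 *
 *		spin_lock(&sc->sc_fence_lock);
 *		dma_fence_signal_locked(sc->sc_active_fence);
 *		spin_unlock(&sc->sc_fence_lock);
 *	}
 */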

struct wait_any {
        struct dma_fence_cb fcb;
        struct wait_any1 {
                kmutex_t lock;
                kcondvar_t cv;
                bool done;
        } *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
        struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

        KASSERT(dma_fence_referenced_p(fence));

        mutex_enter(&cb->common->lock);
        cb->common->done = true;
        cv_broadcast(&cb->common->cv);
        mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fences, nfences, intr, timeout)
 *
 * Wait for any of fences[0], fences[1], fences[2], ...,
 * fences[nfences-1] to be signalled.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout)
{
        struct wait_any1 common;
        struct wait_any *cb;
        uint32_t i, j;
        int start, end;
        long ret = 0;

        /* Allocate an array of callback records.  */
        cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
        if (cb == NULL) {
                ret = -ENOMEM;
                goto out0;
        }

        /* Initialize a mutex and condvar for the common wait.  */
        mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
        cv_init(&common.cv, "fence");
        common.done = false;

        /* Add a callback to each of the fences, or stop here if we can't.  */
        for (i = 0; i < nfences; i++) {
                cb[i].common = &common;
                KASSERT(dma_fence_referenced_p(fences[i]));
                ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
                    &wait_any_cb);
                if (ret)
                        goto out1;
        }

        /*
         * Test whether any of the fences has been signalled.  If they
         * have, stop here.  If they haven't, we are guaranteed to be
         * notified by one of the callbacks when they have.
         */
        for (j = 0; j < nfences; j++) {
                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags))
                        goto out1;
        }

        /*
         * None of them was ready immediately.  Wait for one of the
         * callbacks to notify us when it is done.
         */
        mutex_enter(&common.lock);
        while (timeout > 0 && !common.done) {
                start = getticks();
                __insn_barrier();
                if (intr) {
                        if (timeout != MAX_SCHEDULE_TIMEOUT) {
                                ret = -cv_timedwait_sig(&common.cv,
                                    &common.lock, MIN(timeout, /* paranoia */
                                        MAX_SCHEDULE_TIMEOUT));
                        } else {
                                ret = -cv_wait_sig(&common.cv, &common.lock);
                        }
                } else {
                        if (timeout != MAX_SCHEDULE_TIMEOUT) {
                                ret = -cv_timedwait(&common.cv,
                                    &common.lock, MIN(timeout, /* paranoia */
                                        MAX_SCHEDULE_TIMEOUT));
                        } else {
                                cv_wait(&common.cv, &common.lock);
                                ret = 0;
                        }
                }
                end = getticks();
                __insn_barrier();
                if (ret) {
                        if (ret == -ERESTART)
                                ret = -ERESTARTSYS;
                        break;
                }
                timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
        }
        mutex_exit(&common.lock);

        /*
         * Massage the return code: if we were interrupted, return
         * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
         * return the remaining time.
         */
        if (ret < 0) {
                if (ret == -EINTR || ret == -ERESTART)
                        ret = -ERESTARTSYS;
                if (ret == -EWOULDBLOCK)
                        ret = 0;
        } else {
                KASSERT(ret == 0);
                ret = timeout;
        }

out1:   while (i --> 0)
                (void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
        cv_destroy(&common.cv);
        mutex_destroy(&common.lock);
        kfree(cb);
out0:   return ret;
}
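
/*
 * Illustrative sketch (hypothetical fence_a/fence_b, with hz the
 * kernel's ticks-per-second value): wait up to one second for
 * whichever of two fences signals first.  A negative return is an
 * error (e.g. -ERESTARTSYS if interrupted), zero means the wait
 * timed out, and a positive value is the number of ticks that were
 * left.
 *
 *	struct dma_fence *fences[2] = { fence_a, fence_b };
 *	long remain;
 *
 *	remain = dma_fence_wait_any_timeout(fences, 2, true, hz);
 *	if (remain < 0)
 *		return remain;
 *	else if (remain == 0)
 *		return -ETIMEDOUT;
 */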

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true; or until timeout, if positive.  Return -ERESTARTSYS if
 * interrupted, negative error code on any other error, zero on
 * timeout, or positive number of ticks remaining if the fence is
 * signalled before the timeout.  Works by calling the fence wait
 * callback.
 *
 * The timeout must be nonnegative and less than
 * MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

        KASSERT(dma_fence_referenced_p(fence));
        KASSERT(timeout >= 0);
        KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

        return (*fence->ops->wait)(fence, intr, timeout);
}

/*
 * dma_fence_wait(fence, intr)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true.  Return -ERESTARTSYS if interrupted, negative error code
 * on any other error, zero on success.  Works by calling the fence
 * wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
        long ret;

        KASSERT(dma_fence_referenced_p(fence));

        ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
        KASSERT(ret != 0);

        return (ret < 0 ? ret : 0);
}
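
/*
 * Illustrative sketch (hypothetical call sites, with hz the kernel's
 * ticks-per-second value): an interruptible wait for a single fence,
 * and the same wait bounded to half a second.
 *
 *	long error, remain;
 *
 *	error = dma_fence_wait(fence, true);
 *	if (error)
 *		return error;		-- e.g. -ERESTARTSYS if interrupted
 *
 *	remain = dma_fence_wait_timeout(fence, true, hz/2);
 *	if (remain == 0)
 *		return -ETIMEDOUT;	-- timed out
 *	else if (remain < 0)
 *		return remain;		-- interrupted or other error
 */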

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 * Default implementation of fence wait callback using a condition
 * variable.  If the fence is already signalled, return timeout,
 * or 1 if no timeout.  If the enable signalling callback hasn't
 * been called, call it, and if it fails, act as if the fence had
 * been signalled.  Otherwise, wait on the internal condvar.  If
 * timeout is MAX_SCHEDULE_TIMEOUT, treat it as no timeout.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
        int starttime = 0, now = 0, deadline = 0;       /* XXXGCC */
        kmutex_t *lock = &fence->lock->sl_lock;
        long ret = 0;

        KASSERT(dma_fence_referenced_p(fence));
        KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
        KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

        /* Optimistically try to skip the lock if it's already signalled.  */
        if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
                return (timeout < MAX_SCHEDULE_TIMEOUT ? timeout : 1);

        /* Acquire the lock.  */
        spin_lock(fence->lock);

        /* Ensure signalling is enabled, or fail if we can't.  */
        ret = dma_fence_ensure_signal_enabled(fence);
        if (ret)
                goto out;

        /* Find out what our deadline is so we can handle spurious wakeup.  */
        if (timeout < MAX_SCHEDULE_TIMEOUT) {
                now = getticks();
                __insn_barrier();
                starttime = now;
                deadline = starttime + timeout;
        }

        /* Wait until the signalled bit is set.  */
        while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
                /*
                 * If there's a timeout and we've passed the deadline,
                 * give up.
                 */
                if (timeout < MAX_SCHEDULE_TIMEOUT) {
                        now = getticks();
                        __insn_barrier();
                        if (deadline <= now)
                                break;
                }
                if (intr) {
                        if (timeout < MAX_SCHEDULE_TIMEOUT) {
                                ret = -cv_timedwait_sig(&fence->f_cv, lock,
                                    deadline - now);
                        } else {
                                ret = -cv_wait_sig(&fence->f_cv, lock);
                        }
                } else {
                        if (timeout < MAX_SCHEDULE_TIMEOUT) {
                                ret = -cv_timedwait(&fence->f_cv, lock,
                                    deadline - now);
                        } else {
                                cv_wait(&fence->f_cv, lock);
                                ret = 0;
                        }
                }
                /* If the wait failed, give up.  */
                if (ret) {
                        if (ret == -ERESTART)
                                ret = -ERESTARTSYS;
                        break;
                }
        }

out:
        /* All done.  Release the lock.  */
        spin_unlock(fence->lock);

        /* If cv_timedwait gave up, return 0 meaning timeout.  */
        if (ret == -EWOULDBLOCK) {
                /* Only cv_timedwait and cv_timedwait_sig can return this.  */
                KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
                return 0;
        }

        /* If there was a timeout and the deadline passed, return 0.  */
        if (timeout < MAX_SCHEDULE_TIMEOUT) {
                if (deadline <= now)
                        return 0;
        }

        /* If we were interrupted, return -ERESTARTSYS.  */
        if (ret == -EINTR || ret == -ERESTART)
                return -ERESTARTSYS;

        /* If there was any other kind of error, fail.  */
        if (ret)
                return ret;

        /*
         * Success!  Return the number of ticks left, at least 1, or 1
         * if no timeout.
         */
        return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}
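
/*
 * Illustrative sketch (hypothetical mydrv_* names): a minimal
 * dma_fence_ops for a fence that is signalled explicitly by driver
 * code.  As used in this file, .enable_signaling and .wait must be
 * provided, while .signaled and .release are optional (the default
 * release path is dma_fence_free).  dma_fence_default_wait is a
 * suitable .wait when the driver has no hardware wait primitive.
 *
 *	static bool
 *	mydrv_enable_signaling(struct dma_fence *fence)
 *	{
 *
 *		return true;	-- signalled later via dma_fence_signal
 *	}
 *
 *	static const struct dma_fence_ops mydrv_fence_ops = {
 *		.enable_signaling = mydrv_enable_signaling,
 *		.wait = dma_fence_default_wait,
 *	};
 */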