/*	$NetBSD: linux_dma_fence.c,v 1.20 2021/12/19 12:07:38 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.20 2021/12/19 12:07:38 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * linux_dma_fence_trace
 *
 * True if we print DMA_FENCE_TRACE messages, false if not. These
 * are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 * in boothowto.
 */
int linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 * True if fence has a positive reference count. True after
 * dma_fence_init; after the last dma_fence_put, this becomes
 * false.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 * Initialize fence. Caller should call dma_fence_destroy when
 * done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");
}

/*
 * dma_fence_reset(fence)
 *
 * Ensure fence is in a quiescent state. Allowed either for newly
 * initialized or freed fences, but not fences with more than one
 * reference.
 *
 * XXX extension to Linux API
 */
void
dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	KASSERT(kref_read(&fence->refcount) == 0 ||
	    kref_read(&fence->refcount) == 1);
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	KASSERT(fence->lock == lock);
	KASSERT(fence->ops == ops);

	kref_init(&fence->refcount);
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->error = 0;
}

/*
 * dma_fence_destroy(fence)
 *
 * Clean up memory initialized with dma_fence_init. This is meant
 * to be used after a fence release callback.
 *
 * XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 * Schedule fence to be destroyed and then freed with kfree after
 * any pending RCU read sections on all CPUs have completed.
 * Caller must guarantee all references have been released. This
 * is meant to be used after a fence release callback.
 *
 * NOTE: Callers assume kfree will be used. We don't even use
 * kmalloc to allocate these -- caller is expected to allocate
 * memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}
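
/*
 * Minimal lifecycle sketch for the functions above, assuming a
 * hypothetical my_fence_ops, my_lock, my_context, and my_seqno for
 * driver state, and a release callback that ends in dma_fence_free:
 *
 *	struct dma_fence *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *
 *	dma_fence_init(f, &my_fence_ops, &my_lock, my_context, my_seqno);
 *	... hand out references with dma_fence_get ...
 *	dma_fence_signal(f);		(work completed)
 *	dma_fence_put(f);		(last put runs my_fence_ops.release)
 */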

/*
 * dma_fence_context_alloc(n)
 *
 * Return the first of a contiguous sequence of unique
 * identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}
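
/*
 * Minimal usage sketch for dma_fence_context_alloc, assuming a
 * hypothetical softc with one timeline per ring; sc_contexts,
 * sc_nrings, sc_lock, and sc_seqno are made-up fields:
 *
 *	sc->sc_contexts = dma_fence_context_alloc(sc->sc_nrings);
 *	...
 *	dma_fence_init(f, &my_fence_ops, &sc->sc_lock,
 *	    sc->sc_contexts + ring, ++sc->sc_seqno[ring]);
 */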

/*
 * dma_fence_is_later(a, b)
 *
 * True if the sequence number of fence a is later than the
 * sequence number of fence b. Since sequence numbers wrap
 * around, we define this to mean that the sequence number of
 * fence a is no more than INT_MAX past the sequence number of
 * fence b.
 *
 * The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
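
/*
 * Worked example of the wraparound rule above: with 32-bit sequence
 * numbers, a->seqno == 2 and b->seqno == UINT_MAX - 1 give
 * a->seqno - b->seqno == 4 < INT_MAX, so a is later than b even
 * though the counter has wrapped; the reverse difference is about
 * UINT_MAX - 4, which is not < INT_MAX, so b is not later than a.
 */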

/*
 * dma_fence_get_stub()
 *
 * Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
	/*
	 * XXX This probably isn't good enough -- caller may try
	 * operations on this that require the lock, which will
	 * require us to create and destroy the lock on module
	 * load/unload.
	 */
	static struct dma_fence fence = {
		.refcount = {1}, /* always referenced */
		.flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
	};

	return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 * Acquire a reference to fence. The fence must not be being
 * destroyed. Return the fence.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 * Attempt to acquire a reference to a fence that may be about to
 * be destroyed, during a read section. Return the fence on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	__insn_barrier();
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 * Attempt to acquire a reference to the fence *fencep, which may
 * be about to be destroyed, during a read section. If the value
 * of *fencep changes after we read *fencep but before we
 * increment its reference count, retry. Return *fencep on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once. */
	__insn_barrier();

	/* If there's nothing there, give up. */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts. */
	membar_datadep_consumer();

	/* Try to acquire a reference. If we can't, try again. */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence. If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success! */
	return fence;
}
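
/*
 * Minimal usage sketch for dma_fence_get_rcu_safe, assuming a
 * hypothetical object with an RCU-protected fence pointer
 * obj->o_fence that writers replace before freeing the old fence:
 *
 *	struct dma_fence *f;
 *
 *	rcu_read_lock();
 *	f = dma_fence_get_rcu_safe(&obj->o_fence);
 *	rcu_read_unlock();
 *	if (f != NULL) {
 *		... use f ...
 *		dma_fence_put(f);
 *	}
 */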

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 * Release a reference to fence. If this was the last one, call
 * the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 * Internal subroutine. If the fence was already signalled,
 * return -ENOENT. Otherwise, if the enable signalling callback
 * has not been called yet, call it. If it fails, signal the fence
 * and return -ENOENT. If it succeeds, or if it had already been
 * called, return zero to indicate success.
 *
 * Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it. */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling, or fail if the fence doesn't support that.
	 */
	if (!already_enabled) {
		if (fence->ops->enable_signaling == NULL)
			return -ENOENT;
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT. */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success! */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 * If fence has been signalled, return -ENOENT. If the enable
 * signalling callback hasn't been called yet, call it; if it
 * fails, return -ENOENT. Otherwise, arrange to call fn(fence,
 * fcb) when it is signalled, and return 0.
 *
 * The fence uses memory allocated by the caller in fcb from the
 * time of dma_fence_add_callback either to the time of
 * dma_fence_remove_callback, or just before calling fn.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback. */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;

	/* Release the lock and we're done. */
out1:	spin_unlock(fence->lock);
out0:	return ret;
}

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 * Remove the callback fcb from fence. Return true if it was
 * removed from the list, or false if it had already run and so
 * was no longer queued anyway. Caller must have already called
 * dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}
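
/*
 * Minimal usage sketch for dma_fence_add_callback and
 * dma_fence_remove_callback, assuming a hypothetical struct
 * my_waiter that embeds the dma_fence_cb. Callbacks run with the
 * fence's lock held, so they must not sleep:
 *
 *	struct my_waiter {
 *		struct dma_fence_cb	mw_cb;
 *		...
 *	};
 *
 *	static void
 *	my_done(struct dma_fence *fence, struct dma_fence_cb *cb)
 *	{
 *		struct my_waiter *w = container_of(cb, struct my_waiter,
 *		    mw_cb);
 *
 *		... record completion without sleeping ...
 *	}
 *
 *	error = dma_fence_add_callback(fence, &w->mw_cb, &my_done);
 *	if (error == -ENOENT)
 *		... fence already signalled; my_done will never run ...
 *	...
 *	if (!dma_fence_remove_callback(fence, &w->mw_cb))
 *		... my_done already ran; w->mw_cb is no longer queued ...
 */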

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 * If it hasn't been called yet and the fence hasn't been
 * signalled yet, enable signalling by calling the fence's
 * enable_signaling callback. If, when that happens, the callback
 * indicates failure by returning false, signal the fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 * Test whether the fence has been signalled. If it has been
 * signalled by dma_fence_signal(_locked), return true. If the
 * signalled callback returns true indicating that some implicit
 * external condition has changed, call the callbacks as if with
 * dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 * Test whether the fence has been signalled. Like
 * dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it. */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon. Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 * Set an error code prior to dma_fence_signal for use by a
 * waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 * Return 0 if fence has yet to be signalled, 1 if it has been
 * signalled without error, or negative error code if
 * dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
	int ret;

	spin_lock(fence->lock);
	if (!dma_fence_is_signaled_locked(fence)) {
		ret = 0;
	} else if (fence->error) {
		ret = fence->error;
		KASSERTMSG(ret < 0, "%d", ret);
	} else {
		ret = 1;
	}
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal(fence)
 *
 * Signal the fence. If it has already been signalled, return
 * -EINVAL. If it has not been signalled, set the signalled bit,
 * wake any waiters, and remove each registered callback from the
 * queue and call it; then return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 * Signal the fence. Like dma_fence_signal, but caller already
 * holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success! */
	return 0;
}
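
/*
 * Minimal sketch of a completion path using dma_fence_set_error and
 * dma_fence_signal_locked, e.g. from a driver's interrupt handler;
 * my_lock stands for the same hypothetical spinlock that was passed
 * to dma_fence_init, and hw_reported_error is made-up driver state:
 *
 *	spin_lock(&my_lock);
 *	if (hw_reported_error)
 *		dma_fence_set_error(fence, -EIO);
 *	dma_fence_signal_locked(fence);
 *	spin_unlock(&my_lock);
 *
 * or, when the lock is not already held, dma_fence_set_error
 * followed by dma_fence_signal(fence).
 */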

struct wait_any {
	struct dma_fence_cb	fcb;
	struct wait_any1 {
		kmutex_t	lock;
		kcondvar_t	cv;
		bool		done;
		uint32_t	*ip;
		struct wait_any	*cb;
	} *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	if (cb->common->ip)
		*cb->common->ip = cb - cb->common->cb;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fences, nfences, intr, timeout, ip)
 *
 * Wait for any of fences[0], fences[1], fences[2], ...,
 * fences[nfences-1] to be signalled. If ip is nonnull, set *ip
 * to the index of the first one.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout, uint32_t *ip)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records. */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait. */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;
	common.ip = ip;
	common.cb = cb;

	/* Add a callback to each of the fences, or stop here if we can't. */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled. If they
	 * have, stop here. If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags)) {
			if (ip)
				*ip = j;
			ret = 0;
			goto out1;
		}
	}

	/*
	 * None of them was ready immediately. Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

out1:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}
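
/*
 * Minimal calling sketch for dma_fence_wait_any_timeout; the fences
 * array, its length, and the tick budget are hypothetical caller
 * state. In the common case a negative return means error
 * (-ERESTARTSYS if interrupted), zero means the wait timed out, and
 * a positive value is the number of ticks left, with *ip naming the
 * fence whose callback completed the wait:
 *
 *	uint32_t which;
 *	long remain;
 *
 *	remain = dma_fence_wait_any_timeout(fences, nfences,
 *	    true, mstohz(100), &which);
 *	if (remain > 0)
 *		... fences[which] signalled, remain ticks to spare ...
 */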

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true; or until timeout, if positive. Return -ERESTARTSYS if
 * interrupted, negative error code on any other error, zero on
 * timeout, or positive number of ticks remaining if the fence is
 * signalled before the timeout. Works by calling the fence wait
 * callback.
 *
 * The timeout must be nonnegative and less than
 * MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(timeout >= 0);
	KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

	if (fence->ops->wait)
		return (*fence->ops->wait)(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

/*
 * dma_fence_wait(fence, intr)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true. Return -ERESTARTSYS if interrupted, negative error code
 * on any other error, zero on success. Works by calling the fence
 * wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	if (fence->ops->wait)
		ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	else
		ret = dma_fence_default_wait(fence, intr,
		    MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}
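
/*
 * Minimal calling sketch for dma_fence_wait_timeout, converting a
 * millisecond budget to ticks with mstohz; the surrounding caller
 * code is hypothetical:
 *
 *	long remain;
 *
 *	remain = dma_fence_wait_timeout(fence, true, mstohz(2000));
 *	if (remain == -ERESTARTSYS)
 *		... interrupted; retry or bail out ...
 *	else if (remain == 0)
 *		... timed out ...
 *	else if (remain > 0)
 *		... signalled with remain ticks to spare ...
 */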

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 * Default implementation of fence wait callback using a condition
 * variable. If the fence is already signalled, return timeout,
 * or 1 if timeout is zero meaning poll. If the enable signalling
 * callback hasn't been called, call it, and if it fails, act as
 * if the fence had been signalled. Otherwise, wait on the
 * internal condvar. If timeout is MAX_SCHEDULE_TIMEOUT, wait
 * indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return (timeout ? timeout : 1);

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed. */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		spin_unlock(fence->lock);
		return (timeout ? timeout : 1);
	}

	/* If merely polling, stop here. */
	if (timeout == 0) {
		spin_unlock(fence->lock);
		return 0;
	}

	/* Find out what our deadline is so we can handle spurious wakeup. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set. */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up. */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

	/* All done. Release the lock. */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout. */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this. */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS. */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail. */
	if (ret)
		return ret;

	/*
	 * Success! Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}

/*
 * __dma_fence_signal(fence)
 *
 * Set fence's signalled bit, without waking waiters yet. Return
 * true if it was newly set, false if it was already set.
 */
bool
__dma_fence_signal(struct dma_fence *fence)
{

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	return true;
}

/*
 * __dma_fence_signal_wake(fence, timestamp)
 *
 * Wake fence's waiters. Caller must have previously called
 * __dma_fence_signal and it must have previously returned true.
 */
void
__dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *fcb, *next;

	spin_lock(fence->lock);

	KASSERT(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT));

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	spin_unlock(fence->lock);
}
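
/*
 * Minimal sketch of the two-phase signalling pattern these helpers
 * support, assuming the compat ktime_get() is available to produce
 * the timestamp; the bookkeeping between the two calls is
 * hypothetical caller code:
 *
 *	if (__dma_fence_signal(fence)) {
 *		... record completion outside the fence lock ...
 *		__dma_fence_signal_wake(fence, ktime_get());
 *	}
 */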