/*	$NetBSD: linux_dma_fence.c,v 1.22 2021/12/19 12:09:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.22 2021/12/19 12:09:27 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * linux_dma_fence_trace
 *
 *	True if we print DMA_FENCE_TRACE messages, false if not.  These
 *	are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 *	in boothowto.
 */
int	linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 *	True if fence has a positive reference count.  True after
 *	dma_fence_init; after the last dma_fence_put, this becomes
 *	false.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 *	Initialize fence.  Caller should call dma_fence_destroy when
 *	done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");
}
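
/*
 * Example (illustrative sketch, not part of this file): roughly how a
 * hypothetical driver might embed and initialize a fence with the API
 * above.  The "mydrv_" names, the ring structure, and its fields are
 * assumptions made up for the example; ring->context would typically
 * come from dma_fence_context_alloc(1).
 *
 *	struct mydrv_fence {
 *		struct dma_fence base;
 *		...
 *	};
 *
 *	static const struct dma_fence_ops mydrv_fence_ops = {
 *		.enable_signaling = mydrv_fence_enable_signaling,
 *		.signaled = mydrv_fence_signaled,
 *		.release = mydrv_fence_release,
 *	};
 *
 *	struct dma_fence *
 *	mydrv_fence_create(struct mydrv_ring *ring)
 *	{
 *		struct mydrv_fence *f;
 *
 *		f = kzalloc(sizeof(*f), GFP_KERNEL);
 *		if (f == NULL)
 *			return NULL;
 *		dma_fence_init(&f->base, &mydrv_fence_ops, &ring->lock,
 *		    ring->context, ++ring->seqno);
 *		return &f->base;
 *	}
 */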

/*
 * dma_fence_reset(fence)
 *
 *	Ensure fence is in a quiescent state.  Allowed either for newly
 *	initialized or freed fences, but not fences with more than one
 *	reference.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	KASSERT(kref_read(&fence->refcount) == 0 ||
	    kref_read(&fence->refcount) == 1);
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	KASSERT(fence->lock == lock);
	KASSERT(fence->ops == ops);

	kref_init(&fence->refcount);
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->error = 0;
}

/*
 * dma_fence_destroy(fence)
 *
 *	Clean up memory initialized with dma_fence_init.  This is meant
 *	to be used after a fence release callback.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 *	Schedule fence to be destroyed and then freed with kfree after
 *	any pending RCU read sections on all CPUs have completed.
 *	Caller must guarantee all references have been released.  This
 *	is meant to be used after a fence release callback.
 *
 *	NOTE: Callers assume kfree will be used.  We don't even use
 *	kmalloc to allocate these -- caller is expected to allocate
 *	memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}

/*
 * dma_fence_context_alloc(n)
 *
 *	Return the first of a contiguous sequence of n unique
 *	identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}

/*
 * dma_fence_is_later(a, b)
 *
 *	True if the sequence number of fence a is later than the
 *	sequence number of fence b.  Since sequence numbers wrap
 *	around, we define this to mean that the sequence number of
 *	fence a is no more than INT_MAX past the sequence number of
 *	fence b.
 *
 *	The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
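
/*
 * Worked example (illustrative, not part of this file): the unsigned
 * subtraction above tolerates sequence-number wraparound.  With two
 * fences in the same context,
 *
 *	b->seqno = 0xfffffffe		(issued just before the wrap)
 *	a->seqno = 0x00000001		(issued just after the wrap)
 *
 *	dma_fence_is_later(a, b) == true	(0x1 - 0xfffffffe == 3 < INT_MAX)
 *	dma_fence_is_later(b, a) == false	(0xfffffffe - 0x1 >= INT_MAX)
 */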

/*
 * dma_fence_get_stub()
 *
 *	Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
	/*
	 * XXX This probably isn't good enough -- caller may try
	 * operations on this that require the lock, which will
	 * require us to create and destroy the lock on module
	 * load/unload.
	 */
	static struct dma_fence fence = {
		.refcount = {1},	/* always referenced */
		.flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
	};

	return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 *	Acquire a reference to fence.  The fence must not be being
 *	destroyed.  Return the fence.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 *	Attempt to acquire a reference to a fence that may be about to
 *	be destroyed, during a read section.  Return the fence on
 *	success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	__insn_barrier();
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 *	Attempt to acquire a reference to the fence *fencep, which may
 *	be about to be destroyed, during a read section.  If the value
 *	of *fencep changes after we read *fencep but before we
 *	increment its reference count, retry.  Return *fencep on
 *	success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once. */
	__insn_barrier();

	/* If there's nothing there, give up. */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts. */
	membar_datadep_consumer();

	/* Try to acquire a reference.  If we can't, try again. */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence.  If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success! */
	return fence;
}
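
/*
 * Usage sketch (illustrative, not part of this file): grabbing a
 * reference to a fence published through a shared pointer that a
 * writer may replace and free concurrently.  The "obj" structure and
 * its fence member (declared volatile to match the prototype above)
 * are assumptions for the example.
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&obj->fence);
 *	rcu_read_unlock();
 *
 *	if (fence != NULL) {
 *		... use the fence ...
 *		dma_fence_put(fence);
 *	}
 */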

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 *	Release a reference to fence.  If this was the last one, call
 *	the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 *	Internal subroutine.  If the fence was already signalled,
 *	return -ENOENT.  Otherwise, if the enable signalling callback
 *	has not been called yet, call it.  If it fails, signal the
 *	fence and return -ENOENT.  If it succeeds, or if it had already
 *	been called, return zero to indicate success.
 *
 *	Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it. */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling, or fail if the fence doesn't support that.
	 */
	if (!already_enabled) {
		if (fence->ops->enable_signaling == NULL)
			return -ENOENT;
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT. */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success! */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 *	If fence has been signalled, return -ENOENT.  If the enable
 *	signalling callback hasn't been called yet, call it; if it
 *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
 *	fcb) when it is signalled, and return 0.
 *
 *	The fence uses memory allocated by the caller in fcb from the
 *	time of dma_fence_add_callback until either the time of
 *	dma_fence_remove_callback or the moment just before fn is
 *	called.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback. */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done. */
out1:	spin_unlock(fence->lock);
out0:	if (ret) {
		fcb->func = NULL;
		fcb->fcb_onqueue = false;
	}
	return ret;
}
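
/*
 * Usage sketch (illustrative, not part of this file): registering a
 * callback that runs when the fence is signalled.  The "mywork"
 * structure and helpers are assumptions for the example; note that the
 * callback runs with the fence's lock held, so it must not sleep.
 *
 *	struct mywork {
 *		struct dma_fence_cb cb;
 *		...
 *	};
 *
 *	static void
 *	mywork_done(struct dma_fence *fence, struct dma_fence_cb *fcb)
 *	{
 *		struct mywork *w = container_of(fcb, struct mywork, cb);
 *
 *		mywork_schedule(w);	(must not sleep here)
 *	}
 *
 *	...
 *	error = dma_fence_add_callback(fence, &w->cb, &mywork_done);
 *	if (error == -ENOENT)
 *		mywork_schedule(w);	(already signalled; run it now)
 */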

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 *	Remove the callback fcb from fence.  Return true if it was
 *	removed from the list, or false if it had already run and so
 *	was no longer queued anyway.  Caller must have already called
 *	dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 *	If the enable_signaling callback hasn't been called yet and the
 *	fence hasn't been signalled yet, call the callback.  If, when
 *	that happens, the callback indicates failure by returning
 *	false, signal the fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
		(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 *	Test whether the fence has been signalled.  If it has been
 *	signalled by dma_fence_signal(_locked), return true.  If the
 *	signalled callback returns true indicating that some implicit
 *	external condition has changed, call the callbacks as if with
 *	dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 *	Test whether the fence has been signalled.  Like
 *	dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it. */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon.  Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 *	Set an error code prior to dma_fence_signal for use by a
 *	waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 *	Return 0 if fence has yet to be signalled, 1 if it has been
 *	signalled without error, or negative error code if
 *	dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
	int ret;

	spin_lock(fence->lock);
	if (!dma_fence_is_signaled_locked(fence)) {
		ret = 0;
	} else if (fence->error) {
		ret = fence->error;
		KASSERTMSG(ret < 0, "%d", ret);
	} else {
		ret = 1;
	}
	spin_unlock(fence->lock);

	return ret;
}
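
/*
 * Usage sketch (illustrative, not part of this file): how a producer
 * might report failure and how a waiter observes it.  The "job" names
 * are assumptions for the example.
 *
 *	Producer, when the work fails:
 *
 *		dma_fence_set_error(job->fence, -EIO);
 *		dma_fence_signal(job->fence);
 *
 *	Consumer:
 *
 *		status = dma_fence_get_status(job->fence);
 *		if (status == 0)	still pending
 *		else if (status == 1)	completed successfully
 *		else			completed with error (-EIO here)
 */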

/*
 * dma_fence_signal(fence)
 *
 *	Signal the fence.  If it has already been signalled, return
 *	-EINVAL.  If it has not been signalled, call the enable
 *	signalling callback if it hasn't been called yet, and remove
 *	each registered callback from the queue and call it; then
 *	return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 *	Signal the fence.  Like dma_fence_signal, but caller already
 *	holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success! */
	return 0;
}
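
/*
 * Usage sketch (illustrative, not part of this file): signalling a
 * fence when the work it represents completes.  The "job" and "sc"
 * names are assumptions for the example; the lock is whatever spinlock
 * was passed to dma_fence_init.
 *
 *	If the completion path does not already hold the lock:
 *
 *		dma_fence_signal(job->fence);
 *
 *	If it already holds it, e.g. in an interrupt handler that takes
 *	the same lock to walk a list of pending jobs:
 *
 *		spin_lock(&sc->sc_fence_lock);
 *		...
 *		dma_fence_signal_locked(job->fence);
 *		...
 *		spin_unlock(&sc->sc_fence_lock);
 */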

struct wait_any {
	struct dma_fence_cb fcb;
	struct wait_any1 {
		kmutex_t lock;
		kcondvar_t cv;
		bool done;
		uint32_t *ip;
		struct wait_any *cb;
	} *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	if (cb->common->ip)
		*cb->common->ip = cb - cb->common->cb;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fences, nfences, intr, timeout, ip)
 *
 *	Wait for any of fences[0], fences[1], fences[2], ...,
 *	fences[nfences-1] to be signalled.  If ip is nonnull, set *ip
 *	to the index of the first one.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout, uint32_t *ip)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records. */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait. */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;
	common.ip = ip;
	common.cb = cb;

	/* Add a callback to each of the fences, or stop here if we can't. */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled.  If they
	 * have, stop here.  If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags)) {
			if (ip)
				*ip = j;
			ret = 0;
			goto out1;
		}
	}

	/*
	 * None of them was ready immediately.  Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

out1:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}
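
/*
 * Usage sketch (illustrative, not part of this file): waiting up to
 * one second, interruptibly, for whichever of two fences signals
 * first.  The fence variables are assumptions for the example; hz is
 * the usual ticks-per-second constant.
 *
 *	struct dma_fence *fences[2] = { fence_a, fence_b };
 *	uint32_t which;
 *	long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, 2, true, hz, &which);
 *	if (ret == -ERESTARTSYS)
 *		interrupted by a signal
 *	else if (ret < 0)
 *		some other error
 *	else if (ret == 0)
 *		timed out
 *	else
 *		fences[which] signalled with ret ticks to spare
 */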

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true; or until timeout, if positive.  Return -ERESTARTSYS if
 *	interrupted, negative error code on any other error, zero on
 *	timeout, or positive number of ticks remaining if the fence is
 *	signalled before the timeout.  Works by calling the fence wait
 *	callback.
 *
 *	The timeout must be nonnegative and less than
 *	MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(timeout >= 0);
	KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

	if (fence->ops->wait)
		return (*fence->ops->wait)(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

/*
 * dma_fence_wait(fence, intr)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true.  Return -ERESTARTSYS if interrupted, negative error code
 *	on any other error, zero on success.  Works by calling the
 *	fence wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	if (fence->ops->wait)
		ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	else
		ret = dma_fence_default_wait(fence, intr,
		    MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}
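
/*
 * Usage sketch (illustrative, not part of this file): a bounded,
 * interruptible wait on a single fence.  The 5*hz bound is an
 * arbitrary assumption; it must stay below MAX_SCHEDULE_TIMEOUT.
 *
 *	long ret;
 *
 *	ret = dma_fence_wait_timeout(fence, true, 5*hz);
 *	if (ret == -ERESTARTSYS)
 *		interrupted by a signal
 *	else if (ret < 0)
 *		some other error
 *	else if (ret == 0)
 *		timed out
 *	else
 *		fence signalled with ret ticks to spare
 *
 *	For an unbounded wait, dma_fence_wait(fence, true) returns 0 on
 *	success or -ERESTARTSYS if interrupted.
 */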

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 *	Default implementation of fence wait callback using a condition
 *	variable.  If the fence is already signalled, return timeout,
 *	or 1 if timeout is zero meaning poll.  If the enable signalling
 *	callback hasn't been called, call it, and if it fails, act as
 *	if the fence had been signalled.  Otherwise, wait on the
 *	internal condvar.  If timeout is MAX_SCHEDULE_TIMEOUT, wait
 *	indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return (timeout ? timeout : 1);

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed. */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		spin_unlock(fence->lock);
		return (timeout ? timeout : 1);
	}

	/* If merely polling, stop here. */
	if (timeout == 0) {
		spin_unlock(fence->lock);
		return 0;
	}

	/* Find out what our deadline is so we can handle spurious wakeup. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set. */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up. */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

	/* All done.  Release the lock. */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout. */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this. */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS. */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail. */
	if (ret)
		return ret;

	/*
	 * Success!  Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}

/*
 * __dma_fence_signal(fence)
 *
 *	Set fence's signalled bit, without waking waiters yet.  Return
 *	true if it was newly set, false if it was already set.
 */
bool
__dma_fence_signal(struct dma_fence *fence)
{

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	return true;
}

/*
 * __dma_fence_signal_wake(fence, timestamp)
 *
 *	Wake fence's waiters.  Caller must have previously called
 *	__dma_fence_signal and it must have previously returned true.
 */
void
__dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *fcb, *next;

	spin_lock(fence->lock);

	KASSERT(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT));

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	spin_unlock(fence->lock);
}