/* $NetBSD: linux_dma_fence.c,v 1.23 2021/12/19 12:09:51 riastradh Exp $ */

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.23 2021/12/19 12:09:51 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

/*
 * linux_dma_fence_trace
 *
 * True if we print DMA_FENCE_TRACE messages, false if not. These
 * are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 * in boothowto.
 */
int linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 * True if fence has a positive reference count. True after
 * dma_fence_init; after the last dma_fence_put, this becomes
 * false.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 * Initialize fence. Caller should call dma_fence_destroy when
 * done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");
}
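
/*
 * Illustrative sketch, not part of the original file: one way a driver
 * might embed and manage a fence with the routines in this file.  The
 * names exfence, exfence_ops, ex_fence_release, and ex_fence_create are
 * hypothetical; kzalloc and spin_lock_init are assumed available from
 * the Linux compatibility headers; and a real ops table would normally
 * also provide members such as get_driver_name and enable_signaling.
 */
struct exfence {
	struct dma_fence exf_base;
	spinlock_t exf_lock;
};

static void
ex_fence_release(struct dma_fence *f)
{
	struct exfence *exf = container_of(f, struct exfence, exf_base);

	dma_fence_destroy(f);	/* tear down what dma_fence_init set up */
	kfree(exf);
}

static const struct dma_fence_ops exfence_ops = {
	.release = &ex_fence_release,
};

static struct dma_fence *
ex_fence_create(void)
{
	struct exfence *exf;

	exf = kzalloc(sizeof(*exf), GFP_KERNEL);
	if (exf == NULL)
		return NULL;
	spin_lock_init(&exf->exf_lock);
	dma_fence_init(&exf->exf_base, &exfence_ops, &exf->exf_lock,
	    dma_fence_context_alloc(1), 1);
	return &exf->exf_base;	/* caller owns the initial reference */
}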

/*
 * dma_fence_reset(fence)
 *
 * Ensure fence is in a quiescent state. Allowed either for newly
 * initialized or freed fences, but not fences with more than one
 * reference.
 *
 * XXX extension to Linux API
 */
void
dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	KASSERT(kref_read(&fence->refcount) == 0 ||
	    kref_read(&fence->refcount) == 1);
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	KASSERT(fence->lock == lock);
	KASSERT(fence->ops == ops);

	kref_init(&fence->refcount);
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->error = 0;
}

/*
 * dma_fence_destroy(fence)
 *
 * Clean up memory initialized with dma_fence_init. This is meant
 * to be used after a fence release callback.
 *
 * XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 * Schedule fence to be destroyed and then freed with kfree after
 * any pending RCU read sections on all CPUs have completed.
 * Caller must guarantee all references have been released. This
 * is meant to be used after a fence release callback.
 *
 * NOTE: Callers assume kfree will be used. We don't even use
 * kmalloc to allocate these -- caller is expected to allocate
 * memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}

/*
 * dma_fence_context_alloc(n)
 *
 * Return the first of a contiguous sequence of unique
 * identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}
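
/*
 * Illustrative sketch, not part of the original file: a driver with two
 * independent timelines might reserve both context identifiers up front
 * at attach time.  The names exdev and exdev_attach_timelines are
 * hypothetical.
 */
struct exdev {
	unsigned ed_context[2];	/* one timeline per engine */
	unsigned ed_seqno[2];	/* last seqno issued on each timeline */
};

static void
exdev_attach_timelines(struct exdev *ed)
{
	unsigned base = dma_fence_context_alloc(2);

	ed->ed_context[0] = base;
	ed->ed_context[1] = base + 1;
	ed->ed_seqno[0] = ed->ed_seqno[1] = 0;
}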

/*
 * dma_fence_is_later(a, b)
 *
 * True if the sequence number of fence a is later than the
 * sequence number of fence b. Since sequence numbers wrap
 * around, we define this to mean that the sequence number of
 * fence a is no more than INT_MAX past the sequence number of
 * fence b.
 *
 * The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
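
/*
 * Worked example of the wraparound rule above (illustrative, not part of
 * the original file): with 32-bit sequence numbers, a fence with seqno 2
 * issued just after one with seqno UINT_MAX - 1 on the same context is
 * still "later", because 2u - (UINT_MAX - 1u) wraps around to 4, which
 * is less than INT_MAX; in the other direction the difference wraps to
 * UINT_MAX - 3, which is not.
 */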

/*
 * dma_fence_get_stub()
 *
 * Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
	/*
	 * XXX This probably isn't good enough -- caller may try
	 * operations on this that require the lock, which will
	 * require us to create and destroy the lock on module
	 * load/unload.
	 */
	static struct dma_fence fence = {
		.refcount = {1}, /* always referenced */
		.flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
	};

	return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 * Acquire a reference to fence. The fence must not be being
 * destroyed. Return the fence.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 * Attempt to acquire a reference to a fence that may be about to
 * be destroyed, during a read section. Return the fence on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	__insn_barrier();
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 * Attempt to acquire a reference to the fence *fencep, which may
 * be about to be destroyed, during a read section. If the value
 * of *fencep changes after we read *fencep but before we
 * increment its reference count, retry. Return *fencep on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once. */
	__insn_barrier();

	/* If there's nothing there, give up. */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts. */
	membar_datadep_consumer();

	/* Try to acquire a reference. If we can't, try again. */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence. If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success! */
	return fence;
}
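
/*
 * Illustrative sketch, not part of the original file: the reader-side
 * pattern dma_fence_get_rcu_safe is meant for.  ex_shared_fence and
 * ex_shared_is_idle are hypothetical; rcu_read_lock/rcu_read_unlock are
 * assumed available from the Linux compatibility headers; and a writer
 * is assumed to publish replacement fences into ex_shared_fence and to
 * free retired ones with dma_fence_free, so a reader inside an RCU read
 * section never touches freed memory.
 */
static struct dma_fence *volatile ex_shared_fence;

static bool
ex_shared_is_idle(void)
{
	struct dma_fence *f;
	bool idle = true;

	rcu_read_lock();
	f = dma_fence_get_rcu_safe(&ex_shared_fence);
	rcu_read_unlock();

	if (f != NULL) {
		idle = dma_fence_is_signaled(f);
		dma_fence_put(f);
	}
	return idle;
}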

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERTMSG(TAILQ_EMPTY(&fence->f_callbacks),
	    "fence %p has pending callbacks", fence);
	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 * Release a reference to fence. If this was the last one, call
 * the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 * Internal subroutine. If the fence was already signalled,
 * return -ENOENT. Otherwise, if the enable signalling callback
 * has not been called yet, call it. If it fails, signal the
 * fence and return -ENOENT. If it succeeds, or if it had already
 * been called, return zero to indicate success.
 *
 * Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it. */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling, or fail if the fence doesn't support that.
	 */
	if (!already_enabled) {
		if (fence->ops->enable_signaling == NULL)
			return -ENOENT;
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT. */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success! */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 * If fence has been signalled, return -ENOENT. If the enable
 * signalling callback hasn't been called yet, call it; if it
 * fails, return -ENOENT. Otherwise, arrange to call fn(fence,
 * fcb) when it is signalled, and return 0.
 *
 * The fence uses memory allocated by the caller in fcb from the
 * time of dma_fence_add_callback either to the time of
 * dma_fence_remove_callback, or just before calling fn.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback. */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done. */
out1:	spin_unlock(fence->lock);
out0:	if (ret) {
		fcb->func = NULL;
		fcb->fcb_onqueue = false;
	}
	return ret;
}
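
/*
 * Illustrative sketch, not part of the original file: registering a
 * completion callback with memory embedded in the caller's own record,
 * as the comment above requires.  The names ex_request,
 * ex_request_done, and ex_request_watch are hypothetical.
 */
struct ex_request {
	struct dma_fence_cb exr_cb;
	bool exr_done;
};

static void
ex_request_done(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct ex_request *exr = container_of(fcb, struct ex_request,
	    exr_cb);

	/* Runs with fence->lock held, from dma_fence_signal_locked. */
	exr->exr_done = true;
}

static int
ex_request_watch(struct dma_fence *fence, struct ex_request *exr)
{
	int error;

	exr->exr_done = false;
	error = dma_fence_add_callback(fence, &exr->exr_cb,
	    &ex_request_done);
	if (error == -ENOENT) {
		/* Already signalled: no callback is coming, note it now. */
		exr->exr_done = true;
		error = 0;
	}
	return error;
}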

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 * Remove the callback fcb from fence. Return true if it was
 * removed from the list, or false if it had already run and so
 * was no longer queued anyway. Caller must have already called
 * dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 * If the enable signalling callback hasn't been called yet and
 * the fence hasn't been signalled yet, call the callback. If the
 * callback indicates failure by returning false, signal the
 * fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
		(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 * Test whether the fence has been signalled. If it has been
 * signalled by dma_fence_signal(_locked), return true. If the
 * signalled callback returns true indicating that some implicit
 * external condition has changed, call the callbacks as if with
 * dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 * Test whether the fence has been signalled. Like
 * dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it. */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon. Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 * Set an error code prior to dma_fence_signal for use by a
 * waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 * Return 0 if fence has yet to be signalled, 1 if it has been
 * signalled without error, or negative error code if
 * dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
	int ret;

	spin_lock(fence->lock);
	if (!dma_fence_is_signaled_locked(fence)) {
		ret = 0;
	} else if (fence->error) {
		ret = fence->error;
		KASSERTMSG(ret < 0, "%d", ret);
	} else {
		ret = 1;
	}
	spin_unlock(fence->lock);

	return ret;
}
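
/*
 * Illustrative sketch, not part of the original file: how a producer
 * might record a device error with dma_fence_set_error before
 * signalling, and what a waiter then sees from dma_fence_get_status.
 * ex_complete_request is hypothetical.
 */
static void
ex_complete_request(struct dma_fence *fence, int hw_error)
{

	if (hw_error)
		dma_fence_set_error(fence, -EIO);	/* before signalling */
	(void)dma_fence_signal(fence);

	/*
	 * A waiter polling dma_fence_get_status(fence) now sees -EIO on
	 * device error or 1 on success; 0 would mean not yet signalled.
	 */
}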

/*
 * dma_fence_signal(fence)
 *
 * Signal the fence. If it has already been signalled, return
 * -EINVAL. If it has not been signalled, set the signalled bit,
 * wake any waiters, and remove each registered callback from the
 * queue and call it; then return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 * Signal the fence. Like dma_fence_signal, but caller already
 * holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success! */
	return 0;
}
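
/*
 * Illustrative sketch, not part of the original file: a completion
 * interrupt that already holds the timeline lock can use the _locked
 * variant directly.  ex_timeline and ex_timeline_intr are hypothetical;
 * the pending fence is assumed to have been initialized with ext_lock
 * as its lock, as dma_fence_signal_locked requires.
 */
struct ex_timeline {
	spinlock_t ext_lock;
	struct dma_fence *ext_pending;	/* fence awaiting completion */
};

static void
ex_timeline_intr(struct ex_timeline *ext)
{
	struct dma_fence *f;

	spin_lock(&ext->ext_lock);
	f = ext->ext_pending;
	ext->ext_pending = NULL;
	if (f != NULL)
		(void)dma_fence_signal_locked(f);
	spin_unlock(&ext->ext_lock);

	if (f != NULL)
		dma_fence_put(f);	/* drop the timeline's reference */
}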

struct wait_any {
	struct dma_fence_cb fcb;
	struct wait_any1 {
		kmutex_t lock;
		kcondvar_t cv;
		bool done;
		uint32_t *ip;
		struct wait_any *cb;
	} *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	if (cb->common->ip)
		*cb->common->ip = cb - cb->common->cb;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fence, nfences, intr, timeout, ip)
 *
 * Wait for any of fences[0], fences[1], fences[2], ...,
 * fences[nfences-1] to be signalled. If ip is nonnull, set *ip
 * to the index of the first one.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout, uint32_t *ip)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records. */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait. */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;
	common.ip = ip;
	common.cb = cb;

	/* Add a callback to each of the fences, or stop here if we can't. */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled. If they
	 * have, stop here. If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags)) {
			if (ip)
				*ip = j;
			ret = 0;
			goto out1;
		}
	}

	/*
	 * None of them was ready immediately. Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

out1:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}
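
/*
 * Illustrative sketch, not part of the original file: waiting up to one
 * second, interruptibly, for the first of several fences.  ex_wait_first
 * is hypothetical, and mstohz is the native NetBSD tick conversion,
 * assumed usable here.  Note that with the implementation above, a
 * fence that is already signalled when its callback is being added
 * surfaces as -ENOENT rather than as immediate success.
 */
static long
ex_wait_first(struct dma_fence **fences, uint32_t nfences)
{
	uint32_t which = 0;
	long ret;

	ret = dma_fence_wait_any_timeout(fences, nfences, /*intr*/true,
	    mstohz(1000), &which);
	if (ret < 0)		/* interrupted, no memory, ... */
		return ret;
	if (ret == 0)		/* the ticks ran out */
		return -ETIMEDOUT;
	/* Otherwise fences[which] signalled with ret ticks to spare. */
	return 0;
}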

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true; or until timeout, if positive. Return -ERESTARTSYS if
 * interrupted, negative error code on any other error, zero on
 * timeout, or positive number of ticks remaining if the fence is
 * signalled before the timeout. Works by calling the fence wait
 * callback.
 *
 * The timeout must be nonnegative and less than
 * MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(timeout >= 0);
	KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

	if (fence->ops->wait)
		return (*fence->ops->wait)(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

/*
 * dma_fence_wait(fence, intr)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true. Return -ERESTARTSYS if interrupted, negative error code
 * on any other error, zero on success. Works by calling the fence
 * wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	if (fence->ops->wait)
		ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	else
		ret = dma_fence_default_wait(fence, intr,
		    MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}
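
/*
 * Illustrative sketch, not part of the original file: a consumer that
 * must not touch a buffer until the producer's fence has fired.
 * ex_cpu_access_begin is hypothetical.
 */
static int
ex_cpu_access_begin(struct dma_fence *fence)
{
	long ret;

	ret = dma_fence_wait(fence, /*intr*/true);
	if (ret)		/* -ERESTARTSYS if interrupted */
		return (int)ret;
	/* The fence has signalled; CPU access is now safe. */
	return 0;
}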

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 * Default implementation of fence wait callback using a condition
 * variable. If the fence is already signalled, return timeout,
 * or 1 if timeout is zero meaning poll. If the enable signalling
 * callback hasn't been called, call it, and if it fails, act as
 * if the fence had been signalled. Otherwise, wait on the
 * internal condvar. If timeout is MAX_SCHEDULE_TIMEOUT, wait
 * indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return (timeout ? timeout : 1);

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed. */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		spin_unlock(fence->lock);
		return (timeout ? timeout : 1);
	}

	/* If merely polling, stop here. */
	if (timeout == 0) {
		spin_unlock(fence->lock);
		return 0;
	}

	/* Find out what our deadline is so we can handle spurious wakeup. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set. */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up. */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

	/* All done. Release the lock. */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout. */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this. */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS. */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail. */
	if (ret)
		return ret;

	/*
	 * Success! Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}
937
938 /*
939 * __dma_fence_signal(fence)
940 *
941 * Set fence's signalled bit, without waking waiters yet. Return
942 * true if it was newly set, false if it was already set.
943 */
944 bool
945 __dma_fence_signal(struct dma_fence *fence)
946 {
947
948 if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
949 return false;
950
951 return true;
952 }
953
954 /*
955 * __dma_fence_signal_wake(fence)
956 *
957 * Wake fence's waiters. Caller must have previously called
958 * __dma_fence_signal and it must have previously returned true.
959 */
960 void
961 __dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
962 {
963 struct dma_fence_cb *fcb, *next;
964
965 spin_lock(fence->lock);
966
967 KASSERT(fence->flags & DMA_FENCE_FLAG_SIGNALED_BIT);
968
969 /* Wake waiters. */
970 cv_broadcast(&fence->f_cv);
971
972 /* Remove and call the callbacks. */
973 TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
974 TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
975 fcb->fcb_onqueue = false;
976 (*fcb->func)(fence, fcb);
977 }
978
979 spin_unlock(fence->lock);
980 }
981
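
/*
 * Illustrative sketch, not part of the original file: the two-phase
 * pattern the pair above supports -- mark the fence signalled cheaply in
 * the hard interrupt handler, then do the wakeups and callbacks from a
 * softer context.  ex_hardintr and ex_softintr are hypothetical, and
 * ktime_get() from the Linux compatibility headers is assumed available.
 */
static bool
ex_hardintr(struct dma_fence *fence)
{

	/* Just sets the bit: no lock taken, no callbacks run. */
	return __dma_fence_signal(fence);
}

static void
ex_softintr(struct dma_fence *fence, bool newly_signalled)
{

	if (newly_signalled)
		__dma_fence_signal_wake(fence, ktime_get());
}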