/*	$NetBSD: linux_dma_fence.c,v 1.25 2021/12/19 12:11:05 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.25 2021/12/19 12:11:05 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define	FENCE_MAGIC_GOOD	0x607ba424048c37e5ULL
#define	FENCE_MAGIC_BAD		0x7641ca721344505fULL

/*
 * linux_dma_fence_trace
 *
 *	True if we print DMA_FENCE_TRACE messages, false if not.  These
 *	are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 *	in boothowto.
 */
int	linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 *	True if fence has a positive reference count.  True after
 *	dma_fence_init; after the last dma_fence_put, this becomes
 *	false.  The fence must have been initialized and must not have
 *	been destroyed.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 *	Initialize fence.  Caller should call dma_fence_destroy when
 *	done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_GOOD;
#endif
}
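
/*
 * Example (editor's sketch, not part of the original file): a driver
 * would typically embed a struct dma_fence in its own fence object
 * and initialize it with dma_fence_init.  The names mydrv_fence,
 * mydrv_fence_ops, mydrv_enable_signaling, mf, and sc are
 * hypothetical.
 *
 *	struct mydrv_fence {
 *		struct dma_fence base;
 *		struct mydrv_softc *sc;
 *	};
 *
 *	static bool
 *	mydrv_enable_signaling(struct dma_fence *f)
 *	{
 *		// Arrange for dma_fence_signal to be called later,
 *		// e.g. from the driver's interrupt handler; return
 *		// false if the work has already completed.
 *		return true;
 *	}
 *
 *	static const struct dma_fence_ops mydrv_fence_ops = {
 *		.enable_signaling = mydrv_enable_signaling,
 *		// .signaled, .wait, and .release are optional; this
 *		// file falls back to dma_fence_default_wait and
 *		// dma_fence_free when .wait and .release are NULL.
 *	};
 *
 *	dma_fence_init(&mf->base, &mydrv_fence_ops, &sc->sc_fence_lock,
 *	    sc->sc_fence_context, seqno);
 */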

/*
 * dma_fence_reset(fence)
 *
 *	Ensure fence is in a quiescent state.  Allowed either for newly
 *	initialized or freed fences, but not fences with more than one
 *	reference.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(kref_read(&fence->refcount) == 0 ||
	    kref_read(&fence->refcount) == 1);
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	KASSERT(fence->lock == lock);
	KASSERT(fence->ops == ops);

	kref_init(&fence->refcount);
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->error = 0;
}

/*
 * dma_fence_destroy(fence)
 *
 *	Clean up memory initialized with dma_fence_init.  This is meant
 *	to be used after a fence release callback.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_BAD;
#endif

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 *	Schedule fence to be destroyed and then freed with kfree after
 *	any pending RCU read sections on all CPUs have completed.
 *	Caller must guarantee all references have been released.  This
 *	is meant to be used after a fence release callback.
 *
 *	NOTE: Callers assume kfree will be used.  We don't even use
 *	kmalloc to allocate these -- caller is expected to allocate
 *	memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}
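
/*
 * Example (editor's sketch, not part of the original file): a driver
 * whose fences are not allocated with kmalloc must supply a release
 * callback instead of relying on the dma_fence_free default.  The
 * names mydrv_fence_release, mydrv_fence, and mydrv_fence_cache are
 * hypothetical.
 *
 *	static void
 *	mydrv_fence_release(struct dma_fence *f)
 *	{
 *		struct mydrv_fence *mf = container_of(f,
 *		    struct mydrv_fence, base);
 *
 *		// Undo dma_fence_init, then free the fence by
 *		// whatever means it was allocated.
 *		dma_fence_destroy(f);
 *		pool_cache_put(mydrv_fence_cache, mf);
 *	}
 *
 * If the fence may still be reachable by readers under RCU, the
 * release callback should defer the actual free with call_rcu, as
 * dma_fence_free does.
 */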

/*
 * dma_fence_context_alloc(n)
 *
 *	Return the first of a contiguous sequence of unique
 *	identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}

/*
 * dma_fence_is_later(a, b)
 *
 *	True if the sequence number of fence a is later than the
 *	sequence number of fence b.  Since sequence numbers wrap
 *	around, we define this to mean that the sequence number of
 *	fence a is no more than INT_MAX past the sequence number of
 *	fence b.
 *
 *	The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->f_magic != FENCE_MAGIC_BAD, "fence %p", a);
	KASSERTMSG(a->f_magic == FENCE_MAGIC_GOOD, "fence %p", a);
	KASSERTMSG(b->f_magic != FENCE_MAGIC_BAD, "fence %p", b);
	KASSERTMSG(b->f_magic == FENCE_MAGIC_GOOD, "fence %p", b);
	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
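
/*
 * Example (editor's sketch, not part of the original file): a driver
 * typically allocates one context per timeline at attach time and
 * numbers the fences it issues on that timeline with increasing
 * seqnos, so that dma_fence_is_later reflects submission order.  The
 * softc fields here are hypothetical.
 *
 *	sc->sc_fence_context = dma_fence_context_alloc(1);
 *	...
 *	dma_fence_init(&mf->base, &mydrv_fence_ops, &sc->sc_fence_lock,
 *	    sc->sc_fence_context, ++sc->sc_next_seqno);
 *	...
 *	if (dma_fence_is_later(&newf->base, &oldf->base)) {
 *		// newf was issued after oldf on the same timeline
 *	}
 */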

/*
 * dma_fence_get_stub()
 *
 *	Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
	/*
	 * XXX This probably isn't good enough -- caller may try
	 * operations on this that require the lock, which will
	 * require us to create and destroy the lock on module
	 * load/unload.
	 */
	static struct dma_fence fence = {
		.refcount = {1},	/* always referenced */
		.flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
	};

	return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 *	Acquire a reference to fence.  The fence must not be in the
 *	process of being destroyed.  Return the fence.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	if (fence)
		kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 *	Attempt to acquire a reference to a fence that may be about to
 *	be destroyed, during a read section.  Return the fence on
 *	success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	__insn_barrier();
	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 *	Attempt to acquire a reference to the fence *fencep, which may
 *	be about to be destroyed, during a read section.  If the value
 *	of *fencep changes after we read *fencep but before we
 *	increment its reference count, retry.  Return *fencep on
 *	success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once.  */
	__insn_barrier();

	/* If there's nothing there, give up.  */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts.  */
	membar_datadep_consumer();

	/* Try to acquire a reference.  If we can't, try again.  */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence.  If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success!  */
	KASSERT(dma_fence_referenced_p(fence));
	return fence;
}
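
/*
 * Example (editor's sketch, not part of the original file): the usual
 * pattern for taking a reference to a fence published through a
 * shared pointer is to do so inside an RCU read section; the shared
 * pointer obj->fence here is hypothetical.
 *
 *	struct dma_fence *f;
 *
 *	rcu_read_lock();
 *	f = dma_fence_get_rcu_safe(&obj->fence);
 *	rcu_read_unlock();
 *
 *	if (f != NULL) {
 *		// use f, then drop the reference
 *		dma_fence_put(f);
 *	}
 */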

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERTMSG(TAILQ_EMPTY(&fence->f_callbacks),
	    "fence %p has pending callbacks", fence);
	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 *	Release a reference to fence.  If this was the last one, call
 *	the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 *	Internal subroutine.  If the fence was already signalled,
 *	return -ENOENT.  Otherwise, if the enable signalling callback
 *	has not been called yet, call it.  If it fails, signal the
 *	fence and return -ENOENT.  If it succeeds, or if it had already
 *	been called, return zero to indicate success.
 *
 *	Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it.  */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling, or fail if the fence doesn't support that.
	 */
	if (!already_enabled) {
		if (fence->ops->enable_signaling == NULL)
			return -ENOENT;
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT.  */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success!  */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 *	If fence has been signalled, return -ENOENT.  If the enable
 *	signalling callback hasn't been called yet, call it; if it
 *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
 *	fcb) when it is signalled, and return 0.
 *
 *	The fence uses the memory allocated by the caller in fcb from
 *	the time of dma_fence_add_callback until either the time of
 *	dma_fence_remove_callback, or just before fn is called.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't.  */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback.  */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done.  */
out1:	spin_unlock(fence->lock);
out0:	if (ret) {
		fcb->func = NULL;
		fcb->fcb_onqueue = false;
	}
	return ret;
}
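
/*
 * Example (editor's sketch, not part of the original file): a caller
 * that wants notification of completion embeds a struct dma_fence_cb
 * in its own record, registers it, and treats -ENOENT as "already
 * signalled".  The names mydrv_job and mydrv_job_done are
 * hypothetical.
 *
 *	struct mydrv_job {
 *		struct dma_fence_cb cb;
 *		...
 *	};
 *
 *	static void
 *	mydrv_job_done(struct dma_fence *f, struct dma_fence_cb *fcb)
 *	{
 *		struct mydrv_job *job = container_of(fcb,
 *		    struct mydrv_job, cb);
 *
 *		// Called once, with the fence's lock held, when the
 *		// fence is signalled; must not sleep.
 *		...
 *	}
 *
 *	error = dma_fence_add_callback(fence, &job->cb, mydrv_job_done);
 *	if (error == -ENOENT) {
 *		// Already signalled: run the completion path directly.
 *	}
 */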

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 *	Remove the callback fcb from fence.  Return true if it was
 *	removed from the list, or false if it had already run and so
 *	was no longer queued anyway.  Caller must have already called
 *	dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 *	If the fence hasn't been signalled yet and the enable
 *	signalling callback hasn't been called yet, call it.  If the
 *	callback indicates failure by returning false, signal the
 *	fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
		(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 *	Test whether the fence has been signalled.  If it has been
 *	signalled by dma_fence_signal(_locked), return true.  If the
 *	signalled callback returns true indicating that some implicit
 *	external condition has changed, call the callbacks as if with
 *	dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 *	Test whether the fence has been signalled.  Like
 *	dma_fence_is_signaled, but caller already holds the fence's
 *	lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it.  */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon.  Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 *	Set an error code prior to dma_fence_signal for use by a
 *	waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 *	Return 0 if fence has yet to be signalled, 1 if it has been
 *	signalled without error, or negative error code if
 *	dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
	int ret;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);
	if (!dma_fence_is_signaled_locked(fence)) {
		ret = 0;
	} else if (fence->error) {
		ret = fence->error;
		KASSERTMSG(ret < 0, "%d", ret);
	} else {
		ret = 1;
	}
	spin_unlock(fence->lock);

	return ret;
}
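
/*
 * Example (editor's sketch, not part of the original file): the
 * producer records failure before signalling, and a waiter examines
 * the outcome once the fence has been signalled.
 *
 *	// Producer, on a failed or aborted job:
 *	dma_fence_set_error(fence, -EIO);
 *	dma_fence_signal(fence);
 *
 *	// Consumer, after waiting:
 *	status = dma_fence_get_status(fence);
 *	if (status < 0) {
 *		// the job failed; status is the (negative) error code
 *	} else if (status == 1) {
 *		// the job completed successfully
 *	}
 */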

/*
 * dma_fence_signal(fence)
 *
 *	Signal the fence.  If it has already been signalled, return
 *	-EINVAL.  If it has not been signalled, set the signalled bit
 *	and the timestamp, wake any waiters, and remove each registered
 *	callback from the queue and call it; then return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 *	Signal the fence.  Like dma_fence_signal, but caller already
 *	holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit.  */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Set the timestamp.  */
	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters.  */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks.  */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success!  */
	return 0;
}
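
/*
 * Example (editor's sketch, not part of the original file): the
 * producer typically signals from its completion path once the work
 * the fence stands for is done.  dma_fence_signal takes the fence's
 * lock itself, so dma_fence_signal_locked must be used instead if the
 * caller already holds it.  The names mf and error are hypothetical.
 *
 *	// On completion of the job associated with mf (error is 0 on
 *	// success or a negative errno on failure):
 *	if (error)
 *		dma_fence_set_error(&mf->base, error);
 *	dma_fence_signal(&mf->base);
 *	dma_fence_put(&mf->base);	// drop the producer's reference
 */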

struct wait_any {
	struct dma_fence_cb fcb;
	struct wait_any1 {
		kmutex_t lock;
		kcondvar_t cv;
		bool done;
		uint32_t *ip;
		struct wait_any *cb;
	} *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	if (cb->common->ip)
		*cb->common->ip = cb - cb->common->cb;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fences, nfences, intr, timeout, ip)
 *
 *	Wait for any of fences[0], fences[1], fences[2], ...,
 *	fences[nfences-1] to be signalled.  If ip is nonnull, set *ip
 *	to the index of the first one.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout, uint32_t *ip)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records.  */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait.  */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;
	common.ip = ip;
	common.cb = cb;

	/* Add a callback to each of the fences, or stop here if we can't.  */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled.  If they
	 * have, stop here.  If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags)) {
			if (ip)
				*ip = j;
			ret = 0;
			goto out1;
		}
	}

	/*
	 * None of them was ready immediately.  Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

out1:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}
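
/*
 * Example (editor's sketch, not part of the original file): waiting
 * interruptibly, for up to one second (hz ticks), for the first of a
 * batch of fences to complete.  A positive return is the number of
 * ticks left when fences[which] was signalled; -ERESTARTSYS means the
 * wait was interrupted.  Note that a fence which is already signalled
 * on entry can yield a return of 0 or -ENOENT rather than a positive
 * count, so callers may want to re-check the fences on those returns.
 *
 *	uint32_t which;
 *	long remain;
 *
 *	remain = dma_fence_wait_any_timeout(fences, nfences, true, hz,
 *	    &which);
 *	if (remain == -ERESTARTSYS) {
 *		// interrupted by a signal
 *	} else if (remain < 0) {
 *		// some other error, e.g. -ENOMEM
 *	} else if (remain > 0) {
 *		// fences[which] was signalled with remain ticks left
 *	}
 */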

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true; or until timeout, if positive.  Return -ERESTARTSYS if
 *	interrupted, negative error code on any other error, zero on
 *	timeout, or positive number of ticks remaining if the fence is
 *	signalled before the timeout.  Works by calling the fence wait
 *	callback.
 *
 *	The timeout must be nonnegative and less than
 *	MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(timeout >= 0);
	KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);

	if (fence->ops->wait)
		return (*fence->ops->wait)(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}

/*
 * dma_fence_wait(fence, intr)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true.  Return -ERESTARTSYS if interrupted, negative error code
 *	on any other error, zero on success.  Works by calling the
 *	fence wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	if (fence->ops->wait)
		ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	else
		ret = dma_fence_default_wait(fence, intr,
		    MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}
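
/*
 * Example (editor's sketch, not part of the original file): a typical
 * consumer-side wait, bounded to one second (hz ticks) and
 * interruptible, with the Linux-style return convention mapped back
 * onto an errno for the caller.
 *
 *	long remain;
 *	int error;
 *
 *	remain = dma_fence_wait_timeout(fence, true, hz);
 *	if (remain < 0)
 *		error = -remain;	// interrupted or failed
 *	else if (remain == 0)
 *		error = EWOULDBLOCK;	// timed out
 *	else
 *		error = 0;		// signalled, remain ticks left
 */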

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 *	Default implementation of fence wait callback using a condition
 *	variable.  If the fence is already signalled, return timeout,
 *	or 1 if timeout is zero meaning poll.  If the enable signalling
 *	callback hasn't been called, call it, and if it fails, act as
 *	if the fence had been signalled.  Otherwise, wait on the
 *	internal condvar.  If timeout is MAX_SCHEDULE_TIMEOUT, wait
 *	indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return (timeout ? timeout : 1);

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed.  */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		spin_unlock(fence->lock);
		return (timeout ? timeout : 1);
	}

	/* If merely polling, stop here.  */
	if (timeout == 0) {
		spin_unlock(fence->lock);
		return 0;
	}

	/* Find out what our deadline is so we can handle spurious wakeup.  */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set.  */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up.  */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

	/* All done.  Release the lock.  */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout.  */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this.  */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0.  */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS.  */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail.  */
	if (ret)
		return ret;

	/*
	 * Success!  Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}

/*
 * __dma_fence_signal(fence)
 *
 *	Set fence's signalled bit, without waking waiters yet.  Return
 *	true if it was newly set, false if it was already set.
 */
bool
__dma_fence_signal(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	return true;
}

/*
 * __dma_fence_signal_wake(fence, timestamp)
 *
 *	Set fence's timestamp and wake fence's waiters.  Caller must
 *	have previously called __dma_fence_signal and it must have
 *	previously returned true.
 */
void
__dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *fcb, *next;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);

	KASSERT(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT));

	/* Set the timestamp.  */
	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters.  */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks.  */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	spin_unlock(fence->lock);
}
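
/*
 * Example (editor's sketch, not part of the original file): the
 * two-phase __dma_fence_signal/__dma_fence_signal_wake split lets a
 * driver note completion cheaply in a hard interrupt handler and
 * defer the wakeups and callbacks to a softint, where taking the
 * fence lock is safe.  The names mf and sc->sc_fence_sih are
 * hypothetical.
 *
 *	// Hard interrupt handler: record the completion only.
 *	if (__dma_fence_signal(&mf->base)) {
 *		mf->signal_time = ktime_get();
 *		softint_schedule(sc->sc_fence_sih);
 *	}
 *
 *	// Softint handler: deliver the timestamp, wakeups, and
 *	// callbacks.
 *	__dma_fence_signal_wake(&mf->base, mf->signal_time);
 */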