/*	$NetBSD: linux_dma_fence.c,v 1.27 2021/12/19 12:23:27 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.27 2021/12/19 12:23:27 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define	FENCE_MAGIC_GOOD	0x607ba424048c37e5ULL
#define	FENCE_MAGIC_BAD		0x7641ca721344505fULL

/*
 * linux_dma_fence_trace
 *
 * True if we print DMA_FENCE_TRACE messages, false if not. These
 * are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 * in boothowto.
 */
int linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 * True if fence has a positive reference count. True after
 * dma_fence_init; after the last dma_fence_put, this becomes
 * false. The fence must have been initialized and must not have
 * been destroyed.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 * Initialize fence. Caller should call dma_fence_destroy when
 * done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_GOOD;
#endif
}
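
/*
 * Example (illustrative sketch only; the mydrv_* and sc_* names are
 * hypothetical, not part of this API): a driver typically embeds the
 * fence in its own job structure and initializes it with the driver's
 * fence ops, a driver-owned lock, and the job's context and sequence
 * number:
 *
 *	struct mydrv_job {
 *		struct dma_fence	mj_fence;
 *		...
 *	};
 *
 *	dma_fence_init(&job->mj_fence, &mydrv_fence_ops,
 *	    &sc->sc_fence_lock, sc->sc_fence_context,
 *	    ++sc->sc_fence_seqno);
 */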

/*
 * dma_fence_reset(fence)
 *
 * Ensure fence is in a quiescent state. Allowed for fences that
 * are newly initialized or whose last reference has just been
 * released, but not for fences with more than one reference.
 *
 * XXX extension to Linux API
 */
void
dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(kref_read(&fence->refcount) == 0 ||
	    kref_read(&fence->refcount) == 1);
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	KASSERT(fence->lock == lock);
	KASSERT(fence->ops == ops);

	kref_init(&fence->refcount);
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->error = 0;
}

/*
 * dma_fence_destroy(fence)
 *
 * Clean up memory initialized with dma_fence_init. This is meant
 * to be used after a fence release callback.
 *
 * XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_BAD;
#endif

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 * Schedule fence to be destroyed and then freed with kfree after
 * any pending RCU read sections on all CPUs have completed.
 * Caller must guarantee all references have been released. This
 * is meant to be used after a fence release callback.
 *
 * NOTE: Callers assume kfree will be used to free the fence, so
 * the memory passed to dma_fence_init must have been allocated
 * with kmalloc -- this module never allocates fences itself.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}
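
/*
 * Example (hedged sketch; mydrv_fence_release is hypothetical): a
 * driver whose fences are allocated with kmalloc can point its
 * release op at a routine like this, or leave the release op NULL
 * and let dma_fence_release fall back to dma_fence_free directly:
 *
 *	static void
 *	mydrv_fence_release(struct dma_fence *fence)
 *	{
 *
 *		dma_fence_free(fence);
 *	}
 */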

/*
 * dma_fence_context_alloc(n)
 *
 * Return the first of a contiguous sequence of unique
 * identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}
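
/*
 * Example (illustrative; MYDRV_NRINGS and the softc field are
 * hypothetical): a driver with several rings allocates one context
 * per ring once at attach time, then uses sc_fence_context + r for
 * fences submitted to ring r:
 *
 *	sc->sc_fence_context = dma_fence_context_alloc(MYDRV_NRINGS);
 */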

/*
 * dma_fence_is_later(a, b)
 *
 * True if the sequence number of fence a is later than the
 * sequence number of fence b. Since sequence numbers wrap
 * around, we define this to mean that the sequence number of
 * fence a is no more than INT_MAX past the sequence number of
 * fence b.
 *
 * The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->f_magic != FENCE_MAGIC_BAD, "fence %p", a);
	KASSERTMSG(a->f_magic == FENCE_MAGIC_GOOD, "fence %p", a);
	KASSERTMSG(b->f_magic != FENCE_MAGIC_BAD, "fence %p", b);
	KASSERTMSG(b->f_magic == FENCE_MAGIC_GOOD, "fence %p", b);
	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
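
/*
 * Worked example of the wraparound arithmetic, assuming 32-bit
 * unsigned seqnos: if a->seqno = 2 and b->seqno = UINT_MAX - 1, then
 * a->seqno - b->seqno wraps around to 4, which is < INT_MAX, so a is
 * later than b even though 2 is numerically smaller; conversely
 * b->seqno - a->seqno = UINT_MAX - 3, which is not < INT_MAX, so b is
 * not later than a.
 */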

/*
 * dma_fence_get_stub()
 *
 * Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
	/*
	 * XXX This probably isn't good enough -- caller may try
	 * operations on this that require the lock, which will
	 * require us to create and destroy the lock on module
	 * load/unload.
	 */
	static struct dma_fence fence = {
		.refcount = {1},	/* always referenced */
		.flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
	};

	return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 * Acquire a reference to fence and return it, or return NULL if
 * fence is NULL. The fence, if nonnull, must not be being
 * destroyed.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	if (fence == NULL)
		return NULL;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 * Attempt to acquire a reference to a fence that may be about to
 * be destroyed, during a read section. Return the fence on
 * success, or NULL on failure. The fence must be nonnull.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	__insn_barrier();
	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 * Attempt to acquire a reference to the fence *fencep, which may
 * be about to be destroyed, during a read section. If the value
 * of *fencep changes after we read *fencep but before we
 * increment its reference count, retry. Return *fencep on
 * success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once. */
	__insn_barrier();

	/* If there's nothing there, give up. */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts. */
	membar_datadep_consumer();

	/* Try to acquire a reference. If we can't, try again. */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence. If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success! */
	KASSERT(dma_fence_referenced_p(fence));
	return fence;
}
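
/*
 * Example (illustrative sketch; the mo_fence field is hypothetical):
 * a reader that follows a fence pointer published under RCU acquires
 * a stable reference like this, and drops it with dma_fence_put when
 * done:
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&obj->mo_fence);
 *	rcu_read_unlock();
 *	if (fence != NULL) {
 *		... use fence ...
 *		dma_fence_put(fence);
 *	}
 */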

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERTMSG(TAILQ_EMPTY(&fence->f_callbacks),
	    "fence %p has pending callbacks", fence);
	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 * Release a reference to fence. If this was the last one, call
 * the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 * Internal subroutine. If the fence was already signalled,
 * return -ENOENT. Otherwise, if the enable signalling callback
 * has not been called yet, call it. If it fails, signal the fence
 * and return -ENOENT. If it succeeds, or if it had already been
 * called, return zero to indicate success.
 *
 * Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it. */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling, or fail if the fence doesn't support that.
	 */
	if (!already_enabled) {
		if (fence->ops->enable_signaling == NULL)
			return -ENOENT;
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT. */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success! */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 * If fence has been signalled, return -ENOENT. If the enable
 * signalling callback hasn't been called yet, call it; if it
 * fails, return -ENOENT. Otherwise, arrange to call fn(fence,
 * fcb) when it is signalled, and return 0.
 *
 * The fence uses memory allocated by the caller in fcb from the
 * time of dma_fence_add_callback until either the time of
 * dma_fence_remove_callback or just before fn is called.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback. */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done. */
out1:	spin_unlock(fence->lock);
out0:	if (ret) {
		fcb->func = NULL;
		fcb->fcb_onqueue = false;
	}
	return ret;
}
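
/*
 * Example (hedged sketch; mydrv_done_cb and the mydrv_waiter layout
 * are hypothetical): the callback memory must outlive the
 * registration, so it is typically embedded in a caller-owned
 * structure and recovered with container_of:
 *
 *	static void
 *	mydrv_done_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
 *	{
 *		struct mydrv_waiter *w = container_of(fcb,
 *		    struct mydrv_waiter, mw_cb);
 *
 *		... mark w complete and wake its waiter ...
 *	}
 *
 *	error = dma_fence_add_callback(fence, &w->mw_cb, mydrv_done_cb);
 *	if (error == -ENOENT)
 *		... fence already signalled; complete w directly ...
 */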

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 * Remove the callback fcb from fence. Return true if it was
 * removed from the list, or false if it had already run and so
 * was no longer queued anyway. Caller must have already called
 * dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 * If the fence hasn't been signalled yet and its enable_signaling
 * callback hasn't been called yet, call the callback. If the
 * callback fails by returning false, signal the fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
		(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 * Test whether the fence has been signalled. If it has been
 * signalled by dma_fence_signal(_locked), return true. If the
 * signalled callback returns true indicating that some implicit
 * external condition has changed, call the callbacks as if with
 * dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 * Test whether the fence has been signalled. Like
 * dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it. */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon. Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 * Set an error code prior to dma_fence_signal for use by a
 * waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 * Return 0 if fence has yet to be signalled, 1 if it has been
 * signalled without error, or negative error code if
 * dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
	int ret;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);
	if (!dma_fence_is_signaled_locked(fence)) {
		ret = 0;
	} else if (fence->error) {
		ret = fence->error;
		KASSERTMSG(ret < 0, "%d", ret);
	} else {
		ret = 1;
	}
	spin_unlock(fence->lock);

	return ret;
}
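
/*
 * Example (hedged sketch; job and fence names are hypothetical): a
 * producer that detects a failed job sets the error before
 * signalling, and a consumer later distinguishes the three outcomes:
 *
 *	dma_fence_set_error(&job->mj_fence, -EIO);
 *	dma_fence_signal(&job->mj_fence);
 *	...
 *	error = dma_fence_get_status(fence);
 *	if (error == 0)
 *		... not signalled yet ...
 *	else if (error == 1)
 *		... signalled without error ...
 *	else
 *		... signalled; error is the negative code set earlier ...
 */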

/*
 * dma_fence_signal(fence)
 *
 * Signal the fence. If it has already been signalled, return
 * -EINVAL. Otherwise, set the signalled bit, record the
 * timestamp, wake any waiters, and remove each registered
 * callback from the queue and call it; then return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 * Signal the fence. Like dma_fence_signal, but caller already
 * holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Set the timestamp. */
	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success! */
	return 0;
}
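
/*
 * Example (illustrative sketch; the interrupt-handler shape and
 * mydrv_* names are hypothetical): a producer typically signals the
 * fence from its completion path, either with the fence's lock
 * already held (dma_fence_signal_locked) or by letting
 * dma_fence_signal take it:
 *
 *	static int
 *	mydrv_intr(void *cookie)
 *	{
 *		struct mydrv_softc *sc = cookie;
 *		struct mydrv_job *job = mydrv_completed_job(sc);
 *
 *		if (job != NULL)
 *			(void)dma_fence_signal(&job->mj_fence);
 *		return 1;
 *	}
 */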

struct wait_any {
	struct dma_fence_cb fcb;
	struct wait_any1 {
		kmutex_t lock;
		kcondvar_t cv;
		bool done;
		uint32_t *ip;
		struct wait_any *cb;
	} *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	if (cb->common->ip)
		*cb->common->ip = cb - cb->common->cb;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fences, nfences, intr, timeout, ip)
 *
 * Wait for any of fences[0], fences[1], fences[2], ...,
 * fences[nfences-1] to be signalled. If ip is nonnull, set *ip
 * to the index of the first one.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout, uint32_t *ip)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records. */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait. */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;
	common.ip = ip;
	common.cb = cb;

	/* Add a callback to each of the fences, or stop here if we can't. */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled. If they
	 * have, stop here. If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags)) {
			if (ip)
				*ip = j;
			ret = 0;
			goto out1;
		}
	}

	/*
	 * None of them was ready immediately. Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

out1:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}
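
/*
 * Example (hedged sketch): waiting up to roughly one second for
 * whichever of two fences signals first, and finding out which one:
 *
 *	uint32_t which;
 *	long remaining;
 *
 *	remaining = dma_fence_wait_any_timeout(fences, 2, true,
 *	    mstohz(1000), &which);
 *	if (remaining < 0)
 *		... error, e.g. -ERESTARTSYS if interrupted ...
 *	else if (remaining > 0)
 *		... fences[which] signalled with time to spare ...
 *	else
 *		... timed out, or a fence was already signalled on entry ...
 */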

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true; or until timeout, if positive. Return -ERESTARTSYS if
 * interrupted, negative error code on any other error, zero on
 * timeout, or positive number of ticks remaining if the fence is
 * signalled before the timeout. Works by calling the fence wait
 * callback.
 *
 * The timeout must be nonnegative and less than
 * MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout < MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	if (fence->ops->wait)
		return (*fence->ops->wait)(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}
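
/*
 * Example (hedged sketch): a bounded, interruptible wait on a single
 * fence, mapping the return value onto the usual outcomes:
 *
 *	long ret = dma_fence_wait_timeout(fence, true, mstohz(1000));
 *
 *	if (ret == -ERESTARTSYS)
 *		... interrupted; restart the syscall ...
 *	else if (ret < 0)
 *		... some other error ...
 *	else if (ret == 0)
 *		... timed out ...
 *	else
 *		... signalled with ret ticks remaining ...
 */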

/*
 * dma_fence_wait(fence, intr)
 *
 * Wait until fence is signalled; or until interrupt, if intr is
 * true. Return -ERESTARTSYS if interrupted, negative error code
 * on any other error, zero on success. Works by calling the fence
 * wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	if (fence->ops->wait)
		ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	else
		ret = dma_fence_default_wait(fence, intr,
		    MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 * Default implementation of fence wait callback using a condition
 * variable. If the fence is already signalled, return timeout,
 * or 1 if timeout is zero meaning poll. If the enable signalling
 * callback hasn't been called, call it, and if it fails, act as
 * if the fence had been signalled. Otherwise, wait on the
 * internal condvar. If timeout is MAX_SCHEDULE_TIMEOUT, wait
 * indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0;	/* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return (timeout ? timeout : 1);

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed. */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		spin_unlock(fence->lock);
		return (timeout ? timeout : 1);
	}

	/* If merely polling, stop here. */
	if (timeout == 0) {
		spin_unlock(fence->lock);
		return 0;
	}

	/* Find out what our deadline is so we can handle spurious wakeup. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set. */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up. */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

	/* All done. Release the lock. */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout. */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this. */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS. */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail. */
	if (ret)
		return ret;

	/*
	 * Success! Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}

/*
 * __dma_fence_signal(fence)
 *
 * Set fence's signalled bit, without waking waiters yet. Return
 * true if it was newly set, false if it was already set.
 */
bool
__dma_fence_signal(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	return true;
}

/*
 * __dma_fence_signal_wake(fence, timestamp)
 *
 * Set fence's timestamp and wake fence's waiters. Caller must
 * have previously called __dma_fence_signal and it must have
 * previously returned true.
 */
void
__dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *fcb, *next;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);

	KASSERT(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT));

	/* Set the timestamp. */
	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	spin_unlock(fence->lock);
}