/*	$NetBSD: linux_dma_fence.c,v 1.29 2021/12/19 12:30:56 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.29 2021/12/19 12:30:56 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define	FENCE_MAGIC_GOOD	0x607ba424048c37e5ULL
#define	FENCE_MAGIC_BAD		0x7641ca721344505fULL

/*
 * linux_dma_fence_trace
 *
 *	True if we print DMA_FENCE_TRACE messages, false if not. These
 *	are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 *	in boothowto.
 */
int	linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 *	True if fence has a positive reference count. True after
 *	dma_fence_init; after the last dma_fence_put, this becomes
 *	false. The fence must have been initialized and must not have
 *	been destroyed.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 *	Initialize fence. Caller should call dma_fence_destroy when
 *	done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_GOOD;
#endif
}
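
/*
 * Illustrative sketch of the lifecycle above: a driver-side fence
 * embedded in a larger object, allocated with kmalloc, initialized
 * with dma_fence_init, and torn down through the release callback and
 * dma_fence_free. The names example_fence, example_fence_ops, and
 * example_lock are hypothetical.
 *
 *	struct example_fence {
 *		struct dma_fence base;
 *	};
 *
 *	static void
 *	example_release(struct dma_fence *f)
 *	{
 *		struct example_fence *ef =
 *		    container_of(f, struct example_fence, base);
 *
 *		dma_fence_free(&ef->base);	-- destroy, then kfree after RCU
 *	}
 *
 *	static const struct dma_fence_ops example_fence_ops = {
 *		.release = example_release,
 *	};
 *
 *	struct example_fence *ef = kmalloc(sizeof(*ef), GFP_KERNEL);
 *	dma_fence_init(&ef->base, &example_fence_ops, &example_lock,
 *	    dma_fence_context_alloc(1), 1);
 */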

/*
 * dma_fence_reset(fence, ops, lock, context, seqno)
 *
 *	Ensure fence is in a quiescent state. Allowed either for newly
 *	initialized or freed fences, but not fences with more than one
 *	reference.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(kref_read(&fence->refcount) == 0 ||
	    kref_read(&fence->refcount) == 1);
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	KASSERT(fence->lock == lock);
	KASSERT(fence->ops == ops);

	kref_init(&fence->refcount);
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->error = 0;
}

/*
 * dma_fence_destroy(fence)
 *
 *	Clean up memory initialized with dma_fence_init. This is meant
 *	to be used after a fence release callback.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_BAD;
#endif

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 *	Schedule fence to be destroyed and then freed with kfree after
 *	any pending RCU read sections on all CPUs have completed.
 *	Caller must guarantee all references have been released. This
 *	is meant to be used after a fence release callback.
 *
 *	NOTE: Callers assume kfree will be used. We don't even use
 *	kmalloc to allocate these -- caller is expected to allocate
 *	memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}

/*
 * dma_fence_context_alloc(n)
 *
 *	Return the first of a contiguous sequence of unique
 *	identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}

/*
 * dma_fence_is_later(a, b)
 *
 *	True if the sequence number of fence a is later than the
 *	sequence number of fence b. Since sequence numbers wrap
 *	around, we define this to mean that the sequence number of
 *	fence a is no more than INT_MAX past the sequence number of
 *	fence b.
 *
 *	The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->f_magic != FENCE_MAGIC_BAD, "fence %p", a);
	KASSERTMSG(a->f_magic == FENCE_MAGIC_GOOD, "fence %p", a);
	KASSERTMSG(b->f_magic != FENCE_MAGIC_BAD, "fence %p", b);
	KASSERTMSG(b->f_magic == FENCE_MAGIC_GOOD, "fence %p", b);
	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
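
/*
 * Worked example of the wraparound arithmetic above, with 32-bit
 * unsigned sequence numbers: if a->seqno = 3 and b->seqno =
 * UINT_MAX - 1, then a->seqno - b->seqno = 4 < INT_MAX, so a counts
 * as later than b even though its raw value is numerically smaller.
 * Swapping the operands gives b->seqno - a->seqno = UINT_MAX - 4,
 * which is >= INT_MAX, so b does not count as later than a.
 */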

/*
 * dma_fence_get_stub()
 *
 *	Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
	/*
	 * XXX This probably isn't good enough -- caller may try
	 * operations on this that require the lock, which will
	 * require us to create and destroy the lock on module
	 * load/unload.
	 */
	static struct dma_fence fence = {
		.refcount = {1},	/* always referenced */
		.flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
#ifdef DIAGNOSTIC
		.f_magic = FENCE_MAGIC_GOOD,
#endif
	};

	return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 *	Acquire a reference to fence and return it, or return NULL if
 *	fence is NULL. The fence, if nonnull, must not be in the
 *	middle of being destroyed.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	if (fence == NULL)
		return NULL;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 *	Attempt to acquire a reference to a fence that may be about to
 *	be destroyed, during a read section. Return the fence on
 *	success, or NULL on failure. The fence must be nonnull.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	__insn_barrier();
	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 *	Attempt to acquire a reference to the fence *fencep, which may
 *	be about to be destroyed, during a read section. If the value
 *	of *fencep changes after we read *fencep but before we
 *	increment its reference count, retry. Return *fencep on
 *	success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once. */
	__insn_barrier();

	/* If there's nothing there, give up. */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts. */
	membar_datadep_consumer();

	/* Try to acquire a reference. If we can't, try again. */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence. If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success! */
	KASSERT(dma_fence_referenced_p(fence));
	return fence;
}
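
/*
 * Illustrative sketch of the intended usage: taking a reference to a
 * shared, RCU-protected fence pointer with dma_fence_get_rcu_safe.
 * The names example_obj and example_current_fence are hypothetical.
 *
 *	struct example_obj {
 *		struct dma_fence *volatile example_current_fence;
 *	};
 *
 *	struct dma_fence *
 *	example_get_current_fence(struct example_obj *obj)
 *	{
 *		struct dma_fence *fence;
 *
 *		rcu_read_lock();
 *		fence = dma_fence_get_rcu_safe(&obj->example_current_fence);
 *		rcu_read_unlock();
 *
 *		return fence;	-- caller must dma_fence_put when done
 *	}
 */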

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERTMSG(TAILQ_EMPTY(&fence->f_callbacks),
	    "fence %p has pending callbacks", fence);
	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 *	Release a reference to fence. If this was the last one, call
 *	the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 *	Internal subroutine. If the fence was already signalled,
 *	return -ENOENT. Otherwise, if the enable signalling callback
 *	has not been called yet, call it. If it fails, signal the
 *	fence and return -ENOENT. If it succeeds, or if it had already
 *	been called, return zero to indicate success.
 *
 *	Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it. */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling, or fail if the fence doesn't support that.
	 */
	if (!already_enabled) {
		if (fence->ops->enable_signaling == NULL)
			return -ENOENT;
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT. */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success! */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 *	If fence has been signalled, return -ENOENT. If the enable
 *	signalling callback hasn't been called yet, call it; if it
 *	fails, return -ENOENT. Otherwise, arrange to call fn(fence,
 *	fcb) when it is signalled, and return 0.
 *
 *	The fence uses the caller-allocated memory in fcb from the
 *	time of dma_fence_add_callback until either
 *	dma_fence_remove_callback is called or fn has been called.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't. */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback. */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done. */
out1:	spin_unlock(fence->lock);
out0:	if (ret) {
		fcb->func = NULL;
		fcb->fcb_onqueue = false;
	}
	return ret;
}
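
/*
 * Illustrative sketch: registering a completion callback and handling
 * the already-signalled case. The names example_work,
 * example_work_schedule, and example_done_cb are hypothetical.
 *
 *	struct example_work {
 *		struct dma_fence_cb fcb;
 *		...
 *	};
 *
 *	static void
 *	example_done_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
 *	{
 *		struct example_work *work =
 *		    container_of(fcb, struct example_work, fcb);
 *
 *		example_work_schedule(work);
 *	}
 *
 *	...
 *	if (dma_fence_add_callback(fence, &work->fcb, example_done_cb))
 *		example_work_schedule(work);	-- fence already signalled
 */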

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 *	Remove the callback fcb from fence. Return true if it was
 *	removed from the list, or false if it had already run and so
 *	was no longer queued anyway. Caller must have already called
 *	dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 *	If the fence's enable signalling callback hasn't been called
 *	yet and the fence hasn't been signalled yet, call it. If the
 *	callback indicates failure by returning false, signal the
 *	fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
		(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 *	Test whether the fence has been signalled. If it has been
 *	signalled by dma_fence_signal(_locked), return true. If the
 *	signalled callback returns true indicating that some implicit
 *	external condition has changed, call the callbacks as if with
 *	dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 *	Test whether the fence has been signalled. Like
 *	dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it. */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon. Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 *	Set an error code prior to dma_fence_signal for use by a
 *	waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 *	Return 0 if fence has yet to be signalled, 1 if it has been
 *	signalled without error, or negative error code if
 *	dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
	int ret;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);
	if (!dma_fence_is_signaled_locked(fence)) {
		ret = 0;
	} else if (fence->error) {
		ret = fence->error;
		KASSERTMSG(ret < 0, "%d", ret);
	} else {
		ret = 1;
	}
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal(fence)
 *
 *	Signal the fence. If it has already been signalled, return
 *	-EINVAL. If it has not been signalled, set the signalled bit,
 *	record the timestamp, wake any waiters, remove each registered
 *	callback from the queue and call it, and return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 *	Signal the fence. Like dma_fence_signal, but caller already
 *	holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit. */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Set the timestamp. */
	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success! */
	return 0;
}
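
/*
 * Illustrative sketch: a producer marking its work complete. On the
 * failure path, dma_fence_set_error must be called before
 * dma_fence_signal. The names example_fence and device_failed are
 * hypothetical.
 *
 *	if (device_failed)
 *		dma_fence_set_error(example_fence, -EIO);
 *	dma_fence_signal(example_fence);
 */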

struct wait_any {
	struct dma_fence_cb fcb;
	struct wait_any1 {
		kmutex_t lock;
		kcondvar_t cv;
		bool done;
		uint32_t *ip;
		struct wait_any *cb;
	} *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	if (cb->common->ip)
		*cb->common->ip = cb - cb->common->cb;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fences, nfences, intr, timeout, ip)
 *
 *	Wait for any of fences[0], fences[1], fences[2], ...,
 *	fences[nfences-1] to be signalled. If ip is nonnull, set *ip
 *	to the index of the first one.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout, uint32_t *ip)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	/* Allocate an array of callback records. */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL) {
		ret = -ENOMEM;
		goto out0;
	}

	/* Initialize a mutex and condvar for the common wait. */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.done = false;
	common.ip = ip;
	common.cb = cb;

	/* Add a callback to each of the fences, or stop here if we can't. */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret)
			goto out1;
	}

	/*
	 * Test whether any of the fences has been signalled. If they
	 * have, stop here. If they haven't, we are guaranteed to be
	 * notified by one of the callbacks when they have.
	 */
	for (j = 0; j < nfences; j++) {
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fences[j]->flags)) {
			if (ip)
				*ip = j;
			ret = 0;
			goto out1;
		}
	}

	/*
	 * None of them was ready immediately. Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (timeout > 0 && !common.done) {
		start = getticks();
		__insn_barrier();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();
		__insn_barrier();
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
		timeout -= MIN(timeout, (unsigned)end - (unsigned)start);
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code: if we were interrupted, return
	 * ERESTARTSYS; if cv_timedwait timed out, return 0; otherwise
	 * return the remaining time.
	 */
	if (ret < 0) {
		if (ret == -EINTR || ret == -ERESTART)
			ret = -ERESTARTSYS;
		if (ret == -EWOULDBLOCK)
			ret = 0;
	} else {
		KASSERT(ret == 0);
		ret = timeout;
	}

out1:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
out0:	return ret;
}

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true; or until timeout, if positive. Return -ERESTARTSYS if
 *	interrupted, negative error code on any other error, zero on
 *	timeout, or positive number of ticks remaining if the fence is
 *	signalled before the timeout. Works by calling the fence wait
 *	callback.
 *
 *	The timeout must be nonnegative and at most
 *	MAX_SCHEDULE_TIMEOUT, which means wait indefinitely.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	if (fence->ops->wait)
		return (*fence->ops->wait)(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}
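
/*
 * Illustrative sketch of interpreting the return value described
 * above, assuming a tick-based timeout such as 5*hz:
 *
 *	ret = dma_fence_wait_timeout(fence, true, 5*hz);
 *	if (ret == -ERESTARTSYS)
 *		-- interrupted by a signal; restart or bail out
 *	else if (ret < 0)
 *		-- some other error
 *	else if (ret == 0)
 *		-- timed out before the fence was signalled
 *	else
 *		-- signalled; ret is the number of ticks remaining
 */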

/*
 * dma_fence_wait(fence, intr)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true. Return -ERESTARTSYS if interrupted, negative error code
 *	on any other error, zero on success. Works by calling the
 *	fence wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	if (fence->ops->wait)
		ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	else
		ret = dma_fence_default_wait(fence, intr,
		    MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);

	return (ret < 0 ? ret : 0);
}

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 *	Default implementation of fence wait callback using a condition
 *	variable. If the fence is already signalled, return timeout,
 *	or 1 if timeout is zero meaning poll. If the enable signalling
 *	callback hasn't been called, call it, and if it fails, act as
 *	if the fence had been signalled. Otherwise, wait on the
 *	internal condvar. If timeout is MAX_SCHEDULE_TIMEOUT, wait
 *	indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled. */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return (timeout ? timeout : 1);

	/* Acquire the lock. */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed. */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		spin_unlock(fence->lock);
		return (timeout ? timeout : 1);
	}

	/* If merely polling, stop here. */
	if (timeout == 0) {
		spin_unlock(fence->lock);
		return 0;
	}

	/* Find out what our deadline is so we can handle spurious wakeup. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		__insn_barrier();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set. */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			__insn_barrier();
			if (deadline <= now)
				break;
		}
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}
		/* If the wait failed, give up. */
		if (ret) {
			if (ret == -ERESTART)
				ret = -ERESTARTSYS;
			break;
		}
	}

	/* All done. Release the lock. */
	spin_unlock(fence->lock);

	/* If cv_timedwait gave up, return 0 meaning timeout. */
	if (ret == -EWOULDBLOCK) {
		/* Only cv_timedwait and cv_timedwait_sig can return this. */
		KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
		return 0;
	}

	/* If there was a timeout and the deadline passed, return 0. */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		if (deadline <= now)
			return 0;
	}

	/* If we were interrupted, return -ERESTARTSYS. */
	if (ret == -EINTR || ret == -ERESTART)
		return -ERESTARTSYS;

	/* If there was any other kind of error, fail. */
	if (ret)
		return ret;

	/*
	 * Success! Return the number of ticks left, at least 1, or 1
	 * if no timeout.
	 */
	return (timeout < MAX_SCHEDULE_TIMEOUT ? MAX(deadline - now, 1) : 1);
}

/*
 * __dma_fence_signal(fence)
 *
 *	Set fence's signalled bit, without waking waiters yet. Return
 *	true if it was newly set, false if it was already set.
 */
bool
__dma_fence_signal(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	return true;
}

/*
 * __dma_fence_signal_wake(fence, timestamp)
 *
 *	Set fence's timestamp and wake fence's waiters. Caller must
 *	have previously called __dma_fence_signal and it must have
 *	previously returned true.
 */
void
__dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *fcb, *next;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);

	KASSERT(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT));

	/* Set the timestamp. */
	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters. */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks. */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	spin_unlock(fence->lock);
}
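
/*
 * Illustrative sketch of the two-phase signalling protocol above: a
 * producer sets the signalled bit in a context where it cannot take
 * the fence's lock, and defers waking waiters, e.g. to a softint.
 * The names example_fence and example_softint are hypothetical.
 *
 *	-- producer context
 *	if (__dma_fence_signal(example_fence))
 *		softint_schedule(example_softint);
 *
 *	-- softint context, later
 *	__dma_fence_signal_wake(example_fence, ktime_get());
 */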