/*	$NetBSD: linux_dma_fence.c,v 1.33 2021/12/19 12:34:58 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.33 2021/12/19 12:34:58 riastradh Exp $");

#include <sys/atomic.h>
#include <sys/condvar.h>
#include <sys/queue.h>

#include <linux/atomic.h>
#include <linux/dma-fence.h>
#include <linux/errno.h>
#include <linux/kref.h>
#include <linux/sched.h>
#include <linux/spinlock.h>

#define	FENCE_MAGIC_GOOD	0x607ba424048c37e5ULL
#define	FENCE_MAGIC_BAD		0x7641ca721344505fULL

/*
 * linux_dma_fence_trace
 *
 *	True if we print DMA_FENCE_TRACE messages, false if not.  These
 *	are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
 *	in boothowto.
 */
int	linux_dma_fence_trace = 0;

/*
 * dma_fence_referenced_p(fence)
 *
 *	True if fence has a positive reference count.  True after
 *	dma_fence_init; after the last dma_fence_put, this becomes
 *	false.  The fence must have been initialized and must not have
 *	been destroyed.
 */
static inline bool __diagused
dma_fence_referenced_p(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	return kref_referenced_p(&fence->refcount);
}

/*
 * dma_fence_init(fence, ops, lock, context, seqno)
 *
 *	Initialize fence.  Caller should call dma_fence_destroy when
 *	done, after all references have been released.
 */
void
dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	kref_init(&fence->refcount);
	fence->lock = lock;
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->ops = ops;
	fence->error = 0;
	TAILQ_INIT(&fence->f_callbacks);
	cv_init(&fence->f_cv, "dmafence");

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_GOOD;
#endif
}

/*
 * dma_fence_reset(fence)
 *
 *	Ensure fence is in a quiescent state.  Allowed either for newly
 *	initialized or freed fences, but not fences with more than one
 *	reference.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    spinlock_t *lock, unsigned context, unsigned seqno)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(kref_read(&fence->refcount) == 0 ||
	    kref_read(&fence->refcount) == 1);
	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	KASSERT(fence->lock == lock);
	KASSERT(fence->ops == ops);

	kref_init(&fence->refcount);
	fence->flags = 0;
	fence->context = context;
	fence->seqno = seqno;
	fence->error = 0;
}

/*
 * dma_fence_destroy(fence)
 *
 *	Clean up memory initialized with dma_fence_init.  This is meant
 *	to be used after a fence release callback.
 *
 *	XXX extension to Linux API
 */
void
dma_fence_destroy(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

#ifdef DIAGNOSTIC
	fence->f_magic = FENCE_MAGIC_BAD;
#endif

	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
	cv_destroy(&fence->f_cv);
}

static void
dma_fence_free_cb(struct rcu_head *rcu)
{
	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);

	KASSERT(!dma_fence_referenced_p(fence));

	dma_fence_destroy(fence);
	kfree(fence);
}

/*
 * dma_fence_free(fence)
 *
 *	Schedule fence to be destroyed and then freed with kfree after
 *	any pending RCU read sections on all CPUs have completed.
 *	Caller must guarantee all references have been released.  This
 *	is meant to be used after a fence release callback.
 *
 *	NOTE: Callers assume kfree will be used.  We don't even use
 *	kmalloc to allocate these -- caller is expected to allocate
 *	memory with kmalloc to be initialized with dma_fence_init.
 */
void
dma_fence_free(struct dma_fence *fence)
{

	KASSERT(!dma_fence_referenced_p(fence));

	call_rcu(&fence->rcu, &dma_fence_free_cb);
}

/*
 * dma_fence_context_alloc(n)
 *
 *	Return the first of a contiguous sequence of unique
 *	identifiers, at least until the system wraps around.
 */
unsigned
dma_fence_context_alloc(unsigned n)
{
	static volatile unsigned next_context = 0;

	return atomic_add_int_nv(&next_context, n) - n;
}
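
/*
 * Illustrative sketch (not part of this file's API): one way a driver
 * might tie dma_fence_init, dma_fence_context_alloc, and dma_fence_free
 * together.  The mydrv_* names and layout are hypothetical; the one real
 * constraint, per the NOTE above, is that fences destined for
 * dma_fence_free must come from a kmalloc-family allocation.
 *
 *	struct mydrv_fence {
 *		struct dma_fence	base;
 *	};
 *
 *	static void
 *	mydrv_fence_release(struct dma_fence *f)
 *	{
 *		struct mydrv_fence *mf =
 *		    container_of(f, struct mydrv_fence, base);
 *
 *		dma_fence_free(&mf->base);	(kfree'd after an RCU grace period)
 *	}
 *
 *	static const struct dma_fence_ops mydrv_fence_ops = {
 *		.get_driver_name = mydrv_name,
 *		.get_timeline_name = mydrv_name,
 *		.release = mydrv_fence_release,
 *	};
 *
 *	context = dma_fence_context_alloc(1);
 *	mf = kzalloc(sizeof(*mf), GFP_KERNEL);
 *	dma_fence_init(&mf->base, &mydrv_fence_ops, &mydrv_lock,
 *	    context, ++mydrv_seqno);
 */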

/*
 * dma_fence_is_later(a, b)
 *
 *	True if the sequence number of fence a is later than the
 *	sequence number of fence b.  Since sequence numbers wrap
 *	around, we define this to mean that the sequence number of
 *	fence a is no more than INT_MAX past the sequence number of
 *	fence b.
 *
 *	The two fences must have the same context.
 */
bool
dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
{

	KASSERTMSG(a->f_magic != FENCE_MAGIC_BAD, "fence %p", a);
	KASSERTMSG(a->f_magic == FENCE_MAGIC_GOOD, "fence %p", a);
	KASSERTMSG(b->f_magic != FENCE_MAGIC_BAD, "fence %p", b);
	KASSERTMSG(b->f_magic == FENCE_MAGIC_GOOD, "fence %p", b);
	KASSERTMSG(a->context == b->context, "incommensurate fences"
	    ": %u @ %p =/= %u @ %p", a->context, a, b->context, b);

	return a->seqno - b->seqno < INT_MAX;
}
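
/*
 * Worked example of the wraparound arithmetic above (illustrative only,
 * assuming the 32-bit unsigned seqno used in this implementation): if
 * a->seqno == 2 and b->seqno == UINT_MAX - 1, then a->seqno - b->seqno
 * wraps around to 4, which is < INT_MAX, so a is considered later than b
 * even though its raw value is smaller.  Conversely, if a->seqno == 2 and
 * b->seqno == 3, then a->seqno - b->seqno wraps to UINT_MAX, which is not
 * < INT_MAX, so a is not later than b.
 */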

static const char *dma_fence_stub_name(struct dma_fence *f)
{

	return "stub";
}

static const struct dma_fence_ops dma_fence_stub_ops = {
	.get_driver_name = dma_fence_stub_name,
	.get_timeline_name = dma_fence_stub_name,
};

/*
 * dma_fence_get_stub()
 *
 *	Return a dma fence that is always already signalled.
 */
struct dma_fence *
dma_fence_get_stub(void)
{
	/*
	 * XXX This probably isn't good enough -- caller may try
	 * operations on this that require the lock, which will
	 * require us to create and destroy the lock on module
	 * load/unload.
	 */
	static struct dma_fence fence = {
		.refcount = {1},	/* always referenced */
		.flags = 1u << DMA_FENCE_FLAG_SIGNALED_BIT,
		.ops = &dma_fence_stub_ops,
#ifdef DIAGNOSTIC
		.f_magic = FENCE_MAGIC_GOOD,
#endif
	};

	return dma_fence_get(&fence);
}

/*
 * dma_fence_get(fence)
 *
 *	Acquire a reference to fence and return it, or return NULL if
 *	fence is NULL.  The fence, if nonnull, must not be being
 *	destroyed.
 */
struct dma_fence *
dma_fence_get(struct dma_fence *fence)
{

	if (fence == NULL)
		return NULL;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	kref_get(&fence->refcount);
	return fence;
}

/*
 * dma_fence_get_rcu(fence)
 *
 *	Attempt to acquire a reference to a fence that may be about to
 *	be destroyed, during a read section.  Return the fence on
 *	success, or NULL on failure.  The fence must be nonnull.
 */
struct dma_fence *
dma_fence_get_rcu(struct dma_fence *fence)
{

	__insn_barrier();
	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	if (!kref_get_unless_zero(&fence->refcount))
		return NULL;
	return fence;
}

/*
 * dma_fence_get_rcu_safe(fencep)
 *
 *	Attempt to acquire a reference to the fence *fencep, which may
 *	be about to be destroyed, during a read section.  If the value
 *	of *fencep changes after we read *fencep but before we
 *	increment its reference count, retry.  Return *fencep on
 *	success, or NULL on failure.
 */
struct dma_fence *
dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
{
	struct dma_fence *fence, *fence0;

retry:
	fence = *fencep;

	/* Load fence only once.  */
	__insn_barrier();

	/* If there's nothing there, give up.  */
	if (fence == NULL)
		return NULL;

	/* Make sure we don't load stale fence guts.  */
	membar_datadep_consumer();

	/* Try to acquire a reference.  If we can't, try again.  */
	if (!dma_fence_get_rcu(fence))
		goto retry;

	/*
	 * Confirm that it's still the same fence.  If not, release it
	 * and retry.
	 */
	fence0 = *fencep;
	__insn_barrier();
	if (fence != fence0) {
		dma_fence_put(fence);
		goto retry;
	}

	/* Success!  */
	KASSERT(dma_fence_referenced_p(fence));
	return fence;
}
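
/*
 * Illustrative sketch (hypothetical names): the intended calling pattern
 * for dma_fence_get_rcu_safe is to resolve a shared, concurrently
 * replaceable fence pointer under an RCU read section and come out with
 * a stable reference:
 *
 *	struct dma_fence *fence;
 *
 *	rcu_read_lock();
 *	fence = dma_fence_get_rcu_safe(&obj->last_fence);
 *	rcu_read_unlock();
 *
 *	if (fence != NULL) {
 *		... use fence: wait on it, test it, &c. ...
 *		dma_fence_put(fence);
 *	}
 *
 * Here obj->last_fence stands for any pointer that writers replace and
 * then release with dma_fence_put in the usual RCU publish discipline.
 */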

static void
dma_fence_release(struct kref *refcount)
{
	struct dma_fence *fence = container_of(refcount, struct dma_fence,
	    refcount);

	KASSERTMSG(TAILQ_EMPTY(&fence->f_callbacks),
	    "fence %p has pending callbacks", fence);
	KASSERT(!dma_fence_referenced_p(fence));

	if (fence->ops->release)
		(*fence->ops->release)(fence);
	else
		dma_fence_free(fence);
}

/*
 * dma_fence_put(fence)
 *
 *	Release a reference to fence.  If this was the last one, call
 *	the fence's release callback.
 */
void
dma_fence_put(struct dma_fence *fence)
{

	if (fence == NULL)
		return;
	KASSERT(dma_fence_referenced_p(fence));
	kref_put(&fence->refcount, &dma_fence_release);
}

/*
 * dma_fence_ensure_signal_enabled(fence)
 *
 *	Internal subroutine.  If the fence was already signalled,
 *	return -ENOENT.  Otherwise, if the enable signalling callback
 *	has not been called yet, call it.  If it fails, signal the
 *	fence and return -ENOENT.  If it succeeds, or if it had already
 *	been called, return zero to indicate success.
 *
 *	Caller must hold the fence's lock.
 */
static int
dma_fence_ensure_signal_enabled(struct dma_fence *fence)
{
	bool already_enabled;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Determine whether signalling was enabled, and enable it.  */
	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
	    &fence->flags);

	/* If the fence was already signalled, fail with -ENOENT.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return -ENOENT;

	/*
	 * Otherwise, if it wasn't enabled yet, try to enable
	 * signalling, or fail if the fence doesn't support that.
	 */
	if (!already_enabled) {
		if (fence->ops->enable_signaling == NULL)
			return -ENOENT;
		if (!(*fence->ops->enable_signaling)(fence)) {
			/* If it failed, signal and return -ENOENT.  */
			dma_fence_signal_locked(fence);
			return -ENOENT;
		}
	}

	/* Success!  */
	return 0;
}

/*
 * dma_fence_add_callback(fence, fcb, fn)
 *
 *	If fence has been signalled, return -ENOENT.  If the enable
 *	signalling callback hasn't been called yet, call it; if it
 *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
 *	fcb) when it is signalled, and return 0.
 *
 *	The fence uses memory allocated by the caller in fcb from the
 *	time of dma_fence_add_callback either until
 *	dma_fence_remove_callback returns, or until just before fn is
 *	called.
 */
int
dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    dma_fence_func_t fn)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		ret = -ENOENT;
		goto out0;
	}

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or fail if we can't.  */
	ret = dma_fence_ensure_signal_enabled(fence);
	if (ret)
		goto out1;

	/* Insert the callback.  */
	fcb->func = fn;
	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
	fcb->fcb_onqueue = true;
	ret = 0;

	/* Release the lock and we're done.  */
out1:	spin_unlock(fence->lock);
out0:	if (ret) {
		fcb->func = NULL;
		fcb->fcb_onqueue = false;
	}
	return ret;
}

/*
 * dma_fence_remove_callback(fence, fcb)
 *
 *	Remove the callback fcb from fence.  Return true if it was
 *	removed from the list, or false if it had already run and so
 *	was no longer queued anyway.  Caller must have already called
 *	dma_fence_add_callback(fence, fcb).
 */
bool
dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	bool onqueue;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	onqueue = fcb->fcb_onqueue;
	if (onqueue) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
	}
	spin_unlock(fence->lock);

	return onqueue;
}
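
/*
 * Illustrative sketch (hypothetical names): how a caller typically pairs
 * dma_fence_add_callback with dma_fence_remove_callback.  The embedding
 * struct and the waiter logic are made up for the example; the real rule
 * is only that the dma_fence_cb memory stays valid until the callback
 * runs or is removed.
 *
 *	struct mywaiter {
 *		struct dma_fence_cb	cb;
 *		... whatever fn needs ...
 *	};
 *
 *	static void
 *	mywaiter_done(struct dma_fence *fence, struct dma_fence_cb *cb)
 *	{
 *		struct mywaiter *w = container_of(cb, struct mywaiter, cb);
 *
 *		... fence has been signalled; kick the state machine ...
 *	}
 *
 *	if (dma_fence_add_callback(fence, &w->cb, &mywaiter_done) == -ENOENT)
 *		... already signalled: handle completion synchronously ...
 *
 *	later, on cancellation:
 *	if (!dma_fence_remove_callback(fence, &w->cb))
 *		... mywaiter_done already ran, or is about to ...
 */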

/*
 * dma_fence_enable_sw_signaling(fence)
 *
 *	If it hasn't been called yet and the fence hasn't been
 *	signalled yet, call the fence's enable_signaling callback.  If,
 *	when called, the callback indicates failure by returning false,
 *	signal the fence.
 */
void
dma_fence_enable_sw_signaling(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
		(void)dma_fence_ensure_signal_enabled(fence);
	spin_unlock(fence->lock);
}

/*
 * dma_fence_is_signaled(fence)
 *
 *	Test whether the fence has been signalled.  If it has been
 *	signalled by dma_fence_signal(_locked), return true.  If the
 *	signalled callback returns true indicating that some implicit
 *	external condition has changed, call the callbacks as if with
 *	dma_fence_signal.
 */
bool
dma_fence_is_signaled(struct dma_fence *fence)
{
	bool signaled;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	signaled = dma_fence_is_signaled_locked(fence);
	spin_unlock(fence->lock);

	return signaled;
}

/*
 * dma_fence_is_signaled_locked(fence)
 *
 *	Test whether the fence has been signalled.  Like
 *	dma_fence_is_signaled, but caller already holds the fence's lock.
 */
bool
dma_fence_is_signaled_locked(struct dma_fence *fence)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* Check whether we already set the signalled bit.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return true;

	/* If there's a signalled callback, test it.  */
	if (fence->ops->signaled) {
		if ((*fence->ops->signaled)(fence)) {
			/*
			 * It's been signalled implicitly by some
			 * external phenomenon.  Act as though someone
			 * has called dma_fence_signal.
			 */
			dma_fence_signal_locked(fence);
			return true;
		}
	}

	return false;
}

/*
 * dma_fence_set_error(fence, error)
 *
 *	Set an error code prior to dma_fence_signal for use by a
 *	waiter to learn about success or failure of the fence.
 */
void
dma_fence_set_error(struct dma_fence *fence, int error)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
	KASSERT(!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)));
	KASSERTMSG(error >= -ELAST, "%d", error);
	KASSERTMSG(error < 0, "%d", error);

	fence->error = error;
}

/*
 * dma_fence_get_status(fence)
 *
 *	Return 0 if fence has yet to be signalled, 1 if it has been
 *	signalled without error, or negative error code if
 *	dma_fence_set_error was used.
 */
int
dma_fence_get_status(struct dma_fence *fence)
{
	int ret;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);
	if (!dma_fence_is_signaled_locked(fence)) {
		ret = 0;
	} else if (fence->error) {
		ret = fence->error;
		KASSERTMSG(ret < 0, "%d", ret);
	} else {
		ret = 1;
	}
	spin_unlock(fence->lock);

	return ret;
}
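
/*
 * Illustrative sketch: the error-propagation protocol formed by
 * dma_fence_set_error, dma_fence_signal (below), and
 * dma_fence_get_status.  The producer marks the failure before
 * signalling; a consumer reads it back afterward.  The -EIO value is
 * just an example.
 *
 *	producer, on failed work:
 *		dma_fence_set_error(fence, -EIO);
 *		dma_fence_signal(fence);
 *
 *	consumer, any time later:
 *		switch (dma_fence_get_status(fence)) {
 *		case 0:		not signalled yet
 *		case 1:		signalled, success
 *		default:	signalled, negative errno (here -EIO)
 *		}
 */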

/*
 * dma_fence_signal(fence)
 *
 *	Signal the fence.  If it has already been signalled, return
 *	-EINVAL.  If it has not been signalled, call the enable
 *	signalling callback if it hasn't been called yet, and remove
 *	each registered callback from the queue and call it; then
 *	return 0.
 */
int
dma_fence_signal(struct dma_fence *fence)
{
	int ret;

	KASSERT(dma_fence_referenced_p(fence));

	spin_lock(fence->lock);
	ret = dma_fence_signal_locked(fence);
	spin_unlock(fence->lock);

	return ret;
}

/*
 * dma_fence_signal_locked(fence)
 *
 *	Signal the fence.  Like dma_fence_signal, but caller already
 *	holds the fence's lock.
 */
int
dma_fence_signal_locked(struct dma_fence *fence)
{
	struct dma_fence_cb *fcb, *next;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERT(spin_is_locked(fence->lock));

	/* If it's been signalled, fail; otherwise set the signalled bit.  */
	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return -EINVAL;

	/* Set the timestamp.  */
	fence->timestamp = ktime_get();
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters.  */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks.  */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	/* Success!  */
	return 0;
}
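
/*
 * Illustrative sketch (hypothetical names): a typical producer signals
 * the fence once the underlying work completes, e.g. from the interrupt
 * or soft interrupt path that notices completion, and then drops its
 * reference:
 *
 *	static void
 *	mydrv_complete(struct mydrv_softc *sc)
 *	{
 *		struct dma_fence *fence = ...;	(fence for the finished job)
 *
 *		(void)dma_fence_signal(fence);	(-EINVAL if already signalled)
 *		dma_fence_put(fence);
 *	}
 *
 * A producer that already holds fence->lock would use
 * dma_fence_signal_locked instead.
 */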

struct wait_any {
	struct dma_fence_cb fcb;
	struct wait_any1 {
		kmutex_t lock;
		kcondvar_t cv;
		struct wait_any *cb;
		bool done;
	} *common;
};

static void
wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);

	KASSERT(dma_fence_referenced_p(fence));

	mutex_enter(&cb->common->lock);
	cb->common->done = true;
	cv_broadcast(&cb->common->cv);
	mutex_exit(&cb->common->lock);
}

/*
 * dma_fence_wait_any_timeout(fence, nfences, intr, timeout, ip)
 *
 *	Wait for any of fences[0], fences[1], fences[2], ...,
 *	fences[nfences-1] to be signalled.  If ip is nonnull, set *ip
 *	to the index of the first one.
 *
 *	Return -ERESTARTSYS if interrupted, 0 on timeout, or time
 *	remaining (at least 1) on success.
 */
long
dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    bool intr, long timeout, uint32_t *ip)
{
	struct wait_any1 common;
	struct wait_any *cb;
	uint32_t i, j;
	int start, end;
	long ret = 0;

	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically check whether any are signalled.  */
	for (i = 0; i < nfences; i++) {
		KASSERT(dma_fence_referenced_p(fences[i]));
		if (dma_fence_is_signaled(fences[i])) {
			if (ip)
				*ip = i;
			return MAX(1, timeout);
		}
	}

	/*
	 * If timeout is zero, we're just polling, so stop here as if
	 * we timed out instantly.
	 */
	if (timeout == 0)
		return 0;

	/* Allocate an array of callback records.  */
	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
	if (cb == NULL)
		return -ENOMEM;

	/* Initialize a mutex and condvar for the common wait.  */
	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&common.cv, "fence");
	common.cb = cb;
	common.done = false;

	/*
	 * Add a callback to each of the fences, or stop if already
	 * signalled.
	 */
	for (i = 0; i < nfences; i++) {
		cb[i].common = &common;
		KASSERT(dma_fence_referenced_p(fences[i]));
		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
		    &wait_any_cb);
		if (ret) {
			KASSERT(ret == -ENOENT);
			if (ip)
				*ip = i;
			ret = MAX(1, timeout);
			goto out;
		}
	}

	/*
	 * None of them was ready immediately.  Wait for one of the
	 * callbacks to notify us when it is done.
	 */
	mutex_enter(&common.lock);
	while (!common.done) {
		/* Wait for the time remaining.  */
		start = getticks();
		if (intr) {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				ret = -cv_wait_sig(&common.cv, &common.lock);
			}
		} else {
			if (timeout != MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&common.cv,
				    &common.lock, MIN(timeout, /* paranoia */
					MAX_SCHEDULE_TIMEOUT));
			} else {
				cv_wait(&common.cv, &common.lock);
				ret = 0;
			}
		}
		end = getticks();

		/* Deduct from time remaining.  If none left, time out.  */
		if (timeout != MAX_SCHEDULE_TIMEOUT) {
			timeout -= MIN(timeout,
			    (unsigned)end - (unsigned)start);
			if (timeout == 0)
				ret = -EWOULDBLOCK;
		}

		/* If the wait failed, give up.  */
		if (ret)
			break;
	}
	mutex_exit(&common.lock);

	/*
	 * Massage the return code if nonzero:
	 * - if we were interrupted, return -ERESTARTSYS;
	 * - if we timed out, return 0.
	 * No other failure is possible.  On success, ret=0 but we
	 * check again below to verify anyway.
	 */
	if (ret) {
		KASSERTMSG((ret == -EINTR || ret == -ERESTART ||
			ret == -EWOULDBLOCK), "ret=%ld", ret);
		if (ret == -EINTR || ret == -ERESTART) {
			ret = -ERESTARTSYS;
		} else if (ret == -EWOULDBLOCK) {
			KASSERT(timeout != MAX_SCHEDULE_TIMEOUT);
			ret = 0;	/* timed out */
		}
	}

	KASSERT(ret != -ERESTART);	/* would be confused with time left */

	/*
	 * Test whether any of the fences has been signalled.  If they
	 * have, return success.
	 */
	for (j = 0; j < nfences; j++) {
		if (dma_fence_is_signaled(fences[j])) {
			if (ip)
				*ip = j;
			ret = MAX(1, timeout);
			goto out;
		}
	}

	/*
	 * If user passed MAX_SCHEDULE_TIMEOUT, we can't return 0
	 * meaning timed out because we're supposed to wait forever.
	 */
	KASSERT(timeout == MAX_SCHEDULE_TIMEOUT ? ret != 0 : 1);

out:	while (i --> 0)
		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
	cv_destroy(&common.cv);
	mutex_destroy(&common.lock);
	kfree(cb);
	return ret;
}
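
/*
 * Illustrative sketch: waiting up to one second (in ticks) for whichever
 * of two fences signals first.  The fences array and the use of hz for
 * the conversion are just for the example; the function can also fail
 * with -ENOMEM for the internal callback array.
 *
 *	struct dma_fence *fences[2] = { a, b };
 *	uint32_t which;
 *	long ret;
 *
 *	ret = dma_fence_wait_any_timeout(fences, 2, true, hz, &which);
 *	if (ret == -ERESTARTSYS)
 *		... interrupted by a signal ...
 *	else if (ret == 0)
 *		... a full second passed with neither signalled ...
 *	else if (ret > 0)
 *		... fences[which] signalled, ret ticks were left ...
 */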

/*
 * dma_fence_wait_timeout(fence, intr, timeout)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true; or until timeout, if positive.  Return -ERESTARTSYS if
 *	interrupted, negative error code on any other error, zero on
 *	timeout, or positive number of ticks remaining if the fence is
 *	signalled before the timeout.  Works by calling the fence wait
 *	callback.
 *
 *	The timeout must be nonnegative and at most
 *	MAX_SCHEDULE_TIMEOUT, which means wait indefinitely.
 */
long
dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
{

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	if (fence->ops->wait)
		return (*fence->ops->wait)(fence, intr, timeout);
	else
		return dma_fence_default_wait(fence, intr, timeout);
}
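
/*
 * Illustrative sketch: a bounded, interruptible wait and how the three
 * kinds of return value are usually handled.  The 5*hz bound and the
 * -ETIMEDOUT mapping are just example choices by the caller.
 *
 *	long ret = dma_fence_wait_timeout(fence, true, 5*hz);
 *
 *	if (ret < 0)
 *		return ret;		(-ERESTARTSYS or other error)
 *	if (ret == 0)
 *		return -ETIMEDOUT;	(caller picks its own errno)
 *	... signalled with ret ticks to spare; carry on ...
 */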

/*
 * dma_fence_wait(fence, intr)
 *
 *	Wait until fence is signalled; or until interrupt, if intr is
 *	true.  Return -ERESTARTSYS if interrupted, negative error code
 *	on any other error, zero on success.  Works by calling the
 *	fence wait callback with MAX_SCHEDULE_TIMEOUT.
 */
long
dma_fence_wait(struct dma_fence *fence, bool intr)
{
	long ret;

	KASSERT(dma_fence_referenced_p(fence));

	if (fence->ops->wait)
		ret = (*fence->ops->wait)(fence, intr, MAX_SCHEDULE_TIMEOUT);
	else
		ret = dma_fence_default_wait(fence, intr,
		    MAX_SCHEDULE_TIMEOUT);
	KASSERT(ret != 0);
	KASSERTMSG(ret == -ERESTARTSYS || ret == MAX_SCHEDULE_TIMEOUT,
	    "ret=%ld", ret);

	return (ret < 0 ? ret : 0);
}

/*
 * dma_fence_default_wait(fence, intr, timeout)
 *
 *	Default implementation of fence wait callback using a condition
 *	variable.  If the fence is already signalled, return timeout,
 *	or 1 if timeout is zero meaning poll.  If the enable signalling
 *	callback hasn't been called, call it, and if it fails, act as
 *	if the fence had been signalled.  Otherwise, wait on the
 *	internal condvar.  If timeout is MAX_SCHEDULE_TIMEOUT, wait
 *	indefinitely.
 */
long
dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
{
	int starttime = 0, now = 0, deadline = 0;	/* XXXGCC */
	kmutex_t *lock = &fence->lock->sl_lock;
	long ret = 0;

	KASSERT(dma_fence_referenced_p(fence));
	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);

	/* Optimistically try to skip the lock if it's already signalled.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
		return MAX(1, timeout);

	/* Acquire the lock.  */
	spin_lock(fence->lock);

	/* Ensure signalling is enabled, or stop if already completed.  */
	if (dma_fence_ensure_signal_enabled(fence) != 0) {
		ret = MAX(1, timeout);
		goto out;
	}

	/* If merely polling, stop here.  */
	if (timeout == 0) {
		ret = 0;
		goto out;
	}

	/* Find out what our deadline is so we can handle spurious wakeup.  */
	if (timeout < MAX_SCHEDULE_TIMEOUT) {
		now = getticks();
		starttime = now;
		deadline = starttime + timeout;
	}

	/* Wait until the signalled bit is set.  */
	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
		/*
		 * If there's a timeout and we've passed the deadline,
		 * give up.
		 */
		if (timeout < MAX_SCHEDULE_TIMEOUT) {
			now = getticks();
			if (deadline <= now) {
				ret = -EWOULDBLOCK;
				break;
			}
		}

		/* Wait for the time remaining.  */
		if (intr) {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait_sig(&fence->f_cv, lock,
				    deadline - now);
			} else {
				ret = -cv_wait_sig(&fence->f_cv, lock);
			}
		} else {
			if (timeout < MAX_SCHEDULE_TIMEOUT) {
				ret = -cv_timedwait(&fence->f_cv, lock,
				    deadline - now);
			} else {
				cv_wait(&fence->f_cv, lock);
				ret = 0;
			}
		}

		/* If the wait failed, give up.  */
		if (ret)
			break;
	}

	/*
	 * Massage the return code if nonzero:
	 * - if we were interrupted, return -ERESTARTSYS;
	 * - if we timed out, return 0.
	 * No other failure is possible.  On success, ret=0 but we
	 * check again below to verify anyway.
	 */
	if (ret) {
		KASSERTMSG((ret == -EINTR || ret == -ERESTART ||
			ret == -EWOULDBLOCK), "ret=%ld", ret);
		if (ret == -EINTR || ret == -ERESTART) {
			ret = -ERESTARTSYS;
		} else if (ret == -EWOULDBLOCK) {
			KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
			ret = 0;	/* timed out */
		}
	}

	KASSERT(ret != -ERESTART);	/* would be confused with time left */

	/* Check again in case it was signalled after a wait.  */
	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
		if (timeout < MAX_SCHEDULE_TIMEOUT)
			ret = MAX(1, deadline - now);
		else
			ret = MAX_SCHEDULE_TIMEOUT;
	}

out:	/* All done.  Release the lock.  */
	spin_unlock(fence->lock);
	return ret;
}

/*
 * __dma_fence_signal(fence)
 *
 *	Set fence's signalled bit, without waking waiters yet.  Return
 *	true if it was newly set, false if it was already set.
 */
bool
__dma_fence_signal(struct dma_fence *fence)
{

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return false;

	return true;
}

/*
 * __dma_fence_signal_wake(fence, timestamp)
 *
 *	Set fence's timestamp and wake fence's waiters.  Caller must
 *	have previously called __dma_fence_signal and it must have
 *	previously returned true.
 */
void
__dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
{
	struct dma_fence_cb *fcb, *next;

	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);

	spin_lock(fence->lock);

	KASSERT(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT));

	/* Set the timestamp.  */
	fence->timestamp = timestamp;
	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);

	/* Wake waiters.  */
	cv_broadcast(&fence->f_cv);

	/* Remove and call the callbacks.  */
	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
		fcb->fcb_onqueue = false;
		(*fcb->func)(fence, fcb);
	}

	spin_unlock(fence->lock);
}
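
/*
 * Illustrative sketch: the split __dma_fence_signal /
 * __dma_fence_signal_wake protocol latches completion first and defers
 * the timestamp, wakeup, and callbacks to a second step, e.g.:
 *
 *	ktime_t now = ktime_get();
 *
 *	if (__dma_fence_signal(fence)) {
 *		... later, possibly from a more convenient context ...
 *		__dma_fence_signal_wake(fence, now);
 *	}
 *
 * As documented above, __dma_fence_signal_wake is only for fences for
 * which __dma_fence_signal has previously returned true.
 */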