      1  1.42  riastrad /*	$NetBSD: linux_dma_fence.c,v 1.42 2022/09/01 09:37:06 riastradh Exp $	*/
      2   1.1  riastrad 
      3   1.1  riastrad /*-
      4   1.1  riastrad  * Copyright (c) 2018 The NetBSD Foundation, Inc.
      5   1.1  riastrad  * All rights reserved.
      6   1.1  riastrad  *
      7   1.1  riastrad  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1  riastrad  * by Taylor R. Campbell.
      9   1.1  riastrad  *
     10   1.1  riastrad  * Redistribution and use in source and binary forms, with or without
     11   1.1  riastrad  * modification, are permitted provided that the following conditions
     12   1.1  riastrad  * are met:
     13   1.1  riastrad  * 1. Redistributions of source code must retain the above copyright
     14   1.1  riastrad  *    notice, this list of conditions and the following disclaimer.
     15   1.1  riastrad  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1  riastrad  *    notice, this list of conditions and the following disclaimer in the
     17   1.1  riastrad  *    documentation and/or other materials provided with the distribution.
     18   1.1  riastrad  *
     19   1.1  riastrad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1  riastrad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1  riastrad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1  riastrad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1  riastrad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1  riastrad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1  riastrad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1  riastrad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1  riastrad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1  riastrad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1  riastrad  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1  riastrad  */
     31   1.1  riastrad 
     32   1.1  riastrad #include <sys/cdefs.h>
     33  1.42  riastrad __KERNEL_RCSID(0, "$NetBSD: linux_dma_fence.c,v 1.42 2022/09/01 09:37:06 riastradh Exp $");
     34   1.1  riastrad 
     35   1.1  riastrad #include <sys/atomic.h>
     36   1.1  riastrad #include <sys/condvar.h>
     37  1.38  riastrad #include <sys/lock.h>
     38   1.1  riastrad #include <sys/queue.h>
     39  1.36  riastrad #include <sys/sdt.h>
     40   1.1  riastrad 
     41   1.1  riastrad #include <linux/atomic.h>
     42   1.2  riastrad #include <linux/dma-fence.h>
     43   1.1  riastrad #include <linux/errno.h>
     44   1.1  riastrad #include <linux/kref.h>
     45   1.1  riastrad #include <linux/sched.h>
     46   1.1  riastrad #include <linux/spinlock.h>
     47   1.1  riastrad 
     48  1.24  riastrad #define	FENCE_MAGIC_GOOD	0x607ba424048c37e5ULL
     49  1.24  riastrad #define	FENCE_MAGIC_BAD		0x7641ca721344505fULL
     50  1.24  riastrad 
     51  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, init,
     52  1.36  riastrad     "struct dma_fence *"/*fence*/);
     53  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, reset,
     54  1.36  riastrad     "struct dma_fence *"/*fence*/);
     55  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, release,
     56  1.36  riastrad     "struct dma_fence *"/*fence*/);
     57  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, free,
     58  1.36  riastrad     "struct dma_fence *"/*fence*/);
     59  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, destroy,
     60  1.36  riastrad     "struct dma_fence *"/*fence*/);
     61  1.36  riastrad 
     62  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, enable_signaling,
     63  1.36  riastrad     "struct dma_fence *"/*fence*/);
     64  1.36  riastrad SDT_PROBE_DEFINE2(sdt, drm, fence, add_callback,
     65  1.36  riastrad     "struct dma_fence *"/*fence*/,
     66  1.36  riastrad     "struct dma_fence_callback *"/*callback*/);
     67  1.36  riastrad SDT_PROBE_DEFINE2(sdt, drm, fence, remove_callback,
     68  1.36  riastrad     "struct dma_fence *"/*fence*/,
     69  1.36  riastrad     "struct dma_fence_callback *"/*callback*/);
     70  1.36  riastrad SDT_PROBE_DEFINE2(sdt, drm, fence, callback,
     71  1.36  riastrad     "struct dma_fence *"/*fence*/,
     72  1.36  riastrad     "struct dma_fence_callback *"/*callback*/);
     73  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, test,
     74  1.36  riastrad     "struct dma_fence *"/*fence*/);
     75  1.36  riastrad SDT_PROBE_DEFINE2(sdt, drm, fence, set_error,
     76  1.36  riastrad     "struct dma_fence *"/*fence*/,
     77  1.36  riastrad     "int"/*error*/);
     78  1.36  riastrad SDT_PROBE_DEFINE1(sdt, drm, fence, signal,
     79  1.36  riastrad     "struct dma_fence *"/*fence*/);
     80  1.36  riastrad 
     81  1.36  riastrad SDT_PROBE_DEFINE3(sdt, drm, fence, wait_start,
     82  1.36  riastrad     "struct dma_fence *"/*fence*/,
     83  1.36  riastrad     "bool"/*intr*/,
     84  1.36  riastrad     "long"/*timeout*/);
     85  1.36  riastrad SDT_PROBE_DEFINE2(sdt, drm, fence, wait_done,
     86  1.36  riastrad     "struct dma_fence *"/*fence*/,
     87  1.36  riastrad     "long"/*ret*/);
     88  1.36  riastrad 
     89   1.1  riastrad /*
     90   1.2  riastrad  * linux_dma_fence_trace
     91   1.1  riastrad  *
     92   1.2  riastrad  *	True if we print DMA_FENCE_TRACE messages, false if not.  These
     93   1.2  riastrad  *	are extremely noisy, too much even for AB_VERBOSE and AB_DEBUG
     94   1.2  riastrad  *	in boothowto.
     95   1.1  riastrad  */
     96   1.2  riastrad int	linux_dma_fence_trace = 0;
     97   1.1  riastrad 
     98  1.42  riastrad static struct {
     99  1.42  riastrad 	spinlock_t		lock;
    100  1.42  riastrad 	struct dma_fence	fence;
    101  1.42  riastrad } dma_fence_stub __cacheline_aligned;
    102  1.41  riastrad 
    103  1.41  riastrad static const char *dma_fence_stub_name(struct dma_fence *f)
    104  1.41  riastrad {
    105  1.41  riastrad 
    106  1.42  riastrad 	KASSERT(f == &dma_fence_stub.fence);
    107  1.41  riastrad 	return "stub";
    108  1.41  riastrad }
    109  1.41  riastrad 
    110  1.41  riastrad static void
    111  1.41  riastrad dma_fence_stub_release(struct dma_fence *f)
    112  1.41  riastrad {
    113  1.41  riastrad 
    114  1.42  riastrad 	KASSERT(f == &dma_fence_stub.fence);
    115  1.41  riastrad 	dma_fence_destroy(f);
    116  1.41  riastrad }
    117  1.41  riastrad 
    118  1.41  riastrad static const struct dma_fence_ops dma_fence_stub_ops = {
    119  1.41  riastrad 	.get_driver_name = dma_fence_stub_name,
    120  1.41  riastrad 	.get_timeline_name = dma_fence_stub_name,
    121  1.41  riastrad 	.release = dma_fence_stub_release,
    122  1.41  riastrad };
    123  1.41  riastrad 
    124  1.41  riastrad /*
    125  1.41  riastrad  * linux_dma_fences_init(), linux_dma_fences_fini()
    126  1.41  riastrad  *
    127  1.41  riastrad  *	Set up and tear down module state.
    128  1.41  riastrad  */
    129  1.41  riastrad void
    130  1.41  riastrad linux_dma_fences_init(void)
    131  1.41  riastrad {
    132  1.41  riastrad 	int error __diagused;
    133  1.41  riastrad 
    134  1.42  riastrad 	spin_lock_init(&dma_fence_stub.lock);
    135  1.42  riastrad 	dma_fence_init(&dma_fence_stub.fence, &dma_fence_stub_ops,
    136  1.42  riastrad 	    &dma_fence_stub.lock, /*context*/0, /*seqno*/0);
    137  1.42  riastrad 	error = dma_fence_signal(&dma_fence_stub.fence);
    138  1.41  riastrad 	KASSERTMSG(error == 0, "error=%d", error);
    139  1.41  riastrad }
    140  1.41  riastrad 
    141  1.41  riastrad void
    142  1.41  riastrad linux_dma_fences_fini(void)
    143  1.41  riastrad {
    144  1.41  riastrad 
    145  1.42  riastrad 	dma_fence_put(&dma_fence_stub.fence);
    146  1.42  riastrad 	spin_lock_destroy(&dma_fence_stub.lock);
    147  1.41  riastrad }
    148  1.41  riastrad 
    149   1.1  riastrad /*
    150   1.2  riastrad  * dma_fence_referenced_p(fence)
    151   1.1  riastrad  *
    152   1.1  riastrad  *	True if fence has a positive reference count.  True after
    153   1.2  riastrad  *	dma_fence_init; after the last dma_fence_put, this becomes
    154  1.24  riastrad  *	false.  The fence must have been initialized and must not have
    155  1.24  riastrad  *	been destroyed.
    156   1.1  riastrad  */
    157   1.1  riastrad static inline bool __diagused
    158   1.2  riastrad dma_fence_referenced_p(struct dma_fence *fence)
    159   1.1  riastrad {
    160   1.1  riastrad 
    161  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
    162  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
    163  1.24  riastrad 
    164   1.1  riastrad 	return kref_referenced_p(&fence->refcount);
    165   1.1  riastrad }
    166   1.1  riastrad 
    167   1.1  riastrad /*
    168   1.2  riastrad  * dma_fence_init(fence, ops, lock, context, seqno)
    169   1.1  riastrad  *
    170   1.2  riastrad  *	Initialize fence.  Caller should call dma_fence_destroy when
    171   1.2  riastrad  *	done, after all references have been released.
    172   1.1  riastrad  */
    173   1.1  riastrad void
    174   1.2  riastrad dma_fence_init(struct dma_fence *fence, const struct dma_fence_ops *ops,
    175  1.38  riastrad     spinlock_t *lock, uint64_t context, uint64_t seqno)
    176   1.1  riastrad {
    177   1.1  riastrad 
    178   1.1  riastrad 	kref_init(&fence->refcount);
    179   1.1  riastrad 	fence->lock = lock;
    180   1.1  riastrad 	fence->flags = 0;
    181   1.1  riastrad 	fence->context = context;
    182   1.1  riastrad 	fence->seqno = seqno;
    183   1.1  riastrad 	fence->ops = ops;
    184  1.18  riastrad 	fence->error = 0;
    185   1.1  riastrad 	TAILQ_INIT(&fence->f_callbacks);
    186   1.2  riastrad 	cv_init(&fence->f_cv, "dmafence");
    187  1.24  riastrad 
    188  1.24  riastrad #ifdef DIAGNOSTIC
    189  1.24  riastrad 	fence->f_magic = FENCE_MAGIC_GOOD;
    190  1.24  riastrad #endif
    191  1.36  riastrad 
    192  1.36  riastrad 	SDT_PROBE1(sdt, drm, fence, init,  fence);
    193   1.1  riastrad }
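
/*
 * Example: embedding a fence in a driver job
 *
 *	A minimal usage sketch, assuming a hypothetical driver with a
 *	softc sc and its own job structure -- none of the mydrv_* or
 *	sc_* names below exist in this file:
 *
 *		struct mydrv_job {
 *			spinlock_t		mj_lock;
 *			struct dma_fence	mj_fence;
 *		};
 *
 *		static const char *
 *		mydrv_fence_name(struct dma_fence *f)
 *		{
 *			return "mydrv";
 *		}
 *
 *		static void
 *		mydrv_fence_release(struct dma_fence *f)
 *		{
 *			struct mydrv_job *job = container_of(f,
 *			    struct mydrv_job, mj_fence);
 *
 *			dma_fence_destroy(f);
 *			spin_lock_destroy(&job->mj_lock);
 *			kfree(job);
 *		}
 *
 *		static const struct dma_fence_ops mydrv_fence_ops = {
 *			.get_driver_name = mydrv_fence_name,
 *			.get_timeline_name = mydrv_fence_name,
 *			.release = mydrv_fence_release,
 *		};
 *
 *	The driver allocates a timeline once, e.g. at attach time:
 *
 *		sc->sc_timeline = dma_fence_context_alloc(1);
 *
 *	and each job, allocated with kmalloc, takes the next sequence
 *	number on that timeline:
 *
 *		spin_lock_init(&job->mj_lock);
 *		dma_fence_init(&job->mj_fence, &mydrv_fence_ops,
 *		    &job->mj_lock, sc->sc_timeline, ++sc->sc_seqno);
 */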
    194   1.1  riastrad 
    195   1.1  riastrad /*
     196  1.18  riastrad  * dma_fence_reset(fence, ops, lock, context, seqno)
    197  1.18  riastrad  *
    198  1.18  riastrad  *	Ensure fence is in a quiescent state.  Allowed either for newly
    199  1.18  riastrad  *	initialized or freed fences, but not fences with more than one
    200  1.18  riastrad  *	reference.
    201  1.18  riastrad  *
    202  1.18  riastrad  *	XXX extension to Linux API
    203  1.18  riastrad  */
    204  1.18  riastrad void
    205  1.18  riastrad dma_fence_reset(struct dma_fence *fence, const struct dma_fence_ops *ops,
    206  1.38  riastrad     spinlock_t *lock, uint64_t context, uint64_t seqno)
    207  1.18  riastrad {
    208  1.18  riastrad 
    209  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
    210  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
    211  1.18  riastrad 	KASSERT(kref_read(&fence->refcount) == 0 ||
    212  1.18  riastrad 	    kref_read(&fence->refcount) == 1);
    213  1.18  riastrad 	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
    214  1.18  riastrad 	KASSERT(fence->lock == lock);
    215  1.18  riastrad 	KASSERT(fence->ops == ops);
    216  1.18  riastrad 
    217  1.18  riastrad 	kref_init(&fence->refcount);
    218  1.18  riastrad 	fence->flags = 0;
    219  1.18  riastrad 	fence->context = context;
    220  1.18  riastrad 	fence->seqno = seqno;
    221  1.18  riastrad 	fence->error = 0;
    222  1.36  riastrad 
    223  1.36  riastrad 	SDT_PROBE1(sdt, drm, fence, reset,  fence);
    224  1.18  riastrad }
    225  1.18  riastrad 
    226  1.18  riastrad /*
    227   1.2  riastrad  * dma_fence_destroy(fence)
    228   1.1  riastrad  *
    229   1.2  riastrad  *	Clean up memory initialized with dma_fence_init.  This is meant
     230   1.2  riastrad  *	to be used from a fence release callback.
    231  1.19  riastrad  *
    232  1.19  riastrad  *	XXX extension to Linux API
    233   1.1  riastrad  */
    234   1.1  riastrad void
    235   1.2  riastrad dma_fence_destroy(struct dma_fence *fence)
    236   1.1  riastrad {
    237   1.1  riastrad 
    238   1.2  riastrad 	KASSERT(!dma_fence_referenced_p(fence));
    239   1.1  riastrad 
    240  1.36  riastrad 	SDT_PROBE1(sdt, drm, fence, destroy,  fence);
    241  1.36  riastrad 
    242  1.24  riastrad #ifdef DIAGNOSTIC
    243  1.24  riastrad 	fence->f_magic = FENCE_MAGIC_BAD;
    244  1.24  riastrad #endif
    245  1.24  riastrad 
    246   1.1  riastrad 	KASSERT(TAILQ_EMPTY(&fence->f_callbacks));
    247   1.1  riastrad 	cv_destroy(&fence->f_cv);
    248   1.1  riastrad }
    249   1.1  riastrad 
    250   1.1  riastrad static void
    251   1.2  riastrad dma_fence_free_cb(struct rcu_head *rcu)
    252   1.1  riastrad {
    253  1.19  riastrad 	struct dma_fence *fence = container_of(rcu, struct dma_fence, rcu);
    254   1.1  riastrad 
    255   1.2  riastrad 	KASSERT(!dma_fence_referenced_p(fence));
    256   1.1  riastrad 
    257   1.2  riastrad 	dma_fence_destroy(fence);
    258   1.1  riastrad 	kfree(fence);
    259   1.1  riastrad }
    260   1.1  riastrad 
    261   1.1  riastrad /*
    262   1.2  riastrad  * dma_fence_free(fence)
    263   1.1  riastrad  *
    264   1.1  riastrad  *	Schedule fence to be destroyed and then freed with kfree after
    265   1.1  riastrad  *	any pending RCU read sections on all CPUs have completed.
    266   1.1  riastrad  *	Caller must guarantee all references have been released.  This
     267   1.1  riastrad  *	is meant to be used from a fence release callback.
    268   1.1  riastrad  *
    269   1.1  riastrad  *	NOTE: Callers assume kfree will be used.  We don't even use
    270   1.1  riastrad  *	kmalloc to allocate these -- caller is expected to allocate
    271   1.2  riastrad  *	memory with kmalloc to be initialized with dma_fence_init.
    272   1.1  riastrad  */
    273   1.1  riastrad void
    274   1.2  riastrad dma_fence_free(struct dma_fence *fence)
    275   1.1  riastrad {
    276   1.1  riastrad 
    277   1.2  riastrad 	KASSERT(!dma_fence_referenced_p(fence));
    278   1.1  riastrad 
    279  1.36  riastrad 	SDT_PROBE1(sdt, drm, fence, free,  fence);
    280  1.36  riastrad 
    281  1.19  riastrad 	call_rcu(&fence->rcu, &dma_fence_free_cb);
    282   1.1  riastrad }
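
/*
 * Example:
 *
 *	A sketch of what this contract allows: a fence allocated on its
 *	own with kmalloc needs no release hook at all, because
 *	dma_fence_release (below) falls back to dma_fence_free, which
 *	waits out RCU readers and then uses kfree:
 *
 *		struct dma_fence *f;
 *
 *		f = kmalloc(sizeof(*f), GFP_KERNEL);
 *		if (f == NULL)
 *			return -ENOMEM;
 *		dma_fence_init(f, &mydrv_plain_fence_ops, &sc->sc_lock,
 *		    sc->sc_timeline, ++sc->sc_seqno);
 *
 *	Here mydrv_plain_fence_ops is a hypothetical ops table that
 *	supplies only the name hooks, and sc is the same hypothetical
 *	softc as in the sketch after dma_fence_init.  A fence embedded
 *	in a larger object, like the mydrv_job sketched there, must
 *	instead provide a release hook of its own and must not rely on
 *	this kfree default.
 */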
    283   1.1  riastrad 
    284   1.1  riastrad /*
    285   1.2  riastrad  * dma_fence_context_alloc(n)
    286   1.1  riastrad  *
    287   1.1  riastrad  *	Return the first of a contiguous sequence of unique
    288   1.1  riastrad  *	identifiers, at least until the system wraps around.
    289   1.1  riastrad  */
    290  1.38  riastrad uint64_t
    291   1.2  riastrad dma_fence_context_alloc(unsigned n)
    292   1.1  riastrad {
    293  1.38  riastrad 	static struct {
    294  1.38  riastrad 		volatile unsigned lock;
    295  1.38  riastrad 		uint64_t context;
    296  1.38  riastrad 	} S;
    297  1.38  riastrad 	uint64_t c;
    298   1.1  riastrad 
    299  1.40  riastrad 	while (__predict_false(atomic_swap_uint(&S.lock, 1)))
    300  1.38  riastrad 		SPINLOCK_BACKOFF_HOOK;
    301  1.40  riastrad 	membar_acquire();
    302  1.38  riastrad 	c = S.context;
    303  1.38  riastrad 	S.context += n;
    304  1.38  riastrad 	atomic_store_release(&S.lock, 0);
    305  1.38  riastrad 
    306  1.38  riastrad 	return c;
    307  1.38  riastrad }
    308  1.38  riastrad 
    309  1.38  riastrad /*
    310  1.38  riastrad  * __dma_fence_is_later(a, b, ops)
    311  1.38  riastrad  *
    312  1.38  riastrad  *	True if sequence number a is later than sequence number b,
    313  1.38  riastrad  *	according to the given fence ops.
    314  1.38  riastrad  *
    315  1.38  riastrad  *	- For fence ops with 64-bit sequence numbers, this is simply
    316  1.38  riastrad  *	  defined to be a > b as unsigned 64-bit integers.
    317  1.38  riastrad  *
    318  1.38  riastrad  *	- For fence ops with 32-bit sequence numbers, this is defined
    319  1.38  riastrad  *	  to mean that the 32-bit unsigned difference a - b is less
    320  1.38  riastrad  *	  than INT_MAX.
    321  1.38  riastrad  */
    322  1.38  riastrad bool
    323  1.38  riastrad __dma_fence_is_later(uint64_t a, uint64_t b, const struct dma_fence_ops *ops)
    324  1.38  riastrad {
    325  1.38  riastrad 
    326  1.38  riastrad 	if (ops->use_64bit_seqno)
    327  1.38  riastrad 		return a > b;
    328  1.38  riastrad 	else
    329  1.38  riastrad 		return (unsigned)a - (unsigned)b < INT_MAX;
    330   1.1  riastrad }
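
/*
 * Example:
 *
 *	With 32-bit sequence numbers the comparison tolerates
 *	wraparound.  For instance, on a timeline whose counter has just
 *	wrapped past UINT32_MAX (and with ops->use_64bit_seqno false):
 *
 *		__dma_fence_is_later(2, UINT32_MAX - 1, ops)	== true
 *		__dma_fence_is_later(UINT32_MAX - 1, 2, ops)	== false
 *
 *	because the 32-bit unsigned difference 2 - (UINT32_MAX - 1) is
 *	4, which is below INT_MAX, while the reverse difference is not.
 */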
    331   1.1  riastrad 
    332   1.1  riastrad /*
    333   1.2  riastrad  * dma_fence_is_later(a, b)
    334   1.1  riastrad  *
    335   1.1  riastrad  *	True if the sequence number of fence a is later than the
     336   1.1  riastrad  *	sequence number of fence b.  For 32-bit sequence numbers,
     337   1.1  riastrad  *	which wrap around, this means the sequence number of fence a
     338   1.1  riastrad  *	is no more than INT_MAX past the sequence number of fence b;
     339   1.1  riastrad  *	64-bit sequence numbers are compared directly.
    340   1.1  riastrad  *
     341  1.38  riastrad  *	The two fences must have the same context.  Whether sequence
     342  1.38  riastrad  *	numbers are 32-bit or 64-bit is determined by a's ops.
    343   1.1  riastrad  */
    344   1.1  riastrad bool
    345   1.2  riastrad dma_fence_is_later(struct dma_fence *a, struct dma_fence *b)
    346   1.1  riastrad {
    347   1.1  riastrad 
    348  1.24  riastrad 	KASSERTMSG(a->f_magic != FENCE_MAGIC_BAD, "fence %p", a);
    349  1.24  riastrad 	KASSERTMSG(a->f_magic == FENCE_MAGIC_GOOD, "fence %p", a);
    350  1.24  riastrad 	KASSERTMSG(b->f_magic != FENCE_MAGIC_BAD, "fence %p", b);
    351  1.24  riastrad 	KASSERTMSG(b->f_magic == FENCE_MAGIC_GOOD, "fence %p", b);
    352   1.1  riastrad 	KASSERTMSG(a->context == b->context, "incommensurate fences"
    353  1.38  riastrad 	    ": %"PRIu64" @ %p =/= %"PRIu64" @ %p",
    354  1.38  riastrad 	    a->context, a, b->context, b);
    355   1.1  riastrad 
    356  1.38  riastrad 	return __dma_fence_is_later(a->seqno, b->seqno, a->ops);
    357   1.1  riastrad }
    358   1.1  riastrad 
    359   1.1  riastrad /*
    360   1.9  riastrad  * dma_fence_get_stub()
    361   1.9  riastrad  *
    362   1.9  riastrad  *	Return a dma fence that is always already signalled.
    363   1.9  riastrad  */
    364   1.9  riastrad struct dma_fence *
    365   1.9  riastrad dma_fence_get_stub(void)
    366   1.9  riastrad {
    367   1.9  riastrad 
    368  1.42  riastrad 	return dma_fence_get(&dma_fence_stub.fence);
    369   1.9  riastrad }
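
/*
 * Example:
 *
 *	A sketch of one plausible use: a caller that must always hand
 *	out some fence, even when there is no outstanding work, can
 *	return the stub instead of NULL (mydrv_job as in the sketch
 *	after dma_fence_init):
 *
 *		struct dma_fence *f;
 *
 *		f = job ? dma_fence_get(&job->mj_fence)
 *		    : dma_fence_get_stub();
 *		...
 *		dma_fence_put(f);
 */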
    370   1.9  riastrad 
    371   1.9  riastrad /*
    372   1.2  riastrad  * dma_fence_get(fence)
    373   1.1  riastrad  *
    374  1.26  riastrad  *	Acquire a reference to fence and return it, or return NULL if
    375  1.26  riastrad  *	fence is NULL.  The fence, if nonnull, must not be being
    376  1.26  riastrad  *	destroyed.
    377   1.1  riastrad  */
    378   1.2  riastrad struct dma_fence *
    379   1.2  riastrad dma_fence_get(struct dma_fence *fence)
    380   1.1  riastrad {
    381   1.1  riastrad 
    382  1.26  riastrad 	if (fence == NULL)
    383  1.26  riastrad 		return NULL;
    384  1.26  riastrad 
    385  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
    386  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
    387  1.24  riastrad 
    388  1.26  riastrad 	kref_get(&fence->refcount);
    389   1.1  riastrad 	return fence;
    390   1.1  riastrad }
    391   1.1  riastrad 
    392   1.1  riastrad /*
    393   1.2  riastrad  * dma_fence_get_rcu(fence)
    394   1.1  riastrad  *
    395   1.1  riastrad  *	Attempt to acquire a reference to a fence that may be about to
    396   1.1  riastrad  *	be destroyed, during a read section.  Return the fence on
    397  1.26  riastrad  *	success, or NULL on failure.  The fence must be nonnull.
    398   1.1  riastrad  */
    399   1.2  riastrad struct dma_fence *
    400   1.2  riastrad dma_fence_get_rcu(struct dma_fence *fence)
    401   1.1  riastrad {
    402   1.1  riastrad 
    403   1.8  riastrad 	__insn_barrier();
    404  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
    405  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
    406   1.1  riastrad 	if (!kref_get_unless_zero(&fence->refcount))
    407   1.1  riastrad 		return NULL;
    408   1.1  riastrad 	return fence;
    409   1.1  riastrad }
    410   1.1  riastrad 
    411   1.3  riastrad /*
    412   1.3  riastrad  * dma_fence_get_rcu_safe(fencep)
    413   1.3  riastrad  *
    414   1.3  riastrad  *	Attempt to acquire a reference to the fence *fencep, which may
    415   1.3  riastrad  *	be about to be destroyed, during a read section.  If the value
    416   1.3  riastrad  *	of *fencep changes after we read *fencep but before we
    417   1.3  riastrad  *	increment its reference count, retry.  Return *fencep on
    418   1.3  riastrad  *	success, or NULL on failure.
    419   1.3  riastrad  */
    420   1.3  riastrad struct dma_fence *
    421   1.7  riastrad dma_fence_get_rcu_safe(struct dma_fence *volatile const *fencep)
    422   1.3  riastrad {
    423  1.39  riastrad 	struct dma_fence *fence;
    424   1.3  riastrad 
    425   1.3  riastrad retry:
    426  1.39  riastrad 	/*
    427  1.39  riastrad 	 * Load the fence, ensuring we observe the fully initialized
    428  1.39  riastrad 	 * content.
    429  1.39  riastrad 	 */
    430  1.39  riastrad 	if ((fence = atomic_load_consume(fencep)) == NULL)
    431   1.3  riastrad 		return NULL;
    432   1.3  riastrad 
    433   1.3  riastrad 	/* Try to acquire a reference.  If we can't, try again.  */
    434   1.3  riastrad 	if (!dma_fence_get_rcu(fence))
    435   1.3  riastrad 		goto retry;
    436   1.3  riastrad 
    437   1.3  riastrad 	/*
    438   1.3  riastrad 	 * Confirm that it's still the same fence.  If not, release it
    439   1.3  riastrad 	 * and retry.
    440   1.3  riastrad 	 */
    441  1.39  riastrad 	if (fence != atomic_load_relaxed(fencep)) {
    442   1.3  riastrad 		dma_fence_put(fence);
    443   1.3  riastrad 		goto retry;
    444   1.3  riastrad 	}
    445   1.3  riastrad 
    446   1.3  riastrad 	/* Success!  */
    447  1.24  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    448   1.3  riastrad 	return fence;
    449   1.3  riastrad }
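
/*
 * Example:
 *
 *	A sketch of reading a fence pointer that writers publish with
 *	rcu_assign_pointer or atomic_store_release, assuming a
 *	hypothetical object obj with such a member o_fence:
 *
 *		struct dma_fence *f;
 *
 *		rcu_read_lock();
 *		f = dma_fence_get_rcu_safe(&obj->o_fence);
 *		rcu_read_unlock();
 *
 *		if (f != NULL) {
 *			... use f ...
 *			dma_fence_put(f);
 *		}
 *
 *	The retry loop above makes this safe even if a writer replaces
 *	obj->o_fence and releases the old fence while we are looking at
 *	it, provided the old fence is freed via dma_fence_free so that
 *	its memory outlives our read section.
 */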
    450   1.3  riastrad 
    451   1.1  riastrad static void
    452   1.2  riastrad dma_fence_release(struct kref *refcount)
    453   1.1  riastrad {
    454   1.2  riastrad 	struct dma_fence *fence = container_of(refcount, struct dma_fence,
    455   1.2  riastrad 	    refcount);
    456   1.1  riastrad 
    457  1.23  riastrad 	KASSERTMSG(TAILQ_EMPTY(&fence->f_callbacks),
    458  1.23  riastrad 	    "fence %p has pending callbacks", fence);
    459   1.2  riastrad 	KASSERT(!dma_fence_referenced_p(fence));
    460   1.1  riastrad 
    461  1.36  riastrad 	SDT_PROBE1(sdt, drm, fence, release,  fence);
    462  1.36  riastrad 
    463   1.1  riastrad 	if (fence->ops->release)
    464   1.1  riastrad 		(*fence->ops->release)(fence);
    465   1.1  riastrad 	else
    466   1.2  riastrad 		dma_fence_free(fence);
    467   1.1  riastrad }
    468   1.1  riastrad 
    469   1.1  riastrad /*
    470   1.2  riastrad  * dma_fence_put(fence)
    471   1.1  riastrad  *
    472   1.1  riastrad  *	Release a reference to fence.  If this was the last one, call
    473   1.1  riastrad  *	the fence's release callback.
    474   1.1  riastrad  */
    475   1.1  riastrad void
    476   1.2  riastrad dma_fence_put(struct dma_fence *fence)
    477   1.1  riastrad {
    478   1.1  riastrad 
    479   1.1  riastrad 	if (fence == NULL)
    480   1.1  riastrad 		return;
    481   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    482   1.2  riastrad 	kref_put(&fence->refcount, &dma_fence_release);
    483   1.1  riastrad }
    484   1.1  riastrad 
    485   1.1  riastrad /*
    486   1.2  riastrad  * dma_fence_ensure_signal_enabled(fence)
    487   1.1  riastrad  *
    488   1.1  riastrad  *	Internal subroutine.  If the fence was already signalled,
    489   1.1  riastrad  *	return -ENOENT.  Otherwise, if the enable signalling callback
     490   1.1  riastrad  *	has not been called yet, call it.  If it fails, signal the fence
    491   1.1  riastrad  *	and return -ENOENT.  If it succeeds, or if it had already been
    492   1.1  riastrad  *	called, return zero to indicate success.
    493   1.1  riastrad  *
    494   1.1  riastrad  *	Caller must hold the fence's lock.
    495   1.1  riastrad  */
    496   1.1  riastrad static int
    497   1.2  riastrad dma_fence_ensure_signal_enabled(struct dma_fence *fence)
    498   1.1  riastrad {
    499  1.20  riastrad 	bool already_enabled;
    500   1.1  riastrad 
    501   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    502   1.1  riastrad 	KASSERT(spin_is_locked(fence->lock));
    503   1.1  riastrad 
    504  1.20  riastrad 	/* Determine whether signalling was enabled, and enable it.  */
    505  1.20  riastrad 	already_enabled = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
    506  1.20  riastrad 	    &fence->flags);
    507  1.20  riastrad 
    508   1.1  riastrad 	/* If the fence was already signalled, fail with -ENOENT.  */
    509   1.2  riastrad 	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
    510   1.1  riastrad 		return -ENOENT;
    511   1.1  riastrad 
    512   1.1  riastrad 	/*
    513  1.20  riastrad 	 * Otherwise, if it wasn't enabled yet, try to enable
    514  1.35  riastrad 	 * signalling.
    515   1.1  riastrad 	 */
    516  1.36  riastrad 	if (!already_enabled && fence->ops->enable_signaling) {
    517  1.36  riastrad 		SDT_PROBE1(sdt, drm, fence, enable_signaling,  fence);
    518  1.36  riastrad 		if (!(*fence->ops->enable_signaling)(fence)) {
    519  1.36  riastrad 			/* If it failed, signal and return -ENOENT.  */
    520  1.36  riastrad 			dma_fence_signal_locked(fence);
    521  1.36  riastrad 			return -ENOENT;
    522  1.36  riastrad 		}
    523   1.1  riastrad 	}
    524   1.1  riastrad 
    525   1.1  riastrad 	/* Success!  */
    526   1.1  riastrad 	return 0;
    527   1.1  riastrad }
    528   1.1  riastrad 
    529   1.1  riastrad /*
    530   1.2  riastrad  * dma_fence_add_callback(fence, fcb, fn)
    531   1.1  riastrad  *
    532   1.1  riastrad  *	If fence has been signalled, return -ENOENT.  If the enable
    533   1.1  riastrad  *	signalling callback hasn't been called yet, call it; if it
    534   1.1  riastrad  *	fails, return -ENOENT.  Otherwise, arrange to call fn(fence,
    535   1.1  riastrad  *	fcb) when it is signalled, and return 0.
    536   1.1  riastrad  *
    537   1.1  riastrad  *	The fence uses memory allocated by the caller in fcb from the
    538   1.2  riastrad  *	time of dma_fence_add_callback either to the time of
    539   1.2  riastrad  *	dma_fence_remove_callback, or just before calling fn.
    540   1.1  riastrad  */
    541   1.1  riastrad int
    542   1.2  riastrad dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *fcb,
    543   1.2  riastrad     dma_fence_func_t fn)
    544   1.1  riastrad {
    545   1.1  riastrad 	int ret;
    546   1.1  riastrad 
    547   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    548   1.1  riastrad 
    549   1.1  riastrad 	/* Optimistically try to skip the lock if it's already signalled.  */
    550  1.34  riastrad 	if (atomic_load_relaxed(&fence->flags) &
    551  1.34  riastrad 	    (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
    552   1.1  riastrad 		ret = -ENOENT;
    553   1.1  riastrad 		goto out0;
    554   1.1  riastrad 	}
    555   1.1  riastrad 
    556   1.1  riastrad 	/* Acquire the lock.  */
    557   1.1  riastrad 	spin_lock(fence->lock);
    558   1.1  riastrad 
    559   1.1  riastrad 	/* Ensure signalling is enabled, or fail if we can't.  */
    560   1.2  riastrad 	ret = dma_fence_ensure_signal_enabled(fence);
    561   1.1  riastrad 	if (ret)
    562   1.1  riastrad 		goto out1;
    563   1.1  riastrad 
    564   1.1  riastrad 	/* Insert the callback.  */
    565  1.36  riastrad 	SDT_PROBE2(sdt, drm, fence, add_callback,  fence, fcb);
    566   1.4  riastrad 	fcb->func = fn;
    567   1.1  riastrad 	TAILQ_INSERT_TAIL(&fence->f_callbacks, fcb, fcb_entry);
    568   1.1  riastrad 	fcb->fcb_onqueue = true;
    569  1.21  riastrad 	ret = 0;
    570   1.1  riastrad 
    571   1.1  riastrad 	/* Release the lock and we're done.  */
    572   1.1  riastrad out1:	spin_unlock(fence->lock);
    573  1.21  riastrad out0:	if (ret) {
    574  1.21  riastrad 		fcb->func = NULL;
    575  1.21  riastrad 		fcb->fcb_onqueue = false;
    576  1.21  riastrad 	}
    577  1.21  riastrad 	return ret;
    578   1.1  riastrad }
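
/*
 * Example:
 *
 *	A sketch of completion notification without blocking, reusing
 *	the hypothetical mydrv_job from the dma_fence_init example and
 *	giving it an additional struct dma_fence_cb member mj_cb:
 *
 *		static void
 *		mydrv_job_done(struct dma_fence *f, struct dma_fence_cb *cb)
 *		{
 *			struct mydrv_job *job = container_of(cb,
 *			    struct mydrv_job, mj_cb);
 *
 *			... hand job off to a completion queue ...
 *		}
 *
 *		...
 *		error = dma_fence_add_callback(fence, &job->mj_cb,
 *		    mydrv_job_done);
 *		if (error == -ENOENT)
 *			... already signalled -- complete the job here ...
 *
 *	Note that mydrv_job_done may run from the context that signals
 *	the fence, with the fence's lock held, so it must not sleep.
 */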
    579   1.1  riastrad 
    580   1.1  riastrad /*
    581   1.2  riastrad  * dma_fence_remove_callback(fence, fcb)
    582   1.1  riastrad  *
    583   1.1  riastrad  *	Remove the callback fcb from fence.  Return true if it was
    584   1.1  riastrad  *	removed from the list, or false if it had already run and so
    585   1.1  riastrad  *	was no longer queued anyway.  Caller must have already called
    586   1.2  riastrad  *	dma_fence_add_callback(fence, fcb).
    587   1.1  riastrad  */
    588   1.1  riastrad bool
    589   1.2  riastrad dma_fence_remove_callback(struct dma_fence *fence, struct dma_fence_cb *fcb)
    590   1.1  riastrad {
    591   1.1  riastrad 	bool onqueue;
    592   1.1  riastrad 
    593   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    594   1.1  riastrad 
    595   1.1  riastrad 	spin_lock(fence->lock);
    596   1.1  riastrad 	onqueue = fcb->fcb_onqueue;
    597   1.1  riastrad 	if (onqueue) {
    598  1.36  riastrad 		SDT_PROBE2(sdt, drm, fence, remove_callback,  fence, fcb);
    599   1.1  riastrad 		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
    600   1.1  riastrad 		fcb->fcb_onqueue = false;
    601   1.1  riastrad 	}
    602   1.1  riastrad 	spin_unlock(fence->lock);
    603   1.1  riastrad 
    604   1.1  riastrad 	return onqueue;
    605   1.1  riastrad }
    606   1.1  riastrad 
    607   1.1  riastrad /*
    608   1.2  riastrad  * dma_fence_enable_sw_signaling(fence)
    609   1.1  riastrad  *
    610   1.1  riastrad  *	If it hasn't been called yet and the fence hasn't been
     611   1.1  riastrad  *	signalled yet, call the fence's enable_signaling callback.
     612   1.1  riastrad  *	If, when that happens, the callback indicates failure by
    613   1.1  riastrad  *	returning false, signal the fence.
    614   1.1  riastrad  */
    615   1.1  riastrad void
    616   1.2  riastrad dma_fence_enable_sw_signaling(struct dma_fence *fence)
    617   1.1  riastrad {
    618   1.1  riastrad 
    619   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    620   1.1  riastrad 
    621   1.1  riastrad 	spin_lock(fence->lock);
    622  1.22  riastrad 	if ((fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0)
    623  1.22  riastrad 		(void)dma_fence_ensure_signal_enabled(fence);
    624   1.1  riastrad 	spin_unlock(fence->lock);
    625   1.1  riastrad }
    626   1.1  riastrad 
    627   1.1  riastrad /*
    628   1.2  riastrad  * dma_fence_is_signaled(fence)
    629   1.1  riastrad  *
    630   1.1  riastrad  *	Test whether the fence has been signalled.  If it has been
    631   1.2  riastrad  *	signalled by dma_fence_signal(_locked), return true.  If the
    632   1.1  riastrad  *	signalled callback returns true indicating that some implicit
    633   1.1  riastrad  *	external condition has changed, call the callbacks as if with
    634   1.2  riastrad  *	dma_fence_signal.
    635   1.1  riastrad  */
    636   1.1  riastrad bool
    637   1.2  riastrad dma_fence_is_signaled(struct dma_fence *fence)
    638   1.1  riastrad {
    639   1.1  riastrad 	bool signaled;
    640   1.1  riastrad 
    641   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    642   1.1  riastrad 
    643   1.1  riastrad 	spin_lock(fence->lock);
    644   1.2  riastrad 	signaled = dma_fence_is_signaled_locked(fence);
    645   1.1  riastrad 	spin_unlock(fence->lock);
    646   1.1  riastrad 
    647   1.1  riastrad 	return signaled;
    648   1.1  riastrad }
    649   1.1  riastrad 
    650   1.1  riastrad /*
    651   1.2  riastrad  * dma_fence_is_signaled_locked(fence)
    652   1.1  riastrad  *
    653   1.1  riastrad  *	Test whether the fence has been signalled.  Like
     654   1.2  riastrad  *	dma_fence_is_signaled, but caller already holds the fence's lock.
    655   1.1  riastrad  */
    656   1.1  riastrad bool
    657   1.2  riastrad dma_fence_is_signaled_locked(struct dma_fence *fence)
    658   1.1  riastrad {
    659   1.1  riastrad 
    660   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    661   1.1  riastrad 	KASSERT(spin_is_locked(fence->lock));
    662   1.1  riastrad 
    663   1.1  riastrad 	/* Check whether we already set the signalled bit.  */
    664   1.2  riastrad 	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
    665   1.1  riastrad 		return true;
    666   1.1  riastrad 
    667   1.1  riastrad 	/* If there's a signalled callback, test it.  */
    668   1.1  riastrad 	if (fence->ops->signaled) {
    669  1.36  riastrad 		SDT_PROBE1(sdt, drm, fence, test,  fence);
    670   1.1  riastrad 		if ((*fence->ops->signaled)(fence)) {
    671   1.1  riastrad 			/*
    672   1.1  riastrad 			 * It's been signalled implicitly by some
     673   1.1  riastrad 			 * external phenomenon.  Act as though someone
    674   1.2  riastrad 			 * has called dma_fence_signal.
    675   1.1  riastrad 			 */
    676   1.2  riastrad 			dma_fence_signal_locked(fence);
    677   1.1  riastrad 			return true;
    678   1.1  riastrad 		}
    679   1.1  riastrad 	}
    680   1.1  riastrad 
    681   1.1  riastrad 	return false;
    682   1.1  riastrad }
    683   1.1  riastrad 
    684   1.1  riastrad /*
    685   1.5  riastrad  * dma_fence_set_error(fence, error)
    686   1.5  riastrad  *
    687   1.5  riastrad  *	Set an error code prior to dma_fence_signal for use by a
    688   1.5  riastrad  *	waiter to learn about success or failure of the fence.
    689   1.5  riastrad  */
    690   1.5  riastrad void
    691   1.5  riastrad dma_fence_set_error(struct dma_fence *fence, int error)
    692   1.5  riastrad {
    693   1.5  riastrad 
    694  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
    695  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
    696  1.34  riastrad 	KASSERT((atomic_load_relaxed(&fence->flags) &
    697  1.34  riastrad 		(1u << DMA_FENCE_FLAG_SIGNALED_BIT)) == 0);
    698   1.6  riastrad 	KASSERTMSG(error >= -ELAST, "%d", error);
    699   1.5  riastrad 	KASSERTMSG(error < 0, "%d", error);
    700   1.5  riastrad 
    701  1.36  riastrad 	SDT_PROBE2(sdt, drm, fence, set_error,  fence, error);
    702   1.5  riastrad 	fence->error = error;
    703   1.5  riastrad }
    704   1.5  riastrad 
    705   1.5  riastrad /*
    706  1.10  riastrad  * dma_fence_get_status(fence)
    707  1.10  riastrad  *
    708  1.10  riastrad  *	Return 0 if fence has yet to be signalled, 1 if it has been
    709  1.10  riastrad  *	signalled without error, or negative error code if
    710  1.10  riastrad  *	dma_fence_set_error was used.
    711  1.10  riastrad  */
    712  1.10  riastrad int
    713  1.10  riastrad dma_fence_get_status(struct dma_fence *fence)
    714  1.10  riastrad {
    715  1.10  riastrad 	int ret;
    716  1.10  riastrad 
    717  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
    718  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
    719  1.24  riastrad 
    720  1.10  riastrad 	spin_lock(fence->lock);
    721  1.10  riastrad 	if (!dma_fence_is_signaled_locked(fence)) {
    722  1.10  riastrad 		ret = 0;
    723  1.10  riastrad 	} else if (fence->error) {
    724  1.10  riastrad 		ret = fence->error;
    725  1.10  riastrad 		KASSERTMSG(ret < 0, "%d", ret);
    726  1.10  riastrad 	} else {
    727  1.10  riastrad 		ret = 1;
    728  1.10  riastrad 	}
    729  1.10  riastrad 	spin_unlock(fence->lock);
    730  1.10  riastrad 
    731  1.10  riastrad 	return ret;
    732  1.10  riastrad }
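
/*
 * Example:
 *
 *	A sketch of how an error propagates to waiters, assuming a
 *	fence whose job had to be aborted:
 *
 *		dma_fence_set_error(fence, -EIO);
 *		dma_fence_signal(fence);
 *
 *	Afterwards dma_fence_get_status(fence) returns -EIO rather than
 *	1, while dma_fence_is_signaled(fence) still returns true, so
 *	waiters that care about success must check the status
 *	explicitly.  The error must be set before the fence is
 *	signalled.
 */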
    733  1.10  riastrad 
    734  1.10  riastrad /*
    735   1.2  riastrad  * dma_fence_signal(fence)
    736   1.1  riastrad  *
    737   1.1  riastrad  *	Signal the fence.  If it has already been signalled, return
    738   1.1  riastrad  *	-EINVAL.  If it has not been signalled, call the enable
    739   1.1  riastrad  *	signalling callback if it hasn't been called yet, and remove
    740   1.1  riastrad  *	each registered callback from the queue and call it; then
    741   1.1  riastrad  *	return 0.
    742   1.1  riastrad  */
    743   1.1  riastrad int
    744   1.2  riastrad dma_fence_signal(struct dma_fence *fence)
    745   1.1  riastrad {
    746   1.1  riastrad 	int ret;
    747   1.1  riastrad 
    748   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    749   1.1  riastrad 
    750   1.1  riastrad 	spin_lock(fence->lock);
    751   1.2  riastrad 	ret = dma_fence_signal_locked(fence);
    752   1.1  riastrad 	spin_unlock(fence->lock);
    753   1.1  riastrad 
    754   1.1  riastrad 	return ret;
    755   1.1  riastrad }
    756   1.1  riastrad 
    757   1.1  riastrad /*
    758   1.2  riastrad  * dma_fence_signal_locked(fence)
    759   1.1  riastrad  *
    760   1.2  riastrad  *	Signal the fence.  Like dma_fence_signal, but caller already
    761   1.2  riastrad  *	holds the fence's lock.
    762   1.1  riastrad  */
    763   1.1  riastrad int
    764   1.2  riastrad dma_fence_signal_locked(struct dma_fence *fence)
    765   1.1  riastrad {
    766   1.2  riastrad 	struct dma_fence_cb *fcb, *next;
    767   1.1  riastrad 
    768   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    769   1.1  riastrad 	KASSERT(spin_is_locked(fence->lock));
    770   1.1  riastrad 
    771   1.1  riastrad 	/* If it's been signalled, fail; otherwise set the signalled bit.  */
    772   1.2  riastrad 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
    773   1.1  riastrad 		return -EINVAL;
    774   1.1  riastrad 
    775  1.36  riastrad 	SDT_PROBE1(sdt, drm, fence, signal,  fence);
    776  1.36  riastrad 
    777  1.25  riastrad 	/* Set the timestamp.  */
    778  1.25  riastrad 	fence->timestamp = ktime_get();
    779  1.25  riastrad 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
    780  1.25  riastrad 
    781   1.1  riastrad 	/* Wake waiters.  */
    782   1.1  riastrad 	cv_broadcast(&fence->f_cv);
    783   1.1  riastrad 
    784   1.1  riastrad 	/* Remove and call the callbacks.  */
    785   1.1  riastrad 	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
    786  1.36  riastrad 		SDT_PROBE2(sdt, drm, fence, callback,  fence, fcb);
    787   1.1  riastrad 		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
    788   1.1  riastrad 		fcb->fcb_onqueue = false;
    789   1.4  riastrad 		(*fcb->func)(fence, fcb);
    790   1.1  riastrad 	}
    791   1.1  riastrad 
    792   1.1  riastrad 	/* Success! */
    793   1.1  riastrad 	return 0;
    794   1.1  riastrad }
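
/*
 * Example:
 *
 *	A sketch of signalling from a completion path that already
 *	holds the fence's lock -- say a hypothetical interrupt handler
 *	that uses the job's lock (the one passed to dma_fence_init) for
 *	its own bookkeeping as well:
 *
 *		spin_lock(&job->mj_lock);
 *		... record completion status ...
 *		if (aborted)
 *			dma_fence_set_error(&job->mj_fence, -EIO);
 *		dma_fence_signal_locked(&job->mj_fence);
 *		spin_unlock(&job->mj_lock);
 *
 *	A path that does not hold the lock would call dma_fence_signal
 *	instead, which takes and drops the lock internally.
 */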
    795   1.1  riastrad 
    796   1.1  riastrad struct wait_any {
    797   1.2  riastrad 	struct dma_fence_cb	fcb;
    798   1.1  riastrad 	struct wait_any1 {
    799   1.1  riastrad 		kmutex_t	lock;
    800   1.1  riastrad 		kcondvar_t	cv;
    801  1.31  riastrad 		struct wait_any	*cb;
    802   1.1  riastrad 		bool		done;
    803   1.1  riastrad 	}		*common;
    804   1.1  riastrad };
    805   1.1  riastrad 
    806   1.1  riastrad static void
    807   1.2  riastrad wait_any_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
    808   1.1  riastrad {
    809   1.1  riastrad 	struct wait_any *cb = container_of(fcb, struct wait_any, fcb);
    810   1.1  riastrad 
    811   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    812   1.1  riastrad 
    813   1.1  riastrad 	mutex_enter(&cb->common->lock);
    814   1.1  riastrad 	cb->common->done = true;
    815   1.1  riastrad 	cv_broadcast(&cb->common->cv);
    816   1.1  riastrad 	mutex_exit(&cb->common->lock);
    817   1.1  riastrad }
    818   1.1  riastrad 
    819   1.1  riastrad /*
    820  1.11  riastrad  * dma_fence_wait_any_timeout(fence, nfences, intr, timeout, ip)
     821  1.11  riastrad  * dma_fence_wait_any_timeout(fences, nfences, intr, timeout, ip)
    822   1.1  riastrad  *	Wait for any of fences[0], fences[1], fences[2], ...,
    823  1.13  riastrad  *	fences[nfences-1] to be signalled.  If ip is nonnull, set *ip
    824  1.13  riastrad  *	to the index of the first one.
    825  1.31  riastrad  *
     826  1.31  riastrad  *	Return -ERESTARTSYS if interrupted, -ENOMEM if out of memory,
     827  1.31  riastrad  *	0 on timeout, or time remaining (at least 1) on success.
    828   1.1  riastrad  */
    829   1.1  riastrad long
    830   1.2  riastrad dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    831  1.11  riastrad     bool intr, long timeout, uint32_t *ip)
    832   1.1  riastrad {
    833   1.1  riastrad 	struct wait_any1 common;
    834   1.1  riastrad 	struct wait_any *cb;
    835   1.1  riastrad 	uint32_t i, j;
    836   1.1  riastrad 	int start, end;
    837   1.1  riastrad 	long ret = 0;
    838   1.1  riastrad 
    839  1.32  riastrad 	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
    840  1.32  riastrad 	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);
    841  1.32  riastrad 
    842  1.31  riastrad 	/* Optimistically check whether any are signalled.  */
    843  1.31  riastrad 	for (i = 0; i < nfences; i++) {
    844  1.32  riastrad 		KASSERT(dma_fence_referenced_p(fences[i]));
    845  1.31  riastrad 		if (dma_fence_is_signaled(fences[i])) {
    846  1.31  riastrad 			if (ip)
    847  1.31  riastrad 				*ip = i;
    848  1.31  riastrad 			return MAX(1, timeout);
    849  1.31  riastrad 		}
    850  1.31  riastrad 	}
    851  1.31  riastrad 
    852  1.31  riastrad 	/*
    853  1.31  riastrad 	 * If timeout is zero, we're just polling, so stop here as if
    854  1.31  riastrad 	 * we timed out instantly.
    855  1.31  riastrad 	 */
    856  1.31  riastrad 	if (timeout == 0)
    857  1.31  riastrad 		return 0;
    858  1.31  riastrad 
    859   1.1  riastrad 	/* Allocate an array of callback records.  */
    860   1.1  riastrad 	cb = kcalloc(nfences, sizeof(cb[0]), GFP_KERNEL);
    861  1.32  riastrad 	if (cb == NULL)
    862  1.32  riastrad 		return -ENOMEM;
    863   1.1  riastrad 
    864   1.1  riastrad 	/* Initialize a mutex and condvar for the common wait.  */
    865   1.1  riastrad 	mutex_init(&common.lock, MUTEX_DEFAULT, IPL_VM);
    866   1.1  riastrad 	cv_init(&common.cv, "fence");
    867  1.31  riastrad 	common.cb = cb;
    868   1.1  riastrad 	common.done = false;
    869   1.1  riastrad 
    870  1.31  riastrad 	/*
    871  1.31  riastrad 	 * Add a callback to each of the fences, or stop if already
    872  1.31  riastrad 	 * signalled.
    873  1.31  riastrad 	 */
    874   1.1  riastrad 	for (i = 0; i < nfences; i++) {
    875   1.1  riastrad 		cb[i].common = &common;
    876   1.2  riastrad 		KASSERT(dma_fence_referenced_p(fences[i]));
    877   1.2  riastrad 		ret = dma_fence_add_callback(fences[i], &cb[i].fcb,
    878   1.2  riastrad 		    &wait_any_cb);
    879  1.31  riastrad 		if (ret) {
    880  1.31  riastrad 			KASSERT(ret == -ENOENT);
    881  1.11  riastrad 			if (ip)
    882  1.31  riastrad 				*ip = i;
    883  1.31  riastrad 			ret = MAX(1, timeout);
    884  1.32  riastrad 			goto out;
    885  1.11  riastrad 		}
    886   1.1  riastrad 	}
    887   1.1  riastrad 
    888   1.1  riastrad 	/*
    889   1.1  riastrad 	 * None of them was ready immediately.  Wait for one of the
    890   1.1  riastrad 	 * callbacks to notify us when it is done.
    891   1.1  riastrad 	 */
    892   1.1  riastrad 	mutex_enter(&common.lock);
    893  1.32  riastrad 	while (!common.done) {
    894  1.32  riastrad 		/* Wait for the time remaining.  */
    895   1.1  riastrad 		start = getticks();
    896   1.1  riastrad 		if (intr) {
    897   1.1  riastrad 			if (timeout != MAX_SCHEDULE_TIMEOUT) {
    898   1.1  riastrad 				ret = -cv_timedwait_sig(&common.cv,
    899   1.1  riastrad 				    &common.lock, MIN(timeout, /* paranoia */
    900   1.1  riastrad 					MAX_SCHEDULE_TIMEOUT));
    901   1.1  riastrad 			} else {
    902   1.1  riastrad 				ret = -cv_wait_sig(&common.cv, &common.lock);
    903   1.1  riastrad 			}
    904   1.1  riastrad 		} else {
    905   1.1  riastrad 			if (timeout != MAX_SCHEDULE_TIMEOUT) {
    906   1.1  riastrad 				ret = -cv_timedwait(&common.cv,
    907   1.1  riastrad 				    &common.lock, MIN(timeout, /* paranoia */
    908   1.1  riastrad 					MAX_SCHEDULE_TIMEOUT));
    909   1.1  riastrad 			} else {
    910   1.1  riastrad 				cv_wait(&common.cv, &common.lock);
    911   1.1  riastrad 				ret = 0;
    912   1.1  riastrad 			}
    913   1.1  riastrad 		}
    914   1.1  riastrad 		end = getticks();
    915  1.32  riastrad 
    916  1.32  riastrad 		/* Deduct from time remaining.  If none left, time out.  */
    917  1.32  riastrad 		if (timeout != MAX_SCHEDULE_TIMEOUT) {
    918  1.32  riastrad 			timeout -= MIN(timeout,
    919  1.32  riastrad 			    (unsigned)end - (unsigned)start);
    920  1.32  riastrad 			if (timeout == 0)
    921  1.32  riastrad 				ret = -EWOULDBLOCK;
    922  1.32  riastrad 		}
    923  1.32  riastrad 
    924  1.32  riastrad 		/* If the wait failed, give up.  */
    925  1.31  riastrad 		if (ret)
    926   1.1  riastrad 			break;
    927   1.1  riastrad 	}
    928   1.1  riastrad 	mutex_exit(&common.lock);
    929   1.1  riastrad 
    930   1.1  riastrad 	/*
    931  1.32  riastrad 	 * Massage the return code if nonzero:
    932  1.32  riastrad 	 * - if we were interrupted, return -ERESTARTSYS;
    933  1.32  riastrad 	 * - if we timed out, return 0.
    934  1.32  riastrad 	 * No other failure is possible.  On success, ret=0 but we
    935  1.32  riastrad 	 * check again below to verify anyway.
    936  1.32  riastrad 	 */
    937  1.32  riastrad 	if (ret) {
    938  1.32  riastrad 		KASSERTMSG((ret == -EINTR || ret == -ERESTART ||
    939  1.32  riastrad 			ret == -EWOULDBLOCK), "ret=%ld", ret);
    940  1.32  riastrad 		if (ret == -EINTR || ret == -ERESTART) {
    941  1.32  riastrad 			ret = -ERESTARTSYS;
    942  1.32  riastrad 		} else if (ret == -EWOULDBLOCK) {
    943  1.32  riastrad 			KASSERT(timeout != MAX_SCHEDULE_TIMEOUT);
    944  1.32  riastrad 			ret = 0;	/* timed out */
    945  1.32  riastrad 		}
    946  1.32  riastrad 	}
    947  1.32  riastrad 
    948  1.32  riastrad 	KASSERT(ret != -ERESTART); /* would be confused with time left */
    949  1.32  riastrad 
    950  1.32  riastrad 	/*
    951  1.31  riastrad 	 * Test whether any of the fences has been signalled.  If they
    952  1.31  riastrad 	 * have, return success.
    953  1.31  riastrad 	 */
    954  1.31  riastrad 	for (j = 0; j < nfences; j++) {
     955  1.31  riastrad 		if (dma_fence_is_signaled(fences[j])) {
    956  1.31  riastrad 			if (ip)
    957  1.31  riastrad 				*ip = j;
    958  1.31  riastrad 			ret = MAX(1, timeout);
    959  1.32  riastrad 			goto out;
    960  1.31  riastrad 		}
    961  1.31  riastrad 	}
    962  1.31  riastrad 
    963  1.31  riastrad 	/*
    964  1.32  riastrad 	 * If user passed MAX_SCHEDULE_TIMEOUT, we can't return 0
    965  1.32  riastrad 	 * meaning timed out because we're supposed to wait forever.
    966   1.1  riastrad 	 */
    967  1.32  riastrad 	KASSERT(timeout == MAX_SCHEDULE_TIMEOUT ? ret != 0 : 1);
    968   1.1  riastrad 
    969  1.32  riastrad out:	while (i --> 0)
    970   1.2  riastrad 		(void)dma_fence_remove_callback(fences[i], &cb[i].fcb);
    971   1.1  riastrad 	cv_destroy(&common.cv);
    972   1.1  riastrad 	mutex_destroy(&common.lock);
    973   1.1  riastrad 	kfree(cb);
    974  1.32  riastrad 	return ret;
    975   1.1  riastrad }
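
/*
 * Example:
 *
 *	A sketch of waiting up to roughly a second (hz ticks) for
 *	whichever of two fences (a and b, hypothetical) signals first:
 *
 *		struct dma_fence *fences[2] = { a, b };
 *		uint32_t which;
 *		long ret;
 *
 *		ret = dma_fence_wait_any_timeout(fences, 2, true, hz,
 *		    &which);
 *		if (ret > 0)
 *			... fences[which] has signalled ...
 *		else if (ret == 0)
 *			... neither signalled within hz ticks ...
 *		else
 *			... -ERESTARTSYS or -ENOMEM ...
 */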
    976   1.1  riastrad 
    977   1.1  riastrad /*
    978   1.2  riastrad  * dma_fence_wait_timeout(fence, intr, timeout)
    979   1.1  riastrad  *
    980   1.1  riastrad  *	Wait until fence is signalled; or until interrupt, if intr is
    981   1.1  riastrad  *	true; or until timeout, if positive.  Return -ERESTARTSYS if
    982   1.1  riastrad  *	interrupted, negative error code on any other error, zero on
    983   1.1  riastrad  *	timeout, or positive number of ticks remaining if the fence is
    984   1.1  riastrad  *	signalled before the timeout.  Works by calling the fence wait
    985   1.1  riastrad  *	callback.
    986   1.1  riastrad  *
    987  1.28  riastrad  *	The timeout must be nonnegative and at most
    988  1.28  riastrad  *	MAX_SCHEDULE_TIMEOUT, which means wait indefinitely.
    989   1.1  riastrad  */
    990   1.1  riastrad long
    991   1.2  riastrad dma_fence_wait_timeout(struct dma_fence *fence, bool intr, long timeout)
    992   1.1  riastrad {
    993  1.36  riastrad 	long ret;
    994   1.1  riastrad 
    995   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
    996  1.27  riastrad 	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
    997  1.28  riastrad 	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);
    998   1.1  riastrad 
    999  1.36  riastrad 	SDT_PROBE3(sdt, drm, fence, wait_start,  fence, intr, timeout);
   1000  1.14  riastrad 	if (fence->ops->wait)
   1001  1.36  riastrad 		ret = (*fence->ops->wait)(fence, intr, timeout);
   1002  1.14  riastrad 	else
   1003  1.36  riastrad 		ret = dma_fence_default_wait(fence, intr, timeout);
   1004  1.36  riastrad 	SDT_PROBE2(sdt, drm, fence, wait_done,  fence, ret);
   1005  1.36  riastrad 
   1006  1.36  riastrad 	return ret;
   1007   1.1  riastrad }
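
/*
 * Example:
 *
 *	A sketch of a bounded, interruptible wait of roughly one second
 *	(the timeout is in ticks, so hz of them):
 *
 *		long ret;
 *
 *		ret = dma_fence_wait_timeout(fence, true, hz);
 *		if (ret == -ERESTARTSYS)
 *			... interrupted by a signal ...
 *		else if (ret < 0)
 *			... some other driver-specific error ...
 *		else if (ret == 0)
 *			... timed out ...
 *		else
 *			... signalled, with ret ticks of the timeout left ...
 */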
   1008   1.1  riastrad 
   1009   1.1  riastrad /*
   1010   1.2  riastrad  * dma_fence_wait(fence, intr)
   1011   1.1  riastrad  *
   1012   1.1  riastrad  *	Wait until fence is signalled; or until interrupt, if intr is
   1013   1.1  riastrad  *	true.  Return -ERESTARTSYS if interrupted, negative error code
    1014   1.1  riastrad  *	on any other error, zero on success.  Works by calling the fence
   1015   1.1  riastrad  *	wait callback with MAX_SCHEDULE_TIMEOUT.
   1016   1.1  riastrad  */
   1017   1.1  riastrad long
   1018   1.2  riastrad dma_fence_wait(struct dma_fence *fence, bool intr)
   1019   1.1  riastrad {
   1020   1.1  riastrad 	long ret;
   1021   1.1  riastrad 
   1022   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
   1023   1.1  riastrad 
   1024  1.37  riastrad 	ret = dma_fence_wait_timeout(fence, intr, MAX_SCHEDULE_TIMEOUT);
   1025   1.1  riastrad 	KASSERT(ret != 0);
   1026  1.33  riastrad 	KASSERTMSG(ret == -ERESTARTSYS || ret == MAX_SCHEDULE_TIMEOUT,
   1027  1.33  riastrad 	    "ret=%ld", ret);
   1028   1.1  riastrad 
   1029   1.1  riastrad 	return (ret < 0 ? ret : 0);
   1030   1.1  riastrad }
   1031   1.1  riastrad 
   1032   1.1  riastrad /*
   1033   1.2  riastrad  * dma_fence_default_wait(fence, intr, timeout)
   1034   1.1  riastrad  *
   1035   1.1  riastrad  *	Default implementation of fence wait callback using a condition
   1036   1.1  riastrad  *	variable.  If the fence is already signalled, return timeout,
   1037  1.16  riastrad  *	or 1 if timeout is zero meaning poll.  If the enable signalling
   1038  1.16  riastrad  *	callback hasn't been called, call it, and if it fails, act as
   1039  1.16  riastrad  *	if the fence had been signalled.  Otherwise, wait on the
   1040  1.16  riastrad  *	internal condvar.  If timeout is MAX_SCHEDULE_TIMEOUT, wait
   1041  1.16  riastrad  *	indefinitely.
   1042   1.1  riastrad  */
   1043   1.1  riastrad long
   1044   1.2  riastrad dma_fence_default_wait(struct dma_fence *fence, bool intr, long timeout)
   1045   1.1  riastrad {
   1046   1.1  riastrad 	int starttime = 0, now = 0, deadline = 0; /* XXXGCC */
   1047   1.1  riastrad 	kmutex_t *lock = &fence->lock->sl_lock;
   1048   1.1  riastrad 	long ret = 0;
   1049   1.1  riastrad 
   1050   1.2  riastrad 	KASSERT(dma_fence_referenced_p(fence));
   1051   1.1  riastrad 	KASSERTMSG(timeout >= 0, "timeout %ld", timeout);
   1052   1.1  riastrad 	KASSERTMSG(timeout <= MAX_SCHEDULE_TIMEOUT, "timeout %ld", timeout);
   1053   1.1  riastrad 
   1054   1.1  riastrad 	/* Optimistically try to skip the lock if it's already signalled.  */
   1055  1.34  riastrad 	if (atomic_load_relaxed(&fence->flags) &
   1056  1.34  riastrad 	    (1u << DMA_FENCE_FLAG_SIGNALED_BIT))
   1057  1.32  riastrad 		return MAX(1, timeout);
   1058   1.1  riastrad 
   1059   1.1  riastrad 	/* Acquire the lock.  */
   1060   1.1  riastrad 	spin_lock(fence->lock);
   1061   1.1  riastrad 
   1062  1.16  riastrad 	/* Ensure signalling is enabled, or stop if already completed.  */
   1063  1.17  riastrad 	if (dma_fence_ensure_signal_enabled(fence) != 0) {
   1064  1.32  riastrad 		ret = MAX(1, timeout);
   1065  1.32  riastrad 		goto out;
   1066  1.17  riastrad 	}
   1067  1.16  riastrad 
   1068  1.16  riastrad 	/* If merely polling, stop here.  */
   1069  1.16  riastrad 	if (timeout == 0) {
   1070  1.32  riastrad 		ret = 0;
   1071  1.32  riastrad 		goto out;
   1072  1.16  riastrad 	}
   1073   1.1  riastrad 
   1074   1.1  riastrad 	/* Find out what our deadline is so we can handle spurious wakeup.  */
   1075   1.1  riastrad 	if (timeout < MAX_SCHEDULE_TIMEOUT) {
   1076   1.1  riastrad 		now = getticks();
   1077   1.1  riastrad 		starttime = now;
   1078   1.1  riastrad 		deadline = starttime + timeout;
   1079   1.1  riastrad 	}
   1080   1.1  riastrad 
   1081   1.1  riastrad 	/* Wait until the signalled bit is set.  */
   1082   1.2  riastrad 	while (!(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT))) {
   1083   1.1  riastrad 		/*
   1084   1.1  riastrad 		 * If there's a timeout and we've passed the deadline,
   1085   1.1  riastrad 		 * give up.
   1086   1.1  riastrad 		 */
   1087   1.1  riastrad 		if (timeout < MAX_SCHEDULE_TIMEOUT) {
   1088   1.1  riastrad 			now = getticks();
   1089  1.32  riastrad 			if (deadline <= now) {
   1090  1.32  riastrad 				ret = -EWOULDBLOCK;
   1091   1.1  riastrad 				break;
   1092  1.32  riastrad 			}
   1093   1.1  riastrad 		}
   1094  1.32  riastrad 
   1095  1.32  riastrad 		/* Wait for the time remaining.  */
   1096   1.1  riastrad 		if (intr) {
   1097   1.1  riastrad 			if (timeout < MAX_SCHEDULE_TIMEOUT) {
   1098   1.1  riastrad 				ret = -cv_timedwait_sig(&fence->f_cv, lock,
   1099   1.1  riastrad 				    deadline - now);
   1100   1.1  riastrad 			} else {
   1101   1.1  riastrad 				ret = -cv_wait_sig(&fence->f_cv, lock);
   1102   1.1  riastrad 			}
   1103   1.1  riastrad 		} else {
   1104   1.1  riastrad 			if (timeout < MAX_SCHEDULE_TIMEOUT) {
   1105   1.1  riastrad 				ret = -cv_timedwait(&fence->f_cv, lock,
   1106   1.1  riastrad 				    deadline - now);
   1107   1.1  riastrad 			} else {
   1108   1.1  riastrad 				cv_wait(&fence->f_cv, lock);
   1109   1.1  riastrad 				ret = 0;
   1110   1.1  riastrad 			}
   1111   1.1  riastrad 		}
   1112  1.32  riastrad 
   1113   1.1  riastrad 		/* If the wait failed, give up.  */
   1114  1.32  riastrad 		if (ret)
   1115   1.1  riastrad 			break;
   1116  1.32  riastrad 	}
   1117  1.32  riastrad 
   1118  1.32  riastrad 	/*
   1119  1.32  riastrad 	 * Massage the return code if nonzero:
   1120  1.32  riastrad 	 * - if we were interrupted, return -ERESTARTSYS;
   1121  1.32  riastrad 	 * - if we timed out, return 0.
   1122  1.32  riastrad 	 * No other failure is possible.  On success, ret=0 but we
   1123  1.32  riastrad 	 * check again below to verify anyway.
   1124  1.32  riastrad 	 */
   1125  1.32  riastrad 	if (ret) {
   1126  1.32  riastrad 		KASSERTMSG((ret == -EINTR || ret == -ERESTART ||
   1127  1.32  riastrad 			ret == -EWOULDBLOCK), "ret=%ld", ret);
   1128  1.32  riastrad 		if (ret == -EINTR || ret == -ERESTART) {
   1129  1.32  riastrad 			ret = -ERESTARTSYS;
   1130  1.32  riastrad 		} else if (ret == -EWOULDBLOCK) {
   1131  1.32  riastrad 			KASSERT(timeout < MAX_SCHEDULE_TIMEOUT);
   1132  1.32  riastrad 			ret = 0;	/* timed out */
   1133   1.1  riastrad 		}
   1134   1.1  riastrad 	}
   1135   1.1  riastrad 
   1136  1.32  riastrad 	KASSERT(ret != -ERESTART); /* would be confused with time left */
   1137   1.1  riastrad 
   1138  1.32  riastrad 	/* Check again in case it was signalled after a wait.  */
   1139  1.32  riastrad 	if (fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT)) {
   1140  1.32  riastrad 		if (timeout < MAX_SCHEDULE_TIMEOUT)
   1141  1.32  riastrad 			ret = MAX(1, deadline - now);
   1142  1.32  riastrad 		else
   1143  1.32  riastrad 			ret = MAX_SCHEDULE_TIMEOUT;
   1144   1.1  riastrad 	}
   1145   1.1  riastrad 
   1146  1.32  riastrad out:	/* All done.  Release the lock.  */
   1147  1.32  riastrad 	spin_unlock(fence->lock);
   1148  1.32  riastrad 	return ret;
   1149   1.1  riastrad }
   1150  1.12  riastrad 
   1151  1.12  riastrad /*
   1152  1.12  riastrad  * __dma_fence_signal(fence)
   1153  1.12  riastrad  *
   1154  1.12  riastrad  *	Set fence's signalled bit, without waking waiters yet.  Return
   1155  1.12  riastrad  *	true if it was newly set, false if it was already set.
   1156  1.12  riastrad  */
   1157  1.12  riastrad bool
   1158  1.12  riastrad __dma_fence_signal(struct dma_fence *fence)
   1159  1.12  riastrad {
   1160  1.12  riastrad 
   1161  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
   1162  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
   1163  1.24  riastrad 
   1164  1.12  riastrad 	if (test_and_set_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
   1165  1.12  riastrad 		return false;
   1166  1.12  riastrad 
   1167  1.12  riastrad 	return true;
   1168  1.12  riastrad }
   1169  1.12  riastrad 
   1170  1.12  riastrad /*
   1171  1.12  riastrad  * __dma_fence_signal_wake(fence)
   1172  1.12  riastrad  *
   1173  1.25  riastrad  *	Set fence's timestamp and wake fence's waiters.  Caller must
   1174  1.25  riastrad  *	have previously called __dma_fence_signal and it must have
   1175  1.25  riastrad  *	previously returned true.
   1176  1.12  riastrad  */
   1177  1.12  riastrad void
   1178  1.12  riastrad __dma_fence_signal_wake(struct dma_fence *fence, ktime_t timestamp)
   1179  1.12  riastrad {
   1180  1.12  riastrad 	struct dma_fence_cb *fcb, *next;
   1181  1.12  riastrad 
   1182  1.24  riastrad 	KASSERTMSG(fence->f_magic != FENCE_MAGIC_BAD, "fence %p", fence);
   1183  1.24  riastrad 	KASSERTMSG(fence->f_magic == FENCE_MAGIC_GOOD, "fence %p", fence);
   1184  1.24  riastrad 
   1185  1.12  riastrad 	spin_lock(fence->lock);
   1186  1.12  riastrad 
    1187  1.12  riastrad 	KASSERT(fence->flags & (1u << DMA_FENCE_FLAG_SIGNALED_BIT));
   1188  1.12  riastrad 
   1189  1.36  riastrad 	SDT_PROBE1(sdt, drm, fence, signal,  fence);
   1190  1.36  riastrad 
   1191  1.25  riastrad 	/* Set the timestamp.  */
   1192  1.25  riastrad 	fence->timestamp = timestamp;
   1193  1.25  riastrad 	set_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags);
   1194  1.25  riastrad 
   1195  1.12  riastrad 	/* Wake waiters.  */
   1196  1.12  riastrad 	cv_broadcast(&fence->f_cv);
   1197  1.12  riastrad 
   1198  1.12  riastrad 	/* Remove and call the callbacks.  */
   1199  1.12  riastrad 	TAILQ_FOREACH_SAFE(fcb, &fence->f_callbacks, fcb_entry, next) {
   1200  1.12  riastrad 		TAILQ_REMOVE(&fence->f_callbacks, fcb, fcb_entry);
   1201  1.12  riastrad 		fcb->fcb_onqueue = false;
   1202  1.12  riastrad 		(*fcb->func)(fence, fcb);
   1203  1.12  riastrad 	}
   1204  1.12  riastrad 
   1205  1.12  riastrad 	spin_unlock(fence->lock);
   1206  1.12  riastrad }
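
/*
 * Example:
 *
 *	A sketch of the intended two-phase use of this pair: a
 *	completion path such as an interrupt handler can latch the
 *	signalled bit immediately and defer the timestamp, wakeups, and
 *	callbacks, for instance to a softint:
 *
 *		ktime_t now = ktime_get();
 *
 *		if (__dma_fence_signal(fence)) {
 *			...
 *			__dma_fence_signal_wake(fence, now);
 *		}
 *
 *	__dma_fence_signal_wake must only be called after
 *	__dma_fence_signal has returned true for the same fence.
 */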