/*	$NetBSD: linux_dma_resv.c,v 1.3 2021/12/19 10:37:47 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.3 2021/12/19 10:37:47 riastradh Exp $");

#include <sys/param.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/seqlock.h>
#include <linux/ww_mutex.h>

DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);

static struct dma_resv_list *
objlist_tryalloc(uint32_t n)
{
	struct dma_resv_list *list;

	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
	if (list == NULL)
		return NULL;
	list->shared_max = n;

	return list;
}

static void
objlist_free(struct dma_resv_list *list)
{
	uint32_t n = list->shared_max;

	kmem_free(list, offsetof(typeof(*list), shared[n]));
}

static void
objlist_free_cb(struct rcu_head *rcu)
{
	struct dma_resv_list *list = container_of(rcu,
	    struct dma_resv_list, rol_rcu);

	objlist_free(list);
}

static void
objlist_defer_free(struct dma_resv_list *list)
{

	call_rcu(&list->rol_rcu, objlist_free_cb);
}

/*
 * dma_resv_init(robj)
 *
 *	Initialize a reservation object.  Caller must later destroy it
 *	with dma_resv_fini.
 */
void
dma_resv_init(struct dma_resv *robj)
{

	ww_mutex_init(&robj->lock, &reservation_ww_class);
	seqcount_init(&robj->seq);
	robj->fence_excl = NULL;
	robj->fence = NULL;
	robj->robj_prealloc = NULL;
}

/*
 * dma_resv_fini(robj)
 *
 *	Destroy a reservation object, freeing any memory that had been
 *	allocated for it.  Caller must have exclusive access to it.
 */
void
dma_resv_fini(struct dma_resv *robj)
{
	unsigned i;

	if (robj->robj_prealloc)
		objlist_free(robj->robj_prealloc);
	if (robj->fence) {
		for (i = 0; i < robj->fence->shared_count; i++)
			dma_fence_put(robj->fence->shared[i]);
		objlist_free(robj->fence);
	}
	if (robj->fence_excl)
		dma_fence_put(robj->fence_excl);
	ww_mutex_destroy(&robj->lock);
}

/*
 * dma_resv_lock(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry.
 */
int
dma_resv_lock(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.
 */
void
dma_resv_lock_slow(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	ww_mutex_lock_slow(&robj->lock, ctx);
}

/*
 * dma_resv_lock_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry, -ERESTART/-EINTR if interrupted.
 */
int
dma_resv_lock_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.  Return 0 on
 *	success, -ERESTART/-EINTR if interrupted.
 */
int
dma_resv_lock_slow_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_trylock(robj)
 *
 *	Try to acquire a reservation object's lock without blocking.
 *	Return true on success, false on failure.
 */
bool
dma_resv_trylock(struct dma_resv *robj)
{

	return ww_mutex_trylock(&robj->lock);
}

/*
 * dma_resv_unlock(robj)
 *
 *	Release a reservation object's lock.
 */
void
dma_resv_unlock(struct dma_resv *robj)
{

	return ww_mutex_unlock(&robj->lock);
}

/*
 * dma_resv_held(robj)
 *
 *	True if robj is locked.
 */
bool
dma_resv_held(struct dma_resv *robj)
{

	return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_assert_held(robj)
 *
 *	Panic if robj is not held, in DIAGNOSTIC builds.
 */
void
dma_resv_assert_held(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
}
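
/*
 * Example (illustrative sketch, compiled out): locking two
 * reservation objects with one ww_acquire_ctx, using the slow path
 * to recover from -EDEADLK.  The function name is hypothetical and
 * the sketch assumes a != b; fence updates would go where indicated.
 */
#if 0
static int
example_lock_pair(struct dma_resv *a, struct dma_resv *b)
{
	struct ww_acquire_ctx ctx;
	struct dma_resv *tmp;
	int ret;

	ww_acquire_init(&ctx, &reservation_ww_class);

	/* Take the first lock; on -EDEADLK just wait for it.  */
	ret = dma_resv_lock(a, &ctx);
	if (ret == -EDEADLK) {
		dma_resv_lock_slow(a, &ctx);
		ret = 0;
	}
	if (ret)
		goto fail;

	/*
	 * Take the second lock.  If we lose a deadlock-avoidance
	 * race, back out, wait for the contended lock with the slow
	 * path, and retry with the roles swapped so it stays held.
	 */
	for (;;) {
		ret = dma_resv_lock(b, &ctx);
		if (ret != -EDEADLK)
			break;
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, &ctx);
		tmp = a; a = b; b = tmp;
	}
	if (ret) {
		dma_resv_unlock(a);
		goto fail;
	}
	ww_acquire_done(&ctx);

	/* ...reserve and add fences on a and b here...  */

	dma_resv_unlock(b);
	dma_resv_unlock(a);
	ww_acquire_fini(&ctx);
	return 0;

fail:	ww_acquire_fini(&ctx);
	return ret;
}
#endif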

/*
 * dma_resv_get_excl(robj)
 *
 *	Return a pointer to the exclusive fence of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_fence *
dma_resv_get_excl(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence_excl;
}

/*
 * dma_resv_get_list(robj)
 *
 *	Return a pointer to the shared fence list of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_resv_list *
dma_resv_get_list(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence;
}

/*
 * dma_resv_reserve_shared(robj)
 *
 *	Reserve space in robj to add a shared fence.  To be used only
 *	once before calling dma_resv_add_shared_fence.
 *
 *	Caller must have robj locked.
 *
 *	Internally, we start with room for four entries and double if
 *	we don't have enough.  This is not guaranteed.
 */
int
dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
{
	struct dma_resv_list *list, *prealloc;
	uint32_t n, nalloc;

	KASSERT(dma_resv_held(robj));
	KASSERT(num_fences == 1);

	list = robj->fence;
	prealloc = robj->robj_prealloc;

	/* If there's an existing list, check it for space.  */
	if (list) {
		/* If there's too many already, give up.  */
		if (list->shared_count == UINT32_MAX)
			return -ENOMEM;

		/* Add one more. */
		n = list->shared_count + 1;

		/* If there's enough for one more, we're done.  */
		if (n <= list->shared_max)
			return 0;
	} else {
		/* No list already.  We need space for 1.  */
		n = 1;
	}

	/* If not, maybe there's a preallocated list ready.  */
	if (prealloc != NULL) {
		/* If there's enough room in it, stop here.  */
		if (n <= prealloc->shared_max)
			return 0;

		/* Try to double its capacity.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
		prealloc = objlist_tryalloc(nalloc);
		if (prealloc == NULL)
			return -ENOMEM;

		/* Swap the new preallocated list and free the old one.  */
		objlist_free(robj->robj_prealloc);
		robj->robj_prealloc = prealloc;
	} else {
		/* Start with some spare.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
		prealloc = objlist_tryalloc(nalloc);
		if (prealloc == NULL)
			return -ENOMEM;
		/* Save the new preallocated list.  */
		robj->robj_prealloc = prealloc;
	}

	/* Success!  */
	return 0;
}

struct dma_resv_write_ticket {
};

/*
 * dma_resv_write_begin(robj, ticket)
 *
 *	Begin an atomic batch of writes to robj, and initialize opaque
 *	ticket for it.  The ticket must be passed to
 *	dma_resv_write_commit to commit the writes.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as an acquire operation, however.
 */
static void
dma_resv_write_begin(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_write_commit(robj, ticket)
 *
 *	Commit an atomic batch of writes to robj begun with the call to
 *	dma_resv_write_begin that returned ticket.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as a release operation, however.
 */
static void
dma_resv_write_commit(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_end(&robj->seq);
}

struct dma_resv_read_ticket {
	unsigned version;
};

/*
 * dma_resv_read_begin(robj, ticket)
 *
 *	Begin a read section, and initialize opaque ticket for it.  The
 *	ticket must be passed to dma_resv_read_valid, and the
 *	caller must be prepared to retry reading if it fails.
 */
static void
dma_resv_read_begin(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	ticket->version = read_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_read_valid(robj, ticket)
 *
 *	Test whether the read sections are valid.  Return true on
 *	success, or false on failure if the read ticket has been
 *	invalidated.
 */
static bool
dma_resv_read_valid(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	return !read_seqcount_retry(&robj->seq, ticket->version);
}
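
/*
 * Example (illustrative sketch, compiled out): the read-ticket
 * helpers above implement a seqlock-style optimistic read.  A reader
 * takes a ticket, reads what it needs, and revalidates the ticket,
 * retrying if a writer ran concurrently.  The function name is
 * hypothetical; the real readers below also take fence references.
 */
#if 0
static bool
example_has_excl_fence(const struct dma_resv *robj)
{
	struct dma_resv_read_ticket ticket;
	bool has;

	rcu_read_lock();
	do {
		dma_resv_read_begin(robj, &ticket);
		has = (robj->fence_excl != NULL);
	} while (!dma_resv_read_valid(robj, &ticket));
	rcu_read_unlock();

	return has;
}
#endif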

/*
 * dma_resv_add_excl_fence(robj, fence)
 *
 *	Empty and release all of robj's shared fences, and clear and
 *	release its exclusive fence.  If fence is nonnull, acquire a
 *	reference to it and save it as robj's exclusive fence.
 *
 *	Caller must have robj locked.
 */
void
dma_resv_add_excl_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_fence *old_fence = robj->fence_excl;
	struct dma_resv_list *old_list = robj->fence;
	uint32_t old_shared_count;
	struct dma_resv_write_ticket ticket;

	KASSERT(dma_resv_held(robj));

	/*
	 * If we are setting rather than just removing a fence, acquire
	 * a reference for ourselves.
	 */
	if (fence)
		(void)dma_fence_get(fence);

	/* If there are any shared fences, remember how many.  */
	if (old_list)
		old_shared_count = old_list->shared_count;

	/* Begin an update.  */
	dma_resv_write_begin(robj, &ticket);

	/* Replace the fence and zero the shared count.  */
	robj->fence_excl = fence;
	if (old_list)
		old_list->shared_count = 0;

	/* Commit the update.  */
	dma_resv_write_commit(robj, &ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence)
		dma_fence_put(old_fence);

	/* Release any old shared fences.  */
	if (old_list) {
		while (old_shared_count--)
			dma_fence_put(old_list->shared[old_shared_count]);
	}
}

/*
 * dma_resv_add_shared_fence(robj, fence)
 *
 *	Acquire a reference to fence and add it to robj's shared list.
 *	If any fence was already added with the same context number,
 *	release it and replace it by this one.
 *
 *	Caller must have robj locked, and must have preceded with a
 *	call to dma_resv_reserve_shared for each shared fence
 *	added.
 */
void
dma_resv_add_shared_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_resv_list *list = robj->fence;
	struct dma_resv_list *prealloc = robj->robj_prealloc;
	struct dma_resv_write_ticket ticket;
	struct dma_fence *replace = NULL;
	uint32_t i;

	KASSERT(dma_resv_held(robj));

	/* Acquire a reference to the fence.  */
	KASSERT(fence != NULL);
	(void)dma_fence_get(fence);

	/* Check for a preallocated replacement list.  */
	if (prealloc == NULL) {
		/*
		 * If there is no preallocated replacement list, then
		 * there must be room in the current list.
		 */
		KASSERT(list != NULL);
		KASSERT(list->shared_count < list->shared_max);

		/* Begin an update.  Implies membar_producer for fence.  */
		dma_resv_write_begin(robj, &ticket);

		/* Find a fence with the same context number.  */
		for (i = 0; i < list->shared_count; i++) {
			if (list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				list->shared[i] = fence;
				break;
			}
		}

		/* If we didn't find one, add it at the end.  */
		if (i == list->shared_count)
			list->shared[list->shared_count++] = fence;

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);
	} else {
		/*
		 * There is a preallocated replacement list.  There may
		 * not be a current list.  If not, treat it as a zero-
		 * length list.
		 */
		uint32_t shared_count = (list == NULL? 0 : list->shared_count);

		/* There had better be room in the preallocated list.  */
		KASSERT(shared_count < prealloc->shared_max);

		/*
		 * Copy the fences over, but replace if we find one
		 * with the same context number.
		 */
		for (i = 0; i < shared_count; i++) {
			if (replace == NULL &&
			    list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				prealloc->shared[i] = fence;
			} else {
				prealloc->shared[i] = list->shared[i];
			}
		}
		prealloc->shared_count = shared_count;

		/* If we didn't find one, add it at the end.  */
		if (replace == NULL)
			prealloc->shared[prealloc->shared_count++] = fence;

		/*
		 * Now ready to replace the list.  Begin an update.
		 * Implies membar_producer for fence and prealloc.
		 */
		dma_resv_write_begin(robj, &ticket);

		/* Replace the list.  */
		robj->fence = prealloc;
		robj->robj_prealloc = NULL;

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);

		/*
		 * If there is an old list, free it when convenient.
		 * (We are not in a position at this point to sleep
		 * waiting for activity on all CPUs.)
		 */
		if (list)
			objlist_defer_free(list);
	}

	/* Release a fence if we replaced it.  */
	if (replace)
		dma_fence_put(replace);
}
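
/*
 * Example (illustrative sketch, compiled out): the protocol for
 * publishing a fence after submitting work against a buffer.
 * Readers reserve a slot and add a shared fence; writers install an
 * exclusive fence.  The function name and the write flag are
 * hypothetical.
 */
#if 0
static int
example_publish_fence(struct dma_resv *robj, struct dma_fence *fence,
    bool write)
{
	int ret;

	/* The reservation must be held across reserve and add.  */
	dma_resv_assert_held(robj);

	if (write) {
		/* Writers replace the exclusive fence.  */
		dma_resv_add_excl_fence(robj, fence);
	} else {
		/* Readers reserve a slot, then add a shared fence.  */
		ret = dma_resv_reserve_shared(robj, 1);
		if (ret)
			return ret;
		dma_resv_add_shared_fence(robj, fence);
	}
	return 0;
}
#endif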

/*
 * dma_resv_get_excl_rcu(robj)
 *
 *	Note: Caller need not call this from an RCU read section.
 */
struct dma_fence *
dma_resv_get_excl_rcu(const struct dma_resv *robj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&robj->fence_excl);
	rcu_read_unlock();

	return fence;
}

/*
 * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
 *
 *	Get a snapshot of the fences of robj: the exclusive fence in
 *	*fencep, and the shared fences in a heap-allocated array in
 *	*sharedp with *nsharedp elements.  References are acquired on
 *	all returned fences; the caller must release them and free the
 *	array.  Return 0 on success, -ENOMEM on allocation failure.
 */
int
dma_resv_get_fences_rcu(const struct dma_resv *robj,
    struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
{
	const struct dma_resv_list *list;
	struct dma_fence *fence;
	struct dma_fence **shared = NULL;
	unsigned shared_alloc, shared_count, i;
	struct dma_resv_read_ticket ticket;

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If there is a shared list, grab it.  */
	list = robj->fence;
	__insn_barrier();
	if (list) {
		/* Make sure the content of the list has been published.  */
		membar_datadep_consumer();

		/* Check whether we have a buffer.  */
		if (shared == NULL) {
			/*
			 * We don't have a buffer yet.  Try to allocate
			 * one without waiting.
			 */
			shared_alloc = list->shared_max;
			__insn_barrier();
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_NOWAIT);
			if (shared == NULL) {
				/*
				 * Couldn't do it immediately.  Back
				 * out of RCU and allocate one with
				 * waiting.
				 */
				rcu_read_unlock();
				shared = kcalloc(shared_alloc,
				    sizeof(shared[0]), GFP_KERNEL);
				if (shared == NULL)
					return -ENOMEM;
				goto top;
			}
		} else if (shared_alloc < list->shared_max) {
			/*
			 * We have a buffer but it's too small.  We're
			 * already racing in this case, so just back
			 * out and wait to allocate a bigger one.
			 */
			shared_alloc = list->shared_max;
			__insn_barrier();
			rcu_read_unlock();
			kfree(shared);
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_KERNEL);
			if (shared == NULL)
				return -ENOMEM;
			/* Re-enter RCU and reread the list.  */
			goto top;
		}
		/*
		 * We got a buffer large enough.  Record the number of
		 * elements and copy only those into the buffer.
		 */
		shared_count = list->shared_count;
		memcpy(shared, list->shared, shared_count * sizeof(shared[0]));
	} else {
		/* No shared list: shared count is zero.  */
		shared_count = 0;
	}

	/* If there is an exclusive fence, grab it.  */
	fence = robj->fence_excl;
	__insn_barrier();
	if (fence) {
		/* Make sure the content of the fence has been published.  */
		membar_datadep_consumer();
	}

	/*
	 * We are done reading from robj and list.  Validate our
	 * parking ticket.  If it's invalid, do not pass go and do not
	 * collect $200.
	 */
	if (!dma_resv_read_valid(robj, &ticket))
		goto restart;

	/*
	 * Try to get a reference to the exclusive fence, if there is
	 * one.  If we can't, start over.
	 */
	if (fence) {
		if (dma_fence_get_rcu(fence) == NULL)
			goto restart;
	}

	/*
	 * Try to get a reference to all of the shared fences.
	 */
	for (i = 0; i < shared_count; i++) {
		if (dma_fence_get_rcu(shared[i]) == NULL)
			goto put_restart;
	}

	/* Success!  */
	rcu_read_unlock();
	*fencep = fence;
	*nsharedp = shared_count;
	*sharedp = shared;
	return 0;

put_restart:
	/* Back out.  */
	while (i --> 0) {
		dma_fence_put(shared[i]);
		shared[i] = NULL; /* paranoia */
	}
	if (fence) {
		dma_fence_put(fence);
		fence = NULL;	/* paranoia */
	}

restart:
	rcu_read_unlock();
	goto top;
}
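
/*
 * Example (illustrative sketch, compiled out): consuming a snapshot
 * from dma_resv_get_fences_rcu.  The caller owns the returned fence
 * references and the shared array and must release both.  The
 * function name and the blocking-wait policy are hypothetical.
 */
#if 0
static int
example_wait_for_all_fences(const struct dma_resv *robj)
{
	struct dma_fence *excl = NULL, **shared = NULL;
	unsigned nshared = 0, i;
	int ret;

	ret = dma_resv_get_fences_rcu(robj, &excl, &nshared, &shared);
	if (ret)
		return ret;

	/* Wait for and release each shared fence, then the array.  */
	for (i = 0; i < nshared; i++) {
		(void)dma_fence_wait(shared[i], /*intr*/false);
		dma_fence_put(shared[i]);
	}
	kfree(shared);

	/* Wait for and release the exclusive fence, if any.  */
	if (excl) {
		(void)dma_fence_wait(excl, /*intr*/false);
		dma_fence_put(excl);
	}

	return 0;
}
#endif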

/*
 * dma_resv_copy_fences(dst, src)
 *
 *	Copy the exclusive fence and all the shared fences from src to
 *	dst.
 *
 *	Caller must have dst locked.
 */
int
dma_resv_copy_fences(struct dma_resv *dst_robj,
    const struct dma_resv *src_robj)
{
	const struct dma_resv_list *src_list;
	struct dma_resv_list *dst_list = NULL;
	struct dma_resv_list *old_list;
	struct dma_fence *fence = NULL;
	struct dma_fence *old_fence;
	uint32_t shared_count, i;
	struct dma_resv_read_ticket read_ticket;
	struct dma_resv_write_ticket write_ticket;

	KASSERT(dma_resv_held(dst_robj));

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(src_robj, &read_ticket);

	/* Get the shared list.  */
	src_list = src_robj->fence;
	__insn_barrier();
	if (src_list) {
		/* Make sure the content of the list has been published.  */
		membar_datadep_consumer();

		/* Find out how long it is.  */
		shared_count = src_list->shared_count;

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(src_robj, &read_ticket))
			goto restart;

		/* Allocate a new list.  */
		dst_list = objlist_tryalloc(shared_count);
		if (dst_list == NULL) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		/* Copy over all fences that are not yet signalled.  */
		dst_list->shared_count = 0;
		for (i = 0; i < shared_count; i++) {
			if ((fence = dma_fence_get_rcu(src_list->shared[i]))
			    == NULL)
				goto restart;
			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				fence = NULL;
				continue;
			}
			dst_list->shared[dst_list->shared_count++] = fence;
			fence = NULL;
		}
	}

	/* Get the exclusive fence.  */
	fence = src_robj->fence_excl;
	__insn_barrier();
	if (fence != NULL) {
		/* Make sure the content of the fence has been published.  */
		membar_datadep_consumer();

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!dma_resv_read_valid(src_robj, &read_ticket)) {
			fence = NULL;
			goto restart;
		}

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it.
		 */
		if (!dma_fence_get_rcu(fence)) {
			fence = NULL;
			goto restart;
		}
	}

	/* All done with src; exit the RCU read section.  */
	rcu_read_unlock();

	/*
	 * We now have a snapshot of the shared and exclusive fences of
	 * src_robj and we have acquired references to them so they
	 * won't go away.  Transfer them over to dst_robj, releasing
	 * references to any that were there.
	 */

	/* Get the old shared and exclusive fences, if any.  */
	old_list = dst_robj->fence;
	old_fence = dst_robj->fence_excl;

	/* Begin an update.  */
	dma_resv_write_begin(dst_robj, &write_ticket);

	/* Replace the fences.  */
	dst_robj->fence = dst_list;
	dst_robj->fence_excl = fence;

	/* Commit the update.  */
	dma_resv_write_commit(dst_robj, &write_ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence)
		dma_fence_put(old_fence);

	/* Release any old shared fences.  */
	if (old_list) {
		for (i = old_list->shared_count; i --> 0;)
			dma_fence_put(old_list->shared[i]);
	}

	/* Success!  */
	return 0;

restart:
	rcu_read_unlock();
	if (dst_list) {
		for (i = dst_list->shared_count; i --> 0;) {
			dma_fence_put(dst_list->shared[i]);
			dst_list->shared[i] = NULL;
		}
		objlist_free(dst_list);
		dst_list = NULL;
	}
	if (fence) {
		dma_fence_put(fence);
		fence = NULL;
	}
	goto top;
}

/*
 * dma_resv_test_signaled_rcu(robj, shared)
 *
 *	If shared is true, test whether all of the shared fences are
 *	signalled, or if there are none, test whether the exclusive
 *	fence is signalled.  If shared is false, test only whether the
 *	exclusive fence is signalled.
 *
 *	XXX Why does this _not_ test the exclusive fence if shared is
 *	true only if there are no shared fences?  This makes no sense.
 */
bool
dma_resv_test_signaled_rcu(const struct dma_resv *robj,
    bool shared)
{
	struct dma_resv_read_ticket ticket;
	struct dma_resv_list *list;
	struct dma_fence *fence;
	uint32_t i, shared_count;
	bool signaled = true;

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If shared is requested and there is a shared list, test it.  */
	if (!shared)
		goto excl;
	list = robj->fence;
	__insn_barrier();
	if (list) {
		/* Make sure the content of the list has been published.  */
		membar_datadep_consumer();

		/* Find out how long it is.  */
		shared_count = list->shared_count;

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop if we find any that is not
		 * signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = dma_fence_get_rcu(list->shared[i]);
			if (fence == NULL)
				goto restart;
			signaled &= dma_fence_is_signaled(fence);
			dma_fence_put(fence);
			if (!signaled)
				goto out;
		}
	}

excl:
	/* If there is an exclusive fence, test it.  */
	fence = robj->fence_excl;
	__insn_barrier();
	if (fence) {
		/* Make sure the content of the fence has been published.  */
		membar_datadep_consumer();

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it to test whether it is signalled.
		 */
		if ((fence = dma_fence_get_rcu(fence)) == NULL)
			goto restart;
		signaled &= dma_fence_is_signaled(fence);
		dma_fence_put(fence);
		if (!signaled)
			goto out;
	}

out:	rcu_read_unlock();
	return signaled;

restart:
	rcu_read_unlock();
	goto top;
}

/*
 * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
 *
 *	If shared is true, wait for all of the shared fences to be
 *	signalled, or if there are none, wait for the exclusive fence
 *	to be signalled.  If shared is false, wait only for the
 *	exclusive fence to be signalled.  If timeout is zero, don't
 *	wait, only test.
 *
 *	XXX Why does this _not_ wait for the exclusive fence if shared
 *	is true only if there are no shared fences?  This makes no
 *	sense.
 */
long
dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
    bool shared, bool intr, unsigned long timeout)
{
	struct dma_resv_read_ticket ticket;
	struct dma_resv_list *list;
	struct dma_fence *fence;
	uint32_t i, shared_count;
	long ret;

	if (timeout == 0)
		return dma_resv_test_signaled_rcu(robj, shared);

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If shared is requested and there is a shared list, wait on it.  */
	if (!shared)
		goto excl;
	list = robj->fence;
	__insn_barrier();
	if (list) {
		/* Make sure the content of the list has been published.  */
		membar_datadep_consumer();

		/* Find out how long it is.  */
		shared_count = list->shared_count;

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop and wait if we find any that
		 * is not signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = dma_fence_get_rcu(list->shared[i]);
			if (fence == NULL)
				goto restart;
			if (!dma_fence_is_signaled(fence))
				goto wait;
			dma_fence_put(fence);
		}
	}

excl:
	/* If there is an exclusive fence, test it.  */
	fence = robj->fence_excl;
	__insn_barrier();
	if (fence) {
		/* Make sure the content of the fence has been published.  */
		membar_datadep_consumer();

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it to test whether it is signalled.  If
		 * not, wait for it.
		 */
		if ((fence = dma_fence_get_rcu(fence)) == NULL)
			goto restart;
		if (!dma_fence_is_signaled(fence))
			goto wait;
		dma_fence_put(fence);
	}

	/* Success!  Return the number of ticks left.  */
	rcu_read_unlock();
	return timeout;

restart:
	rcu_read_unlock();
	goto top;

wait:
	/*
	 * Exit the RCU read section and wait for it.  If we time out
	 * or fail, bail.  Otherwise, go back to the top.
	 */
	KASSERT(fence != NULL);
	rcu_read_unlock();
	ret = dma_fence_wait_timeout(fence, intr, timeout);
	dma_fence_put(fence);
	if (ret <= 0)
		return ret;
	KASSERT(ret <= timeout);
	timeout = ret;
	goto top;
}
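
/*
 * Example (illustrative sketch, compiled out): waiting up to about
 * one second for a buffer to go idle before reusing it.  The timeout
 * is in ticks; hz would come from <sys/kernel.h>, which this sketch
 * assumes.  The function name and the -EBUSY policy are hypothetical.
 */
#if 0
static int
example_wait_idle(const struct dma_resv *robj)
{
	long ret;

	/* Wait for all fences, shared and exclusive, interruptibly.  */
	ret = dma_resv_wait_timeout_rcu(robj, /*shared*/true, /*intr*/true,
	    hz);
	if (ret < 0)
		return ret;	/* interrupted or failed */
	if (ret == 0)
		return -EBUSY;	/* timed out */
	return 0;		/* all fences signalled; ret ticks left */
}
#endif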

/*
 * dma_resv_poll_init(rpoll)
 *
 *	Initialize reservation poll state.
 */
void
dma_resv_poll_init(struct dma_resv_poll *rpoll)
{

	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
	selinit(&rpoll->rp_selq);
	rpoll->rp_claimed = 0;
}

/*
 * dma_resv_poll_fini(rpoll)
 *
 *	Release any resource associated with reservation poll state.
 */
void
dma_resv_poll_fini(struct dma_resv_poll *rpoll)
{

	KASSERT(rpoll->rp_claimed == 0);
	seldestroy(&rpoll->rp_selq);
	mutex_destroy(&rpoll->rp_lock);
}
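
/*
 * Example (illustrative sketch, compiled out): a driver embeds a
 * dma_resv_poll next to each object's dma_resv, initializes and
 * finalizes both together, and forwards its poll entry point to
 * dma_resv_do_poll (defined below).  All names here are hypothetical.
 */
#if 0
struct example_obj {
	struct dma_resv		eo_resv;
	struct dma_resv_poll	eo_rpoll;
};

static void
example_obj_init(struct example_obj *eo)
{

	dma_resv_init(&eo->eo_resv);
	dma_resv_poll_init(&eo->eo_rpoll);
}

static void
example_obj_fini(struct example_obj *eo)
{

	dma_resv_poll_fini(&eo->eo_rpoll);
	dma_resv_fini(&eo->eo_resv);
}

static int
example_obj_poll(struct example_obj *eo, int events)
{

	return dma_resv_do_poll(&eo->eo_resv, events, &eo->eo_rpoll);
}
#endif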

/*
 * dma_resv_poll_cb(fence, fcb)
 *
 *	Callback to notify a reservation poll that a fence has
 *	completed.  Notify any waiters and allow the next poller to
 *	claim the callback.
 *
 *	If one thread is waiting for the exclusive fence only, and we
 *	spuriously notify them about a shared fence, tough.
 */
static void
dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct dma_resv_poll *rpoll = container_of(fcb,
	    struct dma_resv_poll, rp_fcb);

	mutex_enter(&rpoll->rp_lock);
	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
	rpoll->rp_claimed = 0;
	mutex_exit(&rpoll->rp_lock);
}

/*
 * dma_resv_do_poll(robj, events, rpoll)
 *
 *	Poll for reservation object events using the reservation poll
 *	state in rpoll:
 *
 *	- POLLOUT	wait for all fences shared and exclusive
 *	- POLLIN	wait for the exclusive fence
 *
 *	Return the subset of events in events that are ready.  If any
 *	are requested but not ready, arrange to be notified with
 *	selnotify when they are.
 */
int
dma_resv_do_poll(const struct dma_resv *robj, int events,
    struct dma_resv_poll *rpoll)
{
	struct dma_resv_read_ticket ticket;
	struct dma_resv_list *list;
	struct dma_fence *fence;
	uint32_t i, shared_count;
	int revents;
	bool recorded = false;	/* curlwp is on the selq */
	bool claimed = false;	/* we claimed the callback */
	bool callback = false;	/* we requested a callback */

	/*
	 * Start with the maximal set of events that could be ready.
	 * We will eliminate the events that are definitely not ready
	 * as we go at the same time as we add callbacks to notify us
	 * that they may be ready.
	 */
	revents = events & (POLLIN|POLLOUT);
	if (revents == 0)
		return 0;

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If we want to wait for all fences, get the shared list.  */
	if (!(events & POLLOUT))
		goto excl;
	list = robj->fence;
	__insn_barrier();
	if (list) do {
		/* Make sure the content of the list has been published.  */
		membar_datadep_consumer();

		/* Find out how long it is.  */
		shared_count = list->shared_count;

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop and request a callback if we
		 * find any that is not signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = dma_fence_get_rcu(list->shared[i]);
			if (fence == NULL)
				goto restart;
			if (!dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				break;
			}
			dma_fence_put(fence);
		}

		/* If all shared fences have been signalled, move on.  */
		if (i == shared_count)
			break;

		/* Put ourselves on the selq if we haven't already.  */
		if (!recorded)
			goto record;

		/*
		 * If someone else claimed the callback, or we already
		 * requested it, we're guaranteed to be notified, so
		 * assume the event is not ready.
		 */
		if (!claimed || callback) {
			revents &= ~POLLOUT;
			break;
		}

		/*
		 * Otherwise, find the first fence that is not
		 * signalled, request the callback, and clear POLLOUT
		 * from the possible ready events.  If they are all
		 * signalled, leave POLLOUT set; we will simulate the
		 * callback later.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = dma_fence_get_rcu(list->shared[i]);
			if (fence == NULL)
				goto restart;
			if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
				dma_resv_poll_cb)) {
				dma_fence_put(fence);
				revents &= ~POLLOUT;
				callback = true;
				break;
			}
			dma_fence_put(fence);
		}
	} while (0);

excl:
	/* We always wait for at least the exclusive fence, so get it.  */
	fence = robj->fence_excl;
	__insn_barrier();
	if (fence) do {
		/* Make sure the content of the fence has been published.  */
		membar_datadep_consumer();

		/*
		 * Make sure we saw a consistent snapshot of the fence.
   1273  1.1  riastrad 		 *
   1274  1.1  riastrad 		 * XXX I'm not actually sure this is necessary since
   1275  1.1  riastrad 		 * pointer writes are supposed to be atomic.
   1276  1.1  riastrad 		 */
   1277  1.1  riastrad 		if (!dma_resv_read_valid(robj, &ticket))
   1278  1.1  riastrad 			goto restart;
   1279  1.1  riastrad 
   1280  1.1  riastrad 		/*
   1281  1.1  riastrad 		 * If it is going away, restart.  Otherwise, acquire a
   1282  1.1  riastrad 		 * reference to it to test whether it is signalled.  If
   1283  1.1  riastrad 		 * not, stop and request a callback.
   1284  1.1  riastrad 		 */
   1285  1.1  riastrad 		if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1286  1.1  riastrad 			goto restart;
   1287  1.1  riastrad 		if (dma_fence_is_signaled(fence)) {
   1288  1.1  riastrad 			dma_fence_put(fence);
   1289  1.1  riastrad 			break;
   1290  1.1  riastrad 		}
   1291  1.1  riastrad 
   1292  1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1293  1.1  riastrad 		if (!recorded) {
   1294  1.1  riastrad 			dma_fence_put(fence);
   1295  1.1  riastrad 			goto record;
   1296  1.1  riastrad 		}
   1297  1.1  riastrad 
   1298  1.1  riastrad 		/*
   1299  1.1  riastrad 		 * If someone else claimed the callback, or we already
   1300  1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1301  1.1  riastrad 		 * assume the event is not ready.
   1302  1.1  riastrad 		 */
   1303  1.1  riastrad 		if (!claimed || callback) {
   1304  1.1  riastrad 			dma_fence_put(fence);
   1305  1.1  riastrad 			revents = 0;
   1306  1.1  riastrad 			break;
   1307  1.1  riastrad 		}
   1308  1.1  riastrad 
   1309  1.1  riastrad 		/*
   1310  1.1  riastrad 		 * Otherwise, try to request the callback, and clear
   1311  1.1  riastrad 		 * all possible ready events.  If the fence has been
   1312  1.1  riastrad 		 * signalled in the interim, leave the events set; we
   1313  1.1  riastrad 		 * will simulate the callback later.
   1314  1.1  riastrad 		 */
   1315  1.1  riastrad 		if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1316  1.1  riastrad 			dma_resv_poll_cb)) {
   1317  1.1  riastrad 			dma_fence_put(fence);
   1318  1.1  riastrad 			revents = 0;
   1319  1.1  riastrad 			callback = true;
   1320  1.1  riastrad 			break;
   1321  1.1  riastrad 		}
   1322  1.1  riastrad 		dma_fence_put(fence);
   1323  1.1  riastrad 	} while (0);
   1324  1.1  riastrad 
   1325  1.1  riastrad 	/* All done reading the fences.  */
   1326  1.1  riastrad 	rcu_read_unlock();
   1327  1.1  riastrad 
   1328  1.1  riastrad 	if (claimed && !callback) {
   1329  1.1  riastrad 		/*
   1330  1.1  riastrad 		 * We claimed the callback but we didn't actually
   1331  1.1  riastrad 		 * request it because a fence was signalled while we
   1332  1.1  riastrad 		 * were claiming it.  Call it ourselves now.  The
   1333  1.1  riastrad 		 * callback doesn't use the fence nor rely on holding
   1334  1.1  riastrad 		 * any of the fence locks, so this is safe.
   1335  1.1  riastrad 		 */
   1336  1.1  riastrad 		dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
   1337  1.1  riastrad 	}
   1338  1.1  riastrad 	return revents;
   1339  1.1  riastrad 
   1340  1.1  riastrad restart:
   1341  1.1  riastrad 	rcu_read_unlock();
   1342  1.1  riastrad 	goto top;
   1343  1.1  riastrad 
   1344  1.1  riastrad record:
   1345  1.1  riastrad 	rcu_read_unlock();
   1346  1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1347  1.1  riastrad 	selrecord(curlwp, &rpoll->rp_selq);
   1348  1.1  riastrad 	if (!rpoll->rp_claimed)
   1349  1.1  riastrad 		claimed = rpoll->rp_claimed = true;
   1350  1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1351  1.1  riastrad 	recorded = true;
   1352  1.1  riastrad 	goto top;
   1353  1.1  riastrad }
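
/*
 * Illustrative sketch, not part of the original file: how a driver's
 * poll routine might use dma_resv_do_poll.  The softc layout and
 * exampledrv_poll are hypothetical; the poll state in sc_rpoll must
 * have been initialized (its mutex and selinfo set up) before use.
 */
#if 0
struct exampledrv_softc {
	struct dma_resv		*sc_resv;	/* reservation to watch */
	struct dma_resv_poll	sc_rpoll;	/* per-object poll state */
};

static int
exampledrv_poll(struct exampledrv_softc *sc, int events)
{

	/*
	 * Returns the subset of POLLIN/POLLOUT that is ready now.  If
	 * some requested events are not ready, curlwp has already been
	 * recorded on the selq and will be woken by dma_resv_poll_cb
	 * when a fence signals, at which point the caller polls again.
	 */
	return dma_resv_do_poll(sc->sc_resv, events, &sc->sc_rpoll);
}
#endif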
   1354  1.1  riastrad 
   1355  1.1  riastrad /*
   1356  1.1  riastrad  * dma_resv_kqfilter(robj, kn, rpoll)
   1357  1.1  riastrad  *
   1358  1.1  riastrad  *	Kqueue filter for reservation objects.  Currently not
   1359  1.1  riastrad  *	implemented because the logic to implement it is nontrivial,
   1360  1.1  riastrad  *	and userland will presumably never use it, so it would be
   1361  1.1  riastrad  *	dangerous to add never-tested complex code paths to the kernel.
   1362  1.1  riastrad  */
   1363  1.1  riastrad int
   1364  1.1  riastrad dma_resv_kqfilter(const struct dma_resv *robj,
   1365  1.1  riastrad     struct knote *kn, struct dma_resv_poll *rpoll)
   1366  1.1  riastrad {
   1367  1.1  riastrad 
   1368  1.1  riastrad 	return EINVAL;
   1369  1.1  riastrad }