      1  1.13  riastrad /*	$NetBSD: linux_dma_resv.c,v 1.13 2021/12/19 12:26:13 riastradh Exp $	*/
      2   1.1  riastrad 
      3   1.1  riastrad /*-
      4   1.1  riastrad  * Copyright (c) 2018 The NetBSD Foundation, Inc.
      5   1.1  riastrad  * All rights reserved.
      6   1.1  riastrad  *
      7   1.1  riastrad  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1  riastrad  * by Taylor R. Campbell.
      9   1.1  riastrad  *
     10   1.1  riastrad  * Redistribution and use in source and binary forms, with or without
     11   1.1  riastrad  * modification, are permitted provided that the following conditions
     12   1.1  riastrad  * are met:
     13   1.1  riastrad  * 1. Redistributions of source code must retain the above copyright
     14   1.1  riastrad  *    notice, this list of conditions and the following disclaimer.
     15   1.1  riastrad  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1  riastrad  *    notice, this list of conditions and the following disclaimer in the
     17   1.1  riastrad  *    documentation and/or other materials provided with the distribution.
     18   1.1  riastrad  *
     19   1.1  riastrad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1  riastrad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1  riastrad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1  riastrad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1  riastrad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1  riastrad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1  riastrad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1  riastrad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1  riastrad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1  riastrad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1  riastrad  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1  riastrad  */
     31   1.1  riastrad 
     32   1.1  riastrad #include <sys/cdefs.h>
     33  1.13  riastrad __KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.13 2021/12/19 12:26:13 riastradh Exp $");
     34   1.1  riastrad 
     35   1.1  riastrad #include <sys/param.h>
     36   1.1  riastrad #include <sys/poll.h>
     37   1.1  riastrad #include <sys/select.h>
     38   1.1  riastrad 
     39   1.1  riastrad #include <linux/dma-fence.h>
     40   1.1  riastrad #include <linux/dma-resv.h>
     41   1.1  riastrad #include <linux/seqlock.h>
     42   1.1  riastrad #include <linux/ww_mutex.h>
     43   1.1  riastrad 
     44   1.1  riastrad DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);
     45   1.1  riastrad 
     46   1.1  riastrad static struct dma_resv_list *
     47   1.1  riastrad objlist_tryalloc(uint32_t n)
     48   1.1  riastrad {
     49   1.1  riastrad 	struct dma_resv_list *list;
     50   1.1  riastrad 
     51   1.1  riastrad 	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
     52   1.1  riastrad 	if (list == NULL)
     53   1.1  riastrad 		return NULL;
     54   1.1  riastrad 	list->shared_max = n;
     55   1.1  riastrad 
     56   1.1  riastrad 	return list;
     57   1.1  riastrad }
     58   1.1  riastrad 
     59   1.1  riastrad static void
     60   1.1  riastrad objlist_free(struct dma_resv_list *list)
     61   1.1  riastrad {
     62   1.1  riastrad 	uint32_t n = list->shared_max;
     63   1.1  riastrad 
     64   1.1  riastrad 	kmem_free(list, offsetof(typeof(*list), shared[n]));
     65   1.1  riastrad }
     66   1.1  riastrad 
     67   1.1  riastrad static void
     68   1.1  riastrad objlist_free_cb(struct rcu_head *rcu)
     69   1.1  riastrad {
     70   1.1  riastrad 	struct dma_resv_list *list = container_of(rcu,
     71   1.1  riastrad 	    struct dma_resv_list, rol_rcu);
     72   1.1  riastrad 
     73   1.1  riastrad 	objlist_free(list);
     74   1.1  riastrad }
     75   1.1  riastrad 
     76   1.1  riastrad static void
     77   1.1  riastrad objlist_defer_free(struct dma_resv_list *list)
     78   1.1  riastrad {
     79   1.1  riastrad 
     80   1.1  riastrad 	call_rcu(&list->rol_rcu, objlist_free_cb);
     81   1.1  riastrad }
     82   1.1  riastrad 
     83   1.1  riastrad /*
     84   1.1  riastrad  * dma_resv_init(robj)
     85   1.1  riastrad  *
     86   1.1  riastrad  *	Initialize a reservation object.  Caller must later destroy it
     87   1.1  riastrad  *	with dma_resv_fini.
     88   1.1  riastrad  */
     89   1.1  riastrad void
     90   1.1  riastrad dma_resv_init(struct dma_resv *robj)
     91   1.1  riastrad {
     92   1.1  riastrad 
     93   1.1  riastrad 	ww_mutex_init(&robj->lock, &reservation_ww_class);
     94   1.1  riastrad 	seqcount_init(&robj->seq);
     95   1.1  riastrad 	robj->fence_excl = NULL;
     96   1.1  riastrad 	robj->fence = NULL;
     97   1.1  riastrad 	robj->robj_prealloc = NULL;
     98   1.1  riastrad }
     99   1.1  riastrad 
    100   1.1  riastrad /*
    101   1.1  riastrad  * dma_resv_fini(robj)
    102   1.1  riastrad  *
    103   1.1  riastrad  *	Destroy a reservation object, freeing any memory that had been
    104   1.1  riastrad  *	allocated for it.  Caller must have exclusive access to it.
    105   1.1  riastrad  */
    106   1.1  riastrad void
    107   1.1  riastrad dma_resv_fini(struct dma_resv *robj)
    108   1.1  riastrad {
    109   1.1  riastrad 	unsigned i;
    110   1.1  riastrad 
    111  1.10  riastrad 	if (robj->robj_prealloc) {
    112   1.1  riastrad 		objlist_free(robj->robj_prealloc);
    113  1.10  riastrad 		robj->robj_prealloc = NULL; /* paranoia */
    114  1.10  riastrad 	}
    115   1.1  riastrad 	if (robj->fence) {
    116  1.10  riastrad 		for (i = 0; i < robj->fence->shared_count; i++) {
    117   1.1  riastrad 			dma_fence_put(robj->fence->shared[i]);
    118  1.10  riastrad 			robj->fence->shared[i] = NULL; /* paranoia */
    119  1.10  riastrad 		}
    120   1.1  riastrad 		objlist_free(robj->fence);
    121  1.10  riastrad 		robj->fence = NULL; /* paranoia */
    122   1.1  riastrad 	}
    123  1.10  riastrad 	if (robj->fence_excl) {
    124   1.1  riastrad 		dma_fence_put(robj->fence_excl);
    125  1.10  riastrad 		robj->fence_excl = NULL; /* paranoia */
    126  1.10  riastrad 	}
    127   1.1  riastrad 	ww_mutex_destroy(&robj->lock);
    128   1.1  riastrad }
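
/*
 * Example: reservation object lifecycle
 *
 *	Illustrative sketch, not part of the original code: the minimal
 *	lifecycle described above -- initialize a reservation object,
 *	take and release its lock once, and destroy it.  The function
 *	name example_resv_lifecycle is hypothetical; locking a single
 *	object needs no ww_acquire_ctx, so NULL is passed.
 *
 *		static void
 *		example_resv_lifecycle(void)
 *		{
 *			struct dma_resv robj;
 *
 *			dma_resv_init(&robj);
 *
 *			// No ctx: cannot fail with -EDEADLK/-EALREADY.
 *			if (dma_resv_lock(&robj, NULL) == 0) {
 *				// ... publish or inspect fences ...
 *				dma_resv_unlock(&robj);
 *			}
 *
 *			// Caller must have exclusive access here.
 *			dma_resv_fini(&robj);
 *		}
 */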
    129   1.1  riastrad 
    130   1.1  riastrad /*
    131   1.1  riastrad  * dma_resv_lock(robj, ctx)
    132   1.1  riastrad  *
    133   1.1  riastrad  *	Acquire a reservation object's lock.  Return 0 on success,
    134   1.1  riastrad  *	-EALREADY if caller already holds it, -EDEADLK if a
    135   1.1  riastrad  *	higher-priority owner holds it and the caller must back out and
    136   1.1  riastrad  *	retry.
    137   1.1  riastrad  */
    138   1.1  riastrad int
    139   1.1  riastrad dma_resv_lock(struct dma_resv *robj,
    140   1.1  riastrad     struct ww_acquire_ctx *ctx)
    141   1.1  riastrad {
    142   1.1  riastrad 
    143   1.1  riastrad 	return ww_mutex_lock(&robj->lock, ctx);
    144   1.1  riastrad }
    145   1.1  riastrad 
    146   1.1  riastrad /*
    147   1.2  riastrad  * dma_resv_lock_slow(robj, ctx)
    148   1.2  riastrad  *
    149   1.2  riastrad  *	Acquire a reservation object's lock.  Caller must not hold
    150   1.2  riastrad  *	this lock or any others -- this is to be used in slow paths
    151   1.2  riastrad  *	after dma_resv_lock or dma_resv_lock_interruptible has failed
    152   1.2  riastrad  *	and the caller has backed out all other locks.
    153   1.2  riastrad  */
    154   1.2  riastrad void
    155   1.2  riastrad dma_resv_lock_slow(struct dma_resv *robj,
    156   1.2  riastrad     struct ww_acquire_ctx *ctx)
    157   1.2  riastrad {
    158   1.2  riastrad 
    159   1.2  riastrad 	ww_mutex_lock_slow(&robj->lock, ctx);
    160   1.2  riastrad }
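
/*
 * Example: -EDEADLK backoff with dma_resv_lock_slow
 *
 *	Illustrative sketch, not part of the original code: the backoff
 *	protocol described in dma_resv_lock's comment, for two
 *	reservation objects under one ww_acquire_ctx.  The function
 *	name example_lock_pair is hypothetical; ww_acquire_init/done/
 *	fini are assumed from <linux/ww_mutex.h>.
 *
 *		static int
 *		example_lock_pair(struct dma_resv *a, struct dma_resv *b)
 *		{
 *			struct ww_acquire_ctx ctx;
 *			struct dma_resv *first = a, *second = b, *tmp;
 *			int ret;
 *
 *			ww_acquire_init(&ctx, &reservation_ww_class);
 *
 *			ret = dma_resv_lock(first, &ctx);
 *			if (ret)	// first lock can't return -EDEADLK
 *				goto fail;
 *		retry:	ret = dma_resv_lock(second, &ctx);
 *			if (ret == -EDEADLK) {
 *				// Lost to an older transaction: back
 *				// out, wait for the contended lock,
 *				// and retry with the roles swapped.
 *				dma_resv_unlock(first);
 *				dma_resv_lock_slow(second, &ctx);
 *				tmp = first; first = second; second = tmp;
 *				goto retry;
 *			} else if (ret) {
 *				dma_resv_unlock(first);
 *				goto fail;
 *			}
 *			ww_acquire_done(&ctx);
 *
 *			// ... both a and b are locked; do the update ...
 *
 *			dma_resv_unlock(a);
 *			dma_resv_unlock(b);
 *			ww_acquire_fini(&ctx);
 *			return 0;
 *
 *		fail:	ww_acquire_fini(&ctx);
 *			return ret;
 *		}
 */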
    161   1.2  riastrad 
    162   1.2  riastrad /*
    163   1.1  riastrad  * dma_resv_lock_interruptible(robj, ctx)
    164   1.1  riastrad  *
    165   1.1  riastrad  *	Acquire a reservation object's lock.  Return 0 on success,
    166   1.1  riastrad  *	-EALREADY if caller already holds it, -EDEADLK if a
    167   1.1  riastrad  *	higher-priority owner holds it and the caller must back out and
    168   1.1  riastrad  *	retry, -ERESTART/-EINTR if interrupted.
    169   1.1  riastrad  */
    170   1.1  riastrad int
    171   1.1  riastrad dma_resv_lock_interruptible(struct dma_resv *robj,
    172   1.1  riastrad     struct ww_acquire_ctx *ctx)
    173   1.1  riastrad {
    174   1.1  riastrad 
    175   1.1  riastrad 	return ww_mutex_lock_interruptible(&robj->lock, ctx);
    176   1.1  riastrad }
    177   1.1  riastrad 
    178   1.1  riastrad /*
    179   1.2  riastrad  * dma_resv_lock_slow_interruptible(robj, ctx)
    180   1.2  riastrad  *
    181   1.2  riastrad  *	Acquire a reservation object's lock.  Caller must not hold
    182   1.2  riastrad  *	this lock or any others -- this is to be used in slow paths
    183   1.2  riastrad  *	after dma_resv_lock or dma_resv_lock_interruptible has failed
    184   1.2  riastrad  *	and the caller has backed out all other locks.  Return 0 on
    185   1.2  riastrad  *	success, -ERESTART/-EINTR if interrupted.
    186   1.2  riastrad  */
    187   1.2  riastrad int
    188   1.2  riastrad dma_resv_lock_slow_interruptible(struct dma_resv *robj,
    189   1.2  riastrad     struct ww_acquire_ctx *ctx)
    190   1.2  riastrad {
    191   1.2  riastrad 
    192   1.2  riastrad 	return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
    193   1.2  riastrad }
    194   1.2  riastrad 
    195   1.2  riastrad /*
    196   1.1  riastrad  * dma_resv_trylock(robj)
    197   1.1  riastrad  *
    198   1.1  riastrad  *	Try to acquire a reservation object's lock without blocking.
    199   1.1  riastrad  *	Return true on success, false on failure.
    200   1.1  riastrad  */
    201   1.1  riastrad bool
    202   1.1  riastrad dma_resv_trylock(struct dma_resv *robj)
    203   1.1  riastrad {
    204   1.1  riastrad 
    205   1.1  riastrad 	return ww_mutex_trylock(&robj->lock);
    206   1.1  riastrad }
    207   1.1  riastrad 
    208   1.1  riastrad /*
    209   1.4  riastrad  * dma_resv_locking_ctx(robj)
    210   1.4  riastrad  *
    211   1.4  riastrad  *	Return a pointer to the ww_acquire_ctx used by the owner of
    212   1.4  riastrad  *	the reservation object's lock, or NULL if it is either not
    213   1.4  riastrad  *	owned or if it is locked without context.
    214   1.4  riastrad  */
    215   1.4  riastrad struct ww_acquire_ctx *
    216   1.4  riastrad dma_resv_locking_ctx(struct dma_resv *robj)
    217   1.4  riastrad {
    218   1.4  riastrad 
    219   1.4  riastrad 	return ww_mutex_locking_ctx(&robj->lock);
    220   1.4  riastrad }
    221   1.4  riastrad 
    222   1.4  riastrad /*
    223   1.1  riastrad  * dma_resv_unlock(robj)
    224   1.1  riastrad  *
    225   1.1  riastrad  *	Release a reservation object's lock.
    226   1.1  riastrad  */
    227   1.1  riastrad void
    228   1.1  riastrad dma_resv_unlock(struct dma_resv *robj)
    229   1.1  riastrad {
    230   1.1  riastrad 
    231   1.1  riastrad 	return ww_mutex_unlock(&robj->lock);
    232   1.1  riastrad }
    233   1.1  riastrad 
    234   1.1  riastrad /*
    235  1.11  riastrad  * dma_resv_is_locked(robj)
    236  1.11  riastrad  *
    237  1.11  riastrad  *	True if robj is locked.
    238  1.11  riastrad  */
    239  1.11  riastrad bool
    240  1.11  riastrad dma_resv_is_locked(struct dma_resv *robj)
    241  1.11  riastrad {
    242  1.11  riastrad 
    243  1.11  riastrad 	return ww_mutex_is_locked(&robj->lock);
    244  1.11  riastrad }
    245  1.11  riastrad 
    246  1.11  riastrad /*
    247   1.1  riastrad  * dma_resv_held(robj)
    248   1.1  riastrad  *
    249   1.1  riastrad  *	True if robj is locked.
    250   1.1  riastrad  */
    251   1.1  riastrad bool
    252   1.1  riastrad dma_resv_held(struct dma_resv *robj)
    253   1.1  riastrad {
    254   1.1  riastrad 
    255   1.1  riastrad 	return ww_mutex_is_locked(&robj->lock);
    256   1.1  riastrad }
    257   1.1  riastrad 
    258   1.1  riastrad /*
    259   1.1  riastrad  * dma_resv_assert_held(robj)
    260   1.1  riastrad  *
    261   1.1  riastrad  *	Panic if robj is not held, in DIAGNOSTIC builds.
    262   1.1  riastrad  */
    263   1.1  riastrad void
    264   1.1  riastrad dma_resv_assert_held(struct dma_resv *robj)
    265   1.1  riastrad {
    266   1.1  riastrad 
    267   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    268   1.1  riastrad }
    269   1.1  riastrad 
    270   1.1  riastrad /*
    271   1.1  riastrad  * dma_resv_get_excl(robj)
    272   1.1  riastrad  *
    273   1.1  riastrad  *	Return a pointer to the exclusive fence of the reservation
    274   1.1  riastrad  *	object robj.
    275   1.1  riastrad  *
    276   1.1  riastrad  *	Caller must have robj locked.
    277   1.1  riastrad  */
    278   1.1  riastrad struct dma_fence *
    279   1.1  riastrad dma_resv_get_excl(struct dma_resv *robj)
    280   1.1  riastrad {
    281   1.1  riastrad 
    282   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    283   1.1  riastrad 	return robj->fence_excl;
    284   1.1  riastrad }
    285   1.1  riastrad 
    286   1.1  riastrad /*
    287   1.1  riastrad  * dma_resv_get_list(robj)
    288   1.1  riastrad  *
    289   1.1  riastrad  *	Return a pointer to the shared fence list of the reservation
    290   1.1  riastrad  *	object robj.
    291   1.1  riastrad  *
    292   1.1  riastrad  *	Caller must have robj locked.
    293   1.1  riastrad  */
    294   1.1  riastrad struct dma_resv_list *
    295   1.1  riastrad dma_resv_get_list(struct dma_resv *robj)
    296   1.1  riastrad {
    297   1.1  riastrad 
    298   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    299   1.1  riastrad 	return robj->fence;
    300   1.1  riastrad }
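
/*
 * Example: inspecting fences under the lock
 *
 *	Illustrative sketch, not part of the original code: walking the
 *	fences while holding the reservation lock.  Neither
 *	dma_resv_get_excl nor dma_resv_get_list takes a reference, so
 *	the pointers are only valid until dma_resv_unlock.  The
 *	function name example_count_unsignalled is hypothetical.
 *
 *		static unsigned
 *		example_count_unsignalled(struct dma_resv *robj)
 *		{
 *			const struct dma_resv_list *list;
 *			struct dma_fence *excl;
 *			unsigned i, n = 0;
 *
 *			(void)dma_resv_lock(robj, NULL);
 *
 *			excl = dma_resv_get_excl(robj);
 *			if (excl != NULL && !dma_fence_is_signaled(excl))
 *				n++;
 *
 *			list = dma_resv_get_list(robj);
 *			for (i = 0; list != NULL && i < list->shared_count;
 *			     i++) {
 *				if (!dma_fence_is_signaled(list->shared[i]))
 *					n++;
 *			}
 *
 *			dma_resv_unlock(robj);
 *			return n;
 *		}
 */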
    301   1.1  riastrad 
    302   1.1  riastrad /*
     303   1.1  riastrad  * dma_resv_reserve_shared(robj, num_fences)
    304   1.1  riastrad  *
    305   1.1  riastrad  *	Reserve space in robj to add a shared fence.  To be used only
    306   1.1  riastrad  *	once before calling dma_resv_add_shared_fence.
    307   1.1  riastrad  *
    308   1.1  riastrad  *	Caller must have robj locked.
    309   1.1  riastrad  *
    310   1.1  riastrad  *	Internally, we start with room for four entries and double if
    311   1.1  riastrad  *	we don't have enough.  This is not guaranteed.
    312   1.1  riastrad  */
    313   1.1  riastrad int
    314   1.3  riastrad dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
    315   1.1  riastrad {
    316   1.1  riastrad 	struct dma_resv_list *list, *prealloc;
    317   1.1  riastrad 	uint32_t n, nalloc;
    318   1.1  riastrad 
    319   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    320   1.3  riastrad 	KASSERT(num_fences == 1);
    321   1.1  riastrad 
    322   1.1  riastrad 	list = robj->fence;
    323   1.1  riastrad 	prealloc = robj->robj_prealloc;
    324   1.1  riastrad 
    325   1.1  riastrad 	/* If there's an existing list, check it for space.  */
    326   1.1  riastrad 	if (list) {
    327   1.1  riastrad 		/* If there's too many already, give up.  */
    328   1.1  riastrad 		if (list->shared_count == UINT32_MAX)
    329   1.1  riastrad 			return -ENOMEM;
    330   1.1  riastrad 
    331   1.1  riastrad 		/* Add one more. */
    332   1.1  riastrad 		n = list->shared_count + 1;
    333   1.1  riastrad 
    334   1.1  riastrad 		/* If there's enough for one more, we're done.  */
    335   1.1  riastrad 		if (n <= list->shared_max)
    336   1.1  riastrad 			return 0;
    337   1.1  riastrad 	} else {
    338   1.1  riastrad 		/* No list already.  We need space for 1.  */
    339   1.1  riastrad 		n = 1;
    340   1.1  riastrad 	}
    341   1.1  riastrad 
    342   1.1  riastrad 	/* If not, maybe there's a preallocated list ready.  */
    343   1.1  riastrad 	if (prealloc != NULL) {
    344   1.1  riastrad 		/* If there's enough room in it, stop here.  */
    345   1.1  riastrad 		if (n <= prealloc->shared_max)
    346   1.1  riastrad 			return 0;
    347   1.1  riastrad 
    348   1.1  riastrad 		/* Try to double its capacity.  */
    349   1.1  riastrad 		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
    350   1.1  riastrad 		prealloc = objlist_tryalloc(nalloc);
    351   1.1  riastrad 		if (prealloc == NULL)
    352   1.1  riastrad 			return -ENOMEM;
    353   1.1  riastrad 
    354   1.1  riastrad 		/* Swap the new preallocated list and free the old one.  */
    355   1.1  riastrad 		objlist_free(robj->robj_prealloc);
    356   1.1  riastrad 		robj->robj_prealloc = prealloc;
    357   1.1  riastrad 	} else {
    358   1.1  riastrad 		/* Start with some spare.  */
    359   1.1  riastrad 		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
    360   1.1  riastrad 		prealloc = objlist_tryalloc(nalloc);
    361   1.1  riastrad 		if (prealloc == NULL)
    362   1.1  riastrad 			return -ENOMEM;
    363   1.1  riastrad 		/* Save the new preallocated list.  */
    364   1.1  riastrad 		robj->robj_prealloc = prealloc;
    365   1.1  riastrad 	}
    366   1.1  riastrad 
    367   1.1  riastrad 	/* Success!  */
    368   1.1  riastrad 	return 0;
    369   1.1  riastrad }
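
/*
 * Example: reserve, then add a shared fence
 *
 *	Illustrative sketch, not part of the original code: the
 *	reserve-then-add protocol described above, done under the
 *	reservation lock; dma_resv_add_shared_fence is defined later in
 *	this file.  The function name example_publish_shared is
 *	hypothetical.
 *
 *		static int
 *		example_publish_shared(struct dma_resv *robj,
 *		    struct dma_fence *fence)
 *		{
 *			int ret;
 *
 *			ret = dma_resv_lock(robj, NULL);
 *			if (ret)
 *				return ret;
 *			ret = dma_resv_reserve_shared(robj, 1);
 *			if (ret == 0)
 *				dma_resv_add_shared_fence(robj, fence);
 *			dma_resv_unlock(robj);
 *			return ret;
 *		}
 */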
    370   1.1  riastrad 
    371   1.1  riastrad struct dma_resv_write_ticket {
    372   1.1  riastrad };
    373   1.1  riastrad 
    374   1.1  riastrad /*
    375   1.1  riastrad  * dma_resv_write_begin(robj, ticket)
    376   1.1  riastrad  *
    377   1.1  riastrad  *	Begin an atomic batch of writes to robj, and initialize opaque
    378   1.1  riastrad  *	ticket for it.  The ticket must be passed to
    379   1.1  riastrad  *	dma_resv_write_commit to commit the writes.
    380   1.1  riastrad  *
    381   1.1  riastrad  *	Caller must have robj locked.
    382   1.1  riastrad  *
    383   1.1  riastrad  *	Implies membar_producer, i.e. store-before-store barrier.  Does
    384   1.1  riastrad  *	NOT serve as an acquire operation, however.
    385   1.1  riastrad  */
    386   1.1  riastrad static void
    387   1.1  riastrad dma_resv_write_begin(struct dma_resv *robj,
    388   1.1  riastrad     struct dma_resv_write_ticket *ticket)
    389   1.1  riastrad {
    390   1.1  riastrad 
    391   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    392   1.1  riastrad 
    393   1.1  riastrad 	write_seqcount_begin(&robj->seq);
    394   1.1  riastrad }
    395   1.1  riastrad 
    396   1.1  riastrad /*
    397   1.1  riastrad  * dma_resv_write_commit(robj, ticket)
    398   1.1  riastrad  *
    399   1.1  riastrad  *	Commit an atomic batch of writes to robj begun with the call to
    400   1.1  riastrad  *	dma_resv_write_begin that returned ticket.
    401   1.1  riastrad  *
    402   1.1  riastrad  *	Caller must have robj locked.
    403   1.1  riastrad  *
    404   1.1  riastrad  *	Implies membar_producer, i.e. store-before-store barrier.  Does
    405   1.1  riastrad  *	NOT serve as a release operation, however.
    406   1.1  riastrad  */
    407   1.1  riastrad static void
    408   1.1  riastrad dma_resv_write_commit(struct dma_resv *robj,
    409   1.1  riastrad     struct dma_resv_write_ticket *ticket)
    410   1.1  riastrad {
    411   1.1  riastrad 
    412   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    413   1.1  riastrad 
    414   1.1  riastrad 	write_seqcount_end(&robj->seq);
    415   1.1  riastrad }
    416   1.1  riastrad 
    417   1.1  riastrad struct dma_resv_read_ticket {
    418   1.1  riastrad 	unsigned version;
    419   1.1  riastrad };
    420   1.1  riastrad 
    421   1.1  riastrad /*
    422   1.1  riastrad  * dma_resv_read_begin(robj, ticket)
    423   1.1  riastrad  *
    424   1.1  riastrad  *	Begin a read section, and initialize opaque ticket for it.  The
     425   1.1  riastrad  *	ticket must be passed to dma_resv_read_valid, and the
    426   1.1  riastrad  *	caller must be prepared to retry reading if it fails.
    427   1.1  riastrad  */
    428   1.1  riastrad static void
    429   1.1  riastrad dma_resv_read_begin(const struct dma_resv *robj,
    430   1.1  riastrad     struct dma_resv_read_ticket *ticket)
    431   1.1  riastrad {
    432   1.1  riastrad 
    433   1.1  riastrad 	ticket->version = read_seqcount_begin(&robj->seq);
    434   1.1  riastrad }
    435   1.1  riastrad 
    436   1.1  riastrad /*
    437   1.1  riastrad  * dma_resv_read_valid(robj, ticket)
    438   1.1  riastrad  *
    439   1.1  riastrad  *	Test whether the read sections are valid.  Return true on
    440   1.1  riastrad  *	success, or false on failure if the read ticket has been
    441   1.1  riastrad  *	invalidated.
    442   1.1  riastrad  */
    443   1.1  riastrad static bool
    444   1.1  riastrad dma_resv_read_valid(const struct dma_resv *robj,
    445   1.1  riastrad     struct dma_resv_read_ticket *ticket)
    446   1.1  riastrad {
    447   1.1  riastrad 
    448   1.1  riastrad 	return !read_seqcount_retry(&robj->seq, ticket->version);
    449   1.1  riastrad }
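
/*
 * Example: seqcount read-retry pattern
 *
 *	Illustrative sketch, not part of the original code: the
 *	read-retry loop these helpers are used for, as in the RCU
 *	reader routines later in this file.  The fragment assumes a
 *	struct dma_resv *robj, a struct dma_resv_read_ticket ticket,
 *	and a struct dma_fence *fence in scope.
 *
 *	top:	rcu_read_lock();
 *		dma_resv_read_begin(robj, &ticket);
 *		fence = atomic_load_consume(&robj->fence_excl);
 *		if (!dma_resv_read_valid(robj, &ticket)) {
 *			// A writer intervened; start over.
 *			rcu_read_unlock();
 *			goto top;
 *		}
 *		// ... fence is a consistent snapshot; take a reference
 *		//     (e.g. dma_fence_get_rcu) before leaving the RCU
 *		//     read section if it must outlive it ...
 *		rcu_read_unlock();
 */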
    450   1.1  riastrad 
    451   1.1  riastrad /*
    452  1.12  riastrad  * dma_resv_get_shared_reader(robj, listp, shared_countp, ticket)
    453  1.12  riastrad  *
    454  1.12  riastrad  *	Set *listp and *shared_countp to a snapshot of the pointer to
    455  1.12  riastrad  *	and length of the shared fence list of robj and return true, or
    456  1.12  riastrad  *	set them to NULL/0 and return false if a writer intervened so
    457  1.12  riastrad  *	the caller must start over.
    458  1.12  riastrad  *
    459  1.12  riastrad  *	Both *listp and *shared_countp are unconditionally initialized
    460  1.12  riastrad  *	on return.  They may be NULL/0 even on success, if there is no
    461  1.12  riastrad  *	shared list at the moment.  Does not take any fence references.
    462  1.12  riastrad  */
    463  1.12  riastrad static bool
    464  1.12  riastrad dma_resv_get_shared_reader(const struct dma_resv *robj,
    465  1.12  riastrad     const struct dma_resv_list **listp, unsigned *shared_countp,
    466  1.12  riastrad     struct dma_resv_read_ticket *ticket)
    467  1.12  riastrad {
    468  1.12  riastrad 	struct dma_resv_list *list;
    469  1.12  riastrad 	unsigned shared_count = 0;
    470  1.12  riastrad 
    471  1.12  riastrad 	/*
    472  1.12  riastrad 	 * Get the list and, if it is present, its length.  If the list
    473  1.12  riastrad 	 * is present, it has a valid length.  The atomic_load_consume
    474  1.12  riastrad 	 * pairs with the membar_producer in dma_resv_write_begin.
    475  1.12  riastrad 	 */
    476  1.12  riastrad 	list = atomic_load_consume(&robj->fence);
    477  1.12  riastrad 	shared_count = list ? atomic_load_relaxed(&list->shared_count) : 0;
    478  1.12  riastrad 
    479  1.12  riastrad 	/*
    480  1.12  riastrad 	 * We are done reading from robj and list.  Validate our
    481  1.12  riastrad 	 * parking ticket.  If it's invalid, do not pass go and do not
    482  1.12  riastrad 	 * collect $200.
    483  1.12  riastrad 	 */
    484  1.12  riastrad 	if (!dma_resv_read_valid(robj, ticket))
    485  1.12  riastrad 		goto fail;
    486  1.12  riastrad 
    487  1.12  riastrad 	/* Success!  */
    488  1.12  riastrad 	*listp = list;
    489  1.12  riastrad 	*shared_countp = shared_count;
    490  1.12  riastrad 	return true;
    491  1.12  riastrad 
    492  1.12  riastrad fail:	*listp = NULL;
    493  1.12  riastrad 	*shared_countp = 0;
    494  1.12  riastrad 	return false;
    495  1.12  riastrad }
    496  1.12  riastrad 
    497  1.12  riastrad /*
    498  1.12  riastrad  * dma_resv_get_excl_reader(robj, fencep, ticket)
    499  1.12  riastrad  *
    500  1.12  riastrad  *	Set *fencep to the exclusive fence of robj and return true, or
    501  1.12  riastrad  *	set it to NULL and return false if either
    502  1.12  riastrad  *	(a) a writer intervened, or
    503  1.12  riastrad  *	(b) the fence is scheduled to be destroyed after this RCU grace
    504  1.12  riastrad  *	    period,
    505  1.12  riastrad  *	in either case meaning the caller must restart.
    506  1.12  riastrad  *
    507  1.12  riastrad  *	The value of *fencep is unconditionally initialized on return.
    508  1.12  riastrad  *	It may be NULL, if there is no exclusive fence at the moment.
    509  1.12  riastrad  *	If nonnull, *fencep is referenced; caller must dma_fence_put.
    510  1.12  riastrad  */
    511  1.12  riastrad static bool
    512  1.12  riastrad dma_resv_get_excl_reader(const struct dma_resv *robj,
    513  1.12  riastrad     struct dma_fence **fencep,
    514  1.12  riastrad     struct dma_resv_read_ticket *ticket)
    515  1.12  riastrad {
    516  1.12  riastrad 	struct dma_fence *fence;
    517  1.12  riastrad 
    518  1.12  riastrad 	/*
    519  1.12  riastrad 	 * Get the candidate fence pointer.  The atomic_load_consume
     520  1.12  riastrad 	 * pairs with the membar_producer in dma_resv_write_begin.
    521  1.12  riastrad 	 */
    522  1.12  riastrad 	fence = atomic_load_consume(&robj->fence_excl);
    523  1.12  riastrad 
    524  1.12  riastrad 	/*
    525  1.12  riastrad 	 * The load of robj->fence_excl is atomic, but the caller may
    526  1.12  riastrad 	 * have previously loaded the shared fence list and should
    527  1.12  riastrad 	 * restart if its view of the entire dma_resv object is not a
    528  1.12  riastrad 	 * consistent snapshot.
    529  1.12  riastrad 	 */
    530  1.12  riastrad 	if (!dma_resv_read_valid(robj, ticket))
    531  1.12  riastrad 		goto fail;
    532  1.12  riastrad 
    533  1.12  riastrad 	/*
     534  1.12  riastrad 	 * If the fence is already scheduled to go away after this RCU
    535  1.12  riastrad 	 * read section, give up.  Otherwise, take a reference so it
    536  1.12  riastrad 	 * won't go away until after dma_fence_put.
    537  1.12  riastrad 	 */
    538  1.12  riastrad 	if (fence != NULL &&
    539  1.12  riastrad 	    (fence = dma_fence_get_rcu(fence)) == NULL)
    540  1.12  riastrad 		goto fail;
    541  1.12  riastrad 
    542  1.12  riastrad 	/* Success!  */
    543  1.12  riastrad 	*fencep = fence;
    544  1.12  riastrad 	return true;
    545  1.12  riastrad 
    546  1.12  riastrad fail:	*fencep = NULL;
    547  1.12  riastrad 	return false;
    548  1.12  riastrad }
    549  1.12  riastrad 
    550  1.12  riastrad /*
    551   1.1  riastrad  * dma_resv_add_excl_fence(robj, fence)
    552   1.1  riastrad  *
    553   1.1  riastrad  *	Empty and release all of robj's shared fences, and clear and
    554   1.1  riastrad  *	release its exclusive fence.  If fence is nonnull, acquire a
    555   1.1  riastrad  *	reference to it and save it as robj's exclusive fence.
    556   1.1  riastrad  *
    557   1.1  riastrad  *	Caller must have robj locked.
    558   1.1  riastrad  */
    559   1.1  riastrad void
    560   1.1  riastrad dma_resv_add_excl_fence(struct dma_resv *robj,
    561   1.1  riastrad     struct dma_fence *fence)
    562   1.1  riastrad {
    563   1.1  riastrad 	struct dma_fence *old_fence = robj->fence_excl;
    564   1.1  riastrad 	struct dma_resv_list *old_list = robj->fence;
    565   1.1  riastrad 	uint32_t old_shared_count;
    566   1.1  riastrad 	struct dma_resv_write_ticket ticket;
    567   1.1  riastrad 
    568   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    569   1.1  riastrad 
    570   1.1  riastrad 	/*
    571   1.1  riastrad 	 * If we are setting rather than just removing a fence, acquire
    572   1.1  riastrad 	 * a reference for ourselves.
    573   1.1  riastrad 	 */
    574   1.1  riastrad 	if (fence)
    575   1.1  riastrad 		(void)dma_fence_get(fence);
    576   1.1  riastrad 
    577   1.1  riastrad 	/* If there are any shared fences, remember how many.  */
    578   1.1  riastrad 	if (old_list)
    579   1.1  riastrad 		old_shared_count = old_list->shared_count;
    580   1.1  riastrad 
    581   1.7  riastrad 	/* Begin an update.  Implies membar_producer for fence.  */
    582   1.1  riastrad 	dma_resv_write_begin(robj, &ticket);
    583   1.1  riastrad 
    584   1.1  riastrad 	/* Replace the fence and zero the shared count.  */
    585   1.7  riastrad 	atomic_store_relaxed(&robj->fence_excl, fence);
    586   1.1  riastrad 	if (old_list)
    587   1.1  riastrad 		old_list->shared_count = 0;
    588   1.1  riastrad 
    589   1.1  riastrad 	/* Commit the update.  */
    590   1.1  riastrad 	dma_resv_write_commit(robj, &ticket);
    591   1.1  riastrad 
    592   1.1  riastrad 	/* Release the old exclusive fence, if any.  */
    593  1.10  riastrad 	if (old_fence) {
    594   1.1  riastrad 		dma_fence_put(old_fence);
    595  1.10  riastrad 		old_fence = NULL; /* paranoia */
    596  1.10  riastrad 	}
    597   1.1  riastrad 
    598   1.1  riastrad 	/* Release any old shared fences.  */
    599   1.1  riastrad 	if (old_list) {
    600  1.10  riastrad 		while (old_shared_count--) {
    601   1.1  riastrad 			dma_fence_put(old_list->shared[old_shared_count]);
    602  1.10  riastrad 			/* paranoia */
    603  1.10  riastrad 			old_list->shared[old_shared_count] = NULL;
    604  1.10  riastrad 		}
    605   1.1  riastrad 	}
    606   1.1  riastrad }
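
/*
 * Example: publishing an exclusive fence
 *
 *	Illustrative sketch, not part of the original code: installing
 *	an exclusive fence, e.g. for a job that will write the buffer,
 *	under the reservation lock.  The function name
 *	example_publish_excl is hypothetical.
 *
 *		static int
 *		example_publish_excl(struct dma_resv *robj,
 *		    struct dma_fence *fence)
 *		{
 *			int ret;
 *
 *			ret = dma_resv_lock(robj, NULL);
 *			if (ret)
 *				return ret;
 *			// Takes its own reference to fence, drops the
 *			// old exclusive fence and all shared fences.
 *			dma_resv_add_excl_fence(robj, fence);
 *			dma_resv_unlock(robj);
 *			return 0;
 *		}
 */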
    607   1.1  riastrad 
    608   1.1  riastrad /*
    609   1.1  riastrad  * dma_resv_add_shared_fence(robj, fence)
    610   1.1  riastrad  *
    611   1.1  riastrad  *	Acquire a reference to fence and add it to robj's shared list.
    612   1.1  riastrad  *	If any fence was already added with the same context number,
    613   1.1  riastrad  *	release it and replace it by this one.
    614   1.1  riastrad  *
    615   1.1  riastrad  *	Caller must have robj locked, and must have preceded with a
    616   1.1  riastrad  *	call to dma_resv_reserve_shared for each shared fence
    617   1.1  riastrad  *	added.
    618   1.1  riastrad  */
    619   1.1  riastrad void
    620   1.1  riastrad dma_resv_add_shared_fence(struct dma_resv *robj,
    621   1.1  riastrad     struct dma_fence *fence)
    622   1.1  riastrad {
    623   1.1  riastrad 	struct dma_resv_list *list = robj->fence;
    624   1.1  riastrad 	struct dma_resv_list *prealloc = robj->robj_prealloc;
    625   1.1  riastrad 	struct dma_resv_write_ticket ticket;
    626   1.1  riastrad 	struct dma_fence *replace = NULL;
    627   1.1  riastrad 	uint32_t i;
    628   1.1  riastrad 
    629   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    630   1.1  riastrad 
    631   1.1  riastrad 	/* Acquire a reference to the fence.  */
    632   1.1  riastrad 	KASSERT(fence != NULL);
    633   1.1  riastrad 	(void)dma_fence_get(fence);
    634   1.1  riastrad 
    635   1.1  riastrad 	/* Check for a preallocated replacement list.  */
    636   1.1  riastrad 	if (prealloc == NULL) {
    637   1.1  riastrad 		/*
    638   1.1  riastrad 		 * If there is no preallocated replacement list, then
    639   1.1  riastrad 		 * there must be room in the current list.
    640   1.1  riastrad 		 */
    641   1.1  riastrad 		KASSERT(list != NULL);
    642   1.1  riastrad 		KASSERT(list->shared_count < list->shared_max);
    643   1.1  riastrad 
    644   1.1  riastrad 		/* Begin an update.  Implies membar_producer for fence.  */
    645   1.1  riastrad 		dma_resv_write_begin(robj, &ticket);
    646   1.1  riastrad 
    647   1.1  riastrad 		/* Find a fence with the same context number.  */
    648   1.1  riastrad 		for (i = 0; i < list->shared_count; i++) {
    649   1.1  riastrad 			if (list->shared[i]->context == fence->context) {
    650   1.1  riastrad 				replace = list->shared[i];
    651   1.7  riastrad 				atomic_store_relaxed(&list->shared[i], fence);
    652   1.1  riastrad 				break;
    653   1.1  riastrad 			}
    654   1.1  riastrad 		}
    655   1.1  riastrad 
    656   1.1  riastrad 		/* If we didn't find one, add it at the end.  */
    657   1.7  riastrad 		if (i == list->shared_count) {
    658   1.7  riastrad 			atomic_store_relaxed(&list->shared[list->shared_count],
    659   1.7  riastrad 			    fence);
    660   1.7  riastrad 			atomic_store_relaxed(&list->shared_count,
    661   1.7  riastrad 			    list->shared_count + 1);
    662   1.7  riastrad 		}
    663   1.1  riastrad 
    664   1.1  riastrad 		/* Commit the update.  */
    665   1.1  riastrad 		dma_resv_write_commit(robj, &ticket);
    666   1.1  riastrad 	} else {
    667   1.1  riastrad 		/*
    668   1.1  riastrad 		 * There is a preallocated replacement list.  There may
    669   1.1  riastrad 		 * not be a current list.  If not, treat it as a zero-
    670   1.1  riastrad 		 * length list.
    671   1.1  riastrad 		 */
    672   1.1  riastrad 		uint32_t shared_count = (list == NULL? 0 : list->shared_count);
    673   1.1  riastrad 
    674   1.1  riastrad 		/* There had better be room in the preallocated list.  */
    675   1.1  riastrad 		KASSERT(shared_count < prealloc->shared_max);
    676   1.1  riastrad 
    677   1.1  riastrad 		/*
    678   1.1  riastrad 		 * Copy the fences over, but replace if we find one
    679   1.1  riastrad 		 * with the same context number.
    680   1.1  riastrad 		 */
    681   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
    682   1.1  riastrad 			if (replace == NULL &&
    683   1.1  riastrad 			    list->shared[i]->context == fence->context) {
    684   1.1  riastrad 				replace = list->shared[i];
    685   1.1  riastrad 				prealloc->shared[i] = fence;
    686   1.1  riastrad 			} else {
    687   1.1  riastrad 				prealloc->shared[i] = list->shared[i];
    688   1.1  riastrad 			}
    689   1.1  riastrad 		}
    690   1.1  riastrad 		prealloc->shared_count = shared_count;
    691   1.1  riastrad 
    692   1.1  riastrad 		/* If we didn't find one, add it at the end.  */
    693   1.1  riastrad 		if (replace == NULL)
    694   1.1  riastrad 			prealloc->shared[prealloc->shared_count++] = fence;
    695   1.1  riastrad 
    696   1.1  riastrad 		/*
    697   1.1  riastrad 		 * Now ready to replace the list.  Begin an update.
    698   1.1  riastrad 		 * Implies membar_producer for fence and prealloc.
    699   1.1  riastrad 		 */
    700   1.1  riastrad 		dma_resv_write_begin(robj, &ticket);
    701   1.1  riastrad 
    702   1.1  riastrad 		/* Replace the list.  */
    703   1.7  riastrad 		atomic_store_relaxed(&robj->fence, prealloc);
    704   1.1  riastrad 		robj->robj_prealloc = NULL;
    705   1.1  riastrad 
    706   1.1  riastrad 		/* Commit the update.  */
    707   1.1  riastrad 		dma_resv_write_commit(robj, &ticket);
    708   1.1  riastrad 
    709   1.1  riastrad 		/*
    710   1.1  riastrad 		 * If there is an old list, free it when convenient.
    711   1.1  riastrad 		 * (We are not in a position at this point to sleep
    712   1.1  riastrad 		 * waiting for activity on all CPUs.)
    713   1.1  riastrad 		 */
    714   1.1  riastrad 		if (list)
    715   1.1  riastrad 			objlist_defer_free(list);
    716   1.1  riastrad 	}
    717   1.1  riastrad 
    718   1.1  riastrad 	/* Release a fence if we replaced it.  */
    719  1.10  riastrad 	if (replace) {
    720   1.1  riastrad 		dma_fence_put(replace);
    721  1.10  riastrad 		replace = NULL;	/* paranoia */
    722  1.10  riastrad 	}
    723   1.1  riastrad }
    724   1.1  riastrad 
    725   1.1  riastrad /*
    726   1.1  riastrad  * dma_resv_get_excl_rcu(robj)
    727   1.1  riastrad  *
    728   1.1  riastrad  *	Note: Caller need not call this from an RCU read section.
    729   1.1  riastrad  */
    730   1.1  riastrad struct dma_fence *
    731   1.1  riastrad dma_resv_get_excl_rcu(const struct dma_resv *robj)
    732   1.1  riastrad {
    733   1.1  riastrad 	struct dma_fence *fence;
    734   1.1  riastrad 
    735   1.1  riastrad 	rcu_read_lock();
    736   1.1  riastrad 	fence = dma_fence_get_rcu_safe(&robj->fence_excl);
    737   1.1  riastrad 	rcu_read_unlock();
    738   1.1  riastrad 
    739   1.1  riastrad 	return fence;
    740   1.1  riastrad }
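
/*
 * Example: checking the exclusive fence without the lock
 *
 *	Illustrative sketch, not part of the original code: unlike
 *	dma_resv_get_excl, the fence returned here is referenced, so
 *	the caller must dma_fence_put it.  The function name
 *	example_excl_is_signalled is hypothetical.
 *
 *		static bool
 *		example_excl_is_signalled(const struct dma_resv *robj)
 *		{
 *			struct dma_fence *fence;
 *			bool signalled;
 *
 *			fence = dma_resv_get_excl_rcu(robj);
 *			if (fence == NULL)
 *				return true;	// no exclusive fence
 *			signalled = dma_fence_is_signaled(fence);
 *			dma_fence_put(fence);
 *			return signalled;
 *		}
 */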
    741   1.1  riastrad 
    742   1.1  riastrad /*
    743   1.1  riastrad  * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
    744  1.13  riastrad  *
    745  1.13  riastrad  *	Get a snapshot of the exclusive and shared fences of robj.  The
    746  1.13  riastrad  *	shared fences are returned as a pointer *sharedp to an array,
    747  1.13  riastrad  *	to be freed by the caller with kfree, of *nsharedp elements.
    748  1.13  riastrad  *
    749  1.13  riastrad  *	Returns zero on success, negative (Linux-style) error code on
    750  1.13  riastrad  *	failure.  On failure, *fencep, *nsharedp, and *sharedp are
    751  1.13  riastrad  *	untouched.
    752   1.1  riastrad  */
    753   1.1  riastrad int
    754   1.1  riastrad dma_resv_get_fences_rcu(const struct dma_resv *robj,
    755   1.1  riastrad     struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
    756   1.1  riastrad {
    757  1.10  riastrad 	const struct dma_resv_list *list = NULL;
    758  1.10  riastrad 	struct dma_fence *fence = NULL;
    759   1.1  riastrad 	struct dma_fence **shared = NULL;
    760   1.1  riastrad 	unsigned shared_alloc, shared_count, i;
    761   1.1  riastrad 	struct dma_resv_read_ticket ticket;
    762   1.1  riastrad 
    763  1.10  riastrad top:	KASSERT(fence == NULL);
    764  1.10  riastrad 
    765   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
    766   1.1  riastrad 	rcu_read_lock();
    767   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
    768   1.1  riastrad 
    769  1.12  riastrad 	/* If there is a shared list, grab it.  */
    770  1.12  riastrad 	if (!dma_resv_get_shared_reader(robj, &list, &shared_count, &ticket))
    771  1.12  riastrad 		goto restart;
    772  1.12  riastrad 	if (list != NULL) {
    773   1.1  riastrad 
    774   1.1  riastrad 		/* Check whether we have a buffer.  */
    775   1.1  riastrad 		if (shared == NULL) {
    776   1.1  riastrad 			/*
    777   1.1  riastrad 			 * We don't have a buffer yet.  Try to allocate
    778   1.1  riastrad 			 * one without waiting.
    779   1.1  riastrad 			 */
    780   1.1  riastrad 			shared_alloc = list->shared_max;
    781   1.1  riastrad 			shared = kcalloc(shared_alloc, sizeof(shared[0]),
    782   1.1  riastrad 			    GFP_NOWAIT);
    783   1.1  riastrad 			if (shared == NULL) {
    784   1.1  riastrad 				/*
    785   1.1  riastrad 				 * Couldn't do it immediately.  Back
    786   1.1  riastrad 				 * out of RCU and allocate one with
    787   1.1  riastrad 				 * waiting.
    788   1.1  riastrad 				 */
    789   1.1  riastrad 				rcu_read_unlock();
    790   1.1  riastrad 				shared = kcalloc(shared_alloc,
    791   1.1  riastrad 				    sizeof(shared[0]), GFP_KERNEL);
    792   1.1  riastrad 				if (shared == NULL)
    793   1.1  riastrad 					return -ENOMEM;
    794   1.1  riastrad 				goto top;
    795   1.1  riastrad 			}
    796   1.1  riastrad 		} else if (shared_alloc < list->shared_max) {
    797   1.1  riastrad 			/*
    798   1.1  riastrad 			 * We have a buffer but it's too small.  We're
    799   1.1  riastrad 			 * already racing in this case, so just back
    800   1.1  riastrad 			 * out and wait to allocate a bigger one.
    801   1.1  riastrad 			 */
    802   1.1  riastrad 			shared_alloc = list->shared_max;
    803   1.1  riastrad 			rcu_read_unlock();
    804   1.1  riastrad 			kfree(shared);
    805   1.1  riastrad 			shared = kcalloc(shared_alloc, sizeof(shared[0]),
    806   1.1  riastrad 			    GFP_KERNEL);
    807   1.1  riastrad 			if (shared == NULL)
    808   1.1  riastrad 				return -ENOMEM;
    809   1.1  riastrad 		}
    810   1.1  riastrad 
    811   1.1  riastrad 		/*
    812   1.1  riastrad 		 * We got a buffer large enough.  Copy into the buffer
    813   1.7  riastrad 		 * and record the number of elements.  Could safely use
    814   1.7  riastrad 		 * memcpy here, because even if we race with a writer
    815   1.7  riastrad 		 * it'll invalidate the read ticket and we'll start
     816   1.7  riastrad 		 * over, but atomic_load in a loop will pacify kcsan.
    817   1.7  riastrad 		 */
    818   1.7  riastrad 		for (i = 0; i < shared_count; i++)
    819   1.7  riastrad 			shared[i] = atomic_load_relaxed(&list->shared[i]);
    820   1.1  riastrad 	}
    821   1.1  riastrad 
    822   1.1  riastrad 	/* If there is an exclusive fence, grab it.  */
    823  1.10  riastrad 	KASSERT(fence == NULL);
    824  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
    825   1.1  riastrad 		goto restart;
    826   1.1  riastrad 
    827   1.1  riastrad 	/*
    828   1.1  riastrad 	 * Try to get a reference to all of the shared fences.
    829   1.1  riastrad 	 */
    830   1.1  riastrad 	for (i = 0; i < shared_count; i++) {
    831   1.7  riastrad 		if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL)
    832   1.1  riastrad 			goto put_restart;
    833   1.1  riastrad 	}
    834   1.1  riastrad 
    835   1.1  riastrad 	/* Success!  */
    836   1.1  riastrad 	rcu_read_unlock();
    837   1.1  riastrad 	*fencep = fence;
    838   1.1  riastrad 	*nsharedp = shared_count;
    839   1.1  riastrad 	*sharedp = shared;
    840   1.1  riastrad 	return 0;
    841   1.1  riastrad 
    842   1.1  riastrad put_restart:
    843   1.1  riastrad 	/* Back out.  */
    844   1.1  riastrad 	while (i --> 0) {
    845   1.1  riastrad 		dma_fence_put(shared[i]);
    846   1.1  riastrad 		shared[i] = NULL; /* paranoia */
    847   1.1  riastrad 	}
    848   1.1  riastrad 	if (fence) {
    849   1.1  riastrad 		dma_fence_put(fence);
    850  1.10  riastrad 		fence = NULL;
    851   1.1  riastrad 	}
    852   1.1  riastrad 
    853   1.1  riastrad restart:
    854  1.10  riastrad 	KASSERT(fence == NULL);
    855   1.1  riastrad 	rcu_read_unlock();
    856   1.1  riastrad 	goto top;
    857   1.1  riastrad }
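
/*
 * Example: consuming a fence snapshot
 *
 *	Illustrative sketch, not part of the original code: a caller of
 *	dma_resv_get_fences_rcu must dma_fence_put every returned fence
 *	and kfree the shared array.  The function name
 *	example_all_signalled is hypothetical.
 *
 *		static int
 *		example_all_signalled(const struct dma_resv *robj,
 *		    bool *signalledp)
 *		{
 *			struct dma_fence *excl = NULL, **shared = NULL;
 *			unsigned nshared = 0, i;
 *			bool signalled = true;
 *			int ret;
 *
 *			ret = dma_resv_get_fences_rcu(robj, &excl,
 *			    &nshared, &shared);
 *			if (ret)
 *				return ret;
 *
 *			if (excl != NULL) {
 *				signalled &= dma_fence_is_signaled(excl);
 *				dma_fence_put(excl);
 *			}
 *			for (i = 0; i < nshared; i++) {
 *				signalled &= dma_fence_is_signaled(shared[i]);
 *				dma_fence_put(shared[i]);
 *			}
 *			kfree(shared);
 *
 *			*signalledp = signalled;
 *			return 0;
 *		}
 */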
    858   1.1  riastrad 
    859   1.1  riastrad /*
    860   1.1  riastrad  * dma_resv_copy_fences(dst, src)
    861   1.1  riastrad  *
    862   1.1  riastrad  *	Copy the exclusive fence and all the shared fences from src to
    863   1.1  riastrad  *	dst.
    864   1.1  riastrad  *
    865   1.1  riastrad  *	Caller must have dst locked.
    866   1.1  riastrad  */
    867   1.1  riastrad int
    868   1.1  riastrad dma_resv_copy_fences(struct dma_resv *dst_robj,
    869   1.1  riastrad     const struct dma_resv *src_robj)
    870   1.1  riastrad {
    871   1.1  riastrad 	const struct dma_resv_list *src_list;
    872   1.1  riastrad 	struct dma_resv_list *dst_list = NULL;
    873   1.1  riastrad 	struct dma_resv_list *old_list;
    874   1.1  riastrad 	struct dma_fence *fence = NULL;
    875   1.1  riastrad 	struct dma_fence *old_fence;
    876   1.1  riastrad 	uint32_t shared_count, i;
    877   1.1  riastrad 	struct dma_resv_read_ticket read_ticket;
    878   1.1  riastrad 	struct dma_resv_write_ticket write_ticket;
    879   1.1  riastrad 
    880   1.1  riastrad 	KASSERT(dma_resv_held(dst_robj));
    881   1.1  riastrad 
    882  1.10  riastrad top:	KASSERT(fence == NULL);
    883  1.10  riastrad 
    884   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
    885   1.1  riastrad 	rcu_read_lock();
    886   1.1  riastrad 	dma_resv_read_begin(src_robj, &read_ticket);
    887   1.1  riastrad 
    888   1.1  riastrad 	/* Get the shared list.  */
    889  1.12  riastrad 	if (!dma_resv_get_shared_reader(src_robj, &src_list, &shared_count,
    890  1.12  riastrad 		&read_ticket))
    891  1.12  riastrad 		goto restart;
    892  1.12  riastrad 	if (src_list != NULL) {
    893   1.1  riastrad 		/* Allocate a new list.  */
    894   1.1  riastrad 		dst_list = objlist_tryalloc(shared_count);
		if (dst_list == NULL) {
			rcu_read_unlock();
			return -ENOMEM;
		}
    897   1.1  riastrad 
    898   1.1  riastrad 		/* Copy over all fences that are not yet signalled.  */
    899   1.1  riastrad 		dst_list->shared_count = 0;
    900   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
    901  1.10  riastrad 			KASSERT(fence == NULL);
    902   1.7  riastrad 			fence = atomic_load_relaxed(&src_list->shared[i]);
    903   1.9  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
    904   1.1  riastrad 				goto restart;
    905   1.1  riastrad 			if (dma_fence_is_signaled(fence)) {
    906   1.1  riastrad 				dma_fence_put(fence);
    907   1.1  riastrad 				fence = NULL;
    908   1.1  riastrad 				continue;
    909   1.1  riastrad 			}
    910   1.1  riastrad 			dst_list->shared[dst_list->shared_count++] = fence;
    911   1.1  riastrad 			fence = NULL;
    912   1.1  riastrad 		}
    913   1.1  riastrad 	}
    914   1.1  riastrad 
    915   1.1  riastrad 	/* Get the exclusive fence.  */
    916  1.10  riastrad 	KASSERT(fence == NULL);
    917  1.12  riastrad 	if (!dma_resv_get_excl_reader(src_robj, &fence, &read_ticket))
    918  1.12  riastrad 		goto restart;
    919   1.1  riastrad 
    920   1.1  riastrad 	/* All done with src; exit the RCU read section.  */
    921   1.1  riastrad 	rcu_read_unlock();
    922   1.1  riastrad 
    923   1.1  riastrad 	/*
    924   1.1  riastrad 	 * We now have a snapshot of the shared and exclusive fences of
    925   1.1  riastrad 	 * src_robj and we have acquired references to them so they
    926   1.1  riastrad 	 * won't go away.  Transfer them over to dst_robj, releasing
    927   1.1  riastrad 	 * references to any that were there.
    928   1.1  riastrad 	 */
    929   1.1  riastrad 
    930   1.1  riastrad 	/* Get the old shared and exclusive fences, if any.  */
    931   1.1  riastrad 	old_list = dst_robj->fence;
    932   1.1  riastrad 	old_fence = dst_robj->fence_excl;
    933   1.1  riastrad 
    934   1.7  riastrad 	/*
    935   1.7  riastrad 	 * Begin an update.  Implies membar_producer for dst_list and
    936   1.7  riastrad 	 * fence.
    937   1.7  riastrad 	 */
    938   1.1  riastrad 	dma_resv_write_begin(dst_robj, &write_ticket);
    939   1.1  riastrad 
    940   1.1  riastrad 	/* Replace the fences.  */
    941   1.6  riastrad 	atomic_store_relaxed(&dst_robj->fence, dst_list);
    942   1.6  riastrad 	atomic_store_relaxed(&dst_robj->fence_excl, fence);
    943   1.1  riastrad 
    944   1.1  riastrad 	/* Commit the update.  */
    945   1.1  riastrad 	dma_resv_write_commit(dst_robj, &write_ticket);
    946   1.1  riastrad 
    947   1.1  riastrad 	/* Release the old exclusive fence, if any.  */
    948  1.10  riastrad 	if (old_fence) {
    949   1.1  riastrad 		dma_fence_put(old_fence);
    950  1.10  riastrad 		old_fence = NULL; /* paranoia */
    951  1.10  riastrad 	}
    952   1.1  riastrad 
    953   1.1  riastrad 	/* Release any old shared fences.  */
    954   1.1  riastrad 	if (old_list) {
    955  1.10  riastrad 		for (i = old_list->shared_count; i --> 0;) {
    956   1.1  riastrad 			dma_fence_put(old_list->shared[i]);
    957  1.10  riastrad 			old_list->shared[i] = NULL; /* paranoia */
    958  1.10  riastrad 		}
    959  1.10  riastrad 		objlist_free(old_list);
    960  1.10  riastrad 		old_list = NULL; /* paranoia */
    961   1.1  riastrad 	}
    962   1.1  riastrad 
    963   1.1  riastrad 	/* Success!  */
    964   1.1  riastrad 	return 0;
    965   1.1  riastrad 
    966   1.1  riastrad restart:
    967  1.10  riastrad 	KASSERT(fence == NULL);
    968   1.1  riastrad 	rcu_read_unlock();
    969   1.1  riastrad 	if (dst_list) {
    970   1.1  riastrad 		for (i = dst_list->shared_count; i --> 0;) {
    971   1.1  riastrad 			dma_fence_put(dst_list->shared[i]);
    972  1.10  riastrad 			dst_list->shared[i] = NULL; /* paranoia */
    973   1.1  riastrad 		}
    974   1.1  riastrad 		objlist_free(dst_list);
    975   1.1  riastrad 		dst_list = NULL;
    976   1.1  riastrad 	}
    977   1.1  riastrad 	goto top;
    978   1.1  riastrad }
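
/*
 * Example: mirroring fences between objects
 *
 *	Illustrative sketch, not part of the original code: make dst
 *	track the same work as src, e.g. when one buffer object
 *	replaces another.  Only dst is locked; src is read under RCU.
 *	The function name example_mirror_fences is hypothetical.
 *
 *		static int
 *		example_mirror_fences(struct dma_resv *dst,
 *		    const struct dma_resv *src)
 *		{
 *			int ret;
 *
 *			ret = dma_resv_lock(dst, NULL);
 *			if (ret)
 *				return ret;
 *			ret = dma_resv_copy_fences(dst, src);
 *			dma_resv_unlock(dst);
 *			return ret;
 *		}
 */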
    979   1.1  riastrad 
    980   1.1  riastrad /*
    981   1.1  riastrad  * dma_resv_test_signaled_rcu(robj, shared)
    982   1.1  riastrad  *
    983   1.1  riastrad  *	If shared is true, test whether all of the shared fences are
    984   1.1  riastrad  *	signalled, or if there are none, test whether the exclusive
    985   1.1  riastrad  *	fence is signalled.  If shared is false, test only whether the
    986   1.1  riastrad  *	exclusive fence is signalled.
    987   1.1  riastrad  *
    988   1.1  riastrad  *	XXX Why does this _not_ test the exclusive fence if shared is
    989   1.1  riastrad  *	true only if there are no shared fences?  This makes no sense.
    990   1.1  riastrad  */
    991   1.1  riastrad bool
    992   1.1  riastrad dma_resv_test_signaled_rcu(const struct dma_resv *robj,
    993   1.1  riastrad     bool shared)
    994   1.1  riastrad {
    995   1.1  riastrad 	struct dma_resv_read_ticket ticket;
    996  1.12  riastrad 	const struct dma_resv_list *list;
    997  1.10  riastrad 	struct dma_fence *fence = NULL;
    998   1.1  riastrad 	uint32_t i, shared_count;
    999   1.1  riastrad 	bool signaled = true;
   1000   1.1  riastrad 
   1001  1.10  riastrad top:	KASSERT(fence == NULL);
   1002  1.10  riastrad 
   1003   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1004   1.1  riastrad 	rcu_read_lock();
   1005   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1006   1.1  riastrad 
   1007   1.1  riastrad 	/* If shared is requested and there is a shared list, test it.  */
   1008  1.12  riastrad 	if (shared) {
   1009  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1010  1.12  riastrad 			&ticket))
   1011   1.1  riastrad 			goto restart;
   1012  1.12  riastrad 	} else {
   1013  1.12  riastrad 		list = NULL;
   1014  1.12  riastrad 		shared_count = 0;
   1015  1.12  riastrad 	}
   1016  1.12  riastrad 	if (list != NULL) {
   1017   1.1  riastrad 		/*
   1018   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1019   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1020   1.1  riastrad 		 * it is signalled.  Stop if we find any that is not
   1021   1.1  riastrad 		 * signalled.
   1022   1.1  riastrad 		 */
   1023   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1024  1.10  riastrad 			KASSERT(fence == NULL);
   1025   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1026  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1027   1.1  riastrad 				goto restart;
   1028   1.1  riastrad 			signaled &= dma_fence_is_signaled(fence);
   1029   1.1  riastrad 			dma_fence_put(fence);
   1030  1.10  riastrad 			fence = NULL;
   1031   1.1  riastrad 			if (!signaled)
   1032   1.1  riastrad 				goto out;
   1033   1.1  riastrad 		}
   1034   1.1  riastrad 	}
   1035   1.1  riastrad 
   1036   1.1  riastrad 	/* If there is an exclusive fence, test it.  */
   1037  1.10  riastrad 	KASSERT(fence == NULL);
   1038  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1039  1.12  riastrad 		goto restart;
   1040  1.12  riastrad 	if (fence != NULL) {
   1041  1.12  riastrad 		/* Test whether it is signalled.  If no, stop.  */
   1042   1.1  riastrad 		signaled &= dma_fence_is_signaled(fence);
   1043   1.1  riastrad 		dma_fence_put(fence);
   1044  1.10  riastrad 		fence = NULL;
   1045   1.1  riastrad 		if (!signaled)
   1046   1.1  riastrad 			goto out;
   1047   1.1  riastrad 	}
   1048   1.1  riastrad 
   1049  1.10  riastrad out:	KASSERT(fence == NULL);
   1050  1.10  riastrad 	rcu_read_unlock();
   1051   1.1  riastrad 	return signaled;
   1052   1.1  riastrad 
   1053   1.1  riastrad restart:
   1054  1.10  riastrad 	KASSERT(fence == NULL);
   1055   1.1  riastrad 	rcu_read_unlock();
   1056   1.1  riastrad 	goto top;
   1057   1.1  riastrad }
   1058   1.1  riastrad 
   1059   1.1  riastrad /*
   1060   1.1  riastrad  * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
   1061   1.1  riastrad  *
   1062   1.1  riastrad  *	If shared is true, wait for all of the shared fences to be
   1063   1.1  riastrad  *	signalled, or if there are none, wait for the exclusive fence
   1064   1.1  riastrad  *	to be signalled.  If shared is false, wait only for the
   1065   1.1  riastrad  *	exclusive fence to be signalled.  If timeout is zero, don't
   1066   1.1  riastrad  *	wait, only test.
   1067   1.1  riastrad  *
   1068   1.1  riastrad  *	XXX Why does this _not_ wait for the exclusive fence if shared
   1069   1.1  riastrad  *	is true only if there are no shared fences?  This makes no
   1070   1.1  riastrad  *	sense.
   1071   1.1  riastrad  */
   1072   1.1  riastrad long
   1073   1.1  riastrad dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
   1074   1.1  riastrad     bool shared, bool intr, unsigned long timeout)
   1075   1.1  riastrad {
   1076   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1077  1.12  riastrad 	const struct dma_resv_list *list;
   1078  1.10  riastrad 	struct dma_fence *fence = NULL;
   1079   1.1  riastrad 	uint32_t i, shared_count;
   1080   1.1  riastrad 	long ret;
   1081   1.1  riastrad 
   1082   1.1  riastrad 	if (timeout == 0)
   1083   1.1  riastrad 		return dma_resv_test_signaled_rcu(robj, shared);
   1084   1.1  riastrad 
   1085  1.10  riastrad top:	KASSERT(fence == NULL);
   1086  1.10  riastrad 
   1087   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1088   1.1  riastrad 	rcu_read_lock();
   1089   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1090   1.1  riastrad 
   1091   1.1  riastrad 	/* If shared is requested and there is a shared list, wait on it.  */
   1092  1.12  riastrad 	if (shared) {
   1093  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1094  1.12  riastrad 			&ticket))
   1095   1.1  riastrad 			goto restart;
   1096  1.12  riastrad 	} else {
   1097  1.12  riastrad 		list = NULL;
   1098  1.12  riastrad 		shared_count = 0;
   1099  1.12  riastrad 	}
   1100  1.12  riastrad 	if (list != NULL) {
   1101   1.1  riastrad 		/*
   1102   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1103   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1104   1.1  riastrad 		 * it is signalled.  Stop and wait if we find any that
   1105   1.1  riastrad 		 * is not signalled.
   1106   1.1  riastrad 		 */
   1107   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1108  1.10  riastrad 			KASSERT(fence == NULL);
   1109   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1110  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1111   1.1  riastrad 				goto restart;
   1112   1.1  riastrad 			if (!dma_fence_is_signaled(fence))
   1113   1.1  riastrad 				goto wait;
   1114   1.1  riastrad 			dma_fence_put(fence);
   1115  1.10  riastrad 			fence = NULL;
   1116   1.1  riastrad 		}
   1117   1.1  riastrad 	}
   1118   1.1  riastrad 
   1119   1.1  riastrad 	/* If there is an exclusive fence, test it.  */
   1120  1.10  riastrad 	KASSERT(fence == NULL);
   1121  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1122  1.12  riastrad 		goto restart;
   1123  1.12  riastrad 	if (fence != NULL) {
   1124  1.12  riastrad 		/* Test whether it is signalled.  If no, wait.  */
   1125   1.1  riastrad 		if (!dma_fence_is_signaled(fence))
   1126   1.1  riastrad 			goto wait;
   1127   1.1  riastrad 		dma_fence_put(fence);
   1128  1.10  riastrad 		fence = NULL;
   1129   1.1  riastrad 	}
   1130   1.1  riastrad 
   1131   1.1  riastrad 	/* Success!  Return the number of ticks left.  */
   1132   1.1  riastrad 	rcu_read_unlock();
   1133  1.10  riastrad 	KASSERT(fence == NULL);
   1134   1.1  riastrad 	return timeout;
   1135   1.1  riastrad 
   1136   1.1  riastrad restart:
   1137  1.10  riastrad 	KASSERT(fence == NULL);
   1138   1.1  riastrad 	rcu_read_unlock();
   1139   1.1  riastrad 	goto top;
   1140   1.1  riastrad 
   1141   1.1  riastrad wait:
   1142   1.1  riastrad 	/*
   1143   1.5  riastrad 	 * Exit the RCU read section, wait for it, and release the
   1144   1.5  riastrad 	 * fence when we're done.  If we time out or fail, bail.
   1145   1.5  riastrad 	 * Otherwise, go back to the top.
   1146   1.1  riastrad 	 */
   1147   1.1  riastrad 	KASSERT(fence != NULL);
   1148   1.1  riastrad 	rcu_read_unlock();
   1149   1.1  riastrad 	ret = dma_fence_wait_timeout(fence, intr, timeout);
   1150   1.1  riastrad 	dma_fence_put(fence);
   1151  1.10  riastrad 	fence = NULL;
   1152   1.1  riastrad 	if (ret <= 0)
   1153   1.1  riastrad 		return ret;
   1154   1.1  riastrad 	KASSERT(ret <= timeout);
   1155   1.1  riastrad 	timeout = ret;
   1156   1.1  riastrad 	goto top;
   1157   1.1  riastrad }
   1158   1.1  riastrad 
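                         /*
                          * Illustrative sketch -- not part of this file.  A
                          * hypothetical caller that wants to idle a buffer object
                          * before CPU access could wait for all fences, shared and
                          * exclusive, with an interruptible timeout in ticks, using
                          * the Linux-style entry point implemented above (assumed
                          * here to be dma_resv_wait_timeout_rcu); obj and the error
                          * policy are made up for illustration.
                          *
                          *	long ret;
                          *
                          *	// shared=true: wait for the shared fences too
                          *	// intr=true: interruptible
                          *	ret = dma_resv_wait_timeout_rcu(obj->resv, true, true,
                          *	    5*hz);
                          *	if (ret < 0)
                          *		return ret;		// error or interrupted
                          *	if (ret == 0)
                          *		return -ETIMEDOUT;	// still busy after 5s
                          *	// all fences signalled: CPU access is now safe
                          */
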
   1159   1.1  riastrad /*
    1160   1.1  riastrad  * dma_resv_poll_init(rpoll)
   1161   1.1  riastrad  *
   1162   1.1  riastrad  *	Initialize reservation poll state.
   1163   1.1  riastrad  */
   1164   1.1  riastrad void
   1165   1.1  riastrad dma_resv_poll_init(struct dma_resv_poll *rpoll)
   1166   1.1  riastrad {
   1167   1.1  riastrad 
   1168   1.1  riastrad 	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
   1169   1.1  riastrad 	selinit(&rpoll->rp_selq);
   1170   1.1  riastrad 	rpoll->rp_claimed = 0;
   1171   1.1  riastrad }
   1172   1.1  riastrad 
   1173   1.1  riastrad /*
   1174   1.1  riastrad  * dma_resv_poll_fini(rpoll)
   1175   1.1  riastrad  *
    1176   1.1  riastrad  *	Release any resources associated with reservation poll state.
   1177   1.1  riastrad  */
   1178   1.1  riastrad void
   1179   1.1  riastrad dma_resv_poll_fini(struct dma_resv_poll *rpoll)
   1180   1.1  riastrad {
   1181   1.1  riastrad 
   1182   1.1  riastrad 	KASSERT(rpoll->rp_claimed == 0);
   1183   1.1  riastrad 	seldestroy(&rpoll->rp_selq);
   1184   1.1  riastrad 	mutex_destroy(&rpoll->rp_lock);
   1185   1.1  riastrad }
   1186   1.1  riastrad 
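                         /*
                          * Illustrative sketch -- not part of this file.  The poll
                          * state is normally embedded next to the reservation object
                          * it serves and is set up and torn down together with it.
                          * The softc layout and the mydrv_* / sc_* names below are
                          * hypothetical.
                          *
                          *	struct mydrv_softc {
                          *		struct dma_resv		sc_resv;
                          *		struct dma_resv_poll	sc_rpoll;
                          *		...
                          *	};
                          *
                          *	// attach
                          *	dma_resv_init(&sc->sc_resv);
                          *	dma_resv_poll_init(&sc->sc_rpoll);
                          *
                          *	// detach -- by now no poller may still have the
                          *	// callback claimed (see the KASSERT above)
                          *	dma_resv_poll_fini(&sc->sc_rpoll);
                          *	dma_resv_fini(&sc->sc_resv);
                          */
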
   1187   1.1  riastrad /*
   1188   1.1  riastrad  * dma_resv_poll_cb(fence, fcb)
   1189   1.1  riastrad  *
   1190   1.1  riastrad  *	Callback to notify a reservation poll that a fence has
   1191   1.1  riastrad  *	completed.  Notify any waiters and allow the next poller to
   1192   1.1  riastrad  *	claim the callback.
   1193   1.1  riastrad  *
   1194   1.1  riastrad  *	If one thread is waiting for the exclusive fence only, and we
    1195   1.1  riastrad  *	spuriously notify it about a shared fence, tough.
   1196   1.1  riastrad  */
   1197   1.1  riastrad static void
   1198   1.1  riastrad dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
   1199   1.1  riastrad {
   1200   1.1  riastrad 	struct dma_resv_poll *rpoll = container_of(fcb,
   1201   1.1  riastrad 	    struct dma_resv_poll, rp_fcb);
   1202   1.1  riastrad 
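                         	/*
                         	 * Note: fence may be NULL -- dma_resv_do_poll calls this
                         	 * function directly, without a fence, to simulate a
                         	 * callback that it claimed but, because the fences were
                         	 * already signalled, never actually requested.  The fence
                         	 * argument is therefore never examined here.
                         	 */
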
   1203   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1204   1.1  riastrad 	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
   1205   1.1  riastrad 	rpoll->rp_claimed = 0;
   1206   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1207   1.1  riastrad }
   1208   1.1  riastrad 
   1209   1.1  riastrad /*
   1210   1.1  riastrad  * dma_resv_do_poll(robj, events, rpoll)
   1211   1.1  riastrad  *
   1212   1.1  riastrad  *	Poll for reservation object events using the reservation poll
   1213   1.1  riastrad  *	state in rpoll:
   1214   1.1  riastrad  *
    1215   1.1  riastrad  *	- POLLOUT	wait for all fences, shared and exclusive
   1216   1.1  riastrad  *	- POLLIN	wait for the exclusive fence
   1217   1.1  riastrad  *
   1218   1.1  riastrad  *	Return the subset of events in events that are ready.  If any
   1219   1.1  riastrad  *	are requested but not ready, arrange to be notified with
   1220   1.1  riastrad  *	selnotify when they are.
   1221   1.1  riastrad  */
   1222   1.1  riastrad int
   1223   1.1  riastrad dma_resv_do_poll(const struct dma_resv *robj, int events,
   1224   1.1  riastrad     struct dma_resv_poll *rpoll)
   1225   1.1  riastrad {
   1226   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1227  1.12  riastrad 	const struct dma_resv_list *list;
   1228  1.10  riastrad 	struct dma_fence *fence = NULL;
   1229   1.1  riastrad 	uint32_t i, shared_count;
   1230   1.1  riastrad 	int revents;
   1231   1.1  riastrad 	bool recorded = false;	/* curlwp is on the selq */
   1232   1.1  riastrad 	bool claimed = false;	/* we claimed the callback */
   1233   1.1  riastrad 	bool callback = false;	/* we requested a callback */
   1234   1.1  riastrad 
   1235   1.1  riastrad 	/*
   1236   1.1  riastrad 	 * Start with the maximal set of events that could be ready.
    1237   1.1  riastrad 	 * We will eliminate the events that are definitely not ready
    1238   1.1  riastrad 	 * as we go, at the same time adding callbacks to notify us
    1239   1.1  riastrad 	 * when they may become ready.
   1240   1.1  riastrad 	 */
   1241   1.1  riastrad 	revents = events & (POLLIN|POLLOUT);
   1242   1.1  riastrad 	if (revents == 0)
   1243   1.1  riastrad 		return 0;
   1244   1.1  riastrad 
   1245  1.10  riastrad top:	KASSERT(fence == NULL);
   1246  1.10  riastrad 
   1247   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1248   1.1  riastrad 	rcu_read_lock();
   1249   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1250   1.1  riastrad 
   1251   1.1  riastrad 	/* If we want to wait for all fences, get the shared list.  */
   1252  1.12  riastrad 	if (events & POLLOUT) {
   1253  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1254  1.12  riastrad 			&ticket))
   1255   1.1  riastrad 			goto restart;
   1256  1.12  riastrad 	} else {
   1257  1.12  riastrad 		list = NULL;
   1258  1.12  riastrad 		shared_count = 0;
   1259  1.12  riastrad 	}
   1260  1.12  riastrad 	if (list != NULL) do {
   1261   1.1  riastrad 		/*
   1262   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1263   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1264   1.1  riastrad 		 * it is signalled.  Stop and request a callback if we
   1265   1.1  riastrad 		 * find any that is not signalled.
   1266   1.1  riastrad 		 */
   1267   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1268  1.10  riastrad 			KASSERT(fence == NULL);
   1269   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1270  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1271   1.1  riastrad 				goto restart;
   1272   1.1  riastrad 			if (!dma_fence_is_signaled(fence)) {
   1273   1.1  riastrad 				dma_fence_put(fence);
   1274  1.10  riastrad 				fence = NULL;
   1275   1.1  riastrad 				break;
   1276   1.1  riastrad 			}
   1277   1.1  riastrad 			dma_fence_put(fence);
   1278  1.10  riastrad 			fence = NULL;
   1279   1.1  riastrad 		}
   1280   1.1  riastrad 
   1281   1.1  riastrad 		/* If all shared fences have been signalled, move on.  */
   1282   1.1  riastrad 		if (i == shared_count)
   1283   1.1  riastrad 			break;
   1284   1.1  riastrad 
   1285   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1286   1.1  riastrad 		if (!recorded)
   1287   1.1  riastrad 			goto record;
   1288   1.1  riastrad 
   1289   1.1  riastrad 		/*
   1290   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1291   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1292   1.1  riastrad 		 * assume the event is not ready.
   1293   1.1  riastrad 		 */
   1294   1.1  riastrad 		if (!claimed || callback) {
   1295   1.1  riastrad 			revents &= ~POLLOUT;
   1296   1.1  riastrad 			break;
   1297   1.1  riastrad 		}
   1298   1.1  riastrad 
   1299   1.1  riastrad 		/*
   1300   1.1  riastrad 		 * Otherwise, find the first fence that is not
   1301   1.1  riastrad 		 * signalled, request the callback, and clear POLLOUT
   1302   1.1  riastrad 		 * from the possible ready events.  If they are all
   1303   1.1  riastrad 		 * signalled, leave POLLOUT set; we will simulate the
   1304   1.1  riastrad 		 * callback later.
   1305   1.1  riastrad 		 */
   1306   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1307  1.10  riastrad 			KASSERT(fence == NULL);
   1308   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1309  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1310   1.1  riastrad 				goto restart;
   1311   1.1  riastrad 			if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1312   1.1  riastrad 				dma_resv_poll_cb)) {
   1313   1.1  riastrad 				dma_fence_put(fence);
   1314  1.10  riastrad 				fence = NULL;
   1315   1.1  riastrad 				revents &= ~POLLOUT;
   1316   1.1  riastrad 				callback = true;
   1317   1.1  riastrad 				break;
   1318   1.1  riastrad 			}
   1319   1.1  riastrad 			dma_fence_put(fence);
   1320  1.10  riastrad 			fence = NULL;
   1321   1.1  riastrad 		}
   1322   1.1  riastrad 	} while (0);
   1323   1.1  riastrad 
   1324   1.1  riastrad 	/* We always wait for at least the exclusive fence, so get it.  */
   1325  1.10  riastrad 	KASSERT(fence == NULL);
   1326  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1327  1.12  riastrad 		goto restart;
   1328  1.12  riastrad 	if (fence != NULL) do {
   1329   1.1  riastrad 		/*
   1330  1.12  riastrad 		 * Test whether it is signalled.  If not, stop and
   1331  1.12  riastrad 		 * request a callback.
   1332   1.1  riastrad 		 */
   1333   1.1  riastrad 		if (dma_fence_is_signaled(fence)) {
   1334   1.1  riastrad 			dma_fence_put(fence);
   1335  1.10  riastrad 			fence = NULL;
   1336   1.1  riastrad 			break;
   1337   1.1  riastrad 		}
   1338   1.1  riastrad 
   1339   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1340   1.1  riastrad 		if (!recorded) {
   1341   1.1  riastrad 			dma_fence_put(fence);
   1342  1.10  riastrad 			fence = NULL;
   1343   1.1  riastrad 			goto record;
   1344   1.1  riastrad 		}
   1345   1.1  riastrad 
   1346   1.1  riastrad 		/*
   1347   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1348   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1349   1.1  riastrad 		 * assume the event is not ready.
   1350   1.1  riastrad 		 */
   1351   1.1  riastrad 		if (!claimed || callback) {
   1352   1.1  riastrad 			dma_fence_put(fence);
   1353  1.10  riastrad 			fence = NULL;
   1354   1.1  riastrad 			revents = 0;
   1355   1.1  riastrad 			break;
   1356   1.1  riastrad 		}
   1357   1.1  riastrad 
   1358   1.1  riastrad 		/*
   1359   1.1  riastrad 		 * Otherwise, try to request the callback, and clear
   1360   1.1  riastrad 		 * all possible ready events.  If the fence has been
   1361   1.1  riastrad 		 * signalled in the interim, leave the events set; we
   1362   1.1  riastrad 		 * will simulate the callback later.
   1363   1.1  riastrad 		 */
   1364   1.1  riastrad 		if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1365   1.1  riastrad 			dma_resv_poll_cb)) {
   1366   1.1  riastrad 			dma_fence_put(fence);
   1367  1.10  riastrad 			fence = NULL;
   1368   1.1  riastrad 			revents = 0;
   1369   1.1  riastrad 			callback = true;
   1370   1.1  riastrad 			break;
   1371   1.1  riastrad 		}
   1372   1.1  riastrad 		dma_fence_put(fence);
   1373  1.10  riastrad 		fence = NULL;
   1374   1.1  riastrad 	} while (0);
   1375  1.10  riastrad 	KASSERT(fence == NULL);
   1376   1.1  riastrad 
   1377   1.1  riastrad 	/* All done reading the fences.  */
   1378   1.1  riastrad 	rcu_read_unlock();
   1379   1.1  riastrad 
   1380   1.1  riastrad 	if (claimed && !callback) {
   1381   1.1  riastrad 		/*
   1382   1.1  riastrad 		 * We claimed the callback but we didn't actually
   1383   1.1  riastrad 		 * request it because a fence was signalled while we
   1384   1.1  riastrad 		 * were claiming it.  Call it ourselves now.  The
   1385   1.1  riastrad 		 * callback doesn't use the fence nor rely on holding
   1386   1.1  riastrad 		 * any of the fence locks, so this is safe.
   1387   1.1  riastrad 		 */
   1388   1.1  riastrad 		dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
   1389   1.1  riastrad 	}
   1390   1.1  riastrad 	return revents;
   1391   1.1  riastrad 
   1392   1.1  riastrad restart:
   1393  1.10  riastrad 	KASSERT(fence == NULL);
   1394   1.1  riastrad 	rcu_read_unlock();
   1395   1.1  riastrad 	goto top;
   1396   1.1  riastrad 
   1397   1.1  riastrad record:
   1398  1.10  riastrad 	KASSERT(fence == NULL);
   1399   1.1  riastrad 	rcu_read_unlock();
   1400   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1401   1.1  riastrad 	selrecord(curlwp, &rpoll->rp_selq);
   1402   1.1  riastrad 	if (!rpoll->rp_claimed)
   1403   1.1  riastrad 		claimed = rpoll->rp_claimed = true;
   1404   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1405   1.1  riastrad 	recorded = true;
   1406   1.1  riastrad 	goto top;
   1407   1.1  riastrad }
   1408   1.1  riastrad 
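                         /*
                          * Illustrative sketch -- not part of this file.  A driver
                          * exposing a single reservation object could implement its
                          * poll(9) entry point by deferring to dma_resv_do_poll.
                          * mydrv_poll, mydrv_softc, mydrv_cd, and the sc_* members
                          * are hypothetical names.
                          *
                          *	static int
                          *	mydrv_poll(dev_t dev, int events, struct lwp *l)
                          *	{
                          *		struct mydrv_softc *sc =
                          *		    device_lookup_private(&mydrv_cd, minor(dev));
                          *
                          *		if (sc == NULL)
                          *			return POLLERR;
                          *		return dma_resv_do_poll(&sc->sc_resv, events,
                          *		    &sc->sc_rpoll);
                          *	}
                          *
                          * POLLIN is reported once the exclusive fence, if any, has
                          * signalled; POLLOUT additionally requires every shared
                          * fence to have signalled.  Otherwise the poller is recorded
                          * on the selq and a fence callback is arranged so that a
                          * later selnotify wakes it up.
                          */
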
   1409   1.1  riastrad /*
   1410   1.1  riastrad  * dma_resv_kqfilter(robj, kn, rpoll)
   1411   1.1  riastrad  *
   1412   1.1  riastrad  *	Kqueue filter for reservation objects.  Currently not
   1413   1.1  riastrad  *	implemented because the logic to implement it is nontrivial,
   1414   1.1  riastrad  *	and userland will presumably never use it, so it would be
   1415   1.1  riastrad  *	dangerous to add never-tested complex code paths to the kernel.
   1416   1.1  riastrad  */
   1417   1.1  riastrad int
   1418   1.1  riastrad dma_resv_kqfilter(const struct dma_resv *robj,
   1419   1.1  riastrad     struct knote *kn, struct dma_resv_poll *rpoll)
   1420   1.1  riastrad {
   1421   1.1  riastrad 
   1422   1.1  riastrad 	return EINVAL;
   1423   1.1  riastrad }