/*	$NetBSD: linux_dma_resv.c,v 1.19 2021/12/19 12:33:34 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.19 2021/12/19 12:33:34 riastradh Exp $");

#include <sys/param.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/seqlock.h>
#include <linux/ww_mutex.h>

DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);

static struct dma_resv_list *
objlist_tryalloc(uint32_t n)
{
	struct dma_resv_list *list;

	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
	if (list == NULL)
		return NULL;
	list->shared_max = n;

	return list;
}

static void
objlist_free(struct dma_resv_list *list)
{
	uint32_t n = list->shared_max;

	kmem_free(list, offsetof(typeof(*list), shared[n]));
}

static void
objlist_free_cb(struct rcu_head *rcu)
{
	struct dma_resv_list *list = container_of(rcu,
	    struct dma_resv_list, rol_rcu);

	objlist_free(list);
}

static void
objlist_defer_free(struct dma_resv_list *list)
{

	call_rcu(&list->rol_rcu, objlist_free_cb);
}

/*
 * dma_resv_init(robj)
 *
 *	Initialize a reservation object.  Caller must later destroy it
 *	with dma_resv_fini.
 */
void
dma_resv_init(struct dma_resv *robj)
{

	ww_mutex_init(&robj->lock, &reservation_ww_class);
	seqcount_init(&robj->seq);
	robj->fence_excl = NULL;
	robj->fence = NULL;
	robj->robj_prealloc = NULL;
}

/*
 * dma_resv_fini(robj)
 *
 *	Destroy a reservation object, freeing any memory that had been
 *	allocated for it.  Caller must have exclusive access to it.
 */
void
dma_resv_fini(struct dma_resv *robj)
{
	unsigned i;

	if (robj->robj_prealloc) {
		objlist_free(robj->robj_prealloc);
		robj->robj_prealloc = NULL; /* paranoia */
	}
	if (robj->fence) {
		for (i = 0; i < robj->fence->shared_count; i++) {
			dma_fence_put(robj->fence->shared[i]);
			robj->fence->shared[i] = NULL; /* paranoia */
		}
		objlist_free(robj->fence);
		robj->fence = NULL; /* paranoia */
	}
	if (robj->fence_excl) {
		dma_fence_put(robj->fence_excl);
		robj->fence_excl = NULL; /* paranoia */
	}
	ww_mutex_destroy(&robj->lock);
}
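
/*
 * Example (illustrative sketch, not part of the upstream file): the
 * typical lifecycle is to embed a struct dma_resv in a larger buffer
 * object, initialize it when the buffer is created, and destroy it
 * when the buffer is destroyed.  `struct example_buf' and both
 * functions below are hypothetical names used only for illustration.
 */
struct example_buf {
	struct dma_resv	eb_resv;
	/* ... other buffer state ... */
};

static void __unused
example_buf_create(struct example_buf *eb)
{

	dma_resv_init(&eb->eb_resv);
}

static void __unused
example_buf_destroy(struct example_buf *eb)
{

	/* Caller must guarantee no other users remain.  */
	dma_resv_fini(&eb->eb_resv);
}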

/*
 * dma_resv_lock(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry.
 */
int
dma_resv_lock(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.
 */
void
dma_resv_lock_slow(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	ww_mutex_lock_slow(&robj->lock, ctx);
}

/*
 * dma_resv_lock_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry, -ERESTART/-EINTR if interrupted.
 */
int
dma_resv_lock_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.  Return 0 on
 *	success, -ERESTART/-EINTR if interrupted.
 */
int
dma_resv_lock_slow_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_trylock(robj)
 *
 *	Try to acquire a reservation object's lock without blocking.
 *	Return true on success, false on failure.
 */
bool
dma_resv_trylock(struct dma_resv *robj)
{

	return ww_mutex_trylock(&robj->lock);
}

/*
 * dma_resv_locking_ctx(robj)
 *
 *	Return a pointer to the ww_acquire_ctx used by the owner of
 *	the reservation object's lock, or NULL if it is either not
 *	owned or if it is locked without context.
 */
struct ww_acquire_ctx *
dma_resv_locking_ctx(struct dma_resv *robj)
{

	return ww_mutex_locking_ctx(&robj->lock);
}

/*
 * dma_resv_unlock(robj)
 *
 *	Release a reservation object's lock.
 */
void
dma_resv_unlock(struct dma_resv *robj)
{

	return ww_mutex_unlock(&robj->lock);
}
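
/*
 * Example (illustrative sketch, not part of the upstream file):
 * locking two reservation objects with deadlock avoidance.  On
 * -EDEADLK the caller backs out, reacquires the contended lock with
 * the slow path, and retries in the new order, as described in the
 * comments above.  The helper name is hypothetical; it assumes
 * ww_acquire_init/done/fini from <linux/ww_mutex.h>.  On success the
 * caller must later unlock both objects and call ww_acquire_fini(ctx).
 */
static int __unused
dma_resv_lock_pair_example(struct dma_resv *a, struct dma_resv *b,
    struct ww_acquire_ctx *ctx)
{
	struct dma_resv *tmp;
	int ret;

	ww_acquire_init(ctx, &reservation_ww_class);

	ret = dma_resv_lock(a, ctx);
	if (ret == -EDEADLK) {
		/* Nothing else is held yet, so just take the slow path.  */
		dma_resv_lock_slow(a, ctx);
		ret = 0;
	}
	if (ret)
		goto fail;

	while ((ret = dma_resv_lock(b, ctx)) == -EDEADLK) {
		/*
		 * b is contended: back out of a, take b with the slow
		 * path, and retry the other lock in the new order.
		 */
		dma_resv_unlock(a);
		dma_resv_lock_slow(b, ctx);
		tmp = a;
		a = b;
		b = tmp;
	}
	if (ret) {
		dma_resv_unlock(a);
		goto fail;
	}

	ww_acquire_done(ctx);
	return 0;

fail:	ww_acquire_fini(ctx);
	return ret;
}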

/*
 * dma_resv_is_locked(robj)
 *
 *	True if robj is locked.
 */
bool
dma_resv_is_locked(struct dma_resv *robj)
{

	return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_held(robj)
 *
 *	True if robj is locked.
 */
bool
dma_resv_held(struct dma_resv *robj)
{

	return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_assert_held(robj)
 *
 *	Panic if robj is not held, in DIAGNOSTIC builds.
 */
void
dma_resv_assert_held(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
}

/*
 * dma_resv_get_excl(robj)
 *
 *	Return a pointer to the exclusive fence of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_fence *
dma_resv_get_excl(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence_excl;
}

/*
 * dma_resv_get_list(robj)
 *
 *	Return a pointer to the shared fence list of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_resv_list *
dma_resv_get_list(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence;
}

/*
 * dma_resv_reserve_shared(robj, num_fences)
 *
 *	Reserve space in robj to add num_fences shared fences.  To be
 *	used only once before calling dma_resv_add_shared_fence.
 *
 *	Caller must have robj locked.
 *
 *	Internally, we start with room for four entries and double if
 *	we don't have enough.  This is not guaranteed.
 */
int
dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
{
	struct dma_resv_list *list, *prealloc;
	uint32_t n, nalloc;

	KASSERT(dma_resv_held(robj));

	list = robj->fence;
	prealloc = robj->robj_prealloc;

	/* If there's an existing list, check it for space.  */
	if (list) {
		/* If there's too many already, give up.  */
		if (list->shared_count > UINT32_MAX - num_fences)
			return -ENOMEM;

		/* Add some more. */
		n = list->shared_count + num_fences;

		/* If there's enough room for them, we're done.  */
		if (n <= list->shared_max)
			return 0;
	} else {
		/* No list already.  We need space for num_fences.  */
		n = num_fences;
	}

	/* If not, maybe there's a preallocated list ready.  */
	if (prealloc != NULL) {
		/* If there's enough room in it, stop here.  */
		if (n <= prealloc->shared_max)
			return 0;

		/* Try to double its capacity.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
		prealloc = objlist_tryalloc(nalloc);
		if (prealloc == NULL)
			return -ENOMEM;

		/* Swap the new preallocated list and free the old one.  */
		objlist_free(robj->robj_prealloc);
		robj->robj_prealloc = prealloc;
	} else {
		/* Start with some spare.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
		prealloc = objlist_tryalloc(nalloc);
		if (prealloc == NULL)
			return -ENOMEM;
		/* Save the new preallocated list.  */
		robj->robj_prealloc = prealloc;
	}

	/* Success!  */
	return 0;
}

struct dma_resv_write_ticket {
};

/*
 * dma_resv_write_begin(robj, ticket)
 *
 *	Begin an atomic batch of writes to robj, and initialize opaque
 *	ticket for it.  The ticket must be passed to
 *	dma_resv_write_commit to commit the writes.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as an acquire operation, however.
 */
static void
dma_resv_write_begin(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_write_commit(robj, ticket)
 *
 *	Commit an atomic batch of writes to robj begun with the call to
 *	dma_resv_write_begin that returned ticket.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as a release operation, however.
 */
static void
dma_resv_write_commit(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_end(&robj->seq);
}

struct dma_resv_read_ticket {
	unsigned version;
};

/*
 * dma_resv_read_begin(robj, ticket)
 *
 *	Begin a read section, and initialize opaque ticket for it.  The
 *	ticket must be passed to dma_resv_read_valid, and the
 *	caller must be prepared to retry reading if it fails.
 */
static void
dma_resv_read_begin(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	ticket->version = read_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_read_valid(robj, ticket)
 *
 *	Test whether the read sections are valid.  Return true on
 *	success, or false on failure if the read ticket has been
 *	invalidated.
 */
static bool
dma_resv_read_valid(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	return !read_seqcount_retry(&robj->seq, ticket->version);
}
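
/*
 * Example (illustrative sketch, not part of the upstream file): the
 * reader pattern used throughout the rest of this file -- take a
 * ticket inside an RCU read section, read, and retry if a writer
 * intervened.  Here we only check whether an exclusive fence is
 * currently set, without taking a reference to it.  The function
 * name is hypothetical.
 */
static bool __unused
dma_resv_has_excl_fence_example(const struct dma_resv *robj)
{
	struct dma_resv_read_ticket ticket;
	const struct dma_fence *fence;

	rcu_read_lock();
	do {
		dma_resv_read_begin(robj, &ticket);
		fence = atomic_load_consume(&robj->fence_excl);
	} while (!dma_resv_read_valid(robj, &ticket));
	rcu_read_unlock();

	return fence != NULL;
}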

/*
 * dma_resv_get_shared_reader(robj, listp, shared_countp, ticket)
 *
 *	Set *listp and *shared_countp to a snapshot of the pointer to
 *	and length of the shared fence list of robj and return true, or
 *	set them to NULL/0 and return false if a writer intervened so
 *	the caller must start over.
 *
 *	Both *listp and *shared_countp are unconditionally initialized
 *	on return.  They may be NULL/0 even on success, if there is no
 *	shared list at the moment.  Does not take any fence references.
 */
static bool
dma_resv_get_shared_reader(const struct dma_resv *robj,
    const struct dma_resv_list **listp, unsigned *shared_countp,
    struct dma_resv_read_ticket *ticket)
{
	struct dma_resv_list *list;
	unsigned shared_count = 0;

	/*
	 * Get the list and, if it is present, its length.  If the list
	 * is present, it has a valid length.  The atomic_load_consume
	 * pairs with the membar_producer in dma_resv_write_begin.
	 */
	list = atomic_load_consume(&robj->fence);
	shared_count = list ? atomic_load_relaxed(&list->shared_count) : 0;

	/*
	 * We are done reading from robj and list.  Validate our
	 * parking ticket.  If it's invalid, do not pass go and do not
	 * collect $200.
	 */
	if (!dma_resv_read_valid(robj, ticket))
		goto fail;

	/* Success!  */
	*listp = list;
	*shared_countp = shared_count;
	return true;

fail:	*listp = NULL;
	*shared_countp = 0;
	return false;
}

/*
 * dma_resv_get_excl_reader(robj, fencep, ticket)
 *
 *	Set *fencep to the exclusive fence of robj and return true, or
 *	set it to NULL and return false if either
 *	(a) a writer intervened, or
 *	(b) the fence is scheduled to be destroyed after this RCU grace
 *	    period,
 *	in either case meaning the caller must restart.
 *
 *	The value of *fencep is unconditionally initialized on return.
 *	It may be NULL, if there is no exclusive fence at the moment.
 *	If nonnull, *fencep is referenced; caller must dma_fence_put.
 */
static bool
dma_resv_get_excl_reader(const struct dma_resv *robj,
    struct dma_fence **fencep,
    struct dma_resv_read_ticket *ticket)
{
	struct dma_fence *fence;

	/*
	 * Get the candidate fence pointer.  The atomic_load_consume
	 * pairs with the membar_producer in dma_resv_write_begin.
	 */
	fence = atomic_load_consume(&robj->fence_excl);

	/*
	 * The load of robj->fence_excl is atomic, but the caller may
	 * have previously loaded the shared fence list and should
	 * restart if its view of the entire dma_resv object is not a
	 * consistent snapshot.
	 */
	if (!dma_resv_read_valid(robj, ticket))
		goto fail;

	/*
	 * If the fence is already scheduled to go away after this RCU
	 * read section, give up.  Otherwise, take a reference so it
	 * won't go away until after dma_fence_put.
	 */
	if (fence != NULL &&
	    (fence = dma_fence_get_rcu(fence)) == NULL)
		goto fail;

	/* Success!  */
	*fencep = fence;
	return true;

fail:	*fencep = NULL;
	return false;
}

/*
 * dma_resv_add_excl_fence(robj, fence)
 *
 *	Empty and release all of robj's shared fences, and clear and
 *	release its exclusive fence.  If fence is nonnull, acquire a
 *	reference to it and save it as robj's exclusive fence.
 *
 *	Caller must have robj locked.
 */
void
dma_resv_add_excl_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_fence *old_fence = robj->fence_excl;
	struct dma_resv_list *old_list = robj->fence;
	uint32_t old_shared_count;
	struct dma_resv_write_ticket ticket;

	KASSERT(dma_resv_held(robj));

	/*
	 * If we are setting rather than just removing a fence, acquire
	 * a reference for ourselves.
	 */
	if (fence)
		(void)dma_fence_get(fence);

	/* If there are any shared fences, remember how many.  */
	if (old_list)
		old_shared_count = old_list->shared_count;

	/* Begin an update.  Implies membar_producer for fence.  */
	dma_resv_write_begin(robj, &ticket);

	/* Replace the fence and zero the shared count.  */
	atomic_store_relaxed(&robj->fence_excl, fence);
	if (old_list)
		old_list->shared_count = 0;

	/* Commit the update.  */
	dma_resv_write_commit(robj, &ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence) {
		dma_fence_put(old_fence);
		old_fence = NULL; /* paranoia */
	}

	/* Release any old shared fences.  */
	if (old_list) {
		while (old_shared_count--) {
			dma_fence_put(old_list->shared[old_shared_count]);
			/* paranoia */
			old_list->shared[old_shared_count] = NULL;
		}
	}
}
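
/*
 * Example (illustrative sketch, not part of the upstream file):
 * publishing a fence for a new exclusive (write) access, e.g. after
 * submitting a job that will write to the buffer.  The function name
 * and `job_fence' are hypothetical.
 */
static int __unused
dma_resv_set_write_fence_example(struct dma_resv *robj,
    struct dma_fence *job_fence)
{
	int ret;

	ret = dma_resv_lock(robj, NULL);
	if (ret)
		return ret;
	/* Drops all shared fences and replaces the exclusive fence.  */
	dma_resv_add_excl_fence(robj, job_fence);
	dma_resv_unlock(robj);

	return 0;
}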

/*
 * dma_resv_add_shared_fence(robj, fence)
 *
 *	Acquire a reference to fence and add it to robj's shared list.
 *	If any fence was already added with the same context number,
 *	release it and replace it by this one.
 *
 *	Caller must have robj locked, and must have preceded with a
 *	call to dma_resv_reserve_shared for each shared fence
 *	added.
 */
void
dma_resv_add_shared_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_resv_list *list = robj->fence;
	struct dma_resv_list *prealloc = robj->robj_prealloc;
	struct dma_resv_write_ticket ticket;
	struct dma_fence *replace = NULL;
	uint32_t i;

	KASSERT(dma_resv_held(robj));

	/* Acquire a reference to the fence.  */
	KASSERT(fence != NULL);
	(void)dma_fence_get(fence);

	/* Check for a preallocated replacement list.  */
	if (prealloc == NULL) {
		/*
		 * If there is no preallocated replacement list, then
		 * there must be room in the current list.
		 */
		KASSERT(list != NULL);
		KASSERT(list->shared_count < list->shared_max);

		/* Begin an update.  Implies membar_producer for fence.  */
		dma_resv_write_begin(robj, &ticket);

		/* Find a fence with the same context number.  */
		for (i = 0; i < list->shared_count; i++) {
			if (list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				atomic_store_relaxed(&list->shared[i], fence);
				break;
			}
		}

		/* If we didn't find one, add it at the end.  */
		if (i == list->shared_count) {
			atomic_store_relaxed(&list->shared[list->shared_count],
			    fence);
			atomic_store_relaxed(&list->shared_count,
			    list->shared_count + 1);
		}

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);
	} else {
		/*
		 * There is a preallocated replacement list.  There may
		 * not be a current list.  If not, treat it as a zero-
		 * length list.
		 */
		uint32_t shared_count = (list == NULL ? 0 : list->shared_count);

		/* There had better be room in the preallocated list.  */
		KASSERT(shared_count < prealloc->shared_max);

		/*
		 * Copy the fences over, but replace if we find one
		 * with the same context number.
		 */
		for (i = 0; i < shared_count; i++) {
			if (replace == NULL &&
			    list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				prealloc->shared[i] = fence;
			} else {
				prealloc->shared[i] = list->shared[i];
			}
		}
		prealloc->shared_count = shared_count;

		/* If we didn't find one, add it at the end.  */
		if (replace == NULL)
			prealloc->shared[prealloc->shared_count++] = fence;

		/*
		 * Now ready to replace the list.  Begin an update.
		 * Implies membar_producer for fence and prealloc.
		 */
		dma_resv_write_begin(robj, &ticket);

		/* Replace the list.  */
		atomic_store_relaxed(&robj->fence, prealloc);
		robj->robj_prealloc = NULL;

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);

		/*
		 * If there is an old list, free it when convenient.
		 * (We are not in a position at this point to sleep
		 * waiting for activity on all CPUs.)
		 */
		if (list)
			objlist_defer_free(list);
	}

	/* Release a fence if we replaced it.  */
	if (replace) {
		dma_fence_put(replace);
		replace = NULL;	/* paranoia */
	}
}
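
/*
 * Example (illustrative sketch, not part of the upstream file):
 * publishing a batch of shared (read) fences.  Space for all of them
 * is reserved up front under the lock, then each fence is added.  The
 * function name is hypothetical.
 */
static int __unused
dma_resv_add_readers_example(struct dma_resv *robj,
    struct dma_fence **fences, unsigned nfences)
{
	unsigned i;
	int ret;

	ret = dma_resv_lock(robj, NULL);
	if (ret)
		return ret;
	ret = dma_resv_reserve_shared(robj, nfences);
	if (ret == 0) {
		for (i = 0; i < nfences; i++)
			dma_resv_add_shared_fence(robj, fences[i]);
	}
	dma_resv_unlock(robj);

	return ret;
}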

/*
 * dma_resv_get_excl_rcu(robj)
 *
 *	Note: Caller need not call this from an RCU read section.
 */
struct dma_fence *
dma_resv_get_excl_rcu(const struct dma_resv *robj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&robj->fence_excl);
	rcu_read_unlock();

	return fence;
}

/*
 * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
 *
 *	Get a snapshot of the exclusive and shared fences of robj.  The
 *	shared fences are returned as a pointer *sharedp to an array,
 *	to be freed by the caller with kfree, of *nsharedp elements.
 *	If fencep is null, then add the exclusive fence, if any, at the
 *	end of the array instead.
 *
 *	Returns zero on success, negative (Linux-style) error code on
 *	failure.  On failure, *fencep, *nsharedp, and *sharedp are
 *	untouched.
 */
int
dma_resv_get_fences_rcu(const struct dma_resv *robj,
    struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
{
	const struct dma_resv_list *list = NULL;
	struct dma_fence *fence = NULL;
	struct dma_fence **shared = NULL;
	unsigned shared_alloc, shared_count, i;
	struct dma_resv_read_ticket ticket;

top:	KASSERT(fence == NULL);

	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If there is a shared list, grab it.  */
	if (!dma_resv_get_shared_reader(robj, &list, &shared_count, &ticket))
		goto restart;
	if (list != NULL) {

		/*
		 * Avoid arithmetic overflow with `+ 1' below.
		 * Strictly speaking we don't need this if the caller
		 * specified fencep or if there is no exclusive fence,
		 * but it is simpler to not have to consider those
		 * cases.
		 */
		KASSERT(shared_count <= list->shared_max);
		if (list->shared_max == UINT_MAX) {
			/*
			 * Too many to ever add one more.  Exit the RCU
			 * read section and free any buffer we had
			 * allocated before failing.
			 */
			rcu_read_unlock();
			if (shared != NULL)
				kfree(shared);
			return -ENOMEM;
		}

		/* Check whether we have a buffer.  */
		if (shared == NULL) {
			/*
			 * We don't have a buffer yet.  Try to allocate
			 * one without waiting.
			 */
			shared_alloc = list->shared_max + 1;
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_NOWAIT);
			if (shared == NULL) {
				/*
				 * Couldn't do it immediately.  Back
				 * out of RCU and allocate one with
				 * waiting.
				 */
				rcu_read_unlock();
				shared = kcalloc(shared_alloc,
				    sizeof(shared[0]), GFP_KERNEL);
				if (shared == NULL)
					return -ENOMEM;
				goto top;
			}
		} else if (shared_alloc < list->shared_max + 1) {
			/*
			 * We have a buffer but it's too small.  We're
			 * already racing in this case, so just back
			 * out and wait to allocate a bigger one.
			 */
			shared_alloc = list->shared_max + 1;
			rcu_read_unlock();
			kfree(shared);
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_KERNEL);
			if (shared == NULL)
				return -ENOMEM;
			goto top;
		}

		/*
		 * We got a buffer large enough.  Copy into the buffer
		 * and record the number of elements.  Could safely use
		 * memcpy here, because even if we race with a writer
		 * it'll invalidate the read ticket and we'll start
		 * over, but atomic_load in a loop will pacify kcsan.
		 */
		for (i = 0; i < shared_count; i++)
			shared[i] = atomic_load_relaxed(&list->shared[i]);

		/* If anything changed while we were copying, restart.  */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;
	}

	/* If there is an exclusive fence, grab it.  */
	KASSERT(fence == NULL);
	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
		goto restart;

	/*
	 * Try to get a reference to all of the shared fences.
	 */
	for (i = 0; i < shared_count; i++) {
		if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL)
			goto put_restart;
	}

	/* Success!  */
	rcu_read_unlock();
	if (fencep) {
		*fencep = fence;
	} else if (fence) {
		if (shared == NULL) {
			/*
			 * There was no shared list, so we never
			 * allocated an array; get room for the
			 * exclusive fence before appending it.
			 */
			shared = kcalloc(1, sizeof(shared[0]), GFP_KERNEL);
			if (shared == NULL) {
				dma_fence_put(fence);
				return -ENOMEM;
			}
		}
		KASSERT(shared_count < UINT_MAX);
		shared[shared_count++] = fence;
	}
	*nsharedp = shared_count;
	*sharedp = shared;
	return 0;

put_restart:
	/* Back out.  */
	while (i --> 0) {
		dma_fence_put(shared[i]);
		shared[i] = NULL; /* paranoia */
	}
	if (fence) {
		dma_fence_put(fence);
		fence = NULL;
	}

restart:
	KASSERT(fence == NULL);
	rcu_read_unlock();
	goto top;
}
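
/*
 * Example (illustrative sketch, not part of the upstream file):
 * snapshot all fences on an object and count how many have not yet
 * signalled, without ever taking the reservation lock.  Passing a
 * null fencep folds the exclusive fence into the returned array,
 * which must be freed with kfree.  The function name is hypothetical.
 */
static int __unused
dma_resv_count_busy_example(const struct dma_resv *robj,
    unsigned *nbusyp)
{
	struct dma_fence **shared;
	unsigned nshared, nbusy = 0, i;
	int ret;

	ret = dma_resv_get_fences_rcu(robj, NULL, &nshared, &shared);
	if (ret)
		return ret;

	for (i = 0; i < nshared; i++) {
		if (!dma_fence_is_signaled(shared[i]))
			nbusy++;
		dma_fence_put(shared[i]);
	}
	if (shared != NULL)
		kfree(shared);

	*nbusyp = nbusy;
	return 0;
}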

/*
 * dma_resv_copy_fences(dst, src)
 *
 *	Copy the exclusive fence and all the shared fences from src to
 *	dst.
 *
 *	Caller must have dst locked.
 */
int
dma_resv_copy_fences(struct dma_resv *dst_robj,
    const struct dma_resv *src_robj)
{
	const struct dma_resv_list *src_list;
	struct dma_resv_list *dst_list = NULL;
	struct dma_resv_list *old_list;
	struct dma_fence *fence = NULL;
	struct dma_fence *old_fence;
	uint32_t shared_count, i;
	struct dma_resv_read_ticket read_ticket;
	struct dma_resv_write_ticket write_ticket;

	KASSERT(dma_resv_held(dst_robj));

top:	KASSERT(fence == NULL);

	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(src_robj, &read_ticket);

	/* Get the shared list.  */
	if (!dma_resv_get_shared_reader(src_robj, &src_list, &shared_count,
		&read_ticket))
		goto restart;
	if (src_list != NULL) {
		/* Allocate a new list.  */
		dst_list = objlist_tryalloc(shared_count);
		if (dst_list == NULL) {
			/* Exit the RCU read section before failing.  */
			rcu_read_unlock();
			return -ENOMEM;
		}

		/* Copy over all fences that are not yet signalled.  */
		dst_list->shared_count = 0;
		for (i = 0; i < shared_count; i++) {
			KASSERT(fence == NULL);
			fence = atomic_load_relaxed(&src_list->shared[i]);
			if ((fence = dma_fence_get_rcu(fence)) == NULL)
				goto restart;
			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				fence = NULL;
				continue;
			}
			dst_list->shared[dst_list->shared_count++] = fence;
			fence = NULL;
		}

		/* If anything changed while we were copying, restart.  */
		if (!dma_resv_read_valid(src_robj, &read_ticket))
			goto restart;
	}

	/* Get the exclusive fence.  */
	KASSERT(fence == NULL);
	if (!dma_resv_get_excl_reader(src_robj, &fence, &read_ticket))
		goto restart;

	/* All done with src; exit the RCU read section.  */
	rcu_read_unlock();

	/*
	 * We now have a snapshot of the shared and exclusive fences of
	 * src_robj and we have acquired references to them so they
	 * won't go away.  Transfer them over to dst_robj, releasing
	 * references to any that were there.
	 */

	/* Get the old shared and exclusive fences, if any.  */
	old_list = dst_robj->fence;
	old_fence = dst_robj->fence_excl;

	/*
	 * Begin an update.  Implies membar_producer for dst_list and
	 * fence.
	 */
	dma_resv_write_begin(dst_robj, &write_ticket);

	/* Replace the fences.  */
	atomic_store_relaxed(&dst_robj->fence, dst_list);
	atomic_store_relaxed(&dst_robj->fence_excl, fence);

	/* Commit the update.  */
	dma_resv_write_commit(dst_robj, &write_ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence) {
		dma_fence_put(old_fence);
		old_fence = NULL; /* paranoia */
	}

	/* Release any old shared fences.  */
	if (old_list) {
		for (i = old_list->shared_count; i --> 0;) {
			dma_fence_put(old_list->shared[i]);
			old_list->shared[i] = NULL; /* paranoia */
		}
		objlist_free(old_list);
		old_list = NULL; /* paranoia */
	}

	/* Success!  */
	return 0;

restart:
	KASSERT(fence == NULL);
	rcu_read_unlock();
	if (dst_list) {
		for (i = dst_list->shared_count; i --> 0;) {
			dma_fence_put(dst_list->shared[i]);
			dst_list->shared[i] = NULL; /* paranoia */
		}
		objlist_free(dst_list);
		dst_list = NULL;
	}
	goto top;
}
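
/*
 * Example (illustrative sketch, not part of the upstream file):
 * cloning reservation state, e.g. when replacing one buffer object's
 * backing storage with another's.  Only the destination needs to be
 * locked; the source is read under RCU.  The function name is
 * hypothetical.
 */
static int __unused
dma_resv_clone_example(struct dma_resv *dst, const struct dma_resv *src)
{
	int ret;

	ret = dma_resv_lock(dst, NULL);
	if (ret)
		return ret;
	ret = dma_resv_copy_fences(dst, src);
	dma_resv_unlock(dst);

	return ret;
}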
   1005   1.1  riastrad 
   1006   1.1  riastrad /*
   1007   1.1  riastrad  * dma_resv_test_signaled_rcu(robj, shared)
   1008   1.1  riastrad  *
   1009   1.1  riastrad  *	If shared is true, test whether all of the shared fences are
   1010   1.1  riastrad  *	signalled, or if there are none, test whether the exclusive
   1011   1.1  riastrad  *	fence is signalled.  If shared is false, test only whether the
   1012   1.1  riastrad  *	exclusive fence is signalled.
   1013   1.1  riastrad  *
   1014   1.1  riastrad  *	XXX Why does this _not_ test the exclusive fence if shared is
   1015   1.1  riastrad  *	true only if there are no shared fences?  This makes no sense.
   1016   1.1  riastrad  */
   1017   1.1  riastrad bool
   1018   1.1  riastrad dma_resv_test_signaled_rcu(const struct dma_resv *robj,
   1019   1.1  riastrad     bool shared)
   1020   1.1  riastrad {
   1021   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1022  1.12  riastrad 	const struct dma_resv_list *list;
   1023  1.10  riastrad 	struct dma_fence *fence = NULL;
   1024   1.1  riastrad 	uint32_t i, shared_count;
   1025   1.1  riastrad 	bool signaled = true;
   1026   1.1  riastrad 
   1027  1.10  riastrad top:	KASSERT(fence == NULL);
   1028  1.10  riastrad 
   1029   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1030   1.1  riastrad 	rcu_read_lock();
   1031   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1032   1.1  riastrad 
   1033   1.1  riastrad 	/* If shared is requested and there is a shared list, test it.  */
   1034  1.12  riastrad 	if (shared) {
   1035  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1036  1.12  riastrad 			&ticket))
   1037   1.1  riastrad 			goto restart;
   1038  1.12  riastrad 	} else {
   1039  1.12  riastrad 		list = NULL;
   1040  1.12  riastrad 		shared_count = 0;
   1041  1.12  riastrad 	}
   1042  1.12  riastrad 	if (list != NULL) {
   1043   1.1  riastrad 		/*
   1044   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1045   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1046   1.1  riastrad 		 * it is signalled.  Stop if we find any that is not
   1047   1.1  riastrad 		 * signalled.
   1048   1.1  riastrad 		 */
   1049   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1050  1.10  riastrad 			KASSERT(fence == NULL);
   1051   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1052  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1053   1.1  riastrad 				goto restart;
   1054   1.1  riastrad 			signaled &= dma_fence_is_signaled(fence);
   1055   1.1  riastrad 			dma_fence_put(fence);
   1056  1.10  riastrad 			fence = NULL;
   1057   1.1  riastrad 			if (!signaled)
   1058   1.1  riastrad 				goto out;
   1059   1.1  riastrad 		}
   1060  1.14  riastrad 
   1061  1.14  riastrad 		/* If anything changed while we were testing, restart.  */
   1062  1.14  riastrad 		if (!dma_resv_read_valid(robj, &ticket))
   1063  1.14  riastrad 			goto restart;
   1064   1.1  riastrad 	}
   1065  1.15  riastrad 	if (shared_count)
   1066  1.15  riastrad 		goto out;
   1067   1.1  riastrad 
   1068   1.1  riastrad 	/* If there is an exclusive fence, test it.  */
   1069  1.10  riastrad 	KASSERT(fence == NULL);
   1070  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1071  1.12  riastrad 		goto restart;
   1072  1.12  riastrad 	if (fence != NULL) {
   1073  1.12  riastrad 		/* Test whether it is signalled.  If no, stop.  */
   1074   1.1  riastrad 		signaled &= dma_fence_is_signaled(fence);
   1075   1.1  riastrad 		dma_fence_put(fence);
   1076  1.10  riastrad 		fence = NULL;
   1077   1.1  riastrad 		if (!signaled)
   1078   1.1  riastrad 			goto out;
   1079   1.1  riastrad 	}
   1080   1.1  riastrad 
   1081  1.10  riastrad out:	KASSERT(fence == NULL);
   1082  1.10  riastrad 	rcu_read_unlock();
   1083   1.1  riastrad 	return signaled;
   1084   1.1  riastrad 
   1085   1.1  riastrad restart:
   1086  1.10  riastrad 	KASSERT(fence == NULL);
   1087   1.1  riastrad 	rcu_read_unlock();
   1088   1.1  riastrad 	goto top;
   1089   1.1  riastrad }
   1090   1.1  riastrad 
   1091   1.1  riastrad /*
   1092   1.1  riastrad  * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
   1093   1.1  riastrad  *
   1094   1.1  riastrad  *	If shared is true, wait for all of the shared fences to be
   1095   1.1  riastrad  *	signalled, or if there are none, wait for the exclusive fence
   1096   1.1  riastrad  *	to be signalled.  If shared is false, wait only for the
   1097   1.1  riastrad  *	exclusive fence to be signalled.  If timeout is zero, don't
   1098   1.1  riastrad  *	wait, only test.
   1099   1.1  riastrad  *
   1100   1.1  riastrad  *	XXX Why does this _not_ wait for the exclusive fence if shared
   1101   1.1  riastrad  *	is true only if there are no shared fences?  This makes no
   1102   1.1  riastrad  *	sense.
   1103   1.1  riastrad  */
   1104   1.1  riastrad long
   1105   1.1  riastrad dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
   1106   1.1  riastrad     bool shared, bool intr, unsigned long timeout)
   1107   1.1  riastrad {
   1108   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1109  1.12  riastrad 	const struct dma_resv_list *list;
   1110  1.10  riastrad 	struct dma_fence *fence = NULL;
   1111   1.1  riastrad 	uint32_t i, shared_count;
   1112   1.1  riastrad 	long ret;
   1113   1.1  riastrad 
   1114   1.1  riastrad 	if (timeout == 0)
   1115   1.1  riastrad 		return dma_resv_test_signaled_rcu(robj, shared);
   1116   1.1  riastrad 
   1117  1.10  riastrad top:	KASSERT(fence == NULL);
   1118  1.10  riastrad 
   1119   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1120   1.1  riastrad 	rcu_read_lock();
   1121   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1122   1.1  riastrad 
   1123   1.1  riastrad 	/* If shared is requested and there is a shared list, wait on it.  */
   1124  1.12  riastrad 	if (shared) {
   1125  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1126  1.12  riastrad 			&ticket))
   1127   1.1  riastrad 			goto restart;
   1128  1.12  riastrad 	} else {
   1129  1.12  riastrad 		list = NULL;
   1130  1.12  riastrad 		shared_count = 0;
   1131  1.12  riastrad 	}
   1132  1.12  riastrad 	if (list != NULL) {
   1133   1.1  riastrad 		/*
   1134   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1135   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1136   1.1  riastrad 		 * it is signalled.  Stop and wait if we find any that
   1137   1.1  riastrad 		 * is not signalled.
   1138   1.1  riastrad 		 */
   1139   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1140  1.10  riastrad 			KASSERT(fence == NULL);
   1141   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1142  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1143   1.1  riastrad 				goto restart;
   1144   1.1  riastrad 			if (!dma_fence_is_signaled(fence))
   1145   1.1  riastrad 				goto wait;
   1146   1.1  riastrad 			dma_fence_put(fence);
   1147  1.10  riastrad 			fence = NULL;
   1148   1.1  riastrad 		}
   1149  1.14  riastrad 
   1150  1.14  riastrad 		/* If anything changed while we were testing, restart.  */
   1151  1.14  riastrad 		if (!dma_resv_read_valid(robj, &ticket))
   1152  1.14  riastrad 			goto restart;
   1153   1.1  riastrad 	}
   1154  1.15  riastrad 	if (shared_count)
   1155  1.15  riastrad 		goto out;
   1156   1.1  riastrad 
   1157   1.1  riastrad 	/* If there is an exclusive fence, test it.  */
   1158  1.10  riastrad 	KASSERT(fence == NULL);
   1159  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1160  1.12  riastrad 		goto restart;
   1161  1.12  riastrad 	if (fence != NULL) {
   1162  1.12  riastrad 		/* Test whether it is signalled.  If no, wait.  */
   1163   1.1  riastrad 		if (!dma_fence_is_signaled(fence))
   1164   1.1  riastrad 			goto wait;
   1165   1.1  riastrad 		dma_fence_put(fence);
   1166  1.10  riastrad 		fence = NULL;
   1167   1.1  riastrad 	}
   1168   1.1  riastrad 
   1169  1.15  riastrad out:	/* Success!  Return the number of ticks left.  */
   1170   1.1  riastrad 	rcu_read_unlock();
   1171  1.10  riastrad 	KASSERT(fence == NULL);
   1172   1.1  riastrad 	return timeout;
   1173   1.1  riastrad 
   1174   1.1  riastrad restart:
   1175  1.10  riastrad 	KASSERT(fence == NULL);
   1176   1.1  riastrad 	rcu_read_unlock();
   1177   1.1  riastrad 	goto top;
   1178   1.1  riastrad 
   1179   1.1  riastrad wait:
   1180   1.1  riastrad 	/*
    1181   1.5  riastrad 	 * Exit the RCU read section, wait for the fence, and release
    1182   1.5  riastrad 	 * it when we're done.  If we time out or fail, bail.
   1183   1.5  riastrad 	 * Otherwise, go back to the top.
   1184   1.1  riastrad 	 */
   1185   1.1  riastrad 	KASSERT(fence != NULL);
   1186   1.1  riastrad 	rcu_read_unlock();
   1187   1.1  riastrad 	ret = dma_fence_wait_timeout(fence, intr, timeout);
   1188   1.1  riastrad 	dma_fence_put(fence);
   1189  1.10  riastrad 	fence = NULL;
   1190   1.1  riastrad 	if (ret <= 0)
   1191   1.1  riastrad 		return ret;
   1192   1.1  riastrad 	KASSERT(ret <= timeout);
   1193   1.1  riastrad 	timeout = ret;
   1194   1.1  riastrad 	goto top;
   1195   1.1  riastrad }
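
                         /*
                          * Example (sketch): how a hypothetical caller might wait up
                          * to one second for a buffer to go idle before reusing it.
                          * The helper name and the one-second budget are assumptions
                          * made for the example; mstohz() is the usual NetBSD
                          * conversion from milliseconds to the ticks this routine
                          * takes and returns, and negative returns follow the Linux
                          * error convention.
                          */
                         static int __unused
                         example_resv_wait_idle(const struct dma_resv *robj)
                         {
                         	long ret;

                         	/*
                         	 * Wait interruptibly for the shared fences (or for the
                         	 * exclusive fence if there are none; see the XXX above).
                         	 */
                         	ret = dma_resv_wait_timeout_rcu(robj, /*shared*/true,
                         	    /*intr*/true, mstohz(1000));
                         	if (ret < 0)	/* interrupted or error (Linux-style) */
                         		return (int)-ret;
                         	if (ret == 0)	/* timed out */
                         		return EWOULDBLOCK;
                         	return 0;	/* signalled, with ret ticks to spare */
                         }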
   1196   1.1  riastrad 
   1197   1.1  riastrad /*
   1198   1.1  riastrad  * dma_resv_poll_init(rpoll, lock)
   1199   1.1  riastrad  *
   1200   1.1  riastrad  *	Initialize reservation poll state.
   1201   1.1  riastrad  */
   1202   1.1  riastrad void
   1203   1.1  riastrad dma_resv_poll_init(struct dma_resv_poll *rpoll)
   1204   1.1  riastrad {
   1205   1.1  riastrad 
   1206   1.1  riastrad 	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
   1207   1.1  riastrad 	selinit(&rpoll->rp_selq);
   1208   1.1  riastrad 	rpoll->rp_claimed = 0;
   1209   1.1  riastrad }
   1210   1.1  riastrad 
   1211   1.1  riastrad /*
   1212   1.1  riastrad  * dma_resv_poll_fini(rpoll)
   1213   1.1  riastrad  *
    1214   1.1  riastrad  *	Release any resources associated with the reservation poll state.
   1215   1.1  riastrad  */
   1216   1.1  riastrad void
   1217   1.1  riastrad dma_resv_poll_fini(struct dma_resv_poll *rpoll)
   1218   1.1  riastrad {
   1219   1.1  riastrad 
   1220   1.1  riastrad 	KASSERT(rpoll->rp_claimed == 0);
   1221   1.1  riastrad 	seldestroy(&rpoll->rp_selq);
   1222   1.1  riastrad 	mutex_destroy(&rpoll->rp_lock);
   1223   1.1  riastrad }
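
                         /*
                          * Example (sketch): the intended lifecycle of the poll state.
                          * A hypothetical driver embeds a struct dma_resv_poll next to
                          * the reservation object it exports, initializes it once at
                          * attach time, and destroys it at detach time, by which point
                          * no poller may still have the callback claimed.  The
                          * structure and function names are assumptions made for the
                          * example.
                          */
                         struct example_softc {
                         	struct dma_resv		*sc_resv;  /* object being polled */
                         	struct dma_resv_poll	sc_rpoll;  /* per-object poll state */
                         };

                         static void __unused
                         example_attach(struct example_softc *sc)
                         {

                         	dma_resv_poll_init(&sc->sc_rpoll);
                         }

                         static void __unused
                         example_detach(struct example_softc *sc)
                         {

                         	dma_resv_poll_fini(&sc->sc_rpoll);
                         }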
   1224   1.1  riastrad 
   1225   1.1  riastrad /*
   1226   1.1  riastrad  * dma_resv_poll_cb(fence, fcb)
   1227   1.1  riastrad  *
   1228   1.1  riastrad  *	Callback to notify a reservation poll that a fence has
   1229   1.1  riastrad  *	completed.  Notify any waiters and allow the next poller to
   1230   1.1  riastrad  *	claim the callback.
   1231   1.1  riastrad  *
    1232   1.1  riastrad  *	If a thread is waiting for the exclusive fence only, and we
    1233   1.1  riastrad  *	spuriously notify it about a shared fence, tough.
   1234   1.1  riastrad  */
   1235   1.1  riastrad static void
   1236   1.1  riastrad dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
   1237   1.1  riastrad {
   1238   1.1  riastrad 	struct dma_resv_poll *rpoll = container_of(fcb,
   1239   1.1  riastrad 	    struct dma_resv_poll, rp_fcb);
   1240   1.1  riastrad 
   1241   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1242   1.1  riastrad 	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
   1243   1.1  riastrad 	rpoll->rp_claimed = 0;
   1244   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1245   1.1  riastrad }
   1246   1.1  riastrad 
   1247   1.1  riastrad /*
   1248   1.1  riastrad  * dma_resv_do_poll(robj, events, rpoll)
   1249   1.1  riastrad  *
   1250   1.1  riastrad  *	Poll for reservation object events using the reservation poll
   1251   1.1  riastrad  *	state in rpoll:
   1252   1.1  riastrad  *
    1253   1.1  riastrad  *	- POLLOUT	wait for all fences, shared and exclusive
   1254   1.1  riastrad  *	- POLLIN	wait for the exclusive fence
   1255   1.1  riastrad  *
   1256   1.1  riastrad  *	Return the subset of events in events that are ready.  If any
   1257   1.1  riastrad  *	are requested but not ready, arrange to be notified with
   1258   1.1  riastrad  *	selnotify when they are.
   1259   1.1  riastrad  */
   1260   1.1  riastrad int
   1261   1.1  riastrad dma_resv_do_poll(const struct dma_resv *robj, int events,
   1262   1.1  riastrad     struct dma_resv_poll *rpoll)
   1263   1.1  riastrad {
   1264   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1265  1.12  riastrad 	const struct dma_resv_list *list;
   1266  1.10  riastrad 	struct dma_fence *fence = NULL;
   1267   1.1  riastrad 	uint32_t i, shared_count;
   1268   1.1  riastrad 	int revents;
   1269   1.1  riastrad 	bool recorded = false;	/* curlwp is on the selq */
   1270   1.1  riastrad 	bool claimed = false;	/* we claimed the callback */
   1271   1.1  riastrad 	bool callback = false;	/* we requested a callback */
   1272   1.1  riastrad 
   1273   1.1  riastrad 	/*
   1274   1.1  riastrad 	 * Start with the maximal set of events that could be ready.
    1275   1.1  riastrad 	 * As we go, we will eliminate the events that are definitely
    1276   1.1  riastrad 	 * not ready, and at the same time add callbacks to notify us
    1277   1.1  riastrad 	 * when they may become ready.
   1278   1.1  riastrad 	 */
   1279   1.1  riastrad 	revents = events & (POLLIN|POLLOUT);
   1280   1.1  riastrad 	if (revents == 0)
   1281   1.1  riastrad 		return 0;
   1282   1.1  riastrad 
   1283  1.10  riastrad top:	KASSERT(fence == NULL);
   1284  1.10  riastrad 
   1285   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1286   1.1  riastrad 	rcu_read_lock();
   1287   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1288   1.1  riastrad 
   1289   1.1  riastrad 	/* If we want to wait for all fences, get the shared list.  */
   1290  1.12  riastrad 	if (events & POLLOUT) {
   1291  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1292  1.12  riastrad 			&ticket))
   1293   1.1  riastrad 			goto restart;
   1294  1.12  riastrad 	} else {
   1295  1.12  riastrad 		list = NULL;
   1296  1.12  riastrad 		shared_count = 0;
   1297  1.12  riastrad 	}
   1298  1.12  riastrad 	if (list != NULL) do {
   1299   1.1  riastrad 		/*
   1300   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1301   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1302   1.1  riastrad 		 * it is signalled.  Stop and request a callback if we
   1303   1.1  riastrad 		 * find any that is not signalled.
   1304   1.1  riastrad 		 */
   1305   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1306  1.10  riastrad 			KASSERT(fence == NULL);
   1307   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1308  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1309   1.1  riastrad 				goto restart;
   1310   1.1  riastrad 			if (!dma_fence_is_signaled(fence)) {
   1311   1.1  riastrad 				dma_fence_put(fence);
   1312  1.10  riastrad 				fence = NULL;
   1313   1.1  riastrad 				break;
   1314   1.1  riastrad 			}
   1315   1.1  riastrad 			dma_fence_put(fence);
   1316  1.10  riastrad 			fence = NULL;
   1317   1.1  riastrad 		}
   1318   1.1  riastrad 
   1319   1.1  riastrad 		/* If all shared fences have been signalled, move on.  */
   1320   1.1  riastrad 		if (i == shared_count)
   1321   1.1  riastrad 			break;
   1322   1.1  riastrad 
   1323   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1324   1.1  riastrad 		if (!recorded)
   1325   1.1  riastrad 			goto record;
   1326   1.1  riastrad 
   1327   1.1  riastrad 		/*
   1328   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1329   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1330   1.1  riastrad 		 * assume the event is not ready.
   1331   1.1  riastrad 		 */
   1332   1.1  riastrad 		if (!claimed || callback) {
   1333   1.1  riastrad 			revents &= ~POLLOUT;
   1334   1.1  riastrad 			break;
   1335   1.1  riastrad 		}
   1336   1.1  riastrad 
   1337   1.1  riastrad 		/*
   1338   1.1  riastrad 		 * Otherwise, find the first fence that is not
   1339   1.1  riastrad 		 * signalled, request the callback, and clear POLLOUT
   1340   1.1  riastrad 		 * from the possible ready events.  If they are all
   1341   1.1  riastrad 		 * signalled, leave POLLOUT set; we will simulate the
   1342   1.1  riastrad 		 * callback later.
   1343   1.1  riastrad 		 */
   1344   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1345  1.10  riastrad 			KASSERT(fence == NULL);
   1346   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1347  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1348   1.1  riastrad 				goto restart;
   1349   1.1  riastrad 			if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1350   1.1  riastrad 				dma_resv_poll_cb)) {
   1351   1.1  riastrad 				dma_fence_put(fence);
   1352  1.10  riastrad 				fence = NULL;
   1353   1.1  riastrad 				revents &= ~POLLOUT;
   1354   1.1  riastrad 				callback = true;
   1355   1.1  riastrad 				break;
   1356   1.1  riastrad 			}
   1357   1.1  riastrad 			dma_fence_put(fence);
   1358  1.10  riastrad 			fence = NULL;
   1359   1.1  riastrad 		}
   1360   1.1  riastrad 	} while (0);
   1361   1.1  riastrad 
   1362   1.1  riastrad 	/* We always wait for at least the exclusive fence, so get it.  */
   1363  1.10  riastrad 	KASSERT(fence == NULL);
   1364  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1365  1.12  riastrad 		goto restart;
   1366  1.12  riastrad 	if (fence != NULL) do {
   1367   1.1  riastrad 		/*
   1368  1.12  riastrad 		 * Test whether it is signalled.  If not, stop and
   1369  1.12  riastrad 		 * request a callback.
   1370   1.1  riastrad 		 */
   1371  1.16  riastrad 		if (dma_fence_is_signaled(fence))
   1372   1.1  riastrad 			break;
   1373   1.1  riastrad 
   1374   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1375   1.1  riastrad 		if (!recorded) {
   1376   1.1  riastrad 			dma_fence_put(fence);
   1377  1.10  riastrad 			fence = NULL;
   1378   1.1  riastrad 			goto record;
   1379   1.1  riastrad 		}
   1380   1.1  riastrad 
   1381   1.1  riastrad 		/*
   1382   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1383   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1384   1.1  riastrad 		 * assume the event is not ready.
   1385   1.1  riastrad 		 */
   1386   1.1  riastrad 		if (!claimed || callback) {
   1387   1.1  riastrad 			revents = 0;
   1388   1.1  riastrad 			break;
   1389   1.1  riastrad 		}
   1390   1.1  riastrad 
   1391   1.1  riastrad 		/*
   1392   1.1  riastrad 		 * Otherwise, try to request the callback, and clear
   1393   1.1  riastrad 		 * all possible ready events.  If the fence has been
   1394   1.1  riastrad 		 * signalled in the interim, leave the events set; we
   1395   1.1  riastrad 		 * will simulate the callback later.
   1396   1.1  riastrad 		 */
   1397   1.1  riastrad 		if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1398   1.1  riastrad 			dma_resv_poll_cb)) {
   1399   1.1  riastrad 			revents = 0;
   1400   1.1  riastrad 			callback = true;
   1401   1.1  riastrad 			break;
   1402   1.1  riastrad 		}
   1403  1.16  riastrad 	} while (0);
   1404  1.16  riastrad 	if (fence != NULL) {
   1405   1.1  riastrad 		dma_fence_put(fence);
   1406  1.10  riastrad 		fence = NULL;
   1407  1.16  riastrad 	}
   1408   1.1  riastrad 
   1409   1.1  riastrad 	/* All done reading the fences.  */
   1410   1.1  riastrad 	rcu_read_unlock();
   1411   1.1  riastrad 
   1412   1.1  riastrad 	if (claimed && !callback) {
   1413   1.1  riastrad 		/*
   1414   1.1  riastrad 		 * We claimed the callback but we didn't actually
   1415   1.1  riastrad 		 * request it because a fence was signalled while we
   1416   1.1  riastrad 		 * were claiming it.  Call it ourselves now.  The
    1417   1.1  riastrad 		 * callback neither uses the fence nor relies on holding
   1418   1.1  riastrad 		 * any of the fence locks, so this is safe.
   1419   1.1  riastrad 		 */
   1420   1.1  riastrad 		dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
   1421   1.1  riastrad 	}
   1422   1.1  riastrad 	return revents;
   1423   1.1  riastrad 
   1424   1.1  riastrad restart:
   1425  1.10  riastrad 	KASSERT(fence == NULL);
   1426   1.1  riastrad 	rcu_read_unlock();
   1427   1.1  riastrad 	goto top;
   1428   1.1  riastrad 
   1429   1.1  riastrad record:
   1430  1.10  riastrad 	KASSERT(fence == NULL);
   1431   1.1  riastrad 	rcu_read_unlock();
   1432   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1433   1.1  riastrad 	selrecord(curlwp, &rpoll->rp_selq);
   1434   1.1  riastrad 	if (!rpoll->rp_claimed)
   1435   1.1  riastrad 		claimed = rpoll->rp_claimed = true;
   1436   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1437   1.1  riastrad 	recorded = true;
   1438   1.1  riastrad 	goto top;
   1439   1.1  riastrad }
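
                         /*
                          * Example (sketch): a hypothetical poll(2) backend built on
                          * dma_resv_do_poll, continuing the example_softc sketch
                          * above.  The selrecord and callback bookkeeping all happen
                          * inside dma_resv_do_poll; the driver only forwards the
                          * requested events and the per-object poll state.
                          */
                         static int __unused
                         example_poll(struct example_softc *sc, int events)
                         {

                         	/*
                         	 * POLLIN becomes ready when the exclusive fence has
                         	 * signalled; POLLOUT when all fences, shared and
                         	 * exclusive, have signalled.
                         	 */
                         	return dma_resv_do_poll(sc->sc_resv, events, &sc->sc_rpoll);
                         }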
   1440   1.1  riastrad 
   1441   1.1  riastrad /*
   1442   1.1  riastrad  * dma_resv_kqfilter(robj, kn, rpoll)
   1443   1.1  riastrad  *
   1444   1.1  riastrad  *	Kqueue filter for reservation objects.  Currently not
   1445   1.1  riastrad  *	implemented because the logic to implement it is nontrivial,
   1446   1.1  riastrad  *	and userland will presumably never use it, so it would be
   1447   1.1  riastrad  *	dangerous to add never-tested complex code paths to the kernel.
   1448   1.1  riastrad  */
   1449   1.1  riastrad int
   1450   1.1  riastrad dma_resv_kqfilter(const struct dma_resv *robj,
   1451   1.1  riastrad     struct knote *kn, struct dma_resv_poll *rpoll)
   1452   1.1  riastrad {
   1453   1.1  riastrad 
   1454   1.1  riastrad 	return EINVAL;
   1455   1.1  riastrad }
   1456