      1  1.11  riastrad /*	$NetBSD: linux_dma_resv.c,v 1.11 2021/12/19 12:21:30 riastradh Exp $	*/
      2   1.1  riastrad 
      3   1.1  riastrad /*-
      4   1.1  riastrad  * Copyright (c) 2018 The NetBSD Foundation, Inc.
      5   1.1  riastrad  * All rights reserved.
      6   1.1  riastrad  *
      7   1.1  riastrad  * This code is derived from software contributed to The NetBSD Foundation
      8   1.1  riastrad  * by Taylor R. Campbell.
      9   1.1  riastrad  *
     10   1.1  riastrad  * Redistribution and use in source and binary forms, with or without
     11   1.1  riastrad  * modification, are permitted provided that the following conditions
     12   1.1  riastrad  * are met:
     13   1.1  riastrad  * 1. Redistributions of source code must retain the above copyright
     14   1.1  riastrad  *    notice, this list of conditions and the following disclaimer.
     15   1.1  riastrad  * 2. Redistributions in binary form must reproduce the above copyright
     16   1.1  riastrad  *    notice, this list of conditions and the following disclaimer in the
     17   1.1  riastrad  *    documentation and/or other materials provided with the distribution.
     18   1.1  riastrad  *
     19   1.1  riastrad  * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
     20   1.1  riastrad  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
     21   1.1  riastrad  * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
     22   1.1  riastrad  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
     23   1.1  riastrad  * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
     24   1.1  riastrad  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
     25   1.1  riastrad  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
     26   1.1  riastrad  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
     27   1.1  riastrad  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
     28   1.1  riastrad  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
     29   1.1  riastrad  * POSSIBILITY OF SUCH DAMAGE.
     30   1.1  riastrad  */
     31   1.1  riastrad 
     32   1.1  riastrad #include <sys/cdefs.h>
     33  1.11  riastrad __KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.11 2021/12/19 12:21:30 riastradh Exp $");
     34   1.1  riastrad 
     35   1.1  riastrad #include <sys/param.h>
     36   1.1  riastrad #include <sys/poll.h>
     37   1.1  riastrad #include <sys/select.h>
     38   1.1  riastrad 
     39   1.1  riastrad #include <linux/dma-fence.h>
     40   1.1  riastrad #include <linux/dma-resv.h>
     41   1.1  riastrad #include <linux/seqlock.h>
     42   1.1  riastrad #include <linux/ww_mutex.h>
     43   1.1  riastrad 
     44   1.1  riastrad DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);
     45   1.1  riastrad 
     46   1.1  riastrad static struct dma_resv_list *
     47   1.1  riastrad objlist_tryalloc(uint32_t n)
     48   1.1  riastrad {
     49   1.1  riastrad 	struct dma_resv_list *list;
     50   1.1  riastrad 
     51   1.1  riastrad 	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
     52   1.1  riastrad 	if (list == NULL)
     53   1.1  riastrad 		return NULL;
     54   1.1  riastrad 	list->shared_max = n;
     55   1.1  riastrad 
     56   1.1  riastrad 	return list;
     57   1.1  riastrad }
     58   1.1  riastrad 
     59   1.1  riastrad static void
     60   1.1  riastrad objlist_free(struct dma_resv_list *list)
     61   1.1  riastrad {
     62   1.1  riastrad 	uint32_t n = list->shared_max;
     63   1.1  riastrad 
     64   1.1  riastrad 	kmem_free(list, offsetof(typeof(*list), shared[n]));
     65   1.1  riastrad }
     66   1.1  riastrad 
     67   1.1  riastrad static void
     68   1.1  riastrad objlist_free_cb(struct rcu_head *rcu)
     69   1.1  riastrad {
     70   1.1  riastrad 	struct dma_resv_list *list = container_of(rcu,
     71   1.1  riastrad 	    struct dma_resv_list, rol_rcu);
     72   1.1  riastrad 
     73   1.1  riastrad 	objlist_free(list);
     74   1.1  riastrad }
     75   1.1  riastrad 
     76   1.1  riastrad static void
     77   1.1  riastrad objlist_defer_free(struct dma_resv_list *list)
     78   1.1  riastrad {
     79   1.1  riastrad 
     80   1.1  riastrad 	call_rcu(&list->rol_rcu, objlist_free_cb);
     81   1.1  riastrad }
     82   1.1  riastrad 
     83   1.1  riastrad /*
     84   1.1  riastrad  * dma_resv_init(robj)
     85   1.1  riastrad  *
     86   1.1  riastrad  *	Initialize a reservation object.  Caller must later destroy it
     87   1.1  riastrad  *	with dma_resv_fini.
     88   1.1  riastrad  */
     89   1.1  riastrad void
     90   1.1  riastrad dma_resv_init(struct dma_resv *robj)
     91   1.1  riastrad {
     92   1.1  riastrad 
     93   1.1  riastrad 	ww_mutex_init(&robj->lock, &reservation_ww_class);
     94   1.1  riastrad 	seqcount_init(&robj->seq);
     95   1.1  riastrad 	robj->fence_excl = NULL;
     96   1.1  riastrad 	robj->fence = NULL;
     97   1.1  riastrad 	robj->robj_prealloc = NULL;
     98   1.1  riastrad }
     99   1.1  riastrad 
    100   1.1  riastrad /*
    101   1.1  riastrad  * dma_resv_fini(robj)
    102   1.1  riastrad  *
    103   1.1  riastrad  *	Destroy a reservation object, freeing any memory that had been
    104   1.1  riastrad  *	allocated for it.  Caller must have exclusive access to it.
    105   1.1  riastrad  */
    106   1.1  riastrad void
    107   1.1  riastrad dma_resv_fini(struct dma_resv *robj)
    108   1.1  riastrad {
    109   1.1  riastrad 	unsigned i;
    110   1.1  riastrad 
    111  1.10  riastrad 	if (robj->robj_prealloc) {
    112   1.1  riastrad 		objlist_free(robj->robj_prealloc);
    113  1.10  riastrad 		robj->robj_prealloc = NULL; /* paranoia */
    114  1.10  riastrad 	}
    115   1.1  riastrad 	if (robj->fence) {
    116  1.10  riastrad 		for (i = 0; i < robj->fence->shared_count; i++) {
    117   1.1  riastrad 			dma_fence_put(robj->fence->shared[i]);
    118  1.10  riastrad 			robj->fence->shared[i] = NULL; /* paranoia */
    119  1.10  riastrad 		}
    120   1.1  riastrad 		objlist_free(robj->fence);
    121  1.10  riastrad 		robj->fence = NULL; /* paranoia */
    122   1.1  riastrad 	}
    123  1.10  riastrad 	if (robj->fence_excl) {
    124   1.1  riastrad 		dma_fence_put(robj->fence_excl);
    125  1.10  riastrad 		robj->fence_excl = NULL; /* paranoia */
    126  1.10  riastrad 	}
    127   1.1  riastrad 	ww_mutex_destroy(&robj->lock);
    128   1.1  riastrad }
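
/*
 * Illustrative sketch, added for exposition and not part of the
 * original file: a driver object embedding a reservation object
 * normally pairs dma_resv_init with dma_resv_fini over the object's
 * lifetime.  The struct and function names below are hypothetical
 * and the helpers are unused examples.
 */
struct example_buffer {
	struct dma_resv		eb_resv;
	/* ... driver-specific members ... */
};

static void __unused
example_buffer_init(struct example_buffer *eb)
{

	dma_resv_init(&eb->eb_resv);
}

static void __unused
example_buffer_destroy(struct example_buffer *eb)
{

	/* Caller guarantees exclusive access; fini drops any fences.  */
	dma_resv_fini(&eb->eb_resv);
}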
    129   1.1  riastrad 
    130   1.1  riastrad /*
    131   1.1  riastrad  * dma_resv_lock(robj, ctx)
    132   1.1  riastrad  *
    133   1.1  riastrad  *	Acquire a reservation object's lock.  Return 0 on success,
    134   1.1  riastrad  *	-EALREADY if caller already holds it, -EDEADLK if a
    135   1.1  riastrad  *	higher-priority owner holds it and the caller must back out and
    136   1.1  riastrad  *	retry.
    137   1.1  riastrad  */
    138   1.1  riastrad int
    139   1.1  riastrad dma_resv_lock(struct dma_resv *robj,
    140   1.1  riastrad     struct ww_acquire_ctx *ctx)
    141   1.1  riastrad {
    142   1.1  riastrad 
    143   1.1  riastrad 	return ww_mutex_lock(&robj->lock, ctx);
    144   1.1  riastrad }
    145   1.1  riastrad 
    146   1.1  riastrad /*
    147   1.2  riastrad  * dma_resv_lock_slow(robj, ctx)
    148   1.2  riastrad  *
    149   1.2  riastrad  *	Acquire a reservation object's lock.  Caller must not hold
    150   1.2  riastrad  *	this lock or any others -- this is to be used in slow paths
    151   1.2  riastrad  *	after dma_resv_lock or dma_resv_lock_interruptible has failed
    152   1.2  riastrad  *	and the caller has backed out all other locks.
    153   1.2  riastrad  */
    154   1.2  riastrad void
    155   1.2  riastrad dma_resv_lock_slow(struct dma_resv *robj,
    156   1.2  riastrad     struct ww_acquire_ctx *ctx)
    157   1.2  riastrad {
    158   1.2  riastrad 
    159   1.2  riastrad 	ww_mutex_lock_slow(&robj->lock, ctx);
    160   1.2  riastrad }
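
/*
 * Illustrative sketch, added for exposition and not part of the
 * original file: the usual wait/wound dance for locking two
 * reservation objects without deadlocking.  On -EDEADLK the caller
 * drops everything it holds, sleeps for the contended object with
 * dma_resv_lock_slow, and retries.  The function name and the
 * two-object scenario are hypothetical; an interruptible caller
 * would substitute dma_resv_lock_interruptible and
 * dma_resv_lock_slow_interruptible and check for -EINTR/-ERESTART.
 */
static int __unused
example_lock_pair(struct dma_resv *a, struct dma_resv *b)
{
	struct ww_acquire_ctx ctx;
	struct dma_resv *contended = NULL;
	int ret;

	KASSERT(a != b);

	ww_acquire_init(&ctx, &reservation_ww_class);
retry:	if (contended != NULL) {
		/*
		 * We lost a deadlock race.  Wait for the contended
		 * object with nothing else held, and keep it locked.
		 */
		dma_resv_lock_slow(contended, &ctx);
	}
	if (a != contended) {
		ret = dma_resv_lock(a, &ctx);
		if (ret == -EDEADLK) {
			if (contended != NULL)
				dma_resv_unlock(contended);
			contended = a;
			goto retry;
		}
		KASSERT(ret == 0);
	}
	if (b != contended) {
		ret = dma_resv_lock(b, &ctx);
		if (ret == -EDEADLK) {
			dma_resv_unlock(a);
			contended = b;
			goto retry;
		}
		KASSERT(ret == 0);
	}
	ww_acquire_done(&ctx);

	/* ... both a and b are locked here ... */

	dma_resv_unlock(b);
	dma_resv_unlock(a);
	ww_acquire_fini(&ctx);

	return 0;
}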
    161   1.2  riastrad 
    162   1.2  riastrad /*
    163   1.1  riastrad  * dma_resv_lock_interruptible(robj, ctx)
    164   1.1  riastrad  *
    165   1.1  riastrad  *	Acquire a reservation object's lock.  Return 0 on success,
    166   1.1  riastrad  *	-EALREADY if caller already holds it, -EDEADLK if a
    167   1.1  riastrad  *	higher-priority owner holds it and the caller must back out and
    168   1.1  riastrad  *	retry, -ERESTART/-EINTR if interrupted.
    169   1.1  riastrad  */
    170   1.1  riastrad int
    171   1.1  riastrad dma_resv_lock_interruptible(struct dma_resv *robj,
    172   1.1  riastrad     struct ww_acquire_ctx *ctx)
    173   1.1  riastrad {
    174   1.1  riastrad 
    175   1.1  riastrad 	return ww_mutex_lock_interruptible(&robj->lock, ctx);
    176   1.1  riastrad }
    177   1.1  riastrad 
    178   1.1  riastrad /*
    179   1.2  riastrad  * dma_resv_lock_slow_interruptible(robj, ctx)
    180   1.2  riastrad  *
    181   1.2  riastrad  *	Acquire a reservation object's lock.  Caller must not hold
    182   1.2  riastrad  *	this lock or any others -- this is to be used in slow paths
    183   1.2  riastrad  *	after dma_resv_lock or dma_resv_lock_interruptible has failed
    184   1.2  riastrad  *	and the caller has backed out all other locks.  Return 0 on
    185   1.2  riastrad  *	success, -ERESTART/-EINTR if interrupted.
    186   1.2  riastrad  */
    187   1.2  riastrad int
    188   1.2  riastrad dma_resv_lock_slow_interruptible(struct dma_resv *robj,
    189   1.2  riastrad     struct ww_acquire_ctx *ctx)
    190   1.2  riastrad {
    191   1.2  riastrad 
    192   1.2  riastrad 	return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
    193   1.2  riastrad }
    194   1.2  riastrad 
    195   1.2  riastrad /*
    196   1.1  riastrad  * dma_resv_trylock(robj)
    197   1.1  riastrad  *
    198   1.1  riastrad  *	Try to acquire a reservation object's lock without blocking.
    199   1.1  riastrad  *	Return true on success, false on failure.
    200   1.1  riastrad  */
    201   1.1  riastrad bool
    202   1.1  riastrad dma_resv_trylock(struct dma_resv *robj)
    203   1.1  riastrad {
    204   1.1  riastrad 
    205   1.1  riastrad 	return ww_mutex_trylock(&robj->lock);
    206   1.1  riastrad }
    207   1.1  riastrad 
    208   1.1  riastrad /*
    209   1.4  riastrad  * dma_resv_locking_ctx(robj)
    210   1.4  riastrad  *
    211   1.4  riastrad  *	Return a pointer to the ww_acquire_ctx used by the owner of
    212   1.4  riastrad  *	the reservation object's lock, or NULL if it is either not
    213   1.4  riastrad  *	owned or if it is locked without context.
    214   1.4  riastrad  */
    215   1.4  riastrad struct ww_acquire_ctx *
    216   1.4  riastrad dma_resv_locking_ctx(struct dma_resv *robj)
    217   1.4  riastrad {
    218   1.4  riastrad 
    219   1.4  riastrad 	return ww_mutex_locking_ctx(&robj->lock);
    220   1.4  riastrad }
    221   1.4  riastrad 
    222   1.4  riastrad /*
    223   1.1  riastrad  * dma_resv_unlock(robj)
    224   1.1  riastrad  *
    225   1.1  riastrad  *	Release a reservation object's lock.
    226   1.1  riastrad  */
    227   1.1  riastrad void
    228   1.1  riastrad dma_resv_unlock(struct dma_resv *robj)
    229   1.1  riastrad {
    230   1.1  riastrad 
    231   1.1  riastrad 	return ww_mutex_unlock(&robj->lock);
    232   1.1  riastrad }
    233   1.1  riastrad 
    234   1.1  riastrad /*
    235  1.11  riastrad  * dma_resv_is_locked(robj)
    236  1.11  riastrad  *
    237  1.11  riastrad  *	True if robj is locked.
    238  1.11  riastrad  */
    239  1.11  riastrad bool
    240  1.11  riastrad dma_resv_is_locked(struct dma_resv *robj)
    241  1.11  riastrad {
    242  1.11  riastrad 
    243  1.11  riastrad 	return ww_mutex_is_locked(&robj->lock);
    244  1.11  riastrad }
    245  1.11  riastrad 
    246  1.11  riastrad /*
    247   1.1  riastrad  * dma_resv_held(robj)
    248   1.1  riastrad  *
    249   1.1  riastrad  *	True if robj is locked.
    250   1.1  riastrad  */
    251   1.1  riastrad bool
    252   1.1  riastrad dma_resv_held(struct dma_resv *robj)
    253   1.1  riastrad {
    254   1.1  riastrad 
    255   1.1  riastrad 	return ww_mutex_is_locked(&robj->lock);
    256   1.1  riastrad }
    257   1.1  riastrad 
    258   1.1  riastrad /*
    259   1.1  riastrad  * dma_resv_assert_held(robj)
    260   1.1  riastrad  *
    261   1.1  riastrad  *	Panic if robj is not held, in DIAGNOSTIC builds.
    262   1.1  riastrad  */
    263   1.1  riastrad void
    264   1.1  riastrad dma_resv_assert_held(struct dma_resv *robj)
    265   1.1  riastrad {
    266   1.1  riastrad 
    267   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    268   1.1  riastrad }
    269   1.1  riastrad 
    270   1.1  riastrad /*
    271   1.1  riastrad  * dma_resv_get_excl(robj)
    272   1.1  riastrad  *
    273   1.1  riastrad  *	Return a pointer to the exclusive fence of the reservation
    274   1.1  riastrad  *	object robj.
    275   1.1  riastrad  *
    276   1.1  riastrad  *	Caller must have robj locked.
    277   1.1  riastrad  */
    278   1.1  riastrad struct dma_fence *
    279   1.1  riastrad dma_resv_get_excl(struct dma_resv *robj)
    280   1.1  riastrad {
    281   1.1  riastrad 
    282   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    283   1.1  riastrad 	return robj->fence_excl;
    284   1.1  riastrad }
    285   1.1  riastrad 
    286   1.1  riastrad /*
    287   1.1  riastrad  * dma_resv_get_list(robj)
    288   1.1  riastrad  *
    289   1.1  riastrad  *	Return a pointer to the shared fence list of the reservation
    290   1.1  riastrad  *	object robj.
    291   1.1  riastrad  *
    292   1.1  riastrad  *	Caller must have robj locked.
    293   1.1  riastrad  */
    294   1.1  riastrad struct dma_resv_list *
    295   1.1  riastrad dma_resv_get_list(struct dma_resv *robj)
    296   1.1  riastrad {
    297   1.1  riastrad 
    298   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    299   1.1  riastrad 	return robj->fence;
    300   1.1  riastrad }
    301   1.1  riastrad 
    302   1.1  riastrad /*
     303   1.1  riastrad  * dma_resv_reserve_shared(robj, num_fences)
    304   1.1  riastrad  *
    305   1.1  riastrad  *	Reserve space in robj to add a shared fence.  To be used only
    306   1.1  riastrad  *	once before calling dma_resv_add_shared_fence.
    307   1.1  riastrad  *
    308   1.1  riastrad  *	Caller must have robj locked.
    309   1.1  riastrad  *
    310   1.1  riastrad  *	Internally, we start with room for four entries and double if
    311   1.1  riastrad  *	we don't have enough.  This is not guaranteed.
    312   1.1  riastrad  */
    313   1.1  riastrad int
    314   1.3  riastrad dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
    315   1.1  riastrad {
    316   1.1  riastrad 	struct dma_resv_list *list, *prealloc;
    317   1.1  riastrad 	uint32_t n, nalloc;
    318   1.1  riastrad 
    319   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    320   1.3  riastrad 	KASSERT(num_fences == 1);
    321   1.1  riastrad 
    322   1.1  riastrad 	list = robj->fence;
    323   1.1  riastrad 	prealloc = robj->robj_prealloc;
    324   1.1  riastrad 
    325   1.1  riastrad 	/* If there's an existing list, check it for space.  */
    326   1.1  riastrad 	if (list) {
    327   1.1  riastrad 		/* If there's too many already, give up.  */
    328   1.1  riastrad 		if (list->shared_count == UINT32_MAX)
    329   1.1  riastrad 			return -ENOMEM;
    330   1.1  riastrad 
    331   1.1  riastrad 		/* Add one more. */
    332   1.1  riastrad 		n = list->shared_count + 1;
    333   1.1  riastrad 
    334   1.1  riastrad 		/* If there's enough for one more, we're done.  */
    335   1.1  riastrad 		if (n <= list->shared_max)
    336   1.1  riastrad 			return 0;
    337   1.1  riastrad 	} else {
    338   1.1  riastrad 		/* No list already.  We need space for 1.  */
    339   1.1  riastrad 		n = 1;
    340   1.1  riastrad 	}
    341   1.1  riastrad 
    342   1.1  riastrad 	/* If not, maybe there's a preallocated list ready.  */
    343   1.1  riastrad 	if (prealloc != NULL) {
    344   1.1  riastrad 		/* If there's enough room in it, stop here.  */
    345   1.1  riastrad 		if (n <= prealloc->shared_max)
    346   1.1  riastrad 			return 0;
    347   1.1  riastrad 
    348   1.1  riastrad 		/* Try to double its capacity.  */
    349   1.1  riastrad 		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
    350   1.1  riastrad 		prealloc = objlist_tryalloc(nalloc);
    351   1.1  riastrad 		if (prealloc == NULL)
    352   1.1  riastrad 			return -ENOMEM;
    353   1.1  riastrad 
    354   1.1  riastrad 		/* Swap the new preallocated list and free the old one.  */
    355   1.1  riastrad 		objlist_free(robj->robj_prealloc);
    356   1.1  riastrad 		robj->robj_prealloc = prealloc;
    357   1.1  riastrad 	} else {
    358   1.1  riastrad 		/* Start with some spare.  */
    359   1.1  riastrad 		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
    360   1.1  riastrad 		prealloc = objlist_tryalloc(nalloc);
    361   1.1  riastrad 		if (prealloc == NULL)
    362   1.1  riastrad 			return -ENOMEM;
    363   1.1  riastrad 		/* Save the new preallocated list.  */
    364   1.1  riastrad 		robj->robj_prealloc = prealloc;
    365   1.1  riastrad 	}
    366   1.1  riastrad 
    367   1.1  riastrad 	/* Success!  */
    368   1.1  riastrad 	return 0;
    369   1.1  riastrad }
    370   1.1  riastrad 
    371   1.1  riastrad struct dma_resv_write_ticket {
    372   1.1  riastrad };
    373   1.1  riastrad 
    374   1.1  riastrad /*
    375   1.1  riastrad  * dma_resv_write_begin(robj, ticket)
    376   1.1  riastrad  *
    377   1.1  riastrad  *	Begin an atomic batch of writes to robj, and initialize opaque
    378   1.1  riastrad  *	ticket for it.  The ticket must be passed to
    379   1.1  riastrad  *	dma_resv_write_commit to commit the writes.
    380   1.1  riastrad  *
    381   1.1  riastrad  *	Caller must have robj locked.
    382   1.1  riastrad  *
    383   1.1  riastrad  *	Implies membar_producer, i.e. store-before-store barrier.  Does
    384   1.1  riastrad  *	NOT serve as an acquire operation, however.
    385   1.1  riastrad  */
    386   1.1  riastrad static void
    387   1.1  riastrad dma_resv_write_begin(struct dma_resv *robj,
    388   1.1  riastrad     struct dma_resv_write_ticket *ticket)
    389   1.1  riastrad {
    390   1.1  riastrad 
    391   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    392   1.1  riastrad 
    393   1.1  riastrad 	write_seqcount_begin(&robj->seq);
    394   1.1  riastrad }
    395   1.1  riastrad 
    396   1.1  riastrad /*
    397   1.1  riastrad  * dma_resv_write_commit(robj, ticket)
    398   1.1  riastrad  *
    399   1.1  riastrad  *	Commit an atomic batch of writes to robj begun with the call to
    400   1.1  riastrad  *	dma_resv_write_begin that returned ticket.
    401   1.1  riastrad  *
    402   1.1  riastrad  *	Caller must have robj locked.
    403   1.1  riastrad  *
    404   1.1  riastrad  *	Implies membar_producer, i.e. store-before-store barrier.  Does
    405   1.1  riastrad  *	NOT serve as a release operation, however.
    406   1.1  riastrad  */
    407   1.1  riastrad static void
    408   1.1  riastrad dma_resv_write_commit(struct dma_resv *robj,
    409   1.1  riastrad     struct dma_resv_write_ticket *ticket)
    410   1.1  riastrad {
    411   1.1  riastrad 
    412   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    413   1.1  riastrad 
    414   1.1  riastrad 	write_seqcount_end(&robj->seq);
    415   1.1  riastrad }
    416   1.1  riastrad 
    417   1.1  riastrad struct dma_resv_read_ticket {
    418   1.1  riastrad 	unsigned version;
    419   1.1  riastrad };
    420   1.1  riastrad 
    421   1.1  riastrad /*
    422   1.1  riastrad  * dma_resv_read_begin(robj, ticket)
    423   1.1  riastrad  *
    424   1.1  riastrad  *	Begin a read section, and initialize opaque ticket for it.  The
     425   1.1  riastrad  *	ticket must be passed to dma_resv_read_valid, and the
    426   1.1  riastrad  *	caller must be prepared to retry reading if it fails.
    427   1.1  riastrad  */
    428   1.1  riastrad static void
    429   1.1  riastrad dma_resv_read_begin(const struct dma_resv *robj,
    430   1.1  riastrad     struct dma_resv_read_ticket *ticket)
    431   1.1  riastrad {
    432   1.1  riastrad 
    433   1.1  riastrad 	ticket->version = read_seqcount_begin(&robj->seq);
    434   1.1  riastrad }
    435   1.1  riastrad 
    436   1.1  riastrad /*
    437   1.1  riastrad  * dma_resv_read_valid(robj, ticket)
    438   1.1  riastrad  *
    439   1.1  riastrad  *	Test whether the read sections are valid.  Return true on
    440   1.1  riastrad  *	success, or false on failure if the read ticket has been
    441   1.1  riastrad  *	invalidated.
    442   1.1  riastrad  */
    443   1.1  riastrad static bool
    444   1.1  riastrad dma_resv_read_valid(const struct dma_resv *robj,
    445   1.1  riastrad     struct dma_resv_read_ticket *ticket)
    446   1.1  riastrad {
    447   1.1  riastrad 
    448   1.1  riastrad 	return !read_seqcount_retry(&robj->seq, ticket->version);
    449   1.1  riastrad }
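
/*
 * Illustrative sketch, added for exposition and not part of the
 * original file: the readers below all follow the same ticket
 * protocol.  Everything read under the ticket is provisional until
 * dma_resv_read_valid confirms that no writer intervened; on failure
 * the reader throws the snapshot away and starts over.
 *
 *	struct dma_resv_read_ticket ticket;
 *
 * top:	rcu_read_lock();
 *	dma_resv_read_begin(robj, &ticket);
 *	...  snapshot fence pointers with atomic_load_consume/relaxed ...
 *	if (!dma_resv_read_valid(robj, &ticket)) {
 *		rcu_read_unlock();
 *		goto top;
 *	}
 *	...  acquire references with dma_fence_get_rcu, then
 *	     rcu_read_unlock() ...
 */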
    450   1.1  riastrad 
    451   1.1  riastrad /*
    452   1.1  riastrad  * dma_resv_add_excl_fence(robj, fence)
    453   1.1  riastrad  *
    454   1.1  riastrad  *	Empty and release all of robj's shared fences, and clear and
    455   1.1  riastrad  *	release its exclusive fence.  If fence is nonnull, acquire a
    456   1.1  riastrad  *	reference to it and save it as robj's exclusive fence.
    457   1.1  riastrad  *
    458   1.1  riastrad  *	Caller must have robj locked.
    459   1.1  riastrad  */
    460   1.1  riastrad void
    461   1.1  riastrad dma_resv_add_excl_fence(struct dma_resv *robj,
    462   1.1  riastrad     struct dma_fence *fence)
    463   1.1  riastrad {
    464   1.1  riastrad 	struct dma_fence *old_fence = robj->fence_excl;
    465   1.1  riastrad 	struct dma_resv_list *old_list = robj->fence;
    466   1.1  riastrad 	uint32_t old_shared_count;
    467   1.1  riastrad 	struct dma_resv_write_ticket ticket;
    468   1.1  riastrad 
    469   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    470   1.1  riastrad 
    471   1.1  riastrad 	/*
    472   1.1  riastrad 	 * If we are setting rather than just removing a fence, acquire
    473   1.1  riastrad 	 * a reference for ourselves.
    474   1.1  riastrad 	 */
    475   1.1  riastrad 	if (fence)
    476   1.1  riastrad 		(void)dma_fence_get(fence);
    477   1.1  riastrad 
    478   1.1  riastrad 	/* If there are any shared fences, remember how many.  */
    479   1.1  riastrad 	if (old_list)
    480   1.1  riastrad 		old_shared_count = old_list->shared_count;
    481   1.1  riastrad 
    482   1.7  riastrad 	/* Begin an update.  Implies membar_producer for fence.  */
    483   1.1  riastrad 	dma_resv_write_begin(robj, &ticket);
    484   1.1  riastrad 
    485   1.1  riastrad 	/* Replace the fence and zero the shared count.  */
    486   1.7  riastrad 	atomic_store_relaxed(&robj->fence_excl, fence);
    487   1.1  riastrad 	if (old_list)
    488   1.1  riastrad 		old_list->shared_count = 0;
    489   1.1  riastrad 
    490   1.1  riastrad 	/* Commit the update.  */
    491   1.1  riastrad 	dma_resv_write_commit(robj, &ticket);
    492   1.1  riastrad 
    493   1.1  riastrad 	/* Release the old exclusive fence, if any.  */
    494  1.10  riastrad 	if (old_fence) {
    495   1.1  riastrad 		dma_fence_put(old_fence);
    496  1.10  riastrad 		old_fence = NULL; /* paranoia */
    497  1.10  riastrad 	}
    498   1.1  riastrad 
    499   1.1  riastrad 	/* Release any old shared fences.  */
    500   1.1  riastrad 	if (old_list) {
    501  1.10  riastrad 		while (old_shared_count--) {
    502   1.1  riastrad 			dma_fence_put(old_list->shared[old_shared_count]);
    503  1.10  riastrad 			/* paranoia */
    504  1.10  riastrad 			old_list->shared[old_shared_count] = NULL;
    505  1.10  riastrad 		}
    506   1.1  riastrad 	}
    507   1.1  riastrad }
    508   1.1  riastrad 
    509   1.1  riastrad /*
    510   1.1  riastrad  * dma_resv_add_shared_fence(robj, fence)
    511   1.1  riastrad  *
    512   1.1  riastrad  *	Acquire a reference to fence and add it to robj's shared list.
    513   1.1  riastrad  *	If any fence was already added with the same context number,
    514   1.1  riastrad  *	release it and replace it by this one.
    515   1.1  riastrad  *
    516   1.1  riastrad  *	Caller must have robj locked, and must have preceded with a
    517   1.1  riastrad  *	call to dma_resv_reserve_shared for each shared fence
    518   1.1  riastrad  *	added.
    519   1.1  riastrad  */
    520   1.1  riastrad void
    521   1.1  riastrad dma_resv_add_shared_fence(struct dma_resv *robj,
    522   1.1  riastrad     struct dma_fence *fence)
    523   1.1  riastrad {
    524   1.1  riastrad 	struct dma_resv_list *list = robj->fence;
    525   1.1  riastrad 	struct dma_resv_list *prealloc = robj->robj_prealloc;
    526   1.1  riastrad 	struct dma_resv_write_ticket ticket;
    527   1.1  riastrad 	struct dma_fence *replace = NULL;
    528   1.1  riastrad 	uint32_t i;
    529   1.1  riastrad 
    530   1.1  riastrad 	KASSERT(dma_resv_held(robj));
    531   1.1  riastrad 
    532   1.1  riastrad 	/* Acquire a reference to the fence.  */
    533   1.1  riastrad 	KASSERT(fence != NULL);
    534   1.1  riastrad 	(void)dma_fence_get(fence);
    535   1.1  riastrad 
    536   1.1  riastrad 	/* Check for a preallocated replacement list.  */
    537   1.1  riastrad 	if (prealloc == NULL) {
    538   1.1  riastrad 		/*
    539   1.1  riastrad 		 * If there is no preallocated replacement list, then
    540   1.1  riastrad 		 * there must be room in the current list.
    541   1.1  riastrad 		 */
    542   1.1  riastrad 		KASSERT(list != NULL);
    543   1.1  riastrad 		KASSERT(list->shared_count < list->shared_max);
    544   1.1  riastrad 
    545   1.1  riastrad 		/* Begin an update.  Implies membar_producer for fence.  */
    546   1.1  riastrad 		dma_resv_write_begin(robj, &ticket);
    547   1.1  riastrad 
    548   1.1  riastrad 		/* Find a fence with the same context number.  */
    549   1.1  riastrad 		for (i = 0; i < list->shared_count; i++) {
    550   1.1  riastrad 			if (list->shared[i]->context == fence->context) {
    551   1.1  riastrad 				replace = list->shared[i];
    552   1.7  riastrad 				atomic_store_relaxed(&list->shared[i], fence);
    553   1.1  riastrad 				break;
    554   1.1  riastrad 			}
    555   1.1  riastrad 		}
    556   1.1  riastrad 
    557   1.1  riastrad 		/* If we didn't find one, add it at the end.  */
    558   1.7  riastrad 		if (i == list->shared_count) {
    559   1.7  riastrad 			atomic_store_relaxed(&list->shared[list->shared_count],
    560   1.7  riastrad 			    fence);
    561   1.7  riastrad 			atomic_store_relaxed(&list->shared_count,
    562   1.7  riastrad 			    list->shared_count + 1);
    563   1.7  riastrad 		}
    564   1.1  riastrad 
    565   1.1  riastrad 		/* Commit the update.  */
    566   1.1  riastrad 		dma_resv_write_commit(robj, &ticket);
    567   1.1  riastrad 	} else {
    568   1.1  riastrad 		/*
    569   1.1  riastrad 		 * There is a preallocated replacement list.  There may
    570   1.1  riastrad 		 * not be a current list.  If not, treat it as a zero-
    571   1.1  riastrad 		 * length list.
    572   1.1  riastrad 		 */
    573   1.1  riastrad 		uint32_t shared_count = (list == NULL? 0 : list->shared_count);
    574   1.1  riastrad 
    575   1.1  riastrad 		/* There had better be room in the preallocated list.  */
    576   1.1  riastrad 		KASSERT(shared_count < prealloc->shared_max);
    577   1.1  riastrad 
    578   1.1  riastrad 		/*
    579   1.1  riastrad 		 * Copy the fences over, but replace if we find one
    580   1.1  riastrad 		 * with the same context number.
    581   1.1  riastrad 		 */
    582   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
    583   1.1  riastrad 			if (replace == NULL &&
    584   1.1  riastrad 			    list->shared[i]->context == fence->context) {
    585   1.1  riastrad 				replace = list->shared[i];
    586   1.1  riastrad 				prealloc->shared[i] = fence;
    587   1.1  riastrad 			} else {
    588   1.1  riastrad 				prealloc->shared[i] = list->shared[i];
    589   1.1  riastrad 			}
    590   1.1  riastrad 		}
    591   1.1  riastrad 		prealloc->shared_count = shared_count;
    592   1.1  riastrad 
    593   1.1  riastrad 		/* If we didn't find one, add it at the end.  */
    594   1.1  riastrad 		if (replace == NULL)
    595   1.1  riastrad 			prealloc->shared[prealloc->shared_count++] = fence;
    596   1.1  riastrad 
    597   1.1  riastrad 		/*
    598   1.1  riastrad 		 * Now ready to replace the list.  Begin an update.
    599   1.1  riastrad 		 * Implies membar_producer for fence and prealloc.
    600   1.1  riastrad 		 */
    601   1.1  riastrad 		dma_resv_write_begin(robj, &ticket);
    602   1.1  riastrad 
    603   1.1  riastrad 		/* Replace the list.  */
    604   1.7  riastrad 		atomic_store_relaxed(&robj->fence, prealloc);
    605   1.1  riastrad 		robj->robj_prealloc = NULL;
    606   1.1  riastrad 
    607   1.1  riastrad 		/* Commit the update.  */
    608   1.1  riastrad 		dma_resv_write_commit(robj, &ticket);
    609   1.1  riastrad 
    610   1.1  riastrad 		/*
    611   1.1  riastrad 		 * If there is an old list, free it when convenient.
    612   1.1  riastrad 		 * (We are not in a position at this point to sleep
    613   1.1  riastrad 		 * waiting for activity on all CPUs.)
    614   1.1  riastrad 		 */
    615   1.1  riastrad 		if (list)
    616   1.1  riastrad 			objlist_defer_free(list);
    617   1.1  riastrad 	}
    618   1.1  riastrad 
    619   1.1  riastrad 	/* Release a fence if we replaced it.  */
    620  1.10  riastrad 	if (replace) {
    621   1.1  riastrad 		dma_fence_put(replace);
    622  1.10  riastrad 		replace = NULL;	/* paranoia */
    623  1.10  riastrad 	}
    624   1.1  riastrad }
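
/*
 * Illustrative sketch, added for exposition and not part of the
 * original file: publishing a new fence on a reservation object.
 * Space for a shared fence must be reserved, under the same lock
 * hold, before dma_resv_add_shared_fence; an exclusive fence needs
 * no reservation.  The function name and the `write' flag are
 * hypothetical, and a real caller would typically pass a
 * ww_acquire_ctx instead of NULL when locking several objects.
 */
static int __unused
example_publish_fence(struct dma_resv *robj, struct dma_fence *fence,
    bool write)
{
	int ret;

	ret = dma_resv_lock(robj, NULL);
	if (ret)
		return ret;
	if (write) {
		/* Replaces the old exclusive fence, drops all shared.  */
		dma_resv_add_excl_fence(robj, fence);
	} else {
		ret = dma_resv_reserve_shared(robj, 1);
		if (ret == 0)
			dma_resv_add_shared_fence(robj, fence);
	}
	dma_resv_unlock(robj);

	return ret;
}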
    625   1.1  riastrad 
    626   1.1  riastrad /*
    627   1.1  riastrad  * dma_resv_get_excl_rcu(robj)
    628   1.1  riastrad  *
    629   1.1  riastrad  *	Note: Caller need not call this from an RCU read section.
    630   1.1  riastrad  */
    631   1.1  riastrad struct dma_fence *
    632   1.1  riastrad dma_resv_get_excl_rcu(const struct dma_resv *robj)
    633   1.1  riastrad {
    634   1.1  riastrad 	struct dma_fence *fence;
    635   1.1  riastrad 
    636   1.1  riastrad 	rcu_read_lock();
    637   1.1  riastrad 	fence = dma_fence_get_rcu_safe(&robj->fence_excl);
    638   1.1  riastrad 	rcu_read_unlock();
    639   1.1  riastrad 
    640   1.1  riastrad 	return fence;
    641   1.1  riastrad }
    642   1.1  riastrad 
    643   1.1  riastrad /*
    644   1.1  riastrad  * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
    645   1.1  riastrad  */
    646   1.1  riastrad int
    647   1.1  riastrad dma_resv_get_fences_rcu(const struct dma_resv *robj,
    648   1.1  riastrad     struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
    649   1.1  riastrad {
    650  1.10  riastrad 	const struct dma_resv_list *list = NULL;
    651  1.10  riastrad 	struct dma_fence *fence = NULL;
    652   1.1  riastrad 	struct dma_fence **shared = NULL;
    653   1.1  riastrad 	unsigned shared_alloc, shared_count, i;
    654   1.1  riastrad 	struct dma_resv_read_ticket ticket;
    655   1.1  riastrad 
    656  1.10  riastrad top:	KASSERT(fence == NULL);
    657  1.10  riastrad 
    658   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
    659   1.1  riastrad 	rcu_read_lock();
    660   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
    661   1.1  riastrad 
    662   1.7  riastrad 	/*
    663   1.7  riastrad 	 * If there is a shared list, grab it.  The atomic_load_consume
    664   1.7  riastrad 	 * here pairs with the membar_producer in dma_resv_write_begin
    665   1.7  riastrad 	 * to ensure the content of robj->fence is initialized before
    666   1.7  riastrad 	 * we witness the pointer.
    667   1.7  riastrad 	 */
    668   1.6  riastrad 	if ((list = atomic_load_consume(&robj->fence)) != NULL) {
    669   1.1  riastrad 
    670   1.1  riastrad 		/* Check whether we have a buffer.  */
    671   1.1  riastrad 		if (shared == NULL) {
    672   1.1  riastrad 			/*
    673   1.1  riastrad 			 * We don't have a buffer yet.  Try to allocate
    674   1.1  riastrad 			 * one without waiting.
    675   1.1  riastrad 			 */
    676   1.1  riastrad 			shared_alloc = list->shared_max;
    677   1.1  riastrad 			shared = kcalloc(shared_alloc, sizeof(shared[0]),
    678   1.1  riastrad 			    GFP_NOWAIT);
    679   1.1  riastrad 			if (shared == NULL) {
    680   1.1  riastrad 				/*
    681   1.1  riastrad 				 * Couldn't do it immediately.  Back
    682   1.1  riastrad 				 * out of RCU and allocate one with
    683   1.1  riastrad 				 * waiting.
    684   1.1  riastrad 				 */
    685   1.1  riastrad 				rcu_read_unlock();
    686   1.1  riastrad 				shared = kcalloc(shared_alloc,
    687   1.1  riastrad 				    sizeof(shared[0]), GFP_KERNEL);
    688   1.1  riastrad 				if (shared == NULL)
    689   1.1  riastrad 					return -ENOMEM;
    690   1.1  riastrad 				goto top;
    691   1.1  riastrad 			}
    692   1.1  riastrad 		} else if (shared_alloc < list->shared_max) {
    693   1.1  riastrad 			/*
    694   1.1  riastrad 			 * We have a buffer but it's too small.  We're
    695   1.1  riastrad 			 * already racing in this case, so just back
    696   1.1  riastrad 			 * out and wait to allocate a bigger one.
    697   1.1  riastrad 			 */
    698   1.1  riastrad 			shared_alloc = list->shared_max;
    699   1.1  riastrad 			rcu_read_unlock();
    700   1.1  riastrad 			kfree(shared);
    701   1.1  riastrad 			shared = kcalloc(shared_alloc, sizeof(shared[0]),
    702   1.1  riastrad 			    GFP_KERNEL);
    703   1.1  riastrad 			if (shared == NULL)
    704   1.1  riastrad 				return -ENOMEM;
    705   1.1  riastrad 		}
    706   1.1  riastrad 
    707   1.1  riastrad 		/*
    708   1.1  riastrad 		 * We got a buffer large enough.  Copy into the buffer
    709   1.7  riastrad 		 * and record the number of elements.  Could safely use
    710   1.7  riastrad 		 * memcpy here, because even if we race with a writer
    711   1.7  riastrad 		 * it'll invalidate the read ticket and we'll start
     712   1.7  riastrad 		 * over, but atomic_load in a loop will pacify kcsan.
    713   1.7  riastrad 		 */
    714   1.7  riastrad 		shared_count = atomic_load_relaxed(&list->shared_count);
    715   1.7  riastrad 		for (i = 0; i < shared_count; i++)
    716   1.7  riastrad 			shared[i] = atomic_load_relaxed(&list->shared[i]);
    717   1.1  riastrad 	} else {
    718   1.1  riastrad 		/* No shared list: shared count is zero.  */
    719   1.1  riastrad 		shared_count = 0;
    720   1.1  riastrad 	}
    721   1.1  riastrad 
    722   1.1  riastrad 	/* If there is an exclusive fence, grab it.  */
    723  1.10  riastrad 	KASSERT(fence == NULL);
    724   1.6  riastrad 	fence = atomic_load_consume(&robj->fence_excl);
    725   1.1  riastrad 
    726   1.1  riastrad 	/*
    727   1.1  riastrad 	 * We are done reading from robj and list.  Validate our
    728   1.1  riastrad 	 * parking ticket.  If it's invalid, do not pass go and do not
    729   1.1  riastrad 	 * collect $200.
    730   1.1  riastrad 	 */
    731  1.10  riastrad 	if (!dma_resv_read_valid(robj, &ticket)) {
    732  1.10  riastrad 		fence = NULL;
    733   1.1  riastrad 		goto restart;
    734  1.10  riastrad 	}
    735   1.1  riastrad 
    736   1.1  riastrad 	/*
    737   1.1  riastrad 	 * Try to get a reference to the exclusive fence, if there is
    738   1.1  riastrad 	 * one.  If we can't, start over.
    739   1.1  riastrad 	 */
    740   1.1  riastrad 	if (fence) {
    741   1.8  riastrad 		if ((fence = dma_fence_get_rcu(fence)) == NULL)
    742   1.1  riastrad 			goto restart;
    743   1.1  riastrad 	}
    744   1.1  riastrad 
    745   1.1  riastrad 	/*
    746   1.1  riastrad 	 * Try to get a reference to all of the shared fences.
    747   1.1  riastrad 	 */
    748   1.1  riastrad 	for (i = 0; i < shared_count; i++) {
    749   1.7  riastrad 		if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL)
    750   1.1  riastrad 			goto put_restart;
    751   1.1  riastrad 	}
    752   1.1  riastrad 
    753   1.1  riastrad 	/* Success!  */
    754   1.1  riastrad 	rcu_read_unlock();
    755   1.1  riastrad 	*fencep = fence;
    756   1.1  riastrad 	*nsharedp = shared_count;
    757   1.1  riastrad 	*sharedp = shared;
    758   1.1  riastrad 	return 0;
    759   1.1  riastrad 
    760   1.1  riastrad put_restart:
    761   1.1  riastrad 	/* Back out.  */
    762   1.1  riastrad 	while (i --> 0) {
    763   1.1  riastrad 		dma_fence_put(shared[i]);
    764   1.1  riastrad 		shared[i] = NULL; /* paranoia */
    765   1.1  riastrad 	}
    766   1.1  riastrad 	if (fence) {
    767   1.1  riastrad 		dma_fence_put(fence);
    768  1.10  riastrad 		fence = NULL;
    769   1.1  riastrad 	}
    770   1.1  riastrad 
    771   1.1  riastrad restart:
    772  1.10  riastrad 	KASSERT(fence == NULL);
    773   1.1  riastrad 	rcu_read_unlock();
    774   1.1  riastrad 	goto top;
    775   1.1  riastrad }
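
/*
 * Illustrative sketch, added for exposition and not part of the
 * original file: a caller of dma_resv_get_fences_rcu owns the
 * references in the snapshot and the shared[] array, and must
 * release both.  Here the snapshot is used to wait for all pending
 * activity; the function name is hypothetical and the waits are
 * uninterruptible for brevity.
 */
static int __unused
example_wait_all_fences(const struct dma_resv *robj)
{
	struct dma_fence *fence = NULL, **shared = NULL;
	unsigned nshared = 0, i;
	int ret;

	ret = dma_resv_get_fences_rcu(robj, &fence, &nshared, &shared);
	if (ret)
		return ret;

	for (i = 0; i < nshared; i++) {
		(void)dma_fence_wait(shared[i], /*intr*/false);
		dma_fence_put(shared[i]);
	}
	kfree(shared);
	if (fence) {
		(void)dma_fence_wait(fence, /*intr*/false);
		dma_fence_put(fence);
	}

	return 0;
}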
    776   1.1  riastrad 
    777   1.1  riastrad /*
    778   1.1  riastrad  * dma_resv_copy_fences(dst, src)
    779   1.1  riastrad  *
    780   1.1  riastrad  *	Copy the exclusive fence and all the shared fences from src to
    781   1.1  riastrad  *	dst.
    782   1.1  riastrad  *
    783   1.1  riastrad  *	Caller must have dst locked.
    784   1.1  riastrad  */
    785   1.1  riastrad int
    786   1.1  riastrad dma_resv_copy_fences(struct dma_resv *dst_robj,
    787   1.1  riastrad     const struct dma_resv *src_robj)
    788   1.1  riastrad {
    789   1.1  riastrad 	const struct dma_resv_list *src_list;
    790   1.1  riastrad 	struct dma_resv_list *dst_list = NULL;
    791   1.1  riastrad 	struct dma_resv_list *old_list;
    792   1.1  riastrad 	struct dma_fence *fence = NULL;
    793   1.1  riastrad 	struct dma_fence *old_fence;
    794   1.1  riastrad 	uint32_t shared_count, i;
    795   1.1  riastrad 	struct dma_resv_read_ticket read_ticket;
    796   1.1  riastrad 	struct dma_resv_write_ticket write_ticket;
    797   1.1  riastrad 
    798   1.1  riastrad 	KASSERT(dma_resv_held(dst_robj));
    799   1.1  riastrad 
    800  1.10  riastrad top:	KASSERT(fence == NULL);
    801  1.10  riastrad 
    802   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
    803   1.1  riastrad 	rcu_read_lock();
    804   1.1  riastrad 	dma_resv_read_begin(src_robj, &read_ticket);
    805   1.1  riastrad 
    806   1.1  riastrad 	/* Get the shared list.  */
    807   1.6  riastrad 	if ((src_list = atomic_load_consume(&src_robj->fence)) != NULL) {
    808   1.1  riastrad 
    809   1.1  riastrad 		/* Find out how long it is.  */
    810   1.7  riastrad 		shared_count = atomic_load_relaxed(&src_list->shared_count);
    811   1.1  riastrad 
    812   1.1  riastrad 		/*
    813   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the list
    814   1.1  riastrad 		 * pointer and length.
    815   1.1  riastrad 		 */
    816   1.1  riastrad 		if (!dma_resv_read_valid(src_robj, &read_ticket))
    817   1.1  riastrad 			goto restart;
    818   1.1  riastrad 
    819   1.1  riastrad 		/* Allocate a new list.  */
    820   1.1  riastrad 		dst_list = objlist_tryalloc(shared_count);
    821   1.1  riastrad 		if (dst_list == NULL)
    822   1.1  riastrad 			return -ENOMEM;
    823   1.1  riastrad 
    824   1.1  riastrad 		/* Copy over all fences that are not yet signalled.  */
    825   1.1  riastrad 		dst_list->shared_count = 0;
    826   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
    827  1.10  riastrad 			KASSERT(fence == NULL);
    828   1.7  riastrad 			fence = atomic_load_relaxed(&src_list->shared[i]);
    829   1.9  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
    830   1.1  riastrad 				goto restart;
    831   1.1  riastrad 			if (dma_fence_is_signaled(fence)) {
    832   1.1  riastrad 				dma_fence_put(fence);
    833   1.1  riastrad 				fence = NULL;
    834   1.1  riastrad 				continue;
    835   1.1  riastrad 			}
    836   1.1  riastrad 			dst_list->shared[dst_list->shared_count++] = fence;
    837   1.1  riastrad 			fence = NULL;
    838   1.1  riastrad 		}
    839   1.1  riastrad 	}
    840   1.1  riastrad 
    841   1.1  riastrad 	/* Get the exclusive fence.  */
    842  1.10  riastrad 	KASSERT(fence == NULL);
    843   1.6  riastrad 	if ((fence = atomic_load_consume(&src_robj->fence_excl)) != NULL) {
    844   1.1  riastrad 
    845   1.1  riastrad 		/*
    846   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the fence.
    847   1.1  riastrad 		 *
    848   1.1  riastrad 		 * XXX I'm not actually sure this is necessary since
    849   1.1  riastrad 		 * pointer writes are supposed to be atomic.
    850   1.1  riastrad 		 */
    851   1.1  riastrad 		if (!dma_resv_read_valid(src_robj, &read_ticket)) {
    852   1.1  riastrad 			fence = NULL;
    853   1.1  riastrad 			goto restart;
    854   1.1  riastrad 		}
    855   1.1  riastrad 
    856   1.1  riastrad 		/*
    857   1.1  riastrad 		 * If it is going away, restart.  Otherwise, acquire a
    858   1.1  riastrad 		 * reference to it.
    859   1.1  riastrad 		 */
    860   1.1  riastrad 		if (!dma_fence_get_rcu(fence)) {
    861   1.1  riastrad 			fence = NULL;
    862   1.1  riastrad 			goto restart;
    863   1.1  riastrad 		}
    864   1.1  riastrad 	}
    865   1.1  riastrad 
    866   1.1  riastrad 	/* All done with src; exit the RCU read section.  */
    867   1.1  riastrad 	rcu_read_unlock();
    868   1.1  riastrad 
    869   1.1  riastrad 	/*
    870   1.1  riastrad 	 * We now have a snapshot of the shared and exclusive fences of
    871   1.1  riastrad 	 * src_robj and we have acquired references to them so they
    872   1.1  riastrad 	 * won't go away.  Transfer them over to dst_robj, releasing
    873   1.1  riastrad 	 * references to any that were there.
    874   1.1  riastrad 	 */
    875   1.1  riastrad 
    876   1.1  riastrad 	/* Get the old shared and exclusive fences, if any.  */
    877   1.1  riastrad 	old_list = dst_robj->fence;
    878   1.1  riastrad 	old_fence = dst_robj->fence_excl;
    879   1.1  riastrad 
    880   1.7  riastrad 	/*
    881   1.7  riastrad 	 * Begin an update.  Implies membar_producer for dst_list and
    882   1.7  riastrad 	 * fence.
    883   1.7  riastrad 	 */
    884   1.1  riastrad 	dma_resv_write_begin(dst_robj, &write_ticket);
    885   1.1  riastrad 
    886   1.1  riastrad 	/* Replace the fences.  */
    887   1.6  riastrad 	atomic_store_relaxed(&dst_robj->fence, dst_list);
    888   1.6  riastrad 	atomic_store_relaxed(&dst_robj->fence_excl, fence);
    889   1.1  riastrad 
    890   1.1  riastrad 	/* Commit the update.  */
    891   1.1  riastrad 	dma_resv_write_commit(dst_robj, &write_ticket);
    892   1.1  riastrad 
    893   1.1  riastrad 	/* Release the old exclusive fence, if any.  */
    894  1.10  riastrad 	if (old_fence) {
    895   1.1  riastrad 		dma_fence_put(old_fence);
    896  1.10  riastrad 		old_fence = NULL; /* paranoia */
    897  1.10  riastrad 	}
    898   1.1  riastrad 
    899   1.1  riastrad 	/* Release any old shared fences.  */
    900   1.1  riastrad 	if (old_list) {
    901  1.10  riastrad 		for (i = old_list->shared_count; i --> 0;) {
    902   1.1  riastrad 			dma_fence_put(old_list->shared[i]);
    903  1.10  riastrad 			old_list->shared[i] = NULL; /* paranoia */
    904  1.10  riastrad 		}
    905  1.10  riastrad 		objlist_free(old_list);
    906  1.10  riastrad 		old_list = NULL; /* paranoia */
    907   1.1  riastrad 	}
    908   1.1  riastrad 
    909   1.1  riastrad 	/* Success!  */
    910   1.1  riastrad 	return 0;
    911   1.1  riastrad 
    912   1.1  riastrad restart:
    913  1.10  riastrad 	KASSERT(fence == NULL);
    914   1.1  riastrad 	rcu_read_unlock();
    915   1.1  riastrad 	if (dst_list) {
    916   1.1  riastrad 		for (i = dst_list->shared_count; i --> 0;) {
    917   1.1  riastrad 			dma_fence_put(dst_list->shared[i]);
    918  1.10  riastrad 			dst_list->shared[i] = NULL; /* paranoia */
    919   1.1  riastrad 		}
    920   1.1  riastrad 		objlist_free(dst_list);
    921   1.1  riastrad 		dst_list = NULL;
    922   1.1  riastrad 	}
    923   1.1  riastrad 	goto top;
    924   1.1  riastrad }
    925   1.1  riastrad 
    926   1.1  riastrad /*
    927   1.1  riastrad  * dma_resv_test_signaled_rcu(robj, shared)
    928   1.1  riastrad  *
    929   1.1  riastrad  *	If shared is true, test whether all of the shared fences are
    930   1.1  riastrad  *	signalled, or if there are none, test whether the exclusive
    931   1.1  riastrad  *	fence is signalled.  If shared is false, test only whether the
    932   1.1  riastrad  *	exclusive fence is signalled.
    933   1.1  riastrad  *
     934   1.1  riastrad  *	XXX Why, when shared is true, does this test the exclusive
     935   1.1  riastrad  *	fence only if there are no shared fences?  This makes no sense.
    936   1.1  riastrad  */
    937   1.1  riastrad bool
    938   1.1  riastrad dma_resv_test_signaled_rcu(const struct dma_resv *robj,
    939   1.1  riastrad     bool shared)
    940   1.1  riastrad {
    941   1.1  riastrad 	struct dma_resv_read_ticket ticket;
    942   1.1  riastrad 	struct dma_resv_list *list;
    943  1.10  riastrad 	struct dma_fence *fence = NULL;
    944   1.1  riastrad 	uint32_t i, shared_count;
    945   1.1  riastrad 	bool signaled = true;
    946   1.1  riastrad 
    947  1.10  riastrad top:	KASSERT(fence == NULL);
    948  1.10  riastrad 
    949   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
    950   1.1  riastrad 	rcu_read_lock();
    951   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
    952   1.1  riastrad 
    953   1.1  riastrad 	/* If shared is requested and there is a shared list, test it.  */
    954  1.10  riastrad 	if (shared && (list = atomic_load_consume(&robj->fence)) != NULL) {
    955   1.1  riastrad 
    956   1.1  riastrad 		/* Find out how long it is.  */
    957   1.7  riastrad 		shared_count = atomic_load_relaxed(&list->shared_count);
    958   1.1  riastrad 
    959   1.1  riastrad 		/*
    960   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the list
    961   1.1  riastrad 		 * pointer and length.
    962   1.1  riastrad 		 */
    963   1.1  riastrad 		if (!dma_resv_read_valid(robj, &ticket))
    964   1.1  riastrad 			goto restart;
    965   1.1  riastrad 
    966   1.1  riastrad 		/*
    967   1.1  riastrad 		 * For each fence, if it is going away, restart.
    968   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
    969   1.1  riastrad 		 * it is signalled.  Stop if we find any that is not
    970   1.1  riastrad 		 * signalled.
    971   1.1  riastrad 		 */
    972   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
    973  1.10  riastrad 			KASSERT(fence == NULL);
    974   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
    975  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
    976   1.1  riastrad 				goto restart;
    977   1.1  riastrad 			signaled &= dma_fence_is_signaled(fence);
    978   1.1  riastrad 			dma_fence_put(fence);
    979  1.10  riastrad 			fence = NULL;
    980   1.1  riastrad 			if (!signaled)
    981   1.1  riastrad 				goto out;
    982   1.1  riastrad 		}
    983   1.1  riastrad 	}
    984   1.1  riastrad 
    985   1.1  riastrad 	/* If there is an exclusive fence, test it.  */
    986  1.10  riastrad 	KASSERT(fence == NULL);
    987   1.6  riastrad 	if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) {
    988   1.1  riastrad 
    989   1.1  riastrad 		/*
    990   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the fence.
    991   1.1  riastrad 		 *
    992   1.1  riastrad 		 * XXX I'm not actually sure this is necessary since
    993   1.1  riastrad 		 * pointer writes are supposed to be atomic.
    994   1.1  riastrad 		 */
    995  1.10  riastrad 		if (!dma_resv_read_valid(robj, &ticket)) {
    996  1.10  riastrad 			fence = NULL;
    997   1.1  riastrad 			goto restart;
    998  1.10  riastrad 		}
    999   1.1  riastrad 
   1000   1.1  riastrad 		/*
   1001   1.1  riastrad 		 * If it is going away, restart.  Otherwise, acquire a
   1002   1.1  riastrad 		 * reference to it to test whether it is signalled.
   1003   1.1  riastrad 		 */
   1004   1.1  riastrad 		if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1005   1.1  riastrad 			goto restart;
   1006   1.1  riastrad 		signaled &= dma_fence_is_signaled(fence);
   1007   1.1  riastrad 		dma_fence_put(fence);
   1008  1.10  riastrad 		fence = NULL;
   1009   1.1  riastrad 		if (!signaled)
   1010   1.1  riastrad 			goto out;
   1011   1.1  riastrad 	}
   1012   1.1  riastrad 
   1013  1.10  riastrad out:	KASSERT(fence == NULL);
   1014  1.10  riastrad 	rcu_read_unlock();
   1015   1.1  riastrad 	return signaled;
   1016   1.1  riastrad 
   1017   1.1  riastrad restart:
   1018  1.10  riastrad 	KASSERT(fence == NULL);
   1019   1.1  riastrad 	rcu_read_unlock();
   1020   1.1  riastrad 	goto top;
   1021   1.1  riastrad }
   1022   1.1  riastrad 
   1023   1.1  riastrad /*
   1024   1.1  riastrad  * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
   1025   1.1  riastrad  *
   1026   1.1  riastrad  *	If shared is true, wait for all of the shared fences to be
   1027   1.1  riastrad  *	signalled, or if there are none, wait for the exclusive fence
   1028   1.1  riastrad  *	to be signalled.  If shared is false, wait only for the
   1029   1.1  riastrad  *	exclusive fence to be signalled.  If timeout is zero, don't
   1030   1.1  riastrad  *	wait, only test.
   1031   1.1  riastrad  *
    1032   1.1  riastrad  *	XXX Why, when shared is true, does this wait for the exclusive
    1033   1.1  riastrad  *	fence only if there are no shared fences?  This makes no
    1034   1.1  riastrad  *	sense.
   1035   1.1  riastrad  */
   1036   1.1  riastrad long
   1037   1.1  riastrad dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
   1038   1.1  riastrad     bool shared, bool intr, unsigned long timeout)
   1039   1.1  riastrad {
   1040   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1041   1.1  riastrad 	struct dma_resv_list *list;
   1042  1.10  riastrad 	struct dma_fence *fence = NULL;
   1043   1.1  riastrad 	uint32_t i, shared_count;
   1044   1.1  riastrad 	long ret;
   1045   1.1  riastrad 
   1046   1.1  riastrad 	if (timeout == 0)
   1047   1.1  riastrad 		return dma_resv_test_signaled_rcu(robj, shared);
   1048   1.1  riastrad 
   1049  1.10  riastrad top:	KASSERT(fence == NULL);
   1050  1.10  riastrad 
   1051   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1052   1.1  riastrad 	rcu_read_lock();
   1053   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1054   1.1  riastrad 
   1055   1.1  riastrad 	/* If shared is requested and there is a shared list, wait on it.  */
   1056  1.10  riastrad 	if (shared && (list = atomic_load_consume(&robj->fence)) != NULL) {
   1057   1.1  riastrad 
   1058   1.1  riastrad 		/* Find out how long it is.  */
   1059   1.1  riastrad 		shared_count = list->shared_count;
   1060   1.1  riastrad 
   1061   1.1  riastrad 		/*
   1062   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the list
   1063   1.1  riastrad 		 * pointer and length.
   1064   1.1  riastrad 		 */
   1065   1.1  riastrad 		if (!dma_resv_read_valid(robj, &ticket))
   1066   1.1  riastrad 			goto restart;
   1067   1.1  riastrad 
   1068   1.1  riastrad 		/*
   1069   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1070   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1071   1.1  riastrad 		 * it is signalled.  Stop and wait if we find any that
   1072   1.1  riastrad 		 * is not signalled.
   1073   1.1  riastrad 		 */
   1074   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1075  1.10  riastrad 			KASSERT(fence == NULL);
   1076   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1077  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1078   1.1  riastrad 				goto restart;
   1079   1.1  riastrad 			if (!dma_fence_is_signaled(fence))
   1080   1.1  riastrad 				goto wait;
   1081   1.1  riastrad 			dma_fence_put(fence);
   1082  1.10  riastrad 			fence = NULL;
   1083   1.1  riastrad 		}
   1084   1.1  riastrad 	}
   1085   1.1  riastrad 
   1086   1.1  riastrad 	/* If there is an exclusive fence, test it.  */
   1087  1.10  riastrad 	KASSERT(fence == NULL);
   1088   1.6  riastrad 	if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) {
   1089   1.1  riastrad 
   1090   1.1  riastrad 		/*
   1091   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the fence.
   1092   1.1  riastrad 		 *
   1093   1.1  riastrad 		 * XXX I'm not actually sure this is necessary since
   1094   1.1  riastrad 		 * pointer writes are supposed to be atomic.
   1095   1.1  riastrad 		 */
   1096  1.10  riastrad 		if (!dma_resv_read_valid(robj, &ticket)) {
   1097  1.10  riastrad 			fence = NULL;
   1098   1.1  riastrad 			goto restart;
   1099  1.10  riastrad 		}
   1100   1.1  riastrad 
   1101   1.1  riastrad 		/*
   1102   1.1  riastrad 		 * If it is going away, restart.  Otherwise, acquire a
   1103   1.1  riastrad 		 * reference to it to test whether it is signalled.  If
   1104   1.1  riastrad 		 * not, wait for it.
   1105   1.1  riastrad 		 */
   1106   1.1  riastrad 		if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1107   1.1  riastrad 			goto restart;
   1108   1.1  riastrad 		if (!dma_fence_is_signaled(fence))
   1109   1.1  riastrad 			goto wait;
   1110   1.1  riastrad 		dma_fence_put(fence);
   1111  1.10  riastrad 		fence = NULL;
   1112   1.1  riastrad 	}
   1113   1.1  riastrad 
   1114   1.1  riastrad 	/* Success!  Return the number of ticks left.  */
   1115   1.1  riastrad 	rcu_read_unlock();
   1116  1.10  riastrad 	KASSERT(fence == NULL);
   1117   1.1  riastrad 	return timeout;
   1118   1.1  riastrad 
   1119   1.1  riastrad restart:
   1120  1.10  riastrad 	KASSERT(fence == NULL);
   1121   1.1  riastrad 	rcu_read_unlock();
   1122   1.1  riastrad 	goto top;
   1123   1.1  riastrad 
   1124   1.1  riastrad wait:
   1125   1.1  riastrad 	/*
    1126   1.5  riastrad 	 * Exit the RCU read section, wait for the fence, and release
    1127   1.5  riastrad 	 * it when we're done.  If we time out or fail, bail.
   1128   1.5  riastrad 	 * Otherwise, go back to the top.
   1129   1.1  riastrad 	 */
   1130   1.1  riastrad 	KASSERT(fence != NULL);
   1131   1.1  riastrad 	rcu_read_unlock();
   1132   1.1  riastrad 	ret = dma_fence_wait_timeout(fence, intr, timeout);
   1133   1.1  riastrad 	dma_fence_put(fence);
   1134  1.10  riastrad 	fence = NULL;
   1135   1.1  riastrad 	if (ret <= 0)
   1136   1.1  riastrad 		return ret;
   1137   1.1  riastrad 	KASSERT(ret <= timeout);
   1138   1.1  riastrad 	timeout = ret;
   1139   1.1  riastrad 	goto top;
   1140   1.1  riastrad }
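
/*
 * Illustrative sketch, added for exposition and not part of the
 * original file: a typical caller waiting interruptibly for all
 * activity on an object before CPU access.  The return convention
 * follows the function above: <0 on error, 0 on timeout, otherwise
 * the number of ticks left.  The function name and the choice of
 * -EBUSY for a timeout are hypothetical.
 */
static int __unused
example_wait_idle(const struct dma_resv *robj, unsigned long timeout)
{
	long ret;

	/* Wait for both the shared and the exclusive fences.  */
	ret = dma_resv_wait_timeout_rcu(robj, /*shared*/true,
	    /*intr*/true, timeout);
	if (ret < 0)
		return ret;	/* interrupted or failed */
	if (ret == 0)
		return -EBUSY;	/* timed out */
	return 0;		/* all fences signalled */
}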
   1141   1.1  riastrad 
   1142   1.1  riastrad /*
   1143   1.1  riastrad  * dma_resv_poll_init(rpoll, lock)
   1144   1.1  riastrad  *
   1145   1.1  riastrad  *	Initialize reservation poll state.
   1146   1.1  riastrad  */
   1147   1.1  riastrad void
   1148   1.1  riastrad dma_resv_poll_init(struct dma_resv_poll *rpoll)
   1149   1.1  riastrad {
   1150   1.1  riastrad 
   1151   1.1  riastrad 	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
   1152   1.1  riastrad 	selinit(&rpoll->rp_selq);
   1153   1.1  riastrad 	rpoll->rp_claimed = 0;
   1154   1.1  riastrad }
   1155   1.1  riastrad 
   1156   1.1  riastrad /*
   1157   1.1  riastrad  * dma_resv_poll_fini(rpoll)
   1158   1.1  riastrad  *
   1159   1.1  riastrad  *	Release any resource associated with reservation poll state.
   1160   1.1  riastrad  */
   1161   1.1  riastrad void
   1162   1.1  riastrad dma_resv_poll_fini(struct dma_resv_poll *rpoll)
   1163   1.1  riastrad {
   1164   1.1  riastrad 
   1165   1.1  riastrad 	KASSERT(rpoll->rp_claimed == 0);
   1166   1.1  riastrad 	seldestroy(&rpoll->rp_selq);
   1167   1.1  riastrad 	mutex_destroy(&rpoll->rp_lock);
   1168   1.1  riastrad }
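
/*
 * Example (illustrative sketch only; mydrv_obj and its routines are
 * hypothetical, not part of this file): a caller embeds a
 * struct dma_resv_poll next to its reservation object, initializes it
 * when the object is created, and finalizes it when the object is
 * destroyed, after the last poller has gone away.
 *
 *	struct mydrv_obj {
 *		struct dma_resv		mo_resv;
 *		struct dma_resv_poll	mo_rpoll;
 *	};
 *
 *	static void
 *	mydrv_obj_init(struct mydrv_obj *mo)
 *	{
 *
 *		dma_resv_init(&mo->mo_resv);
 *		dma_resv_poll_init(&mo->mo_rpoll);
 *	}
 *
 *	static void
 *	mydrv_obj_fini(struct mydrv_obj *mo)
 *	{
 *
 *		dma_resv_poll_fini(&mo->mo_rpoll);
 *		dma_resv_fini(&mo->mo_resv);
 *	}
 */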
   1169   1.1  riastrad 
   1170   1.1  riastrad /*
   1171   1.1  riastrad  * dma_resv_poll_cb(fence, fcb)
   1172   1.1  riastrad  *
   1173   1.1  riastrad  *	Callback to notify a reservation poll that a fence has
   1174   1.1  riastrad  *	completed.  Notify any waiters and allow the next poller to
   1175   1.1  riastrad  *	claim the callback.
   1176   1.1  riastrad  *
    1177   1.1  riastrad  *	If a thread is waiting for the exclusive fence only, and we
    1178   1.1  riastrad  *	spuriously notify it about a shared fence, tough.
   1179   1.1  riastrad  */
   1180   1.1  riastrad static void
   1181   1.1  riastrad dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
   1182   1.1  riastrad {
   1183   1.1  riastrad 	struct dma_resv_poll *rpoll = container_of(fcb,
   1184   1.1  riastrad 	    struct dma_resv_poll, rp_fcb);
   1185   1.1  riastrad 
   1186   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1187   1.1  riastrad 	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
   1188   1.1  riastrad 	rpoll->rp_claimed = 0;
   1189   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1190   1.1  riastrad }
   1191   1.1  riastrad 
   1192   1.1  riastrad /*
   1193   1.1  riastrad  * dma_resv_do_poll(robj, events, rpoll)
   1194   1.1  riastrad  *
   1195   1.1  riastrad  *	Poll for reservation object events using the reservation poll
   1196   1.1  riastrad  *	state in rpoll:
   1197   1.1  riastrad  *
    1198   1.1  riastrad  *	- POLLOUT	wait for all fences, shared and exclusive
   1199   1.1  riastrad  *	- POLLIN	wait for the exclusive fence
   1200   1.1  riastrad  *
   1201   1.1  riastrad  *	Return the subset of events in events that are ready.  If any
   1202   1.1  riastrad  *	are requested but not ready, arrange to be notified with
   1203   1.1  riastrad  *	selnotify when they are.
   1204   1.1  riastrad  */
   1205   1.1  riastrad int
   1206   1.1  riastrad dma_resv_do_poll(const struct dma_resv *robj, int events,
   1207   1.1  riastrad     struct dma_resv_poll *rpoll)
   1208   1.1  riastrad {
   1209   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1210   1.1  riastrad 	struct dma_resv_list *list;
   1211  1.10  riastrad 	struct dma_fence *fence = NULL;
   1212   1.1  riastrad 	uint32_t i, shared_count;
   1213   1.1  riastrad 	int revents;
   1214   1.1  riastrad 	bool recorded = false;	/* curlwp is on the selq */
   1215   1.1  riastrad 	bool claimed = false;	/* we claimed the callback */
   1216   1.1  riastrad 	bool callback = false;	/* we requested a callback */
   1217   1.1  riastrad 
   1218   1.1  riastrad 	/*
   1219   1.1  riastrad 	 * Start with the maximal set of events that could be ready.
    1220   1.1  riastrad 	 * We will eliminate the events that are definitely not ready
    1221   1.1  riastrad 	 * as we go, while at the same time adding callbacks to notify
    1222   1.1  riastrad 	 * us when they may become ready.
   1223   1.1  riastrad 	 */
   1224   1.1  riastrad 	revents = events & (POLLIN|POLLOUT);
   1225   1.1  riastrad 	if (revents == 0)
   1226   1.1  riastrad 		return 0;
   1227   1.1  riastrad 
   1228  1.10  riastrad top:	KASSERT(fence == NULL);
   1229  1.10  riastrad 
   1230   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1231   1.1  riastrad 	rcu_read_lock();
   1232   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1233   1.1  riastrad 
   1234   1.1  riastrad 	/* If we want to wait for all fences, get the shared list.  */
   1235  1.10  riastrad 	if ((events & POLLOUT) != 0 &&
   1236  1.10  riastrad 	    (list = atomic_load_consume(&robj->fence)) != NULL) do {
   1237   1.1  riastrad 
   1238   1.1  riastrad 		/* Find out how long it is.  */
   1239   1.1  riastrad 		shared_count = list->shared_count;
   1240   1.1  riastrad 
   1241   1.1  riastrad 		/*
   1242   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the list
   1243   1.1  riastrad 		 * pointer and length.
   1244   1.1  riastrad 		 */
   1245   1.1  riastrad 		if (!dma_resv_read_valid(robj, &ticket))
   1246   1.1  riastrad 			goto restart;
   1247   1.1  riastrad 
   1248   1.1  riastrad 		/*
   1249   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1250   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1251   1.1  riastrad 		 * it is signalled.  Stop and request a callback if we
   1252   1.1  riastrad 		 * find any that is not signalled.
   1253   1.1  riastrad 		 */
   1254   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1255  1.10  riastrad 			KASSERT(fence == NULL);
   1256   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1257  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1258   1.1  riastrad 				goto restart;
   1259   1.1  riastrad 			if (!dma_fence_is_signaled(fence)) {
   1260   1.1  riastrad 				dma_fence_put(fence);
   1261  1.10  riastrad 				fence = NULL;
   1262   1.1  riastrad 				break;
   1263   1.1  riastrad 			}
   1264   1.1  riastrad 			dma_fence_put(fence);
   1265  1.10  riastrad 			fence = NULL;
   1266   1.1  riastrad 		}
   1267   1.1  riastrad 
   1268   1.1  riastrad 		/* If all shared fences have been signalled, move on.  */
   1269   1.1  riastrad 		if (i == shared_count)
   1270   1.1  riastrad 			break;
   1271   1.1  riastrad 
   1272   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1273   1.1  riastrad 		if (!recorded)
   1274   1.1  riastrad 			goto record;
   1275   1.1  riastrad 
   1276   1.1  riastrad 		/*
   1277   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1278   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1279   1.1  riastrad 		 * assume the event is not ready.
   1280   1.1  riastrad 		 */
   1281   1.1  riastrad 		if (!claimed || callback) {
   1282   1.1  riastrad 			revents &= ~POLLOUT;
   1283   1.1  riastrad 			break;
   1284   1.1  riastrad 		}
   1285   1.1  riastrad 
   1286   1.1  riastrad 		/*
   1287   1.1  riastrad 		 * Otherwise, find the first fence that is not
   1288   1.1  riastrad 		 * signalled, request the callback, and clear POLLOUT
   1289   1.1  riastrad 		 * from the possible ready events.  If they are all
   1290   1.1  riastrad 		 * signalled, leave POLLOUT set; we will simulate the
   1291   1.1  riastrad 		 * callback later.
   1292   1.1  riastrad 		 */
   1293   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1294  1.10  riastrad 			KASSERT(fence == NULL);
   1295   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1296  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1297   1.1  riastrad 				goto restart;
   1298   1.1  riastrad 			if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1299   1.1  riastrad 				dma_resv_poll_cb)) {
   1300   1.1  riastrad 				dma_fence_put(fence);
   1301  1.10  riastrad 				fence = NULL;
   1302   1.1  riastrad 				revents &= ~POLLOUT;
   1303   1.1  riastrad 				callback = true;
   1304   1.1  riastrad 				break;
   1305   1.1  riastrad 			}
   1306   1.1  riastrad 			dma_fence_put(fence);
   1307  1.10  riastrad 			fence = NULL;
   1308   1.1  riastrad 		}
   1309   1.1  riastrad 	} while (0);
   1310   1.1  riastrad 
   1311   1.1  riastrad 	/* We always wait for at least the exclusive fence, so get it.  */
   1312  1.10  riastrad 	KASSERT(fence == NULL);
   1313   1.6  riastrad 	if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) do {
   1314   1.1  riastrad 
   1315   1.1  riastrad 		/*
   1316   1.1  riastrad 		 * Make sure we saw a consistent snapshot of the fence.
   1317   1.1  riastrad 		 *
   1318   1.1  riastrad 		 * XXX I'm not actually sure this is necessary since
   1319   1.1  riastrad 		 * pointer writes are supposed to be atomic.
   1320   1.1  riastrad 		 */
   1321  1.10  riastrad 		if (!dma_resv_read_valid(robj, &ticket)) {
   1322  1.10  riastrad 			fence = NULL;
   1323   1.1  riastrad 			goto restart;
   1324  1.10  riastrad 		}
   1325   1.1  riastrad 
   1326   1.1  riastrad 		/*
   1327   1.1  riastrad 		 * If it is going away, restart.  Otherwise, acquire a
   1328   1.1  riastrad 		 * reference to it to test whether it is signalled.  If
   1329   1.1  riastrad 		 * not, stop and request a callback.
   1330   1.1  riastrad 		 */
   1331   1.1  riastrad 		if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1332   1.1  riastrad 			goto restart;
   1333   1.1  riastrad 		if (dma_fence_is_signaled(fence)) {
   1334   1.1  riastrad 			dma_fence_put(fence);
   1335  1.10  riastrad 			fence = NULL;
   1336   1.1  riastrad 			break;
   1337   1.1  riastrad 		}
   1338   1.1  riastrad 
   1339   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1340   1.1  riastrad 		if (!recorded) {
   1341   1.1  riastrad 			dma_fence_put(fence);
   1342  1.10  riastrad 			fence = NULL;
   1343   1.1  riastrad 			goto record;
   1344   1.1  riastrad 		}
   1345   1.1  riastrad 
   1346   1.1  riastrad 		/*
   1347   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1348   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1349   1.1  riastrad 		 * assume the event is not ready.
   1350   1.1  riastrad 		 */
   1351   1.1  riastrad 		if (!claimed || callback) {
   1352   1.1  riastrad 			dma_fence_put(fence);
   1353  1.10  riastrad 			fence = NULL;
   1354   1.1  riastrad 			revents = 0;
   1355   1.1  riastrad 			break;
   1356   1.1  riastrad 		}
   1357   1.1  riastrad 
   1358   1.1  riastrad 		/*
   1359   1.1  riastrad 		 * Otherwise, try to request the callback, and clear
   1360   1.1  riastrad 		 * all possible ready events.  If the fence has been
   1361   1.1  riastrad 		 * signalled in the interim, leave the events set; we
   1362   1.1  riastrad 		 * will simulate the callback later.
   1363   1.1  riastrad 		 */
   1364   1.1  riastrad 		if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1365   1.1  riastrad 			dma_resv_poll_cb)) {
   1366   1.1  riastrad 			dma_fence_put(fence);
   1367  1.10  riastrad 			fence = NULL;
   1368   1.1  riastrad 			revents = 0;
   1369   1.1  riastrad 			callback = true;
   1370   1.1  riastrad 			break;
   1371   1.1  riastrad 		}
   1372   1.1  riastrad 		dma_fence_put(fence);
   1373  1.10  riastrad 		fence = NULL;
   1374   1.1  riastrad 	} while (0);
   1375  1.10  riastrad 	KASSERT(fence == NULL);
   1376   1.1  riastrad 
   1377   1.1  riastrad 	/* All done reading the fences.  */
   1378   1.1  riastrad 	rcu_read_unlock();
   1379   1.1  riastrad 
   1380   1.1  riastrad 	if (claimed && !callback) {
   1381   1.1  riastrad 		/*
   1382   1.1  riastrad 		 * We claimed the callback but we didn't actually
   1383   1.1  riastrad 		 * request it because a fence was signalled while we
   1384   1.1  riastrad 		 * were claiming it.  Call it ourselves now.  The
   1385   1.1  riastrad 		 * callback doesn't use the fence nor rely on holding
   1386   1.1  riastrad 		 * any of the fence locks, so this is safe.
   1387   1.1  riastrad 		 */
   1388   1.1  riastrad 		dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
   1389   1.1  riastrad 	}
   1390   1.1  riastrad 	return revents;
   1391   1.1  riastrad 
   1392   1.1  riastrad restart:
   1393  1.10  riastrad 	KASSERT(fence == NULL);
   1394   1.1  riastrad 	rcu_read_unlock();
   1395   1.1  riastrad 	goto top;
   1396   1.1  riastrad 
   1397   1.1  riastrad record:
   1398  1.10  riastrad 	KASSERT(fence == NULL);
   1399   1.1  riastrad 	rcu_read_unlock();
   1400   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1401   1.1  riastrad 	selrecord(curlwp, &rpoll->rp_selq);
   1402   1.1  riastrad 	if (!rpoll->rp_claimed)
   1403   1.1  riastrad 		claimed = rpoll->rp_claimed = true;
   1404   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1405   1.1  riastrad 	recorded = true;
   1406   1.1  riastrad 	goto top;
   1407   1.1  riastrad }
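
/*
 * Example (illustrative sketch only; mydrv_poll, mydrv_lookup, and
 * mydrv_obj are hypothetical): a driver's poll(9) entry point would
 * typically forward to dma_resv_do_poll with its object's reservation
 * and the poll state set up by dma_resv_poll_init.
 *
 *	static int
 *	mydrv_poll(dev_t dev, int events, struct lwp *l)
 *	{
 *		struct mydrv_obj *mo = mydrv_lookup(dev);
 *
 *		return dma_resv_do_poll(&mo->mo_resv, events,
 *		    &mo->mo_rpoll);
 *	}
 */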
   1408   1.1  riastrad 
   1409   1.1  riastrad /*
   1410   1.1  riastrad  * dma_resv_kqfilter(robj, kn, rpoll)
   1411   1.1  riastrad  *
   1412   1.1  riastrad  *	Kqueue filter for reservation objects.  Currently not
   1413   1.1  riastrad  *	implemented because the logic to implement it is nontrivial,
   1414   1.1  riastrad  *	and userland will presumably never use it, so it would be
   1415   1.1  riastrad  *	dangerous to add never-tested complex code paths to the kernel.
   1416   1.1  riastrad  */
   1417   1.1  riastrad int
   1418   1.1  riastrad dma_resv_kqfilter(const struct dma_resv *robj,
   1419   1.1  riastrad     struct knote *kn, struct dma_resv_poll *rpoll)
   1420   1.1  riastrad {
   1421   1.1  riastrad 
   1422   1.1  riastrad 	return EINVAL;
   1423   1.1  riastrad }
   1424