/*	$NetBSD: linux_dma_resv.c,v 1.22 2022/02/15 22:51:03 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.22 2022/02/15 22:51:03 riastradh Exp $");

#include <sys/param.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/seqlock.h>
#include <linux/ww_mutex.h>

DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);

static struct dma_resv_list *
objlist_tryalloc(uint32_t n)
{
	struct dma_resv_list *list;

	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
	if (list == NULL)
		return NULL;
	list->shared_max = n;

	return list;
}

static struct dma_resv_list *
objlist_alloc(uint32_t n)
{
	struct dma_resv_list *list;

	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_SLEEP);
	list->shared_max = n;

	return list;
}

static void
objlist_free(struct dma_resv_list *list)
{
	uint32_t n = list->shared_max;

	kmem_free(list, offsetof(typeof(*list), shared[n]));
}

static void
objlist_free_cb(struct rcu_head *rcu)
{
	struct dma_resv_list *list = container_of(rcu,
	    struct dma_resv_list, rol_rcu);

	objlist_free(list);
}

static void
objlist_defer_free(struct dma_resv_list *list)
{

	call_rcu(&list->rol_rcu, objlist_free_cb);
}

/*
 * dma_resv_init(robj)
 *
 *	Initialize a reservation object.  Caller must later destroy it
 *	with dma_resv_fini.
 */
void
dma_resv_init(struct dma_resv *robj)
{

	ww_mutex_init(&robj->lock, &reservation_ww_class);
	seqcount_init(&robj->seq);
	robj->fence_excl = NULL;
	robj->fence = NULL;
	robj->robj_prealloc = NULL;
}

/*
 * dma_resv_fini(robj)
 *
 *	Destroy a reservation object, freeing any memory that had been
 *	allocated for it.  Caller must have exclusive access to it.
 */
void
dma_resv_fini(struct dma_resv *robj)
{
	unsigned i;

	if (robj->robj_prealloc) {
		objlist_free(robj->robj_prealloc);
		robj->robj_prealloc = NULL; /* paranoia */
	}
	if (robj->fence) {
		for (i = 0; i < robj->fence->shared_count; i++) {
			dma_fence_put(robj->fence->shared[i]);
			robj->fence->shared[i] = NULL; /* paranoia */
		}
		objlist_free(robj->fence);
		robj->fence = NULL; /* paranoia */
	}
	if (robj->fence_excl) {
		dma_fence_put(robj->fence_excl);
		robj->fence_excl = NULL; /* paranoia */
	}
	ww_mutex_destroy(&robj->lock);
}

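/*
 * Usage sketch (illustrative only, not part of this file): a
 * hypothetical driver object embedding a reservation object pairs
 * dma_resv_init and dma_resv_fini as constructor and destructor.  The
 * struct and function names below are assumptions for the example:
 *
 *	struct mydrv_buffer {
 *		struct dma_resv	resv;
 *	};
 *
 *	static void
 *	mydrv_buffer_ctor(struct mydrv_buffer *buf)
 *	{
 *
 *		dma_resv_init(&buf->resv);
 *	}
 *
 *	static void
 *	mydrv_buffer_dtor(struct mydrv_buffer *buf)
 *	{
 *
 *		dma_resv_fini(&buf->resv);
 *	}
 */
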
/*
 * dma_resv_lock(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry.
 */
int
dma_resv_lock(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.
 */
void
dma_resv_lock_slow(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	ww_mutex_lock_slow(&robj->lock, ctx);
}

/*
 * dma_resv_lock_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry, -EINTR if interrupted.
 */
int
dma_resv_lock_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.  Return 0 on
 *	success, -EINTR if interrupted.
 */
int
dma_resv_lock_slow_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
}

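/*
 * Usage sketch (illustrative only, not part of this file): the usual
 * wound/wait pattern for locking two reservation objects a and b
 * under one acquire context.  a, b, and their resv members are
 * hypothetical; on -EDEADLK the caller backs out, takes the contended
 * lock with dma_resv_lock_slow, and retries the rest:
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(&a->resv, &ctx);	(first lock cannot deadlock)
 *	ret = dma_resv_lock(&b->resv, &ctx);
 *	if (ret == -EDEADLK) {
 *		dma_resv_unlock(&a->resv);
 *		dma_resv_lock_slow(&b->resv, &ctx);
 *		ret = dma_resv_lock(&a->resv, &ctx);
 *		(on another -EDEADLK, back out and repeat the backoff)
 *	}
 *	ww_acquire_done(&ctx);
 *	(use a and b, then)
 *	dma_resv_unlock(&b->resv);
 *	dma_resv_unlock(&a->resv);
 *	ww_acquire_fini(&ctx);
 */
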
/*
 * dma_resv_trylock(robj)
 *
 *	Try to acquire a reservation object's lock without blocking.
 *	Return true on success, false on failure.
 */
bool
dma_resv_trylock(struct dma_resv *robj)
{

	return ww_mutex_trylock(&robj->lock);
}

/*
 * dma_resv_locking_ctx(robj)
 *
 *	Return a pointer to the ww_acquire_ctx used by the owner of
 *	the reservation object's lock, or NULL if it is either not
 *	owned or if it is locked without context.
 */
struct ww_acquire_ctx *
dma_resv_locking_ctx(struct dma_resv *robj)
{

	return ww_mutex_locking_ctx(&robj->lock);
}

/*
 * dma_resv_unlock(robj)
 *
 *	Release a reservation object's lock.
 */
void
dma_resv_unlock(struct dma_resv *robj)
{

	return ww_mutex_unlock(&robj->lock);
}

/*
 * dma_resv_is_locked(robj)
 *
 *	True if robj is locked.
 */
bool
dma_resv_is_locked(struct dma_resv *robj)
{

	return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_held(robj)
 *
 *	True if robj is locked.
 */
bool
dma_resv_held(struct dma_resv *robj)
{

	return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_assert_held(robj)
 *
 *	Panic if robj is not held, in DIAGNOSTIC builds.
 */
void
dma_resv_assert_held(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
}

/*
 * dma_resv_get_excl(robj)
 *
 *	Return a pointer to the exclusive fence of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_fence *
dma_resv_get_excl(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence_excl;
}

/*
 * dma_resv_get_list(robj)
 *
 *	Return a pointer to the shared fence list of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_resv_list *
dma_resv_get_list(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence;
}

/*
 * dma_resv_reserve_shared(robj, num_fences)
 *
 *	Reserve space in robj to add num_fences shared fences.  To be
 *	used only once before calling dma_resv_add_shared_fence.
 *
 *	Caller must have robj locked.
 *
 *	Internally, we start with room for four entries and double if
 *	we don't have enough.  This is not guaranteed.
 */
int
dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
{
	struct dma_resv_list *list, *prealloc;
	uint32_t n, nalloc;

	KASSERT(dma_resv_held(robj));

	list = robj->fence;
	prealloc = robj->robj_prealloc;

	/* If there's an existing list, check it for space.  */
	if (list) {
		/* If there are too many already, give up.  */
		if (list->shared_count > UINT32_MAX - num_fences)
			return -ENOMEM;

		/* Add room for num_fences more.  */
		n = list->shared_count + num_fences;

		/* If there's already enough room for them, we're done.  */
		if (n <= list->shared_max)
			return 0;
	} else {
		/* No list already.  We need space for num_fences.  */
		n = num_fences;
	}

	/* If not, maybe there's a preallocated list ready.  */
	if (prealloc != NULL) {
		/* If there's enough room in it, stop here.  */
		if (n <= prealloc->shared_max)
			return 0;

		/* Try to double its capacity.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
		prealloc = objlist_alloc(nalloc);

		/* Swap the new preallocated list and free the old one.  */
		objlist_free(robj->robj_prealloc);
		robj->robj_prealloc = prealloc;
	} else {
		/* Start with some spare.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
		prealloc = objlist_alloc(nalloc);

		/* Save the new preallocated list.  */
		robj->robj_prealloc = prealloc;
	}

	/* Success!  */
	return 0;
}

struct dma_resv_write_ticket {
};

/*
 * dma_resv_write_begin(robj, ticket)
 *
 *	Begin an atomic batch of writes to robj, and initialize opaque
 *	ticket for it.  The ticket must be passed to
 *	dma_resv_write_commit to commit the writes.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as an acquire operation, however.
 */
static void
dma_resv_write_begin(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_write_commit(robj, ticket)
 *
 *	Commit an atomic batch of writes to robj begun with the call to
 *	dma_resv_write_begin that returned ticket.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as a release operation, however.
 */
static void
dma_resv_write_commit(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_end(&robj->seq);
}

struct dma_resv_read_ticket {
	unsigned version;
};

/*
 * dma_resv_read_begin(robj, ticket)
 *
 *	Begin a read section, and initialize opaque ticket for it.  The
 *	ticket must be passed to dma_resv_read_valid, and the
 *	caller must be prepared to retry reading if it fails.
 */
static void
dma_resv_read_begin(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	ticket->version = read_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_read_valid(robj, ticket)
 *
 *	Test whether the read sections are valid.  Return true on
 *	success, or false on failure if the read ticket has been
 *	invalidated.
 */
static bool
dma_resv_read_valid(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	return !read_seqcount_retry(&robj->seq, ticket->version);
}

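/*
 * Usage sketch (illustrative only, not part of this file): how the
 * write and read tickets pair up in the lockless readers below.  A
 * reader takes a ticket, loads the fence pointers it cares about, and
 * starts over if a writer committed an update in the meantime:
 *
 *	struct dma_resv_read_ticket ticket;
 *
 *  top:
 *	rcu_read_lock();
 *	dma_resv_read_begin(robj, &ticket);
 *	(load robj->fence and/or robj->fence_excl with atomic_load_consume)
 *	if (!dma_resv_read_valid(robj, &ticket)) {
 *		rcu_read_unlock();
 *		goto top;
 *	}
 *	(take fence references with dma_fence_get_rcu, then)
 *	rcu_read_unlock();
 */
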
/*
 * dma_resv_get_shared_reader(robj, listp, shared_countp, ticket)
 *
 *	Set *listp and *shared_countp to a snapshot of the pointer to
 *	and length of the shared fence list of robj and return true, or
 *	set them to NULL/0 and return false if a writer intervened so
 *	the caller must start over.
 *
 *	Both *listp and *shared_countp are unconditionally initialized
 *	on return.  They may be NULL/0 even on success, if there is no
 *	shared list at the moment.  Does not take any fence references.
 */
static bool
dma_resv_get_shared_reader(const struct dma_resv *robj,
    const struct dma_resv_list **listp, unsigned *shared_countp,
    struct dma_resv_read_ticket *ticket)
{
	struct dma_resv_list *list;
	unsigned shared_count = 0;

	/*
	 * Get the list and, if it is present, its length.  If the list
	 * is present, it has a valid length.  The atomic_load_consume
	 * pairs with the membar_producer in dma_resv_write_begin.
	 */
	list = atomic_load_consume(&robj->fence);
	shared_count = list ? atomic_load_relaxed(&list->shared_count) : 0;

	/*
	 * We are done reading from robj and list.  Validate our
	 * parking ticket.  If it's invalid, do not pass go and do not
	 * collect $200.
	 */
	if (!dma_resv_read_valid(robj, ticket))
		goto fail;

	/* Success!  */
	*listp = list;
	*shared_countp = shared_count;
	return true;

fail:	*listp = NULL;
	*shared_countp = 0;
	return false;
}

/*
 * dma_resv_get_excl_reader(robj, fencep, ticket)
 *
 *	Set *fencep to the exclusive fence of robj and return true, or
 *	set it to NULL and return false if either
 *	(a) a writer intervened, or
 *	(b) the fence is scheduled to be destroyed after this RCU grace
 *	    period,
 *	in either case meaning the caller must restart.
 *
 *	The value of *fencep is unconditionally initialized on return.
 *	It may be NULL, if there is no exclusive fence at the moment.
 *	If nonnull, *fencep is referenced; caller must dma_fence_put.
 */
static bool
dma_resv_get_excl_reader(const struct dma_resv *robj,
    struct dma_fence **fencep,
    struct dma_resv_read_ticket *ticket)
{
	struct dma_fence *fence;

	/*
	 * Get the candidate fence pointer.  The atomic_load_consume
	 * pairs with the membar_producer in dma_resv_write_begin.
	 */
	fence = atomic_load_consume(&robj->fence_excl);

	/*
	 * The load of robj->fence_excl is atomic, but the caller may
	 * have previously loaded the shared fence list and should
	 * restart if its view of the entire dma_resv object is not a
	 * consistent snapshot.
	 */
	if (!dma_resv_read_valid(robj, ticket))
		goto fail;

	/*
	 * If the fence is already scheduled to go away after this RCU
	 * read section, give up.  Otherwise, take a reference so it
	 * won't go away until after dma_fence_put.
	 */
	if (fence != NULL &&
	    (fence = dma_fence_get_rcu(fence)) == NULL)
		goto fail;

	/* Success!  */
	*fencep = fence;
	return true;

fail:	*fencep = NULL;
	return false;
}

/*
 * dma_resv_add_excl_fence(robj, fence)
 *
 *	Empty and release all of robj's shared fences, and clear and
 *	release its exclusive fence.  If fence is nonnull, acquire a
 *	reference to it and save it as robj's exclusive fence.
 *
 *	Caller must have robj locked.
 */
void
dma_resv_add_excl_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_fence *old_fence = robj->fence_excl;
	struct dma_resv_list *old_list = robj->fence;
	uint32_t old_shared_count;
	struct dma_resv_write_ticket ticket;

	KASSERT(dma_resv_held(robj));

	/*
	 * If we are setting rather than just removing a fence, acquire
	 * a reference for ourselves.
	 */
	if (fence)
		(void)dma_fence_get(fence);

	/* If there are any shared fences, remember how many.  */
	if (old_list)
		old_shared_count = old_list->shared_count;

	/* Begin an update.  Implies membar_producer for fence.  */
	dma_resv_write_begin(robj, &ticket);

	/* Replace the fence and zero the shared count.  */
	atomic_store_relaxed(&robj->fence_excl, fence);
	if (old_list)
		old_list->shared_count = 0;

	/* Commit the update.  */
	dma_resv_write_commit(robj, &ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence) {
		dma_fence_put(old_fence);
		old_fence = NULL; /* paranoia */
	}

	/* Release any old shared fences.  */
	if (old_list) {
		while (old_shared_count--) {
			dma_fence_put(old_list->shared[old_shared_count]);
			/* paranoia */
			old_list->shared[old_shared_count] = NULL;
		}
	}
}

/*
 * dma_resv_add_shared_fence(robj, fence)
 *
 *	Acquire a reference to fence and add it to robj's shared list.
 *	If any fence was already added with the same context number,
 *	release it and replace it by this one.
 *
 *	Caller must have robj locked, and must have preceded with a
 *	call to dma_resv_reserve_shared for each shared fence
 *	added.
 */
void
dma_resv_add_shared_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_resv_list *list = robj->fence;
	struct dma_resv_list *prealloc = robj->robj_prealloc;
	struct dma_resv_write_ticket ticket;
	struct dma_fence *replace = NULL;
	uint32_t i;

	KASSERT(dma_resv_held(robj));

	/* Acquire a reference to the fence.  */
	KASSERT(fence != NULL);
	(void)dma_fence_get(fence);

	/* Check for a preallocated replacement list.  */
	if (prealloc == NULL) {
		/*
		 * If there is no preallocated replacement list, then
		 * there must be room in the current list.
		 */
		KASSERT(list != NULL);
		KASSERT(list->shared_count < list->shared_max);

		/* Begin an update.  Implies membar_producer for fence.  */
		dma_resv_write_begin(robj, &ticket);

		/* Find a fence with the same context number.  */
		for (i = 0; i < list->shared_count; i++) {
			if (list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				atomic_store_relaxed(&list->shared[i], fence);
				break;
			}
		}

		/* If we didn't find one, add it at the end.  */
		if (i == list->shared_count) {
			atomic_store_relaxed(&list->shared[list->shared_count],
			    fence);
			atomic_store_relaxed(&list->shared_count,
			    list->shared_count + 1);
		}

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);
	} else {
		/*
		 * There is a preallocated replacement list.  There may
		 * not be a current list.  If not, treat it as a zero-
		 * length list.
		 */
		uint32_t shared_count = (list == NULL? 0 : list->shared_count);

		/* There had better be room in the preallocated list.  */
		KASSERT(shared_count < prealloc->shared_max);

		/*
		 * Copy the fences over, but replace if we find one
		 * with the same context number.
		 */
		for (i = 0; i < shared_count; i++) {
			if (replace == NULL &&
			    list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				prealloc->shared[i] = fence;
			} else {
				prealloc->shared[i] = list->shared[i];
			}
		}
		prealloc->shared_count = shared_count;

		/* If we didn't find one, add it at the end.  */
		if (replace == NULL) {
			KASSERT(prealloc->shared_count < prealloc->shared_max);
			prealloc->shared[prealloc->shared_count++] = fence;
		}

		/*
		 * Now ready to replace the list.  Begin an update.
		 * Implies membar_producer for fence and prealloc.
		 */
		dma_resv_write_begin(robj, &ticket);

		/* Replace the list.  */
		atomic_store_relaxed(&robj->fence, prealloc);
		robj->robj_prealloc = NULL;

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);

		/*
		 * If there is an old list, free it when convenient.
		 * (We are not in a position at this point to sleep
		 * waiting for activity on all CPUs.)
		 */
		if (list)
			objlist_defer_free(list);
	}

	/* Release a fence if we replaced it.  */
	if (replace) {
		dma_fence_put(replace);
		replace = NULL;	/* paranoia */
	}
}

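/*
 * Usage sketch (illustrative only, not part of this file): a
 * hypothetical caller publishing a fence for a newly submitted job.
 * obj, fence, ctx, and the write flag are assumptions for the
 * example; the point is that dma_resv_reserve_shared must precede
 * dma_resv_add_shared_fence, all under the reservation lock:
 *
 *	ret = dma_resv_lock(&obj->resv, ctx);
 *	(handle ret as in the locking sketch above)
 *	if (write) {
 *		dma_resv_add_excl_fence(&obj->resv, fence);
 *	} else {
 *		ret = dma_resv_reserve_shared(&obj->resv, 1);
 *		if (ret == 0)
 *			dma_resv_add_shared_fence(&obj->resv, fence);
 *	}
 *	dma_resv_unlock(&obj->resv);
 */
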
/*
 * dma_resv_get_excl_rcu(robj)
 *
 *	Return a reference to the exclusive fence of the reservation
 *	object robj, or NULL if there is none.  Caller must release it
 *	with dma_fence_put when done.
 *
 *	Note: Caller need not call this from an RCU read section.
 */
struct dma_fence *
dma_resv_get_excl_rcu(const struct dma_resv *robj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&robj->fence_excl);
	rcu_read_unlock();

	return fence;
}

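/*
 * Usage sketch (illustrative only, not part of this file): a quick
 * lockless check whether a hypothetical object obj is still busy for
 * writes, using the exclusive fence only:
 *
 *	struct dma_fence *fence;
 *	bool busy = false;
 *
 *	fence = dma_resv_get_excl_rcu(&obj->resv);
 *	if (fence != NULL) {
 *		busy = !dma_fence_is_signaled(fence);
 *		dma_fence_put(fence);
 *	}
 */
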
/*
 * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
 *
 *	Get a snapshot of the exclusive and shared fences of robj.  The
 *	shared fences are returned as a pointer *sharedp to an array,
 *	to be freed by the caller with kfree, of *nsharedp elements.
 *	If fencep is null, then add the exclusive fence, if any, at the
 *	end of the array instead.
 *
 *	Returns zero on success, negative (Linux-style) error code on
 *	failure.  On failure, *fencep, *nsharedp, and *sharedp are
 *	untouched.
 */
int
dma_resv_get_fences_rcu(const struct dma_resv *robj,
    struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
{
	const struct dma_resv_list *list = NULL;
	struct dma_fence *fence = NULL;
	struct dma_fence **shared = NULL;
	unsigned shared_alloc = 0, shared_count, i;
	struct dma_resv_read_ticket ticket;

top:	KASSERT(fence == NULL);

	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If there is a shared list, grab it.  */
	if (!dma_resv_get_shared_reader(robj, &list, &shared_count, &ticket))
		goto restart;
	if (list != NULL) {

		/*
		 * Avoid arithmetic overflow with `+ 1' below.
		 * Strictly speaking we don't need this if the caller
		 * specified fencep or if there is no exclusive fence,
		 * but it is simpler to not have to consider those
		 * cases.
		 */
		KASSERT(shared_count <= list->shared_max);
		if (list->shared_max == UINT_MAX) {
			/* Exit the RCU read section before failing.  */
			rcu_read_unlock();
			if (shared)
				kfree(shared);
			return -ENOMEM;
		}

		/* Check whether we have a buffer.  */
		if (shared == NULL) {
			/*
			 * We don't have a buffer yet.  Try to allocate
			 * one without waiting.
			 */
			shared_alloc = list->shared_max + 1;
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_NOWAIT);
			if (shared == NULL) {
				/*
				 * Couldn't do it immediately.  Back
				 * out of RCU and allocate one with
				 * waiting.
				 */
				rcu_read_unlock();
				shared = kcalloc(shared_alloc,
				    sizeof(shared[0]), GFP_KERNEL);
				if (shared == NULL)
					return -ENOMEM;
				goto top;
			}
		} else if (shared_alloc < list->shared_max + 1) {
			/*
			 * We have a buffer but it's too small.  We're
			 * already racing in this case, so just back
			 * out and wait to allocate a bigger one.
			 */
			shared_alloc = list->shared_max + 1;
			rcu_read_unlock();
			kfree(shared);
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_KERNEL);
			if (shared == NULL)
				return -ENOMEM;
			goto top;
		}

		/*
		 * We got a buffer large enough.  Copy into the buffer
		 * and record the number of elements.  Could safely use
		 * memcpy here, because even if we race with a writer
		 * it'll invalidate the read ticket and we'll start
		 * over, but atomic_load in a loop will pacify kcsan.
		 */
		for (i = 0; i < shared_count; i++)
			shared[i] = atomic_load_relaxed(&list->shared[i]);

		/* If anything changed while we were copying, restart.  */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;
	}

	/* If there is an exclusive fence, grab it.  */
	KASSERT(fence == NULL);
	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
		goto restart;

	/*
	 * Try to get a reference to all of the shared fences.
	 */
	for (i = 0; i < shared_count; i++) {
		if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL)
			goto put_restart;
	}

	/* Success!  */
	rcu_read_unlock();
	KASSERT(shared_count <= shared_alloc);
	KASSERT(shared_alloc == 0 || shared_count < shared_alloc);
	KASSERT(shared_alloc <= UINT_MAX);
	if (fencep) {
		*fencep = fence;
	} else if (fence) {
		if (shared_count) {
			shared[shared_count++] = fence;
		} else {
			shared = kmalloc(sizeof(shared[0]), GFP_KERNEL);
			shared[0] = fence;
			shared_count = 1;
		}
	}
	*nsharedp = shared_count;
	*sharedp = shared;
	return 0;

put_restart:
	/* Back out.  */
	while (i --> 0) {
		dma_fence_put(shared[i]);
		shared[i] = NULL; /* paranoia */
	}
	if (fence) {
		dma_fence_put(fence);
		fence = NULL;
	}

restart:
	KASSERT(fence == NULL);
	rcu_read_unlock();
	goto top;
}

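/*
 * Usage sketch (illustrative only, not part of this file): a
 * hypothetical caller snapshotting every fence on obj and releasing
 * the snapshot when done.  Note the shared array is kfree'd by the
 * caller:
 *
 *	struct dma_fence *excl = NULL, **shared = NULL;
 *	unsigned nshared = 0, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(&obj->resv, &excl, &nshared, &shared);
 *	if (ret)
 *		return ret;
 *	(use excl and shared[0..nshared-1], then release them)
 *	for (i = 0; i < nshared; i++)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	if (excl)
 *		dma_fence_put(excl);
 */
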
/*
 * dma_resv_copy_fences(dst, src)
 *
 *	Copy the exclusive fence and all the shared fences from src to
 *	dst.
 *
 *	Caller must have dst locked.
 */
int
dma_resv_copy_fences(struct dma_resv *dst_robj,
    const struct dma_resv *src_robj)
{
	const struct dma_resv_list *src_list;
	struct dma_resv_list *dst_list = NULL;
	struct dma_resv_list *old_list;
	struct dma_fence *fence = NULL;
	struct dma_fence *old_fence;
	uint32_t shared_count, i;
	struct dma_resv_read_ticket read_ticket;
	struct dma_resv_write_ticket write_ticket;

	KASSERT(dma_resv_held(dst_robj));

top:	KASSERT(fence == NULL);

	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(src_robj, &read_ticket);

	/* Get the shared list.  */
	if (!dma_resv_get_shared_reader(src_robj, &src_list, &shared_count,
		&read_ticket))
		goto restart;
	if (src_list) {
		/* Allocate a new list, if necessary.  */
		if (dst_list == NULL)
			dst_list = objlist_tryalloc(shared_count);
		if (dst_list == NULL || dst_list->shared_max < shared_count) {
			rcu_read_unlock();
			if (dst_list) {
				objlist_free(dst_list);
				dst_list = NULL;
			}
			dst_list = objlist_alloc(shared_count);
			dst_list->shared_count = 0; /* paranoia */
			goto top;
		}

		/* Copy over all fences that are not yet signalled.  */
		dst_list->shared_count = 0;
		for (i = 0; i < shared_count; i++) {
			KASSERT(fence == NULL);
			fence = atomic_load_relaxed(&src_list->shared[i]);
			if ((fence = dma_fence_get_rcu(fence)) == NULL)
				goto restart;
			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				fence = NULL;
				continue;
			}
			dst_list->shared[dst_list->shared_count++] = fence;
			fence = NULL;
		}

		/* If anything changed while we were copying, restart.  */
		if (!dma_resv_read_valid(src_robj, &read_ticket))
			goto restart;
	}

	/* Get the exclusive fence.  */
	KASSERT(fence == NULL);
	if (!dma_resv_get_excl_reader(src_robj, &fence, &read_ticket))
		goto restart;

	/* All done with src; exit the RCU read section.  */
	rcu_read_unlock();

	/*
	 * We now have a snapshot of the shared and exclusive fences of
	 * src_robj and we have acquired references to them so they
	 * won't go away.  Transfer them over to dst_robj, releasing
	 * references to any that were there.
	 */

	/* Get the old shared and exclusive fences, if any.  */
	old_list = dst_robj->fence;
	old_fence = dst_robj->fence_excl;

	/*
	 * Begin an update.  Implies membar_producer for dst_list and
	 * fence.
	 */
	dma_resv_write_begin(dst_robj, &write_ticket);

	/* Replace the fences.  */
	atomic_store_relaxed(&dst_robj->fence, dst_list);
	atomic_store_relaxed(&dst_robj->fence_excl, fence);

	/* Commit the update.  */
	dma_resv_write_commit(dst_robj, &write_ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence) {
		dma_fence_put(old_fence);
		old_fence = NULL; /* paranoia */
	}

	/* Release any old shared fences.  */
	if (old_list) {
		for (i = old_list->shared_count; i --> 0;) {
			dma_fence_put(old_list->shared[i]);
			old_list->shared[i] = NULL; /* paranoia */
		}
		objlist_free(old_list);
		old_list = NULL; /* paranoia */
	}

	/* Success!  */
	return 0;

restart:
	KASSERT(fence == NULL);
	rcu_read_unlock();
	if (dst_list) {
		for (i = dst_list->shared_count; i --> 0;) {
			dma_fence_put(dst_list->shared[i]);
			dst_list->shared[i] = NULL; /* paranoia */
		}
		/* reuse dst_list allocation for the next attempt */
	}
	goto top;
}

/*
 * dma_resv_test_signaled_rcu(robj, shared)
 *
 *	If shared is true, test whether all of the shared fences are
 *	signalled, or if there are none, test whether the exclusive
 *	fence is signalled.  If shared is false, test only whether the
 *	exclusive fence is signalled.
 *
 *	XXX Why, when shared is true, does this test the exclusive fence
 *	only when there are no shared fences?  This makes no sense.
 */
bool
dma_resv_test_signaled_rcu(const struct dma_resv *robj,
    bool shared)
{
	struct dma_resv_read_ticket ticket;
	const struct dma_resv_list *list;
	struct dma_fence *fence = NULL;
	uint32_t i, shared_count;
	bool signaled = true;

top:	KASSERT(fence == NULL);

	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If shared is requested and there is a shared list, test it.  */
	if (shared) {
		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
			&ticket))
			goto restart;
	} else {
		list = NULL;
		shared_count = 0;
	}
	if (list != NULL) {
		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop if we find any that is not
		 * signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			KASSERT(fence == NULL);
			fence = atomic_load_relaxed(&list->shared[i]);
			if ((fence = dma_fence_get_rcu(fence)) == NULL)
				goto restart;
			signaled &= dma_fence_is_signaled(fence);
			dma_fence_put(fence);
			fence = NULL;
			if (!signaled)
				goto out;
		}

		/* If anything changed while we were testing, restart.  */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;
	}
	if (shared_count)
		goto out;

	/* If there is an exclusive fence, test it.  */
	KASSERT(fence == NULL);
	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
		goto restart;
	if (fence != NULL) {
		/* Test whether it is signalled.  If no, stop.  */
		signaled &= dma_fence_is_signaled(fence);
		dma_fence_put(fence);
		fence = NULL;
		if (!signaled)
			goto out;
	}

out:	KASSERT(fence == NULL);
	rcu_read_unlock();
	return signaled;

restart:
	KASSERT(fence == NULL);
	rcu_read_unlock();
	goto top;
}

/*
 * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
 *
 *	If shared is true, wait for all of the shared fences to be
 *	signalled, or if there are none, wait for the exclusive fence
 *	to be signalled.  If shared is false, wait only for the
 *	exclusive fence to be signalled.  If timeout is zero, don't
 *	wait, only test.
 *
 *	XXX Why, when shared is true, does this wait for the exclusive
 *	fence only when there are no shared fences?  This makes no
 *	sense.
   1129   1.1  riastrad  */
   1130   1.1  riastrad long
   1131   1.1  riastrad dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
   1132   1.1  riastrad     bool shared, bool intr, unsigned long timeout)
   1133   1.1  riastrad {
   1134   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1135  1.12  riastrad 	const struct dma_resv_list *list;
   1136  1.10  riastrad 	struct dma_fence *fence = NULL;
   1137   1.1  riastrad 	uint32_t i, shared_count;
   1138   1.1  riastrad 	long ret;
   1139   1.1  riastrad 
   1140   1.1  riastrad 	if (timeout == 0)
   1141   1.1  riastrad 		return dma_resv_test_signaled_rcu(robj, shared);
   1142   1.1  riastrad 
   1143  1.10  riastrad top:	KASSERT(fence == NULL);
   1144  1.10  riastrad 
   1145   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1146   1.1  riastrad 	rcu_read_lock();
   1147   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1148   1.1  riastrad 
   1149   1.1  riastrad 	/* If shared is requested and there is a shared list, wait on it.  */
   1150  1.12  riastrad 	if (shared) {
   1151  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1152  1.12  riastrad 			&ticket))
   1153   1.1  riastrad 			goto restart;
   1154  1.12  riastrad 	} else {
   1155  1.12  riastrad 		list = NULL;
   1156  1.12  riastrad 		shared_count = 0;
   1157  1.12  riastrad 	}
   1158  1.12  riastrad 	if (list != NULL) {
   1159   1.1  riastrad 		/*
   1160   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1161   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1162   1.1  riastrad 		 * it is signalled.  Stop and wait if we find any that
   1163   1.1  riastrad 		 * is not signalled.
   1164   1.1  riastrad 		 */
   1165   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1166  1.10  riastrad 			KASSERT(fence == NULL);
   1167   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1168  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1169   1.1  riastrad 				goto restart;
   1170   1.1  riastrad 			if (!dma_fence_is_signaled(fence))
   1171   1.1  riastrad 				goto wait;
   1172   1.1  riastrad 			dma_fence_put(fence);
   1173  1.10  riastrad 			fence = NULL;
   1174   1.1  riastrad 		}
   1175  1.14  riastrad 
   1176  1.14  riastrad 		/* If anything changed while we were testing, restart.  */
   1177  1.14  riastrad 		if (!dma_resv_read_valid(robj, &ticket))
   1178  1.14  riastrad 			goto restart;
   1179   1.1  riastrad 	}
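	/*
	 * If there were any shared fences, they were all already
	 * signalled; don't wait for the exclusive fence.
	 */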
   1180  1.15  riastrad 	if (shared_count)
   1181  1.15  riastrad 		goto out;
   1182   1.1  riastrad 
   1183   1.1  riastrad 	/* If there is an exclusive fence, test it.  */
   1184  1.10  riastrad 	KASSERT(fence == NULL);
   1185  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1186  1.12  riastrad 		goto restart;
   1187  1.12  riastrad 	if (fence != NULL) {
    1188  1.12  riastrad 		/* Test whether it is signalled.  If not, wait.  */
   1189   1.1  riastrad 		if (!dma_fence_is_signaled(fence))
   1190   1.1  riastrad 			goto wait;
   1191   1.1  riastrad 		dma_fence_put(fence);
   1192  1.10  riastrad 		fence = NULL;
   1193   1.1  riastrad 	}
   1194   1.1  riastrad 
   1195  1.15  riastrad out:	/* Success!  Return the number of ticks left.  */
   1196   1.1  riastrad 	rcu_read_unlock();
   1197  1.10  riastrad 	KASSERT(fence == NULL);
   1198   1.1  riastrad 	return timeout;
   1199   1.1  riastrad 
   1200   1.1  riastrad restart:
   1201  1.10  riastrad 	KASSERT(fence == NULL);
   1202   1.1  riastrad 	rcu_read_unlock();
   1203   1.1  riastrad 	goto top;
   1204   1.1  riastrad 
   1205   1.1  riastrad wait:
   1206   1.1  riastrad 	/*
    1207   1.5  riastrad 	 * Exit the RCU read section, wait for the fence, and release
    1208   1.5  riastrad 	 * it when we're done.  If we time out or fail, bail.
   1209   1.5  riastrad 	 * Otherwise, go back to the top.
   1210   1.1  riastrad 	 */
   1211   1.1  riastrad 	KASSERT(fence != NULL);
   1212   1.1  riastrad 	rcu_read_unlock();
   1213   1.1  riastrad 	ret = dma_fence_wait_timeout(fence, intr, timeout);
   1214   1.1  riastrad 	dma_fence_put(fence);
   1215  1.10  riastrad 	fence = NULL;
   1216   1.1  riastrad 	if (ret <= 0)
   1217   1.1  riastrad 		return ret;
   1218   1.1  riastrad 	KASSERT(ret <= timeout);
   1219   1.1  riastrad 	timeout = ret;
   1220   1.1  riastrad 	goto top;
   1221   1.1  riastrad }
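
/*
 * Illustrative sketch, not part of the original code: one way a
 * caller might use dma_resv_wait_timeout_rcu to wait for all access
 * to a buffer to finish before reusing it.  The function name and
 * error handling below are assumptions for the example only.
 */
#if 0	/* example only, not compiled */
static int
example_wait_idle(const struct dma_resv *robj, unsigned long timeout)
{
	long ret;

	/* Wait, interruptibly, for shared and exclusive fences.  */
	ret = dma_resv_wait_timeout_rcu(robj, /*shared*/true, /*intr*/true,
	    timeout);
	if (ret < 0)		/* interrupted or other error */
		return (int)-ret;
	if (ret == 0)		/* timed out */
		return EWOULDBLOCK;
	return 0;		/* all fences signalled */
}
#endif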
   1222   1.1  riastrad 
   1223   1.1  riastrad /*
   1224   1.1  riastrad  * dma_resv_poll_init(rpoll, lock)
   1225   1.1  riastrad  *
   1226   1.1  riastrad  *	Initialize reservation poll state.
   1227   1.1  riastrad  */
   1228   1.1  riastrad void
   1229   1.1  riastrad dma_resv_poll_init(struct dma_resv_poll *rpoll)
   1230   1.1  riastrad {
   1231   1.1  riastrad 
   1232   1.1  riastrad 	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
   1233   1.1  riastrad 	selinit(&rpoll->rp_selq);
   1234   1.1  riastrad 	rpoll->rp_claimed = 0;
   1235   1.1  riastrad }
   1236   1.1  riastrad 
   1237   1.1  riastrad /*
   1238   1.1  riastrad  * dma_resv_poll_fini(rpoll)
   1239   1.1  riastrad  *
   1240   1.1  riastrad  *	Release any resource associated with reservation poll state.
   1241   1.1  riastrad  */
   1242   1.1  riastrad void
   1243   1.1  riastrad dma_resv_poll_fini(struct dma_resv_poll *rpoll)
   1244   1.1  riastrad {
   1245   1.1  riastrad 
   1246   1.1  riastrad 	KASSERT(rpoll->rp_claimed == 0);
   1247   1.1  riastrad 	seldestroy(&rpoll->rp_selq);
   1248   1.1  riastrad 	mutex_destroy(&rpoll->rp_lock);
   1249   1.1  riastrad }
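
/*
 * Illustrative sketch, not part of the original code: typical
 * lifecycle of a dma_resv_poll embedded alongside a dma_resv in a
 * hypothetical driver softc.  The structure and function names are
 * assumptions for the example only.
 */
#if 0	/* example only, not compiled */
struct example_softc {
	struct dma_resv		sc_resv;
	struct dma_resv_poll	sc_rpoll;
};

static void
example_attach(struct example_softc *sc)
{

	dma_resv_init(&sc->sc_resv);
	dma_resv_poll_init(&sc->sc_rpoll);
}

static void
example_detach(struct example_softc *sc)
{

	dma_resv_poll_fini(&sc->sc_rpoll);
	dma_resv_fini(&sc->sc_resv);
}
#endif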
   1250   1.1  riastrad 
   1251   1.1  riastrad /*
   1252   1.1  riastrad  * dma_resv_poll_cb(fence, fcb)
   1253   1.1  riastrad  *
   1254   1.1  riastrad  *	Callback to notify a reservation poll that a fence has
   1255   1.1  riastrad  *	completed.  Notify any waiters and allow the next poller to
   1256   1.1  riastrad  *	claim the callback.
   1257   1.1  riastrad  *
   1258   1.1  riastrad  *	If one thread is waiting for the exclusive fence only, and we
   1259   1.1  riastrad  *	spuriously notify them about a shared fence, tough.
   1260   1.1  riastrad  */
   1261   1.1  riastrad static void
   1262   1.1  riastrad dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
   1263   1.1  riastrad {
   1264   1.1  riastrad 	struct dma_resv_poll *rpoll = container_of(fcb,
   1265   1.1  riastrad 	    struct dma_resv_poll, rp_fcb);
   1266   1.1  riastrad 
   1267   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1268   1.1  riastrad 	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
   1269   1.1  riastrad 	rpoll->rp_claimed = 0;
   1270   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1271   1.1  riastrad }
   1272   1.1  riastrad 
   1273   1.1  riastrad /*
   1274   1.1  riastrad  * dma_resv_do_poll(robj, events, rpoll)
   1275   1.1  riastrad  *
   1276   1.1  riastrad  *	Poll for reservation object events using the reservation poll
   1277   1.1  riastrad  *	state in rpoll:
   1278   1.1  riastrad  *
    1279   1.1  riastrad  *	- POLLOUT	wait for all fences, shared and exclusive
   1280   1.1  riastrad  *	- POLLIN	wait for the exclusive fence
   1281   1.1  riastrad  *
   1282   1.1  riastrad  *	Return the subset of events in events that are ready.  If any
   1283   1.1  riastrad  *	are requested but not ready, arrange to be notified with
   1284   1.1  riastrad  *	selnotify when they are.
   1285   1.1  riastrad  */
   1286   1.1  riastrad int
   1287   1.1  riastrad dma_resv_do_poll(const struct dma_resv *robj, int events,
   1288   1.1  riastrad     struct dma_resv_poll *rpoll)
   1289   1.1  riastrad {
   1290   1.1  riastrad 	struct dma_resv_read_ticket ticket;
   1291  1.12  riastrad 	const struct dma_resv_list *list;
   1292  1.10  riastrad 	struct dma_fence *fence = NULL;
   1293   1.1  riastrad 	uint32_t i, shared_count;
   1294   1.1  riastrad 	int revents;
   1295   1.1  riastrad 	bool recorded = false;	/* curlwp is on the selq */
   1296   1.1  riastrad 	bool claimed = false;	/* we claimed the callback */
   1297   1.1  riastrad 	bool callback = false;	/* we requested a callback */
   1298   1.1  riastrad 
   1299   1.1  riastrad 	/*
   1300   1.1  riastrad 	 * Start with the maximal set of events that could be ready.
   1301   1.1  riastrad 	 * We will eliminate the events that are definitely not ready
   1302   1.1  riastrad 	 * as we go at the same time as we add callbacks to notify us
   1303   1.1  riastrad 	 * that they may be ready.
   1304   1.1  riastrad 	 */
   1305   1.1  riastrad 	revents = events & (POLLIN|POLLOUT);
   1306   1.1  riastrad 	if (revents == 0)
   1307   1.1  riastrad 		return 0;
   1308   1.1  riastrad 
   1309  1.10  riastrad top:	KASSERT(fence == NULL);
   1310  1.10  riastrad 
   1311   1.1  riastrad 	/* Enter an RCU read section and get a read ticket.  */
   1312   1.1  riastrad 	rcu_read_lock();
   1313   1.1  riastrad 	dma_resv_read_begin(robj, &ticket);
   1314   1.1  riastrad 
   1315   1.1  riastrad 	/* If we want to wait for all fences, get the shared list.  */
   1316  1.12  riastrad 	if (events & POLLOUT) {
   1317  1.12  riastrad 		if (!dma_resv_get_shared_reader(robj, &list, &shared_count,
   1318  1.12  riastrad 			&ticket))
   1319   1.1  riastrad 			goto restart;
   1320  1.12  riastrad 	} else {
   1321  1.12  riastrad 		list = NULL;
   1322  1.12  riastrad 		shared_count = 0;
   1323  1.12  riastrad 	}
   1324  1.12  riastrad 	if (list != NULL) do {
   1325   1.1  riastrad 		/*
   1326   1.1  riastrad 		 * For each fence, if it is going away, restart.
   1327   1.1  riastrad 		 * Otherwise, acquire a reference to it to test whether
   1328   1.1  riastrad 		 * it is signalled.  Stop and request a callback if we
   1329   1.1  riastrad 		 * find any that is not signalled.
   1330   1.1  riastrad 		 */
   1331   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1332  1.10  riastrad 			KASSERT(fence == NULL);
   1333   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1334  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1335   1.1  riastrad 				goto restart;
   1336   1.1  riastrad 			if (!dma_fence_is_signaled(fence)) {
   1337   1.1  riastrad 				dma_fence_put(fence);
   1338  1.10  riastrad 				fence = NULL;
   1339   1.1  riastrad 				break;
   1340   1.1  riastrad 			}
   1341   1.1  riastrad 			dma_fence_put(fence);
   1342  1.10  riastrad 			fence = NULL;
   1343   1.1  riastrad 		}
   1344   1.1  riastrad 
   1345   1.1  riastrad 		/* If all shared fences have been signalled, move on.  */
   1346   1.1  riastrad 		if (i == shared_count)
   1347   1.1  riastrad 			break;
   1348   1.1  riastrad 
   1349   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1350   1.1  riastrad 		if (!recorded)
   1351   1.1  riastrad 			goto record;
   1352   1.1  riastrad 
   1353   1.1  riastrad 		/*
   1354   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1355   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1356   1.1  riastrad 		 * assume the event is not ready.
   1357   1.1  riastrad 		 */
   1358   1.1  riastrad 		if (!claimed || callback) {
   1359   1.1  riastrad 			revents &= ~POLLOUT;
   1360   1.1  riastrad 			break;
   1361   1.1  riastrad 		}
   1362   1.1  riastrad 
   1363   1.1  riastrad 		/*
   1364   1.1  riastrad 		 * Otherwise, find the first fence that is not
   1365   1.1  riastrad 		 * signalled, request the callback, and clear POLLOUT
   1366   1.1  riastrad 		 * from the possible ready events.  If they are all
   1367   1.1  riastrad 		 * signalled, leave POLLOUT set; we will simulate the
   1368   1.1  riastrad 		 * callback later.
   1369   1.1  riastrad 		 */
   1370   1.1  riastrad 		for (i = 0; i < shared_count; i++) {
   1371  1.10  riastrad 			KASSERT(fence == NULL);
   1372   1.7  riastrad 			fence = atomic_load_relaxed(&list->shared[i]);
   1373  1.10  riastrad 			if ((fence = dma_fence_get_rcu(fence)) == NULL)
   1374   1.1  riastrad 				goto restart;
   1375   1.1  riastrad 			if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1376   1.1  riastrad 				dma_resv_poll_cb)) {
   1377   1.1  riastrad 				dma_fence_put(fence);
   1378  1.10  riastrad 				fence = NULL;
   1379   1.1  riastrad 				revents &= ~POLLOUT;
   1380   1.1  riastrad 				callback = true;
   1381   1.1  riastrad 				break;
   1382   1.1  riastrad 			}
   1383   1.1  riastrad 			dma_fence_put(fence);
   1384  1.10  riastrad 			fence = NULL;
   1385   1.1  riastrad 		}
   1386   1.1  riastrad 	} while (0);
   1387   1.1  riastrad 
   1388   1.1  riastrad 	/* We always wait for at least the exclusive fence, so get it.  */
   1389  1.10  riastrad 	KASSERT(fence == NULL);
   1390  1.12  riastrad 	if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
   1391  1.12  riastrad 		goto restart;
   1392  1.12  riastrad 	if (fence != NULL) do {
   1393   1.1  riastrad 		/*
   1394  1.12  riastrad 		 * Test whether it is signalled.  If not, stop and
   1395  1.12  riastrad 		 * request a callback.
   1396   1.1  riastrad 		 */
   1397  1.16  riastrad 		if (dma_fence_is_signaled(fence))
   1398   1.1  riastrad 			break;
   1399   1.1  riastrad 
   1400   1.1  riastrad 		/* Put ourselves on the selq if we haven't already.  */
   1401   1.1  riastrad 		if (!recorded) {
   1402   1.1  riastrad 			dma_fence_put(fence);
   1403  1.10  riastrad 			fence = NULL;
   1404   1.1  riastrad 			goto record;
   1405   1.1  riastrad 		}
   1406   1.1  riastrad 
   1407   1.1  riastrad 		/*
   1408   1.1  riastrad 		 * If someone else claimed the callback, or we already
   1409   1.1  riastrad 		 * requested it, we're guaranteed to be notified, so
   1410   1.1  riastrad 		 * assume the event is not ready.
   1411   1.1  riastrad 		 */
   1412   1.1  riastrad 		if (!claimed || callback) {
   1413   1.1  riastrad 			revents = 0;
   1414   1.1  riastrad 			break;
   1415   1.1  riastrad 		}
   1416   1.1  riastrad 
   1417   1.1  riastrad 		/*
   1418   1.1  riastrad 		 * Otherwise, try to request the callback, and clear
   1419   1.1  riastrad 		 * all possible ready events.  If the fence has been
   1420   1.1  riastrad 		 * signalled in the interim, leave the events set; we
   1421   1.1  riastrad 		 * will simulate the callback later.
   1422   1.1  riastrad 		 */
   1423   1.1  riastrad 		if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
   1424   1.1  riastrad 			dma_resv_poll_cb)) {
   1425   1.1  riastrad 			revents = 0;
   1426   1.1  riastrad 			callback = true;
   1427   1.1  riastrad 			break;
   1428   1.1  riastrad 		}
   1429  1.16  riastrad 	} while (0);
   1430  1.16  riastrad 	if (fence != NULL) {
   1431   1.1  riastrad 		dma_fence_put(fence);
   1432  1.10  riastrad 		fence = NULL;
   1433  1.16  riastrad 	}
   1434   1.1  riastrad 
   1435   1.1  riastrad 	/* All done reading the fences.  */
   1436   1.1  riastrad 	rcu_read_unlock();
   1437   1.1  riastrad 
   1438   1.1  riastrad 	if (claimed && !callback) {
   1439   1.1  riastrad 		/*
   1440   1.1  riastrad 		 * We claimed the callback but we didn't actually
   1441   1.1  riastrad 		 * request it because a fence was signalled while we
   1442   1.1  riastrad 		 * were claiming it.  Call it ourselves now.  The
   1443   1.1  riastrad 		 * callback doesn't use the fence nor rely on holding
   1444   1.1  riastrad 		 * any of the fence locks, so this is safe.
   1445   1.1  riastrad 		 */
   1446   1.1  riastrad 		dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
   1447   1.1  riastrad 	}
   1448   1.1  riastrad 	return revents;
   1449   1.1  riastrad 
   1450   1.1  riastrad restart:
   1451  1.10  riastrad 	KASSERT(fence == NULL);
   1452   1.1  riastrad 	rcu_read_unlock();
   1453   1.1  riastrad 	goto top;
   1454   1.1  riastrad 
   1455   1.1  riastrad record:
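	/*
	 * Put the current LWP on the selq, claim the fence callback
	 * if nobody else has, and restart from the top, since the
	 * fences may have changed in the interim.
	 */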
   1456  1.10  riastrad 	KASSERT(fence == NULL);
   1457   1.1  riastrad 	rcu_read_unlock();
   1458   1.1  riastrad 	mutex_enter(&rpoll->rp_lock);
   1459   1.1  riastrad 	selrecord(curlwp, &rpoll->rp_selq);
   1460   1.1  riastrad 	if (!rpoll->rp_claimed)
   1461   1.1  riastrad 		claimed = rpoll->rp_claimed = true;
   1462   1.1  riastrad 	mutex_exit(&rpoll->rp_lock);
   1463   1.1  riastrad 	recorded = true;
   1464   1.1  riastrad 	goto top;
   1465   1.1  riastrad }
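
/*
 * Illustrative sketch, not part of the original code: how a
 * hypothetical driver poll routine might hand its events off to
 * dma_resv_do_poll, reusing the example_softc sketched above after
 * dma_resv_poll_fini.  The names here are assumptions only.
 */
#if 0	/* example only, not compiled */
static int
example_poll(struct example_softc *sc, int events)
{

	/* Report readiness of POLLIN/POLLOUT against sc's fences.  */
	return dma_resv_do_poll(&sc->sc_resv, events, &sc->sc_rpoll);
}
#endif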
   1466   1.1  riastrad 
   1467   1.1  riastrad /*
   1468   1.1  riastrad  * dma_resv_kqfilter(robj, kn, rpoll)
   1469   1.1  riastrad  *
   1470   1.1  riastrad  *	Kqueue filter for reservation objects.  Currently not
   1471   1.1  riastrad  *	implemented because the logic to implement it is nontrivial,
   1472   1.1  riastrad  *	and userland will presumably never use it, so it would be
   1473   1.1  riastrad  *	dangerous to add never-tested complex code paths to the kernel.
   1474   1.1  riastrad  */
   1475   1.1  riastrad int
   1476   1.1  riastrad dma_resv_kqfilter(const struct dma_resv *robj,
   1477   1.1  riastrad     struct knote *kn, struct dma_resv_poll *rpoll)
   1478   1.1  riastrad {
   1479   1.1  riastrad 
   1480   1.1  riastrad 	return EINVAL;
   1481   1.1  riastrad }
   1482