/*	$NetBSD: linux_dma_resv.c,v 1.8 2021/12/19 12:09:35 riastradh Exp $	*/

/*-
 * Copyright (c) 2018 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Taylor R. Campbell.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: linux_dma_resv.c,v 1.8 2021/12/19 12:09:35 riastradh Exp $");

#include <sys/param.h>
#include <sys/poll.h>
#include <sys/select.h>

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/seqlock.h>
#include <linux/ww_mutex.h>

DEFINE_WW_CLASS(reservation_ww_class __cacheline_aligned);

static struct dma_resv_list *
objlist_tryalloc(uint32_t n)
{
	struct dma_resv_list *list;

	list = kmem_alloc(offsetof(typeof(*list), shared[n]), KM_NOSLEEP);
	if (list == NULL)
		return NULL;
	list->shared_max = n;

	return list;
}

static void
objlist_free(struct dma_resv_list *list)
{
	uint32_t n = list->shared_max;

	kmem_free(list, offsetof(typeof(*list), shared[n]));
}

static void
objlist_free_cb(struct rcu_head *rcu)
{
	struct dma_resv_list *list = container_of(rcu,
	    struct dma_resv_list, rol_rcu);

	objlist_free(list);
}

static void
objlist_defer_free(struct dma_resv_list *list)
{

	call_rcu(&list->rol_rcu, objlist_free_cb);
}

/*
 * dma_resv_init(robj)
 *
 *	Initialize a reservation object.  Caller must later destroy it
 *	with dma_resv_fini.
 */
void
dma_resv_init(struct dma_resv *robj)
{

	ww_mutex_init(&robj->lock, &reservation_ww_class);
	seqcount_init(&robj->seq);
	robj->fence_excl = NULL;
	robj->fence = NULL;
	robj->robj_prealloc = NULL;
}

/*
 * dma_resv_fini(robj)
 *
 *	Destroy a reservation object, freeing any memory that had been
 *	allocated for it.  Caller must have exclusive access to it.
 */
void
dma_resv_fini(struct dma_resv *robj)
{
	unsigned i;

	if (robj->robj_prealloc)
		objlist_free(robj->robj_prealloc);
	if (robj->fence) {
		for (i = 0; i < robj->fence->shared_count; i++)
			dma_fence_put(robj->fence->shared[i]);
		objlist_free(robj->fence);
	}
	if (robj->fence_excl)
		dma_fence_put(robj->fence_excl);
	ww_mutex_destroy(&robj->lock);
}
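
/*
 * Example (illustrative sketch, not part of this file): the intended
 * lifecycle.  The container struct my_obj is hypothetical.
 *
 *	struct my_obj {
 *		struct dma_resv mo_resv;
 *	};
 *
 *	static void
 *	my_obj_ctor(struct my_obj *mo)
 *	{
 *
 *		dma_resv_init(&mo->mo_resv);
 *	}
 *
 *	static void
 *	my_obj_dtor(struct my_obj *mo)
 *	{
 *
 *		dma_resv_fini(&mo->mo_resv);
 *	}
 */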

/*
 * dma_resv_lock(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry.
 */
int
dma_resv_lock(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.
 */
void
dma_resv_lock_slow(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	ww_mutex_lock_slow(&robj->lock, ctx);
}

/*
 * dma_resv_lock_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Return 0 on success,
 *	-EALREADY if caller already holds it, -EDEADLK if a
 *	higher-priority owner holds it and the caller must back out and
 *	retry, -ERESTART/-EINTR if interrupted.
 */
int
dma_resv_lock_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_interruptible(&robj->lock, ctx);
}

/*
 * dma_resv_lock_slow_interruptible(robj, ctx)
 *
 *	Acquire a reservation object's lock.  Caller must not hold
 *	this lock or any others -- this is to be used in slow paths
 *	after dma_resv_lock or dma_resv_lock_interruptible has failed
 *	and the caller has backed out all other locks.  Return 0 on
 *	success, -ERESTART/-EINTR if interrupted.
 */
int
dma_resv_lock_slow_interruptible(struct dma_resv *robj,
    struct ww_acquire_ctx *ctx)
{

	return ww_mutex_lock_slow_interruptible(&robj->lock, ctx);
}
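
/*
 * Example (illustrative sketch): the wait/wound pattern for locking
 * two reservation objects a and b together.  The objects, the error
 * handling, and the work done under the locks are hypothetical.
 *
 *	struct ww_acquire_ctx ctx;
 *	struct dma_resv *first = a, *second = b, *tmp;
 *	int ret;
 *
 *	ww_acquire_init(&ctx, &reservation_ww_class);
 *	ret = dma_resv_lock(first, &ctx);
 *	KASSERT(ret == 0);	(the first lock cannot deadlock)
 *	while ((ret = dma_resv_lock(second, &ctx)) == -EDEADLK) {
 *		(back out and reacquire in the winner's order)
 *		dma_resv_unlock(first);
 *		dma_resv_lock_slow(second, &ctx);
 *		tmp = first; first = second; second = tmp;
 *	}
 *	KASSERT(ret == 0);
 *	... do work with both objects locked ...
 *	dma_resv_unlock(second);
 *	dma_resv_unlock(first);
 *	ww_acquire_fini(&ctx);
 */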

/*
 * dma_resv_trylock(robj)
 *
 *	Try to acquire a reservation object's lock without blocking.
 *	Return true on success, false on failure.
 */
bool
dma_resv_trylock(struct dma_resv *robj)
{

	return ww_mutex_trylock(&robj->lock);
}

/*
 * dma_resv_locking_ctx(robj)
 *
 *	Return a pointer to the ww_acquire_ctx used by the owner of
 *	the reservation object's lock, or NULL if it is not owned or
 *	was locked without a context.
 */
struct ww_acquire_ctx *
dma_resv_locking_ctx(struct dma_resv *robj)
{

	return ww_mutex_locking_ctx(&robj->lock);
}

/*
 * dma_resv_unlock(robj)
 *
 *	Release a reservation object's lock.
 */
void
dma_resv_unlock(struct dma_resv *robj)
{

	return ww_mutex_unlock(&robj->lock);
}

/*
 * dma_resv_held(robj)
 *
 *	True if robj is locked.
 */
bool
dma_resv_held(struct dma_resv *robj)
{

	return ww_mutex_is_locked(&robj->lock);
}

/*
 * dma_resv_assert_held(robj)
 *
 *	Panic if robj is not held, in DIAGNOSTIC builds.
 */
void
dma_resv_assert_held(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
}

/*
 * dma_resv_get_excl(robj)
 *
 *	Return a pointer to the exclusive fence of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_fence *
dma_resv_get_excl(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence_excl;
}

/*
 * dma_resv_get_list(robj)
 *
 *	Return a pointer to the shared fence list of the reservation
 *	object robj.
 *
 *	Caller must have robj locked.
 */
struct dma_resv_list *
dma_resv_get_list(struct dma_resv *robj)
{

	KASSERT(dma_resv_held(robj));
	return robj->fence;
}

/*
 * dma_resv_reserve_shared(robj, num_fences)
 *
 *	Reserve space in robj to add a shared fence.  To be used only
 *	once before calling dma_resv_add_shared_fence.
 *
 *	Caller must have robj locked.
 *
 *	Internally, we start with room for four entries and double if
 *	we don't have enough.  This is not guaranteed.
 */
int
dma_resv_reserve_shared(struct dma_resv *robj, unsigned int num_fences)
{
	struct dma_resv_list *list, *prealloc;
	uint32_t n, nalloc;

	KASSERT(dma_resv_held(robj));
	KASSERT(num_fences == 1);

	list = robj->fence;
	prealloc = robj->robj_prealloc;

	/* If there's an existing list, check it for space.  */
	if (list) {
		/* If there are too many already, give up.  */
		if (list->shared_count == UINT32_MAX)
			return -ENOMEM;

		/* Add one more. */
		n = list->shared_count + 1;

		/* If there's enough for one more, we're done.  */
		if (n <= list->shared_max)
			return 0;
	} else {
		/* No list already.  We need space for 1.  */
		n = 1;
	}

	/* If not, maybe there's a preallocated list ready.  */
	if (prealloc != NULL) {
		/* If there's enough room in it, stop here.  */
		if (n <= prealloc->shared_max)
			return 0;

		/* Try to double its capacity.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : 2*n;
		prealloc = objlist_tryalloc(nalloc);
		if (prealloc == NULL)
			return -ENOMEM;

		/* Swap the new preallocated list and free the old one.  */
		objlist_free(robj->robj_prealloc);
		robj->robj_prealloc = prealloc;
	} else {
		/* Start with some spare.  */
		nalloc = n > UINT32_MAX/2 ? UINT32_MAX : MAX(2*n, 4);
		prealloc = objlist_tryalloc(nalloc);
		if (prealloc == NULL)
			return -ENOMEM;
		/* Save the new preallocated list.  */
		robj->robj_prealloc = prealloc;
	}

	/* Success!  */
	return 0;
}

struct dma_resv_write_ticket {
};

/*
 * dma_resv_write_begin(robj, ticket)
 *
 *	Begin an atomic batch of writes to robj, and initialize opaque
 *	ticket for it.  The ticket must be passed to
 *	dma_resv_write_commit to commit the writes.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as an acquire operation, however.
 */
static void
dma_resv_write_begin(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_write_commit(robj, ticket)
 *
 *	Commit an atomic batch of writes to robj begun with the call to
 *	dma_resv_write_begin that returned ticket.
 *
 *	Caller must have robj locked.
 *
 *	Implies membar_producer, i.e. store-before-store barrier.  Does
 *	NOT serve as a release operation, however.
 */
static void
dma_resv_write_commit(struct dma_resv *robj,
    struct dma_resv_write_ticket *ticket)
{

	KASSERT(dma_resv_held(robj));

	write_seqcount_end(&robj->seq);
}
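
/*
 * Example (sketch of the internal pattern used below): writers
 * bracket fence updates with a ticket so that unlocked RCU readers
 * can detect a concurrent change and retry.
 *
 *	struct dma_resv_write_ticket ticket;
 *
 *	dma_resv_write_begin(robj, &ticket);
 *	atomic_store_relaxed(&robj->fence_excl, fence);
 *	dma_resv_write_commit(robj, &ticket);
 */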

struct dma_resv_read_ticket {
	unsigned version;
};

/*
 * dma_resv_read_begin(robj, ticket)
 *
 *	Begin a read section, and initialize opaque ticket for it.  The
 *	ticket must be passed to dma_resv_read_valid to check the read,
 *	and the caller must be prepared to retry reading if it fails.
 */
static void
dma_resv_read_begin(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	ticket->version = read_seqcount_begin(&robj->seq);
}

/*
 * dma_resv_read_valid(robj, ticket)
 *
 *	Test whether the read section is still valid.  Return true on
 *	success, or false if the read ticket has been invalidated and
 *	the caller must retry.
 */
static bool
dma_resv_read_valid(const struct dma_resv *robj,
    struct dma_resv_read_ticket *ticket)
{

	return !read_seqcount_retry(&robj->seq, ticket->version);
}
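
/*
 * Example (sketch of the internal pattern used below): readers take
 * a ticket, read fence pointers under RCU, and restart if a writer
 * invalidated the ticket in the meantime.
 *
 *	struct dma_resv_read_ticket ticket;
 *	struct dma_fence *fence;
 *
 * top:	rcu_read_lock();
 *	dma_resv_read_begin(robj, &ticket);
 *	fence = atomic_load_consume(&robj->fence_excl);
 *	... use fence, e.g. via dma_fence_get_rcu ...
 *	if (!dma_resv_read_valid(robj, &ticket)) {
 *		rcu_read_unlock();
 *		goto top;
 *	}
 *	rcu_read_unlock();
 */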

/*
 * dma_resv_add_excl_fence(robj, fence)
 *
 *	Empty and release all of robj's shared fences, and clear and
 *	release its exclusive fence.  If fence is nonnull, acquire a
 *	reference to it and save it as robj's exclusive fence.
 *
 *	Caller must have robj locked.
 */
void
dma_resv_add_excl_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_fence *old_fence = robj->fence_excl;
	struct dma_resv_list *old_list = robj->fence;
	uint32_t old_shared_count;
	struct dma_resv_write_ticket ticket;

	KASSERT(dma_resv_held(robj));

	/*
	 * If we are setting rather than just removing a fence, acquire
	 * a reference for ourselves.
	 */
	if (fence)
		(void)dma_fence_get(fence);

	/* If there are any shared fences, remember how many.  */
	if (old_list)
		old_shared_count = old_list->shared_count;

	/* Begin an update.  Implies membar_producer for fence.  */
	dma_resv_write_begin(robj, &ticket);

	/* Replace the fence and zero the shared count.  */
	atomic_store_relaxed(&robj->fence_excl, fence);
	if (old_list)
		old_list->shared_count = 0;

	/* Commit the update.  */
	dma_resv_write_commit(robj, &ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence)
		dma_fence_put(old_fence);

	/* Release any old shared fences.  */
	if (old_list) {
		while (old_shared_count--)
			dma_fence_put(old_list->shared[old_shared_count]);
	}
}

/*
 * dma_resv_add_shared_fence(robj, fence)
 *
 *	Acquire a reference to fence and add it to robj's shared list.
 *	If any fence was already added with the same context number,
 *	release it and replace it by this one.
 *
 *	Caller must have robj locked, and must have preceded this with
 *	a call to dma_resv_reserve_shared for each shared fence to be
 *	added.
 */
void
dma_resv_add_shared_fence(struct dma_resv *robj,
    struct dma_fence *fence)
{
	struct dma_resv_list *list = robj->fence;
	struct dma_resv_list *prealloc = robj->robj_prealloc;
	struct dma_resv_write_ticket ticket;
	struct dma_fence *replace = NULL;
	uint32_t i;

	KASSERT(dma_resv_held(robj));

	/* Acquire a reference to the fence.  */
	KASSERT(fence != NULL);
	(void)dma_fence_get(fence);

	/* Check for a preallocated replacement list.  */
	if (prealloc == NULL) {
		/*
		 * If there is no preallocated replacement list, then
		 * there must be room in the current list.
		 */
		KASSERT(list != NULL);
		KASSERT(list->shared_count < list->shared_max);

		/* Begin an update.  Implies membar_producer for fence.  */
		dma_resv_write_begin(robj, &ticket);

		/* Find a fence with the same context number.  */
		for (i = 0; i < list->shared_count; i++) {
			if (list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				atomic_store_relaxed(&list->shared[i], fence);
				break;
			}
		}

		/* If we didn't find one, add it at the end.  */
		if (i == list->shared_count) {
			atomic_store_relaxed(&list->shared[list->shared_count],
			    fence);
			atomic_store_relaxed(&list->shared_count,
			    list->shared_count + 1);
		}

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);
	} else {
		/*
		 * There is a preallocated replacement list.  There may
		 * not be a current list.  If not, treat it as a zero-
		 * length list.
		 */
		uint32_t shared_count = (list == NULL? 0 : list->shared_count);

		/* There had better be room in the preallocated list.  */
		KASSERT(shared_count < prealloc->shared_max);

		/*
		 * Copy the fences over, but replace if we find one
		 * with the same context number.
		 */
		for (i = 0; i < shared_count; i++) {
			if (replace == NULL &&
			    list->shared[i]->context == fence->context) {
				replace = list->shared[i];
				prealloc->shared[i] = fence;
			} else {
				prealloc->shared[i] = list->shared[i];
			}
		}
		prealloc->shared_count = shared_count;

		/* If we didn't find one, add it at the end.  */
		if (replace == NULL)
			prealloc->shared[prealloc->shared_count++] = fence;

		/*
		 * Now ready to replace the list.  Begin an update.
		 * Implies membar_producer for fence and prealloc.
		 */
		dma_resv_write_begin(robj, &ticket);

		/* Replace the list.  */
		atomic_store_relaxed(&robj->fence, prealloc);
		robj->robj_prealloc = NULL;

		/* Commit the update.  */
		dma_resv_write_commit(robj, &ticket);

		/*
		 * If there is an old list, free it when convenient.
		 * (We are not in a position at this point to sleep
		 * waiting for activity on all CPUs.)
		 */
		if (list)
			objlist_defer_free(list);
	}

	/* Release a fence if we replaced it.  */
	if (replace)
		dma_fence_put(replace);
}
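
/*
 * Example (illustrative sketch): publishing a fence for a new read
 * job.  The object, fence, context, and error path are hypothetical.
 *
 *	ret = dma_resv_lock(&obj->resv, &ctx);
 *	if (ret)
 *		goto out;
 *	ret = dma_resv_reserve_shared(&obj->resv, 1);
 *	if (ret)
 *		goto unlock;
 *	dma_resv_add_shared_fence(&obj->resv, fence);
 * unlock:
 *	dma_resv_unlock(&obj->resv);
 *
 * A writer would instead call dma_resv_add_excl_fence, which needs
 * no reservation step and supersedes all shared fences.
 */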

/*
 * dma_resv_get_excl_rcu(robj)
 *
 *	Return a reference to the exclusive fence of robj, if any, or
 *	NULL if there is none.
 *
 *	Note: Caller need not call this from an RCU read section.
 */
struct dma_fence *
dma_resv_get_excl_rcu(const struct dma_resv *robj)
{
	struct dma_fence *fence;

	rcu_read_lock();
	fence = dma_fence_get_rcu_safe(&robj->fence_excl);
	rcu_read_unlock();

	return fence;
}

/*
 * dma_resv_get_fences_rcu(robj, fencep, nsharedp, sharedp)
 *
 *	Get a snapshot of the exclusive fence, if any, and of all the
 *	shared fences of robj, acquiring a reference to each.  Return
 *	0 on success or -ENOMEM on allocation failure.  On success the
 *	caller must eventually release the fences and kfree the shared
 *	array.
 */
int
dma_resv_get_fences_rcu(const struct dma_resv *robj,
    struct dma_fence **fencep, unsigned *nsharedp, struct dma_fence ***sharedp)
{
	const struct dma_resv_list *list;
	struct dma_fence *fence;
	struct dma_fence **shared = NULL;
	unsigned shared_alloc, shared_count, i;
	struct dma_resv_read_ticket ticket;

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/*
	 * If there is a shared list, grab it.  The atomic_load_consume
	 * here pairs with the membar_producer in dma_resv_write_begin
	 * to ensure the content of robj->fence is initialized before
	 * we witness the pointer.
	 */
	if ((list = atomic_load_consume(&robj->fence)) != NULL) {

		/* Check whether we have a buffer.  */
		if (shared == NULL) {
			/*
			 * We don't have a buffer yet.  Try to allocate
			 * one without waiting.
			 */
			shared_alloc = list->shared_max;
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_NOWAIT);
			if (shared == NULL) {
				/*
				 * Couldn't do it immediately.  Back
				 * out of RCU and allocate one with
				 * waiting.
				 */
				rcu_read_unlock();
				shared = kcalloc(shared_alloc,
				    sizeof(shared[0]), GFP_KERNEL);
				if (shared == NULL)
					return -ENOMEM;
				goto top;
			}
		} else if (shared_alloc < list->shared_max) {
			/*
			 * We have a buffer but it's too small.  We're
			 * already racing in this case, so just back
			 * out and wait to allocate a bigger one.
			 */
			shared_alloc = list->shared_max;
			rcu_read_unlock();
			kfree(shared);
			shared = kcalloc(shared_alloc, sizeof(shared[0]),
			    GFP_KERNEL);
			if (shared == NULL)
				return -ENOMEM;
			goto top;
		}

		/*
		 * We got a buffer large enough.  Copy into the buffer
		 * and record the number of elements.  Could safely use
		 * memcpy here, because even if we race with a writer
		 * it'll invalidate the read ticket and we'll start
		 * over, but atomic_load in a loop will pacify kcsan.
		 */
		shared_count = atomic_load_relaxed(&list->shared_count);
		for (i = 0; i < shared_count; i++)
			shared[i] = atomic_load_relaxed(&list->shared[i]);
	} else {
		/* No shared list: shared count is zero.  */
		shared_count = 0;
	}

	/* If there is an exclusive fence, grab it.  */
	fence = atomic_load_consume(&robj->fence_excl);

	/*
	 * We are done reading from robj and list.  Validate our
	 * parking ticket.  If it's invalid, do not pass go and do not
	 * collect $200.
	 */
	if (!dma_resv_read_valid(robj, &ticket))
		goto restart;

	/*
	 * Try to get a reference to the exclusive fence, if there is
	 * one.  If we can't, start over.
	 */
	if (fence) {
		if ((fence = dma_fence_get_rcu(fence)) == NULL)
			goto restart;
	}

	/*
	 * Try to get a reference to all of the shared fences.
	 */
	for (i = 0; i < shared_count; i++) {
		if (dma_fence_get_rcu(atomic_load_relaxed(&shared[i])) == NULL)
			goto put_restart;
	}

	/* Success!  */
	rcu_read_unlock();
	*fencep = fence;
	*nsharedp = shared_count;
	*sharedp = shared;
	return 0;

put_restart:
	/* Back out.  */
	while (i --> 0) {
		dma_fence_put(shared[i]);
		shared[i] = NULL; /* paranoia */
	}
	if (fence) {
		dma_fence_put(fence);
		fence = NULL;	/* paranoia */
	}

restart:
	rcu_read_unlock();
	goto top;
}
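
/*
 * Example (sketch): on success the caller owns a reference to every
 * returned fence and owns the shared array, and must release them.
 *
 *	struct dma_fence *excl, **shared;
 *	unsigned nshared, i;
 *	int ret;
 *
 *	ret = dma_resv_get_fences_rcu(robj, &excl, &nshared, &shared);
 *	if (ret)
 *		return ret;
 *	... inspect the fences ...
 *	for (i = 0; i < nshared; i++)
 *		dma_fence_put(shared[i]);
 *	kfree(shared);
 *	if (excl)
 *		dma_fence_put(excl);
 */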

/*
 * dma_resv_copy_fences(dst, src)
 *
 *	Copy the exclusive fence and all the shared fences from src to
 *	dst.
 *
 *	Caller must have dst locked.
 */
int
dma_resv_copy_fences(struct dma_resv *dst_robj,
    const struct dma_resv *src_robj)
{
	const struct dma_resv_list *src_list;
	struct dma_resv_list *dst_list = NULL;
	struct dma_resv_list *old_list;
	struct dma_fence *fence = NULL;
	struct dma_fence *old_fence;
	uint32_t shared_count, i;
	struct dma_resv_read_ticket read_ticket;
	struct dma_resv_write_ticket write_ticket;

	KASSERT(dma_resv_held(dst_robj));

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(src_robj, &read_ticket);

	/* Get the shared list.  */
	if ((src_list = atomic_load_consume(&src_robj->fence)) != NULL) {

		/* Find out how long it is.  */
		shared_count = atomic_load_relaxed(&src_list->shared_count);

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(src_robj, &read_ticket))
			goto restart;

		/* Allocate a new list.  */
		dst_list = objlist_tryalloc(shared_count);
		if (dst_list == NULL) {
			rcu_read_unlock();
			return -ENOMEM;
		}

		/* Copy over all fences that are not yet signalled.  */
		dst_list->shared_count = 0;
		for (i = 0; i < shared_count; i++) {
			fence = atomic_load_relaxed(&src_list->shared[i]);
			if ((fence = dma_fence_get_rcu(fence)) == NULL)
				goto restart;
			if (dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				fence = NULL;
				continue;
			}
			dst_list->shared[dst_list->shared_count++] = fence;
			fence = NULL;
		}
	}

	/* Get the exclusive fence.  */
	if ((fence = atomic_load_consume(&src_robj->fence_excl)) != NULL) {

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!dma_resv_read_valid(src_robj, &read_ticket)) {
			fence = NULL;
			goto restart;
		}

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it.
		 */
		if (!dma_fence_get_rcu(fence)) {
			fence = NULL;
			goto restart;
		}
	}

	/* All done with src; exit the RCU read section.  */
	rcu_read_unlock();

	/*
	 * We now have a snapshot of the shared and exclusive fences of
	 * src_robj and we have acquired references to them so they
	 * won't go away.  Transfer them over to dst_robj, releasing
	 * references to any that were there.
	 */

	/* Get the old shared and exclusive fences, if any.  */
	old_list = dst_robj->fence;
	old_fence = dst_robj->fence_excl;

	/*
	 * Begin an update.  Implies membar_producer for dst_list and
	 * fence.
	 */
	dma_resv_write_begin(dst_robj, &write_ticket);

	/* Replace the fences.  */
	atomic_store_relaxed(&dst_robj->fence, dst_list);
	atomic_store_relaxed(&dst_robj->fence_excl, fence);

	/* Commit the update.  */
	dma_resv_write_commit(dst_robj, &write_ticket);

	/* Release the old exclusive fence, if any.  */
	if (old_fence)
		dma_fence_put(old_fence);

	/* Release any old shared fences.  */
	if (old_list) {
		for (i = old_list->shared_count; i --> 0;)
			dma_fence_put(old_list->shared[i]);
	}

	/* Success!  */
	return 0;

restart:
	rcu_read_unlock();
	if (dst_list) {
		for (i = dst_list->shared_count; i --> 0;) {
			dma_fence_put(dst_list->shared[i]);
			dst_list->shared[i] = NULL;
		}
		objlist_free(dst_list);
		dst_list = NULL;
	}
	if (fence) {
		dma_fence_put(fence);
		fence = NULL;
	}
	goto top;
}
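
/*
 * Example (sketch): cloning fences when duplicating an object.  Only
 * the destination must be locked; the source is read under RCU.  The
 * objects and error path are hypothetical.
 *
 *	ret = dma_resv_lock(&dst->resv, &ctx);
 *	if (ret)
 *		goto out;
 *	ret = dma_resv_copy_fences(&dst->resv, &src->resv);
 *	dma_resv_unlock(&dst->resv);
 */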

/*
 * dma_resv_test_signaled_rcu(robj, shared)
 *
 *	If shared is true, test whether all of the shared fences are
 *	signalled, or if there are none, test whether the exclusive
 *	fence is signalled.  If shared is false, test only whether the
 *	exclusive fence is signalled.
 *
 *	XXX Why, when shared is true, should the exclusive fence be
 *	tested only if there are no shared fences?  This makes no
 *	sense.
 */
bool
dma_resv_test_signaled_rcu(const struct dma_resv *robj,
    bool shared)
{
	struct dma_resv_read_ticket ticket;
	struct dma_resv_list *list;
	struct dma_fence *fence;
	uint32_t i, shared_count;
	bool signaled = true;

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If shared is requested and there is a shared list, test it.  */
	if (!shared)
		goto excl;
	if ((list = atomic_load_consume(&robj->fence)) != NULL) {

		/* Find out how long it is.  */
		shared_count = atomic_load_relaxed(&list->shared_count);

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop if we find any that is not
		 * signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = atomic_load_relaxed(&list->shared[i]);
			fence = dma_fence_get_rcu(fence);
			if (fence == NULL)
				goto restart;
			signaled &= dma_fence_is_signaled(fence);
			dma_fence_put(fence);
			if (!signaled)
				goto out;
		}
	}

excl:
	/* If there is an exclusive fence, test it.  */
	if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) {

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it to test whether it is signalled.
		 */
		if ((fence = dma_fence_get_rcu(fence)) == NULL)
			goto restart;
		signaled &= dma_fence_is_signaled(fence);
		dma_fence_put(fence);
		if (!signaled)
			goto out;
	}

out:	rcu_read_unlock();
	return signaled;

restart:
	rcu_read_unlock();
	goto top;
}

/*
 * dma_resv_wait_timeout_rcu(robj, shared, intr, timeout)
 *
 *	If shared is true, wait for all of the shared fences to be
 *	signalled, or if there are none, wait for the exclusive fence
 *	to be signalled.  If shared is false, wait only for the
 *	exclusive fence to be signalled.  If timeout is zero, don't
 *	wait, only test.
 *
 *	XXX Why, when shared is true, should the exclusive fence be
 *	waited for only if there are no shared fences?  This makes no
 *	sense.
 */
long
dma_resv_wait_timeout_rcu(const struct dma_resv *robj,
    bool shared, bool intr, unsigned long timeout)
{
	struct dma_resv_read_ticket ticket;
	struct dma_resv_list *list;
	struct dma_fence *fence;
	uint32_t i, shared_count;
	long ret;

	if (timeout == 0)
		return dma_resv_test_signaled_rcu(robj, shared);

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If shared is requested and there is a shared list, wait on it.  */
	if (!shared)
		goto excl;
	if ((list = atomic_load_consume(&robj->fence)) != NULL) {

		/* Find out how long it is.  */
		shared_count = atomic_load_relaxed(&list->shared_count);

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop and wait if we find any that
		 * is not signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = atomic_load_relaxed(&list->shared[i]);
			fence = dma_fence_get_rcu(fence);
			if (fence == NULL)
				goto restart;
			if (!dma_fence_is_signaled(fence))
				goto wait;
			dma_fence_put(fence);
		}
	}

excl:
	/* If there is an exclusive fence, test it.  */
	if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) {

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it to test whether it is signalled.  If
		 * not, wait for it.
		 */
		if ((fence = dma_fence_get_rcu(fence)) == NULL)
			goto restart;
		if (!dma_fence_is_signaled(fence))
			goto wait;
		dma_fence_put(fence);
	}

	/* Success!  Return the number of ticks left.  */
	rcu_read_unlock();
	return timeout;

restart:
	rcu_read_unlock();
	goto top;

wait:
	/*
	 * Exit the RCU read section, wait for it, and release the
	 * fence when we're done.  If we time out or fail, bail.
	 * Otherwise, go back to the top.
	 */
	KASSERT(fence != NULL);
	rcu_read_unlock();
	ret = dma_fence_wait_timeout(fence, intr, timeout);
	dma_fence_put(fence);
	if (ret <= 0)
		return ret;
	KASSERT(ret <= timeout);
	timeout = ret;
	goto top;
}
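
/*
 * Example (sketch): interruptibly wait up to one second (hz ticks)
 * for all shared and exclusive fences to be signalled.  Mapping the
 * zero (timed out) return to an error code is the caller's choice.
 *
 *	long ret;
 *
 *	ret = dma_resv_wait_timeout_rcu(robj, true, true, hz);
 *	if (ret < 0)
 *		return ret;	(interrupted or other failure)
 *	if (ret == 0)
 *		return -ETIMEDOUT;
 *	(otherwise, signalled with ret ticks to spare)
 */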

/*
 * dma_resv_poll_init(rpoll)
 *
 *	Initialize reservation poll state.
 */
void
dma_resv_poll_init(struct dma_resv_poll *rpoll)
{

	mutex_init(&rpoll->rp_lock, MUTEX_DEFAULT, IPL_VM);
	selinit(&rpoll->rp_selq);
	rpoll->rp_claimed = 0;
}

/*
 * dma_resv_poll_fini(rpoll)
 *
 *	Release any resource associated with reservation poll state.
 */
void
dma_resv_poll_fini(struct dma_resv_poll *rpoll)
{

	KASSERT(rpoll->rp_claimed == 0);
	seldestroy(&rpoll->rp_selq);
	mutex_destroy(&rpoll->rp_lock);
}

/*
 * dma_resv_poll_cb(fence, fcb)
 *
 *	Callback to notify a reservation poll that a fence has
 *	completed.  Notify any waiters and allow the next poller to
 *	claim the callback.
 *
 *	If one thread is waiting for the exclusive fence only, and we
 *	spuriously notify them about a shared fence, tough.
 */
static void
dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
{
	struct dma_resv_poll *rpoll = container_of(fcb,
	    struct dma_resv_poll, rp_fcb);

	mutex_enter(&rpoll->rp_lock);
	selnotify(&rpoll->rp_selq, 0, NOTE_SUBMIT);
	rpoll->rp_claimed = 0;
	mutex_exit(&rpoll->rp_lock);
}

/*
 * dma_resv_do_poll(robj, events, rpoll)
 *
 *	Poll for reservation object events using the reservation poll
 *	state in rpoll:
 *
 *	- POLLOUT	wait for all fences shared and exclusive
 *	- POLLIN	wait for the exclusive fence
 *
 *	Return the subset of events in events that are ready.  If any
 *	are requested but not ready, arrange to be notified with
 *	selnotify when they are.
 */
int
dma_resv_do_poll(const struct dma_resv *robj, int events,
    struct dma_resv_poll *rpoll)
{
	struct dma_resv_read_ticket ticket;
	struct dma_resv_list *list;
	struct dma_fence *fence;
	uint32_t i, shared_count;
	int revents;
	bool recorded = false;	/* curlwp is on the selq */
	bool claimed = false;	/* we claimed the callback */
	bool callback = false;	/* we requested a callback */

	/*
	 * Start with the maximal set of events that could be ready.
	 * We will eliminate the events that are definitely not ready
	 * as we go at the same time as we add callbacks to notify us
	 * that they may be ready.
	 */
	revents = events & (POLLIN|POLLOUT);
	if (revents == 0)
		return 0;

top:
	/* Enter an RCU read section and get a read ticket.  */
	rcu_read_lock();
	dma_resv_read_begin(robj, &ticket);

	/* If we want to wait for all fences, get the shared list.  */
	if (!(events & POLLOUT))
		goto excl;
	if ((list = atomic_load_consume(&robj->fence)) != NULL) do {

		/* Find out how long it is.  */
		shared_count = atomic_load_relaxed(&list->shared_count);

		/*
		 * Make sure we saw a consistent snapshot of the list
		 * pointer and length.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * For each fence, if it is going away, restart.
		 * Otherwise, acquire a reference to it to test whether
		 * it is signalled.  Stop and request a callback if we
		 * find any that is not signalled.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = atomic_load_relaxed(&list->shared[i]);
			fence = dma_fence_get_rcu(fence);
			if (fence == NULL)
				goto restart;
			if (!dma_fence_is_signaled(fence)) {
				dma_fence_put(fence);
				break;
			}
			dma_fence_put(fence);
		}

		/* If all shared fences have been signalled, move on.  */
		if (i == shared_count)
			break;

		/* Put ourselves on the selq if we haven't already.  */
		if (!recorded)
			goto record;

		/*
		 * If someone else claimed the callback, or we already
		 * requested it, we're guaranteed to be notified, so
		 * assume the event is not ready.
		 */
		if (!claimed || callback) {
			revents &= ~POLLOUT;
			break;
		}

		/*
		 * Otherwise, find the first fence that is not
		 * signalled, request the callback, and clear POLLOUT
		 * from the possible ready events.  If they are all
		 * signalled, leave POLLOUT set; we will simulate the
		 * callback later.
		 */
		for (i = 0; i < shared_count; i++) {
			fence = atomic_load_relaxed(&list->shared[i]);
			fence = dma_fence_get_rcu(fence);
			if (fence == NULL)
				goto restart;
			if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
				dma_resv_poll_cb)) {
				dma_fence_put(fence);
				revents &= ~POLLOUT;
				callback = true;
				break;
			}
			dma_fence_put(fence);
		}
	} while (0);

excl:
	/* We always wait for at least the exclusive fence, so get it.  */
	if ((fence = atomic_load_consume(&robj->fence_excl)) != NULL) do {

		/*
		 * Make sure we saw a consistent snapshot of the fence.
		 *
		 * XXX I'm not actually sure this is necessary since
		 * pointer writes are supposed to be atomic.
		 */
		if (!dma_resv_read_valid(robj, &ticket))
			goto restart;

		/*
		 * If it is going away, restart.  Otherwise, acquire a
		 * reference to it to test whether it is signalled.  If
		 * not, stop and request a callback.
		 */
		if ((fence = dma_fence_get_rcu(fence)) == NULL)
			goto restart;
		if (dma_fence_is_signaled(fence)) {
			dma_fence_put(fence);
			break;
		}

		/* Put ourselves on the selq if we haven't already.  */
		if (!recorded) {
			dma_fence_put(fence);
			goto record;
		}

		/*
		 * If someone else claimed the callback, or we already
		 * requested it, we're guaranteed to be notified, so
		 * assume the event is not ready.
		 */
		if (!claimed || callback) {
			dma_fence_put(fence);
			revents = 0;
			break;
		}

		/*
		 * Otherwise, try to request the callback, and clear
		 * all possible ready events.  If the fence has been
		 * signalled in the interim, leave the events set; we
		 * will simulate the callback later.
		 */
		if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
			dma_resv_poll_cb)) {
			dma_fence_put(fence);
			revents = 0;
			callback = true;
			break;
		}
		dma_fence_put(fence);
	} while (0);

	/* All done reading the fences.  */
	rcu_read_unlock();

	if (claimed && !callback) {
		/*
		 * We claimed the callback but we didn't actually
		 * request it because a fence was signalled while we
		 * were claiming it.  Call it ourselves now.  The
		 * callback doesn't use the fence nor rely on holding
		 * any of the fence locks, so this is safe.
		 */
		dma_resv_poll_cb(NULL, &rpoll->rp_fcb);
	}
	return revents;

restart:
	rcu_read_unlock();
	goto top;

record:
	rcu_read_unlock();
	mutex_enter(&rpoll->rp_lock);
	selrecord(curlwp, &rpoll->rp_selq);
	if (!rpoll->rp_claimed)
		claimed = rpoll->rp_claimed = true;
	mutex_exit(&rpoll->rp_lock);
	recorded = true;
	goto top;
}

/*
 * dma_resv_kqfilter(robj, kn, rpoll)
 *
 *	Kqueue filter for reservation objects.  Currently not
 *	implemented because the logic to implement it is nontrivial,
 *	and userland will presumably never use it, so it would be
 *	dangerous to add never-tested complex code paths to the kernel.
 */
int
dma_resv_kqfilter(const struct dma_resv *robj,
    struct knote *kn, struct dma_resv_poll *rpoll)
{

	return EINVAL;
}