Lines Matching refs:fence
39 #include <linux/dma-fence.h>
107 robj->fence = NULL;
126 if (robj->fence) {
127 for (i = 0; i < robj->fence->shared_count; i++) {
128 dma_fence_put(robj->fence->shared[i]);
129 robj->fence->shared[i] = NULL; /* paranoia */
131 objlist_free(robj->fence);
132 robj->fence = NULL; /* paranoia */
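The lines above come from the reservation object's setup and teardown paths: initialization clears the fence pointers, and teardown drops every shared fence before freeing the list. A minimal lifecycle sketch, assuming the usual dma_resv_init()/dma_resv_fini() entry points from the <linux/dma-resv.h> compatibility header; struct my_buffer and its functions are purely illustrative:

#include <linux/dma-resv.h>

struct my_buffer {
        struct dma_resv resv;   /* fences guarding this buffer */
        /* ... backing storage, driver state, ... */
};

static void
my_buffer_setup(struct my_buffer *buf)
{
        /* Start with no exclusive fence and an empty shared list. */
        dma_resv_init(&buf->resv);
}

static void
my_buffer_teardown(struct my_buffer *buf)
{
        /*
         * Drops any remaining exclusive fence and each shared fence,
         * as the matched teardown lines above do one entry at a time.
         */
        dma_resv_fini(&buf->resv);
}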
284 * Return a pointer to the exclusive fence of the reservation
300 * Return a pointer to the shared fence list of the reservation
310 return robj->fence;
332 list = robj->fence;
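These appear to be the doc comments and bodies of dma_resv_get_excl() and dma_resv_get_list(), which return borrowed pointers without taking references, so the caller must hold the reservation lock for as long as it uses them. A hedged sketch of a locked reader, assuming the usual dma_resv_lock()/dma_resv_unlock() wrappers (a NULL ww_acquire_ctx is passed and error handling is elided for brevity):

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static void
inspect_fences_locked(struct dma_resv *robj)
{
        struct dma_fence *excl;
        struct dma_resv_list *list;
        unsigned i;

        if (dma_resv_lock(robj, NULL) != 0)
                return;         /* error handling elided */

        /* Borrowed pointers: valid only while robj stays locked. */
        excl = dma_resv_get_excl(robj);
        list = dma_resv_get_list(robj);

        if (excl != NULL)
                (void)dma_fence_is_signaled(excl);
        /*
         * Under the lock the entries are stable; Linux proper would
         * read them with rcu_dereference_protected.
         */
        for (i = 0; list != NULL && i < list->shared_count; i++)
                (void)dma_fence_is_signaled(list->shared[i]);

        dma_resv_unlock(robj);
}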
462 * and length of the shared fence list of robj and return true, or
468 * shared list at the moment. Does not take any fence references.
483 list = atomic_load_consume(&robj->fence);
507 * Set *fencep to the exclusive fence of robj and return true, or
510 * (b) the fence is scheduled to be destroyed after this RCU grace
515 * It may be NULL, if there is no exclusive fence at the moment.
523 struct dma_fence *fence;
526 * Get the candidate fence pointer. The atomic_load_consume
529 fence = atomic_load_consume(&robj->fence_excl);
533 * have previously loaded the shared fence list and should
541 	 * If the fence is already scheduled to go away after this RCU
545 if (fence != NULL &&
546 (fence = dma_fence_get_rcu(fence)) == NULL)
550 *fencep = fence;
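The fragment above documents the internal lock-free reader for the exclusive fence: load the pointer, try to take a reference with dma_fence_get_rcu(), and if the fence is already on its way out treat the snapshot as stale and retry. The file does this with its own read-ticket machinery; the sketch below shows the equivalent classic RCU loop using only generic primitives, a simplified version of what dma_fence_get_rcu_safe() does (the real helper additionally re-checks that the pointer has not changed after the reference is taken). The field name fence_excl is taken from the matched lines:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/rcupdate.h>

static struct dma_fence *
grab_excl_fence_unlocked(struct dma_resv *robj)
{
        struct dma_fence *fence;

        rcu_read_lock();
        for (;;) {
                fence = rcu_dereference(robj->fence_excl);
                if (fence == NULL)
                        break;          /* no exclusive fence right now */
                /*
                 * dma_fence_get_rcu() fails if the last reference has
                 * already been dropped; the publisher will replace the
                 * pointer soon, so reload and try again.
                 */
                fence = dma_fence_get_rcu(fence);
                if (fence != NULL)
                        break;          /* got our own reference */
        }
        rcu_read_unlock();

        return fence;   /* caller releases with dma_fence_put() */
}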
558 * dma_resv_add_excl_fence(robj, fence)
561 * release its exclusive fence. If fence is nonnull, acquire a
562 * reference to it and save it as robj's exclusive fence.
568 struct dma_fence *fence)
571 struct dma_resv_list *old_list = robj->fence;
578 * If we are setting rather than just removing a fence, acquire
581 if (fence)
582 (void)dma_fence_get(fence);
588 /* Begin an update. Implies membar_producer for fence. */
591 /* Replace the fence and zero the shared count. */
592 atomic_store_relaxed(&robj->fence_excl, fence);
599 /* Release the old exclusive fence, if any. */
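dma_resv_add_excl_fence() is the writer side: under the reservation lock it takes its own reference to the new fence, publishes it, zeroes the shared count, and releases the old exclusive fence. A hedged usage sketch; publish_write_fence() and job_fence are illustrative names, not part of this file:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int
publish_write_fence(struct dma_resv *robj, struct dma_fence *job_fence)
{
        int ret;

        ret = dma_resv_lock(robj, NULL);
        if (ret != 0)
                return ret;

        /*
         * Acquires a reference to job_fence, zeroes the shared count,
         * and drops the previously published exclusive fence.
         */
        dma_resv_add_excl_fence(robj, job_fence);

        dma_resv_unlock(robj);
        return 0;
}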
616 * dma_resv_add_shared_fence(robj, fence)
618 * Acquire a reference to fence and add it to robj's shared list.
619 * If any fence was already added with the same context number,
623 * call to dma_resv_reserve_shared for each shared fence
628 struct dma_fence *fence)
630 struct dma_resv_list *list = robj->fence;
638 /* Acquire a reference to the fence. */
639 KASSERT(fence != NULL);
640 (void)dma_fence_get(fence);
651 /* Begin an update. Implies membar_producer for fence. */
654 /* Find a fence with the same context number. */
656 fence->context) {
658 atomic_store_relaxed(&list->shared[i], fence);
666 fence);
690 list->shared[i]->context == fence->context) {
692 prealloc->shared[i] = fence;
702 prealloc->shared[prealloc->shared_count++] = fence;
707 * Implies membar_producer for fence and prealloc.
712 atomic_store_relaxed(&robj->fence, prealloc);
727 /* Release a fence if we replaced it. */
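dma_resv_add_shared_fence() requires the lock and a previously reserved slot, and it replaces any shared fence already present with the same context number. A usage sketch; the count argument to dma_resv_reserve_shared() is an assumption, since older variants of this API take no count:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static int
publish_read_fence(struct dma_resv *robj, struct dma_fence *job_fence)
{
        int ret;

        ret = dma_resv_lock(robj, NULL);
        if (ret != 0)
                return ret;

        /* Make room for one more shared fence; may allocate. */
        ret = dma_resv_reserve_shared(robj, 1);
        if (ret != 0) {
                dma_resv_unlock(robj);
                return ret;
        }

        /*
         * Takes a reference to job_fence; replaces any fence already
         * on the shared list with the same context number.
         */
        dma_resv_add_shared_fence(robj, job_fence);

        dma_resv_unlock(robj);
        return 0;
}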
742 struct dma_fence *fence;
745 fence = dma_fence_get_rcu_safe(&robj->fence_excl);
748 return fence;
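These lines are the lock-free getter for the exclusive fence (dma_resv_get_excl_rcu() in this vintage of the API): it returns its own reference, or NULL, via dma_fence_get_rcu_safe(), so no reservation lock is needed. A short hedged consumer:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>

static bool
last_write_signaled(struct dma_resv *robj)
{
        struct dma_fence *fence;
        bool done = true;

        fence = dma_resv_get_excl_rcu(robj);    /* own reference or NULL */
        if (fence != NULL) {
                done = dma_fence_is_signaled(fence);
                dma_fence_put(fence);
        }
        return done;
}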
757 * If fencep is null, then add the exclusive fence, if any, at the
769 struct dma_fence *fence = NULL;
774 top: KASSERT(fence == NULL);
788 * specified fencep or if there is no exclusive fence,
849 /* If there is an exclusive fence, grab it. */
850 KASSERT(fence == NULL);
851 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
868 *fencep = fence;
869 } else if (fence) {
871 shared[shared_count++] = fence;
874 shared[0] = fence;
888 if (fence) {
889 dma_fence_put(fence);
890 fence = NULL;
894 KASSERT(fence == NULL);
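This is the snapshot routine that hands back the exclusive fence plus an allocated array of referenced shared fences (or, when fencep is NULL, folds the exclusive fence into the array). A sketch of the caller's cleanup obligations, assuming the Linux-style dma_resv_get_fences_rcu() signature and kfree() for the array:

#include <linux/dma-fence.h>
#include <linux/dma-resv.h>
#include <linux/slab.h>

static int
snapshot_and_release(struct dma_resv *robj)
{
        struct dma_fence *excl = NULL;
        struct dma_fence **shared = NULL;
        unsigned shared_count = 0, i;
        int ret;

        ret = dma_resv_get_fences_rcu(robj, &excl, &shared_count, &shared);
        if (ret != 0)
                return ret;

        /* ... use the snapshot: wait on, test, or hand off the fences ... */

        for (i = 0; i < shared_count; i++)
                dma_fence_put(shared[i]);
        kfree(shared);
        if (excl != NULL)
                dma_fence_put(excl);
        return 0;
}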
902 * Copy the exclusive fence and all the shared fences from src to
914 struct dma_fence *fence = NULL;
922 top: KASSERT(fence == NULL);
950 KASSERT(fence == NULL);
951 fence = atomic_load_relaxed(&src_list->shared[i]);
952 if ((fence = dma_fence_get_rcu(fence)) == NULL)
954 if (dma_fence_is_signaled(fence)) {
955 dma_fence_put(fence);
956 fence = NULL;
959 dst_list->shared[dst_list->shared_count++] = fence;
960 fence = NULL;
968 /* Get the exclusive fence. */
969 KASSERT(fence == NULL);
970 if (!dma_resv_get_excl_reader(src_robj, &fence, &read_ticket))
984 old_list = dst_robj->fence;
989 * fence.
994 atomic_store_relaxed(&dst_robj->fence, dst_list);
995 atomic_store_relaxed(&dst_robj->fence_excl, fence);
1000 /* Release the old exclusive fence, if any. */
1020 KASSERT(fence == NULL);
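dma_resv_copy_fences() replaces dst's fences with referenced copies of src's; the matched lines show that already-signalled shared fences are skipped and the old destination fences are released afterwards. A hedged sketch, assuming the caller must hold dst's reservation lock:

#include <linux/dma-resv.h>

static int
clone_reservation(struct dma_resv *dst, struct dma_resv *src)
{
        int ret;

        ret = dma_resv_lock(dst, NULL);
        if (ret != 0)
                return ret;

        /* Replaces dst's fences with referenced copies of src's. */
        ret = dma_resv_copy_fences(dst, src);

        dma_resv_unlock(dst);
        return ret;
}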
1037 * fence is signalled. If shared is false, test only whether the
1038 * exclusive fence is signalled.
1040 * XXX Why does this _not_ test the exclusive fence if shared is
1049 struct dma_fence *fence = NULL;
1053 top: KASSERT(fence == NULL);
1070 * For each fence, if it is going away, restart.
1076 KASSERT(fence == NULL);
1077 fence = atomic_load_relaxed(&list->shared[i]);
1078 if ((fence = dma_fence_get_rcu(fence)) == NULL)
1080 signaled &= dma_fence_is_signaled(fence);
1081 dma_fence_put(fence);
1082 fence = NULL;
1094 /* If there is an exclusive fence, test it. */
1095 KASSERT(fence == NULL);
1096 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
1098 if (fence != NULL) {
1100 signaled &= dma_fence_is_signaled(fence);
1101 dma_fence_put(fence);
1102 fence = NULL;
1107 out: KASSERT(fence == NULL);
1112 KASSERT(fence == NULL);
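dma_resv_test_signaled_rcu() is the non-blocking check: with shared true it tests the shared fences, falling back to the exclusive fence only when there are none (the XXX comment above questions that fallback), and with shared false it tests only the exclusive fence. It takes no locks and leaves no references behind. A minimal wrapper:

#include <linux/dma-resv.h>

static bool
buffer_is_idle(struct dma_resv *robj)
{
        /*
         * true: consider the shared fences; false: only the exclusive
         * fence.  Returns without holding any locks or references.
         */
        return dma_resv_test_signaled_rcu(robj, true);
}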
1121 * signalled, or if there are none, wait for the exclusive fence
1123 * exclusive fence to be signalled. If timeout is zero, don't
1126 * XXX Why does this _not_ wait for the exclusive fence if shared
1136 struct dma_fence *fence = NULL;
1143 top: KASSERT(fence == NULL);
1160 * For each fence, if it is going away, restart.
1166 KASSERT(fence == NULL);
1167 fence = atomic_load_relaxed(&list->shared[i]);
1168 if ((fence = dma_fence_get_rcu(fence)) == NULL)
1170 if (!dma_fence_is_signaled(fence))
1172 dma_fence_put(fence);
1173 fence = NULL;
1183 /* If there is an exclusive fence, test it. */
1184 KASSERT(fence == NULL);
1185 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
1187 if (fence != NULL) {
1189 if (!dma_fence_is_signaled(fence))
1191 dma_fence_put(fence);
1192 fence = NULL;
1197 KASSERT(fence == NULL);
1201 KASSERT(fence == NULL);
1208 * fence when we're done. If we time out or fail, bail.
1211 KASSERT(fence != NULL);
1213 ret = dma_fence_wait_timeout(fence, intr, timeout);
1214 dma_fence_put(fence);
1215 fence = NULL;
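dma_resv_wait_timeout_rcu() is the blocking counterpart, with the usual Linux return convention: positive means time remaining, zero means the timeout expired, negative is an error (for example an interrupted wait when intr is true). A sketch; the timeout is left to the caller and is in scheduler ticks/jiffies:

#include <linux/dma-resv.h>
#include <linux/errno.h>

static int
wait_for_buffer(struct dma_resv *robj, unsigned long timeout)
{
        long ret;

        /* wait_all = true, interruptible = true */
        ret = dma_resv_wait_timeout_rcu(robj, true, true, timeout);
        if (ret < 0)
                return (int)ret;        /* error, e.g. interrupted */
        if (ret == 0)
                return -ETIMEDOUT;      /* timed out */
        return 0;                       /* requested fences signalled */
}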
1252 * dma_resv_poll_cb(fence, fcb)
1254 * Callback to notify a reservation poll that a fence has
1258 * If one thread is waiting for the exclusive fence only, and we
1259 * spuriously notify them about a shared fence, tough.
1262 dma_resv_poll_cb(struct dma_fence *fence, struct dma_fence_cb *fcb)
1280 * - POLLIN wait for the exclusive fence
1292 fence = NULL;
1309 top: KASSERT(fence == NULL);
1326 * For each fence, if it is going away, restart.
1332 KASSERT(fence == NULL);
1333 fence = atomic_load_relaxed(&list->shared[i]);
1334 if ((fence = dma_fence_get_rcu(fence)) == NULL)
1336 if (!dma_fence_is_signaled(fence)) {
1337 dma_fence_put(fence);
1338 fence = NULL;
1341 dma_fence_put(fence);
1342 fence = NULL;
1364 * Otherwise, find the first fence that is not
1371 KASSERT(fence == NULL);
1372 fence = atomic_load_relaxed(&list->shared[i]);
1373 if ((fence = dma_fence_get_rcu(fence)) == NULL)
1375 if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
1377 dma_fence_put(fence);
1378 fence = NULL;
1383 dma_fence_put(fence);
1384 fence = NULL;
1388 /* We always wait for at least the exclusive fence, so get it. */
1389 KASSERT(fence == NULL);
1390 if (!dma_resv_get_excl_reader(robj, &fence, &ticket))
1392 if (fence != NULL) do {
1397 if (dma_fence_is_signaled(fence))
1402 dma_fence_put(fence);
1403 fence = NULL;
1419 * all possible ready events. If the fence has been
1423 if (!dma_fence_add_callback(fence, &rpoll->rp_fcb,
1430 if (fence != NULL) {
1431 dma_fence_put(fence);
1432 fence = NULL;
1441 * request it because a fence was signalled while we
1443 * callback doesn't use the fence nor rely on holding
1444 * any of the fence locks, so this is safe.
1451 KASSERT(fence == NULL);
1456 KASSERT(fence == NULL);
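The poll support above arms a dma_fence callback on a not-yet-signalled fence and records which events are pending; dma_fence_add_callback() returns nonzero (e.g. -ENOENT) if the fence has already signalled, in which case no callback will ever fire and the event is ready immediately. A generic sketch of that arm-or-complete pattern; struct waiter and its functions are illustrative, not part of this file:

#include <linux/dma-fence.h>

struct waiter {
        struct dma_fence_cb cb;
        /* ... e.g. a selinfo or wait queue to notify ... */
};

static void
waiter_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        /*
         * Runs once the fence signals, possibly with the fence's lock
         * held, so it must stay lightweight and must not take fence
         * locks itself.
         */
        /* struct waiter *w = container_of(cb, struct waiter, cb); */
        /* ... wake up whoever is polling ... */
}

static void
arm_waiter(struct dma_fence *fence, struct waiter *w)
{
        if (dma_fence_add_callback(fence, &w->cb, waiter_cb) != 0) {
                /* Already signalled: no callback will fire; complete now. */
                waiter_cb(fence, &w->cb);
        }
}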