/src/sys/external/bsd/drm2/linux/
linux_dma_fence_array.c
    102  if (dma_fence_add_callback(A->fences[i], &C->dfac_cb,
    104  error = A->fences[i]->error;
    139  dma_fence_put(A->fences[i]);
    141  kfree(A->fences);
    155  dma_fence_array_create(int num_fences, struct dma_fence **fences,
    168  A->fences = fences;
linux_dma_fence.c
    199  * initialized or freed fences, but not fences with more than one
    341  * The two fences must have the same context.  Whether sequence numbers
    352  KASSERTMSG(a->context == b->context, "incommensurate fences"
    822  * Wait for any of fences[0], fences[1], fences[2], ...,
    823  * fences[nfences-1] to be signalled.  If ip is nonnull, set *ip
    830  dma_fence_wait_any_timeout(struct dma_fence **fences, uint32_t nfences,
    844  KASSERT(dma_fence_referenced_p(fences[i]))
    [all...]
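The comment at lines 822-830 describes dma_fence_wait_any_timeout(), which waits for the first of a set of fences to signal and reports which one it was. A minimal usage sketch, assuming the standard Linux signature; the helper name and the one-second timeout are illustrative, not taken from the file above:

    #include <linux/dma-fence.h>
    #include <linux/errno.h>
    #include <linux/jiffies.h>

    /* Wait up to one second for whichever of two fences signals first. */
    static long
    wait_for_either(struct dma_fence *a, struct dma_fence *b)
    {
            struct dma_fence *fences[2] = { a, b };
            uint32_t idx;
            long remain;

            remain = dma_fence_wait_any_timeout(fences, 2, true /* interruptible */,
                msecs_to_jiffies(1000), &idx);
            if (remain < 0)
                    return remain;          /* error, e.g. -ERESTARTSYS */
            if (remain == 0)
                    return -ETIMEDOUT;      /* neither fence signalled in time */
            return idx;                     /* index of the fence that signalled */
    }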
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_ids.c
    111  * amdgpu_pasid_free_delayed - free pasid when fences signal
    113  * @resv: reservation object with the fences to wait for
    116  * Free the pasid only after all the fences in resv are signaled.
    121  struct dma_fence *fence, **fences;    (local in function amdgpu_pasid_free_delayed)
    126  r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
    136  fence = fences[0];
    137  kfree(fences);
    142  array = dma_fence_array_create(count, fences, context,
    145  kfree(fences);
    168  * block for all the fences to complete
    215  struct dma_fence **fences;    (local in function amdgpu_vmid_grab_idle)
    [all...]
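amdgpu_pasid_free_delayed() (lines 111-145) snapshots every fence on a reservation object and reduces them to one fence that can carry a callback: a single fence is used directly, several are wrapped in a dma_fence_array. A hedged sketch of that pattern, assuming the dma_resv_get_fences_rcu() and dma_fence_array_create() interfaces visible above; the helper name is illustrative:

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>
    #include <linux/dma-resv.h>
    #include <linux/err.h>
    #include <linux/errno.h>
    #include <linux/slab.h>

    /* Collapse all fences currently on @resv into a single fence. */
    static struct dma_fence *
    resv_fences_to_single(struct dma_resv *resv)
    {
            struct dma_fence **fences;
            struct dma_fence_array *array;
            unsigned int count;
            int r;

            r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
            if (r)
                    return ERR_PTR(r);
            if (count == 0)
                    return NULL;            /* nothing to wait for */
            if (count == 1) {
                    struct dma_fence *fence = fences[0];

                    kfree(fences);          /* keep the single reference */
                    return fence;
            }
            /* dma_fence_array_create() takes ownership of the vector and refs. */
            array = dma_fence_array_create(count, fences,
                dma_fence_context_alloc(1), 1, false);
            if (!array) {
                    while (count--)
                            dma_fence_put(fences[count]);
                    kfree(fences);
                    return ERR_PTR(-ENOMEM);
            }
            return &array->base;
    }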
amdgpu_sync.c
    57   hash_init(sync->fences);
    142  hash_for_each_possible(sync->fences, e, node, f->context) {
    182  hash_add(sync->fences, &e->node, f->context);
    208  * @sync: sync object to add fences from reservation object to
    239  /* We only want to trigger KFD eviction fences on
    240  * evict or move jobs. Skip KFD fences otherwise.
    249  * command submissions or KFD evictions fences
    286  hash_for_each_safe(sync->fences, i, tmp, e, node) {
    297  /* For fences from the same ring it is sufficient
    328  hash_for_each_safe(sync->fences, i, tmp, e, node)
    [all...]
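The amdgpu_sync hash table above keys fences by their context so that, per timeline, only the latest fence needs to be remembered. A rough sketch of that deduplication scheme using the generic <linux/hashtable.h> helpers seen in the snippets; the struct and function names here are made up for illustration:

    #include <linux/dma-fence.h>
    #include <linux/hashtable.h>
    #include <linux/slab.h>

    struct sync_entry {
            struct hlist_node node;
            struct dma_fence *fence;
    };

    struct sync_set {
            DECLARE_HASHTABLE(fences, 4);   /* 16 buckets, keyed by fence context */
    };

    static int
    sync_set_add(struct sync_set *s, struct dma_fence *f)
    {
            struct sync_entry *e;

            hash_for_each_possible(s->fences, e, node, f->context) {
                    if (e->fence->context != f->context)
                            continue;
                    /* Same timeline: remember only the later of the two fences. */
                    if (dma_fence_is_later(f, e->fence)) {
                            dma_fence_put(e->fence);
                            e->fence = dma_fence_get(f);
                    }
                    return 0;
            }

            e = kmalloc(sizeof(*e), GFP_KERNEL);
            if (!e)
                    return -ENOMEM;
            e->fence = dma_fence_get(f);
            hash_add(s->fences, &e->node, f->context);
            return 0;
    }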
amdgpu_sa.c
    215  struct dma_fence **fences,
    237  fences[i] = NULL;
    246  fences[i] = sa_bo->fence;
    287  struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS];    (local in function amdgpu_sa_bo_new)
    322  } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
    325  if (fences[i])
    326  fences[count++] = dma_fence_get(fences[i]);
    330  t = dma_fence_wait_any_timeout(fences, count, false,
    334  dma_fence_put(fences[i])
    [all...]
amdgpu_sync.h
    37   * Container for fences used to sync command submissions.
    40   DECLARE_HASHTABLE(fences, 4);
amdgpu_dma_buf.c
    147  struct dma_fence **fences;    (local in function __dma_resv_make_exclusive)
    151  if (!dma_resv_get_list(obj)) /* no shared fences to convert */
    154  r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
    161  dma_resv_add_excl_fence(obj, fences[0]);
    162  dma_fence_put(fences[0]);
    163  kfree(fences);
    167  array = dma_fence_array_create(count, fences,
    181  dma_fence_put(fences[count]);
    182  kfree(fences);
    214  * We only create shared fences for internal use, but importer
    [all...]
amdgpu_jpeg.c
    81   unsigned int fences = 0;    (local in function amdgpu_jpeg_idle_work_handler)
    88   fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
    91   if (fences == 0)
amdgpu_ctx.h
    39   struct dma_fence *fences[];    (member of struct amdgpu_ctx_entity)
amdgpu_fence.c
    52   * Fences
    53   * Fences mark an event in the GPUs pipeline and are used
    166  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
    270  ptr = &drv->fences[last_seq];
    310  * amdgpu_fence_wait_empty - wait for all fences to signal
    315  * Wait for all fences on the requested ring to signal (all asics).
    316  * Returns 0 if the fences have passed, error for all other cases.
    327  ptr = &ring->fence_drv.fences[seq & ring->fence_drv.num_fences_mask];
    348  * Wait for all fences on the requested ring to signal (all asics).
    366  * amdgpu_fence_count_emitted - get the count of emitted fences
    [all...]
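The `seq & num_fences_mask` expressions at lines 166 and 327 index a small ring of recently emitted fences: the driver keeps the last N fences, N a power of two, and the slot for a 32-bit sequence number is found by masking. A minimal sketch of that indexing; the field and function names are illustrative, not the driver's own:

    #include <linux/dma-fence.h>

    struct fence_ring_slots {
            struct dma_fence **fences;      /* num_fences entries */
            uint32_t num_fences_mask;       /* num_fences - 1, num_fences a power of two */
    };

    /* seq & mask equals seq % num_fences when num_fences is a power of two. */
    static struct dma_fence **
    fence_slot(struct fence_ring_slots *drv, uint32_t seq)
    {
            return &drv->fences[seq & drv->num_fences_mask];
    }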
amdgpu_debugfs.c
    1057  struct dma_fence **fences)
    1073  ptr = &drv->fences[last_seq];
    1081  fences[last_seq] = fence;
    1086  static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
    1093  fence = fences[i];
    1131  ptr = &drv->fences[preempt_seq];
    1148  struct dma_fence **fences = NULL;    (local in function amdgpu_debugfs_ib_preempt)
    1164  fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
    1165  if (!fences)
    1191  /* swap out the old fences */
    [all...]
amdgpu_ctx.c
    79   entity = kcalloc(1, offsetof(typeof(*entity), fences[amdgpu_sched_jobs]),
    182  dma_fence_put(entity->fences[i]);
    473  other = centity->fences[idx];
    478  centity->fences[idx] = fence;
    510  fence = dma_fence_get(centity->fences[seq & (amdgpu_sched_jobs - 1)]);
    549  other = dma_fence_get(centity->fences[idx]);
amdgpu_cs.c
    947   /* MM engine doesn't support user fences */
    1515  * amdgpu_cs_wait_all_fence - wait on all fences to signal
    1520  * @fences: array of drm_amdgpu_fence
    1525  struct drm_amdgpu_fence *fences)
    1535  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
    1565  * @fences: array of drm_amdgpu_fence
    1570  struct drm_amdgpu_fence *fences)
    1588  fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
    1638  struct drm_amdgpu_fence *fences;    (local in function amdgpu_cs_wait_fences_ioctl)
    1641  /* Get the fences from userspace *
    [all...]
amdgpu_vcn.c
    289  unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0};    (local in function amdgpu_vcn_idle_work_handler)
    312  fences += fence[j];
    315  if (fences == 0) {
    337  unsigned int fences = 0;    (local in function amdgpu_vcn_ring_begin_use)
    341  fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
    343  if (fences)
amdgpu_vcn_v1_0.c
    1780  unsigned int fences = 0, i;    (local in function vcn_v1_0_idle_work_handler)
    1783  fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
    1788  if (fences)
    1801  fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
    1802  fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
    1804  if (fences == 0) {
    1832  unsigned int fences = 0, i;    (local in function vcn_v1_0_ring_begin_use)
    1835  fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
    1837  if (fences)
amdgpu_trace.h
    151  __field(u32, fences)
    158  __entry->fences = amdgpu_fence_count_emitted(
    161  TP_printk("bo_list=%p, ring=%u, dw=%u, fences=%u",
    163  __entry->fences)
amdgpu_uvd.c
    1201  unsigned fences = 0, i, j;    (local in function amdgpu_uvd_idle_work_handler)
    1206  fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
    1208  fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
    1212  if (fences == 0) {
/src/sys/external/bsd/drm2/include/linux/
dma-fence-array.h
    51   struct dma_fence **fences;    (member of struct dma_fence_array)
/src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_sa.c
    262  struct radeon_fence **fences,
    292  fences[i] = sa_bo->fence;
    331  struct radeon_fence *fences[RADEON_NUM_RINGS];    (local in function radeon_sa_bo_new)
    354  fences[i] = NULL;
    372  } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
    375  radeon_fence_ref(fences[i]);
    379  r = radeon_fence_wait_any(rdev, fences, false);
    381  radeon_fence_unref(&fences[i]);
    390  r = radeon_fence_wait_any(rdev, fences, false);
    392  radeon_fence_unref(&fences[i])
    [all...]
radeon_trace.h
    38   __field(u32, fences)
    44   __entry->fences = radeon_fence_count_emitted(
    47   TP_printk("ring=%u, dw=%u, fences=%u",
    49   __entry->fences)
/src/sys/external/bsd/drm2/dist/drm/i915/selftests/
i915_sw_fence.c
    233  /* Test a chain of fences, A waits on B who waits on C */
    313  /* Test multiple fences (AB) waiting on a single event (C) */
    458  struct i915_sw_fence **fences;    (local in function test_chain)
    461  /* Test a long chain of fences */
    462  fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
    463  if (!fences)
    467  fences[i] = alloc_fence();
    468  if (!fences[i]) {
    475  ret = i915_sw_fence_await_sw_fence_gfp(fences[i]
    [all...]
/src/sys/external/bsd/drm2/dist/drm/virtio/
virtgpu_fence.c
    110  list_add_tail(&fence->node, &drv->fences);
    128  list_for_each_entry_safe(fence, tmp, &drv->fences, node) {
/src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_execbuffer.c
    2471  __free_fence_array(struct drm_syncobj **fences, unsigned int n)
    2474  drm_syncobj_put(ptr_mask_bits(fences[n], 2));
    2475  kvfree(fences);
    2484  struct drm_syncobj **fences;    (local in function get_fence_array)
    2495  SIZE_MAX / sizeof(*fences)))
    2502  fences = kvmalloc_array(nfences, sizeof(*fences),
    2504  if (!fences)
    2531  fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
    2534  return fences;
    3003  struct drm_syncobj **fences = NULL;    (local in function i915_gem_execbuffer2_ioctl)
    [all...]
i915_gem_wait.c
    70   * If both shared fences and an exclusive fence exist,
    71   * then by construction the shared fences must be later
    73   * all the shared fences, we know that the exclusive fence
    74   * must all be signaled. If all the shared fences are
    76   * floating references on the fences/requests.
    89   * Opportunistically prune the fences iff we know they have *all* been
    141  __fence_set_priority(array->fences[i], attr);
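Line 141 operates on the component fences of a dma_fence_array. A hedged sketch of that pattern, assuming the dma_fence_is_array()/to_dma_fence_array() helpers from <linux/dma-fence-array.h>; the function names here are illustrative:

    #include <linux/dma-fence.h>
    #include <linux/dma-fence-array.h>

    /* Apply @fn to @fence, or to each component if it is a fence array. */
    static void
    for_each_component_fence(struct dma_fence *fence,
        void (*fn)(struct dma_fence *))
    {
            if (dma_fence_is_array(fence)) {
                    struct dma_fence_array *array = to_dma_fence_array(fence);
                    unsigned int i;

                    for (i = 0; i < array->num_fences; i++)
                            fn(array->fences[i]);
            } else {
                    fn(fence);
            }
    }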
/src/sys/external/bsd/drm2/dist/drm/
drm_gem.c
    862   * shared and/or exclusive fences.
    865   * @wait_all: if true, wait on all fences, else wait on just exclusive fence
    1407  * for your shared fences (if applicable), submit your job, then
    1481  * drm_gem_fence_array_add - Adds the fence to an array of fences to be
    1482  * waited on, deduplicating fences from the same context.
    1533  * own fences.
    1538  * shared fences in the reservation object).
    1545  struct dma_fence **fences;    (local in function drm_gem_fence_array_add_implicit)
    1556  &fence_count, &fences);
    1561  ret = drm_gem_fence_array_add(fence_array, fences[i])
    [all...]
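drm_gem_fence_array_add() (lines 1481-1482) collects a job's dependencies in an xarray, keeping at most one fence per context, and drm_gem_fence_array_add_implicit() pulls in a GEM object's implicit fences (the exclusive fence, plus the shared fences for writers). A hedged usage sketch, assuming those signatures, that the add helper consumes the fence reference it is given, and that the xarray was initialized with XA_FLAGS_ALLOC; the names below are illustrative:

    #include <linux/dma-fence.h>
    #include <linux/xarray.h>
    #include <drm/drm_gem.h>

    /* Gather the fences a job must wait on before it may run. */
    static int
    collect_job_deps(struct xarray *deps, struct drm_gem_object *obj,
        struct dma_fence *extra_dep, bool writes_obj)
    {
            int ret;

            /* Explicit dependency; the helper consumes this reference. */
            ret = drm_gem_fence_array_add(deps, dma_fence_get(extra_dep));
            if (ret)
                    return ret;

            /* Implicit sync from the object's reservation object. */
            return drm_gem_fence_array_add_implicit(deps, obj, writes_obj);
    }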