    Searched defs:fences (Results 1 - 18 of 18) sorted by relevancy

  /src/sys/external/bsd/drm2/include/linux/
dma-fence-array.h 51 struct dma_fence **fences; member in struct:dma_fence_array
  /src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_ctx.h 39 struct dma_fence *fences[]; member in struct:amdgpu_ctx_entity
amdgpu_jpeg.c 81 unsigned int fences = 0; local in function:amdgpu_jpeg_idle_work_handler
88 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
91 if (fences == 0)
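
The amdgpu_jpeg.c hit above is the recurring "idle work" pattern in these drivers: a delayed work handler sums the fences still emitted on every instance's ring and only powers the block down once that count reaches zero (the amdgpu_vcn.c, amdgpu_uvd.c and amdgpu_vcn_v1_0.c hits below follow the same shape). A minimal sketch of the pattern with stand-in names; only the role of amdgpu_fence_count_emitted() is taken from the hit, nothing here is the real amdgpu layout:

    /* Idle-work sketch: power a block down only when no fences are pending.
     * 'struct my_block', 'my_fence_count_emitted' and 'power_off' are
     * illustrative stand-ins, not amdgpu symbols. */
    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct my_ring;                         /* stand-in ring type */
    unsigned int my_fence_count_emitted(struct my_ring *ring);

    struct my_block {
        struct delayed_work idle_work;
        struct my_ring *ring[4];
        int num_inst;
        void (*power_off)(struct my_block *blk);
    };

    static void my_idle_work_handler(struct work_struct *work)
    {
        struct my_block *blk =
            container_of(work, struct my_block, idle_work.work);
        unsigned int fences = 0;
        int i;

        for (i = 0; i < blk->num_inst; ++i)
            fences += my_fence_count_emitted(blk->ring[i]);

        if (fences == 0)
            blk->power_off(blk);            /* nothing in flight: gate the block */
        else
            schedule_delayed_work(&blk->idle_work, HZ);     /* re-check later */
    }
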
amdgpu_sa.c 215 struct dma_fence **fences,
237 fences[i] = NULL;
246 fences[i] = sa_bo->fence;
287 struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; local in function:amdgpu_sa_bo_new
322 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
325 if (fences[i])
326 fences[count++] = dma_fence_get(fences[i]);
330 t = dma_fence_wait_any_timeout(fences, count, false,
334 dma_fence_put(fences[i]);
    [all...]
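
The amdgpu_sa.c hit gathers the fence guarding each candidate hole, takes a reference on every non-NULL entry, and then blocks until any one of them signals via dma_fence_wait_any_timeout(). A condensed sketch of just that wait-any step, assuming the caller already holds those references (the helper name is illustrative):

    #include <linux/dma-fence.h>
    #include <linux/sched.h>                /* MAX_SCHEDULE_TIMEOUT */
    #include <linux/errno.h>

    /* Wait until any one of 'count' referenced fences signals, then drop the
     * references, mirroring the retry loop at the end of amdgpu_sa_bo_new(). */
    static int wait_for_any_fence(struct dma_fence **fences, unsigned int count)
    {
        signed long t;
        unsigned int i;

        t = dma_fence_wait_any_timeout(fences, count, false,
                                       MAX_SCHEDULE_TIMEOUT, NULL);
        for (i = 0; i < count; ++i)
            dma_fence_put(fences[i]);       /* pairs with the earlier dma_fence_get() */

        if (t > 0)
            return 0;                       /* some fence signalled */
        return t == 0 ? -ETIMEDOUT : (int)t;
    }

In the allocator itself this sits in a loop: once a fence signals, the freed buffers behind it can be reclaimed and amdgpu_sa_bo_next_hole() is tried again.
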
amdgpu_dma_buf.c 147 struct dma_fence **fences; local in function:__dma_resv_make_exclusive
151 if (!dma_resv_get_list(obj)) /* no shared fences to convert */
154 r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
161 dma_resv_add_excl_fence(obj, fences[0]);
162 dma_fence_put(fences[0]);
163 kfree(fences);
167 array = dma_fence_array_create(count, fences,
181 dma_fence_put(fences[count]);
182 kfree(fences);
214 * We only create shared fences for internal use, but importer
    [all...]
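
__dma_resv_make_exclusive() (the amdgpu_dma_buf.c hit) reads every shared fence out of a reservation object and replaces them with a single exclusive fence: the lone fence is reused when there is exactly one, otherwise the whole set is wrapped in a dma_fence_array. A condensed sketch of that flow using the same 5.x-era dma_resv/dma_fence_array calls that appear in the hit (newer kernels have since renamed several of these helpers):

    #include <linux/dma-resv.h>
    #include <linux/dma-fence-array.h>
    #include <linux/slab.h>

    /* Collapse the shared fences of 'obj' into one exclusive fence
     * (a sketch of the branching seen in __dma_resv_make_exclusive()). */
    static int make_exclusive(struct dma_resv *obj)
    {
        struct dma_fence **fences;
        unsigned int count;
        int r;

        if (!dma_resv_get_list(obj))
            return 0;                       /* no shared fences to convert */

        r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
        if (r)
            return r;

        if (count == 0) {
            /* nothing to do */
        } else if (count == 1) {
            dma_resv_add_excl_fence(obj, fences[0]);
            dma_fence_put(fences[0]);
            kfree(fences);
        } else {
            struct dma_fence_array *array;

            /* On success the array takes ownership of 'fences' and its refs. */
            array = dma_fence_array_create(count, fences,
                                           dma_fence_context_alloc(1),
                                           0, false);
            if (!array) {
                while (count--)
                    dma_fence_put(fences[count]);
                kfree(fences);
                return -ENOMEM;
            }
            dma_resv_add_excl_fence(obj, &array->base);
            dma_fence_put(&array->base);
        }
        return 0;
    }
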
amdgpu_ids.c 111 * amdgpu_pasid_free_delayed - free pasid when fences signal
113 * @resv: reservation object with the fences to wait for
116 * Free the pasid only after all the fences in resv are signaled.
121 struct dma_fence *fence, **fences; local in function:amdgpu_pasid_free_delayed
126 r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
136 fence = fences[0];
137 kfree(fences);
142 array = dma_fence_array_create(count, fences, context,
145 kfree(fences);
168 * block for all the fences to complete
215 struct dma_fence **fences; local in function:amdgpu_vmid_grab_idle
    [all...]
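
amdgpu_pasid_free_delayed() (the amdgpu_ids.c hit) builds one fence out of everything in the reservation object in the same way, then defers the actual PASID free until that fence signals: it attaches a fence callback and falls back to a blocking wait if the callback node cannot be allocated. A sketch of that defer-until-signalled step with generic names (the wrapper struct, 'release' hook and helper below are hypothetical, not amdgpu symbols):

    #include <linux/kernel.h>
    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    /* Defer a cleanup until 'fence' signals; fall back to a blocking wait if
     * the callback node cannot be allocated.  Note that the callback runs
     * from the fence-signalling path (possibly atomic context), so 'release'
     * must be safe to call from there. */
    struct deferred_release_cb {
        struct dma_fence_cb cb;
        void (*release)(void *data);
        void *data;
    };

    static void deferred_release_func(struct dma_fence *fence,
                                      struct dma_fence_cb *_cb)
    {
        struct deferred_release_cb *cb =
            container_of(_cb, struct deferred_release_cb, cb);

        cb->release(cb->data);
        dma_fence_put(fence);
        kfree(cb);
    }

    /* Consumes the caller's reference on 'fence'. */
    static void release_when_signalled(struct dma_fence *fence,
                                       void (*release)(void *data), void *data)
    {
        struct deferred_release_cb *cb = kmalloc(sizeof(*cb), GFP_KERNEL);

        if (!cb) {
            dma_fence_wait(fence, false);   /* no memory: just wait here */
            dma_fence_put(fence);
            release(data);
            return;
        }

        cb->release = release;
        cb->data = data;
        if (dma_fence_add_callback(fence, &cb->cb, deferred_release_func))
            deferred_release_func(fence, &cb->cb);      /* already signalled */
    }
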
amdgpu_vcn.c 289 unsigned int fences = 0, fence[AMDGPU_MAX_VCN_INSTANCES] = {0}; local in function:amdgpu_vcn_idle_work_handler
312 fences += fence[j];
315 if (fences == 0) {
337 unsigned int fences = 0; local in function:amdgpu_vcn_ring_begin_use
341 fences += amdgpu_fence_count_emitted(&adev->vcn.inst[ring->me].ring_enc[i]);
343 if (fences)
amdgpu_cs.c 947 /* MM engine doesn't support user fences */
1515 * amdgpu_cs_wait_all_fence - wait on all fences to signal
1520 * @fences: array of drm_amdgpu_fence
1525 struct drm_amdgpu_fence *fences)
1535 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1565 * @fences: array of drm_amdgpu_fence
1570 struct drm_amdgpu_fence *fences)
1588 fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
1638 struct drm_amdgpu_fence *fences; local in function:amdgpu_cs_wait_fences_ioctl
1641 /* Get the fences from userspace */
    [all...]
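
The amdgpu_cs.c hits are the wait-fences ioctl path: userspace hands in an array of drm_amdgpu_fence handles, each entry is resolved to a struct dma_fence via amdgpu_cs_get_fence(), and the kernel then waits either for all of them or for the first one to signal. A sketch of the wait-all loop with the resolver abstracted into a callback (helper and callback names are illustrative, and the timeout handling is reduced to a single shrinking budget):

    #include <linux/dma-fence.h>
    #include <linux/err.h>
    #include <linux/errno.h>

    /* Wait for every fence produced by 'resolve' to signal, carrying the
     * remaining timeout from one wait to the next. */
    static int wait_all_fences(struct dma_fence *(*resolve)(void *ctx, unsigned int i),
                               void *ctx, unsigned int count, signed long timeout)
    {
        unsigned int i;

        for (i = 0; i < count; ++i) {
            struct dma_fence *fence = resolve(ctx, i);
            signed long r;

            if (IS_ERR(fence))
                return PTR_ERR(fence);
            if (!fence)
                continue;                   /* already retired, nothing to wait on */

            r = dma_fence_wait_timeout(fence, true, timeout);
            dma_fence_put(fence);
            if (r < 0)
                return (int)r;              /* interrupted */
            if (r == 0)
                return -ETIMEDOUT;
            timeout = r;                    /* budget left for the next fence */
        }
        return 0;
    }
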
amdgpu_debugfs.c 1057 struct dma_fence **fences)
1073 ptr = &drv->fences[last_seq];
1081 fences[last_seq] = fence;
1086 static void amdgpu_ib_preempt_signal_fences(struct dma_fence **fences,
1093 fence = fences[i];
1131 ptr = &drv->fences[preempt_seq];
1148 struct dma_fence **fences = NULL; local in function:amdgpu_debugfs_ib_preempt
1164 fences = kcalloc(length, sizeof(void *), GFP_KERNEL);
1165 if (!fences)
1191 /* swap out the old fences */
    [all...]
amdgpu_vcn_v1_0.c 1780 unsigned int fences = 0, i; local in function:vcn_v1_0_idle_work_handler
1783 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1788 if (fences)
1801 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst->ring_dec);
1802 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_dec);
1804 if (fences == 0) {
1832 unsigned int fences = 0, i; local in function:vcn_v1_0_ring_begin_use
1835 fences += amdgpu_fence_count_emitted(&adev->vcn.inst->ring_enc[i]);
1837 if (fences)
amdgpu_ring.h 73 * Fences.
87 struct dma_fence **fences; member in struct:amdgpu_fence_driver
amdgpu_uvd.c 1201 unsigned fences = 0, i, j; local in function:amdgpu_uvd_idle_work_handler
1206 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
1208 fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
1212 if (fences == 0) {
  /src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_sa.c 262 struct radeon_fence **fences,
292 fences[i] = sa_bo->fence;
331 struct radeon_fence *fences[RADEON_NUM_RINGS]; local in function:radeon_sa_bo_new
354 fences[i] = NULL;
372 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
375 radeon_fence_ref(fences[i]);
379 r = radeon_fence_wait_any(rdev, fences, false);
381 radeon_fence_unref(&fences[i]);
390 r = radeon_fence_wait_any(rdev, fences, false);
392 radeon_fence_unref(&fences[i]);
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/i915/selftests/
i915_sw_fence.c 233 /* Test a chain of fences, A waits on B who waits on C */
313 /* Test multiple fences (AB) waiting on a single event (C) */
458 struct i915_sw_fence **fences; local in function:test_chain
461 /* Test a long chain of fences */
462 fences = kmalloc_array(nfences, sizeof(*fences), GFP_KERNEL);
463 if (!fences)
467 fences[i] = alloc_fence();
468 if (!fences[i]) {
475 ret = i915_sw_fence_await_sw_fence_gfp(fences[i]
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/
drm_gem.c 862 * shared and/or exclusive fences.
865 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
1407 * for your shared fences (if applicable), submit your job, then
1481 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
1482 * waited on, deduplicating fences from the same context.
1533 * own fences.
1538 * shared fences in the reservation object).
1545 struct dma_fence **fences; local in function:drm_gem_fence_array_add_implicit
1556 &fence_count, &fences);
1561 ret = drm_gem_fence_array_add(fence_array, fences[i])
    [all...]
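
The drm_gem.c hits cover drm_gem_fence_array_add_implicit(), which gathers a GEM object's implicit fences for a job to wait on: only the exclusive fence for read access, or every shared fence in the reservation object when the job writes. A simplified sketch of the write (wait-all) side, again using the 5.x-era dma_resv call shown in the hit; the per-fence consumer is abstracted into a callback and the reference handling is simplified relative to the real helper:

    #include <linux/dma-resv.h>
    #include <linux/dma-fence.h>
    #include <linux/slab.h>

    /* Hand every shared fence of 'resv' to a caller-supplied 'add' callback.
     * The callback must take its own reference if it keeps the fence; this
     * helper drops all the references returned by the RCU get. */
    static int for_each_shared_fence(struct dma_resv *resv,
                                     int (*add)(void *ctx, struct dma_fence *f),
                                     void *ctx)
    {
        struct dma_fence **fences;
        unsigned int count, i;
        int ret;

        ret = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
        if (ret)
            return ret;

        for (i = 0; i < count; i++) {
            if (!ret)
                ret = add(ctx, fences[i]);  /* stop adding after the first error */
            dma_fence_put(fences[i]);
        }
        kfree(fences);
        return ret;
    }

The real helper stores the fences in an xarray and deduplicates entries coming from the same fence context, which is what the "deduplicating fences from the same context" comment in the hit refers to: the dependency list then scales with the number of engines involved rather than the number of buffers.
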
  /src/sys/external/bsd/drm2/dist/drm/virtio/
virtgpu_drv.h 99 struct list_head fences; member in struct:virtio_gpu_fence_driver
  /src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_execbuffer.c 2471 __free_fence_array(struct drm_syncobj **fences, unsigned int n)
2474 drm_syncobj_put(ptr_mask_bits(fences[n], 2));
2475 kvfree(fences);
2484 struct drm_syncobj **fences; local in function:get_fence_array
2495 SIZE_MAX / sizeof(*fences)))
2502 fences = kvmalloc_array(nfences, sizeof(*fences),
2504 if (!fences)
2531 fences[n] = ptr_pack_bits(syncobj, fence.flags, 2);
2534 return fences;
3003 struct drm_syncobj **fences = NULL; local in function:i915_gem_execbuffer2_ioctl
    [all...]
  /src/sys/external/bsd/drm2/dist/include/uapi/drm/
amdgpu_drm.h 446 /** This points to uint64_t * which points to fences */
447 __u64 fences; member in struct:drm_amdgpu_wait_fences_in

Completed in 24 milliseconds