/src/sys/external/bsd/drm2/include/linux/ |
dma-fence-array.h | 51 struct dma_fence **fences; member in struct:dma_fence_array
|
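The fences member above is the backing vector of a dma_fence_array. A minimal sketch of bundling several fences into one array fence, assuming the standard Linux dma-fence-array API as carried in this drm2 port; bundle_fences is a hypothetical helper:

#include <linux/dma-fence.h>
#include <linux/dma-fence-array.h>

/* On success the array takes ownership of the kmalloc'd 'fences'
 * vector and of each fence reference; on failure the caller keeps
 * ownership of both. */
static struct dma_fence *
bundle_fences(struct dma_fence **fences, unsigned int count)
{
	struct dma_fence_array *array;

	array = dma_fence_array_create(count, fences,
	    dma_fence_context_alloc(1), 1, false);
	if (array == NULL)
		return NULL;
	return &array->base;
}
|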
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ |
amdgpu_ctx.h | 39 struct dma_fence *fences[]; member in struct:amdgpu_ctx_entity
|
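amdgpu_ctx_entity ends in a flexible array of fence pointers. A minimal sketch of allocating such a structure, assuming the kernel's struct_size() helper from linux/overflow.h; struct my_entity and my_entity_alloc are hypothetical stand-ins:

#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/dma-fence.h>

struct my_entity {
	unsigned int num_fences;
	struct dma_fence *fences[];	/* flexible array member */
};

static struct my_entity *
my_entity_alloc(unsigned int num_fences)
{
	struct my_entity *e;

	e = kzalloc(struct_size(e, fences, num_fences), GFP_KERNEL);
	if (e)
		e->num_fences = num_fences;
	return e;
}
|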
amdgpu_jpeg.c |
    81 unsigned int fences = 0; local in function:amdgpu_jpeg_idle_work_handler
    88 fences += amdgpu_fence_count_emitted(&adev->jpeg.inst[i].ring_dec);
    91 if (fences == 0)
|
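The matches above show the idle-work pattern: sum the fences still outstanding on every JPEG ring and treat the block as idle only when the count is zero. A hedged sketch of that shape, with hypothetical names (struct my_ring, rings_idle) standing in for the driver's instance array:

/* Hypothetical: one ring-like producer with a count of emitted but
 * not yet signaled fences, cf. amdgpu_fence_count_emitted(). */
struct my_ring {
	unsigned int emitted;
};

static bool
rings_idle(const struct my_ring *rings, unsigned int n)
{
	unsigned int fences = 0;
	unsigned int i;

	for (i = 0; i < n; ++i)
		fences += rings[i].emitted;

	return fences == 0;	/* nothing in flight: safe to gate power */
}
|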
amdgpu_sa.c |
    215 struct dma_fence **fences,
    237 fences[i] = NULL;
    246 fences[i] = sa_bo->fence;
    287 struct dma_fence *fences[AMDGPU_SA_NUM_FENCE_LISTS]; local in function:amdgpu_sa_bo_new
    322 } while (amdgpu_sa_bo_next_hole(sa_manager, fences, tries));
    325 if (fences[i])
    326 fences[count++] = dma_fence_get(fences[i]);
    330 t = dma_fence_wait_any_timeout(fences, count, false,
    334 dma_fence_put(fences[i]) [all...]
|
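amdgpu_sa_bo_new gathers one candidate fence per fence list, takes a reference on each, and blocks until any of them signals. A sketch of that collect-and-wait step, assuming the Linux dma_fence_wait_any_timeout() API; wait_for_any is a hypothetical helper:

#include <linux/dma-fence.h>
#include <linux/sched.h>
#include <linux/errno.h>

static int
wait_for_any(struct dma_fence **fences, unsigned int count)
{
	uint32_t first;
	signed long t;
	unsigned int i;

	for (i = 0; i < count; ++i)
		dma_fence_get(fences[i]);

	/* false: uninterruptible wait; 'first' reports which one signaled */
	t = dma_fence_wait_any_timeout(fences, count, false,
	    MAX_SCHEDULE_TIMEOUT, &first);

	for (i = 0; i < count; ++i)
		dma_fence_put(fences[i]);

	if (t < 0)
		return (int)t;
	return t == 0 ? -ETIMEDOUT : 0;
}
|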
amdgpu_dma_buf.c |
    147 struct dma_fence **fences; local in function:__dma_resv_make_exclusive
    151 if (!dma_resv_get_list(obj)) /* no shared fences to convert */
    154 r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
    161 dma_resv_add_excl_fence(obj, fences[0]);
    162 dma_fence_put(fences[0]);
    163 kfree(fences);
    167 array = dma_fence_array_create(count, fences,
    181 dma_fence_put(fences[count]);
    182 kfree(fences);
    214 * We only create shared fences for internal use, but importer [all...]
|
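__dma_resv_make_exclusive above collects an object's shared fences and replaces them with a single exclusive fence: either the lone fence itself, or a dma_fence_array wrapping all of them. A condensed sketch of that flow, assuming the caller holds the reservation lock and eliding the error unwinding the real function performs:

#include <linux/dma-resv.h>
#include <linux/dma-fence-array.h>
#include <linux/slab.h>

static int
make_exclusive(struct dma_resv *obj)
{
	struct dma_fence **fences;
	struct dma_fence_array *array;
	unsigned int count;
	int r;

	r = dma_resv_get_fences_rcu(obj, NULL, &count, &fences);
	if (r)
		return r;
	if (count == 0)
		return 0;
	if (count == 1) {
		/* one shared fence: promote it directly */
		dma_resv_add_excl_fence(obj, fences[0]);
		dma_fence_put(fences[0]);
		kfree(fences);
		return 0;
	}
	/* several: wrap them; the array consumes the references */
	array = dma_fence_array_create(count, fences,
	    dma_fence_context_alloc(1), 0, false);
	if (array == NULL) {
		while (count--)
			dma_fence_put(fences[count]);
		kfree(fences);
		return -ENOMEM;
	}
	dma_resv_add_excl_fence(obj, &array->base);
	dma_fence_put(&array->base);
	return 0;
}
|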
amdgpu_ids.c |
    111 * amdgpu_pasid_free_delayed - free pasid when fences signal
    113 * @resv: reservation object with the fences to wait for
    116 * Free the pasid only after all the fences in resv are signaled.
    121 struct dma_fence *fence, **fences; local in function:amdgpu_pasid_free_delayed
    126 r = dma_resv_get_fences_rcu(resv, NULL, &count, &fences);
    136 fence = fences[0];
    137 kfree(fences);
    142 array = dma_fence_array_create(count, fences, context,
    145 kfree(fences);
    168 * block for all the fences to complete
    215 struct dma_fence **fences; local in function:amdgpu_vmid_grab_idle [all...]
|
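amdgpu_pasid_free_delayed defers the actual free until every fence in the reservation object has signaled, again collapsing multiple fences into one dma_fence_array. A sketch of the final step, deferring work with dma_fence_add_callback(); struct free_cb and do_free() are hypothetical:

#include <linux/dma-fence.h>
#include <linux/slab.h>

struct free_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

static void do_free(u32 pasid);	/* hypothetical payload */

static void
free_cb_func(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
	struct free_cb *cb = container_of(_cb, struct free_cb, cb);

	do_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}

static void
free_when_signaled(struct dma_fence *fence, u32 pasid)
{
	struct free_cb *cb = kmalloc(sizeof(*cb), GFP_KERNEL);

	if (cb == NULL) {
		/* no memory: fall back to a blocking wait */
		dma_fence_wait(fence, false);
		dma_fence_put(fence);
		do_free(pasid);
		return;
	}
	cb->pasid = pasid;
	/* nonzero return means the fence already signaled */
	if (dma_fence_add_callback(fence, &cb->cb, free_cb_func))
		free_cb_func(fence, &cb->cb);
}
|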
/src/sys/external/bsd/drm2/dist/drm/radeon/ |
radeon_sa.c |
    262 struct radeon_fence **fences,
    292 fences[i] = sa_bo->fence;
    331 struct radeon_fence *fences[RADEON_NUM_RINGS]; local in function:radeon_sa_bo_new
    354 fences[i] = NULL;
    372 } while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
    375 radeon_fence_ref(fences[i]);
    379 r = radeon_fence_wait_any(rdev, fences, false);
    381 radeon_fence_unref(&fences[i]);
    390 r = radeon_fence_wait_any(rdev, fences, false);
    392 radeon_fence_unref(&fences[i]) [all...]
|
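radeon_sa_bo_new follows the same shape with the driver's own fence type: one candidate per ring, referenced across the wait and unreferenced afterwards. A sketch using the radeon helpers named in the matches above; wait_any_ring is a hypothetical wrapper and the surrounding driver context is assumed:

/* Assumes 'rdev' and a populated fences[] as in radeon_sa_bo_new;
 * radeon_fence_unref() tolerates NULL entries. */
static int
wait_any_ring(struct radeon_device *rdev,
    struct radeon_fence *fences[RADEON_NUM_RINGS])
{
	int i, r;

	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		if (fences[i])
			radeon_fence_ref(fences[i]);

	r = radeon_fence_wait_any(rdev, fences, false);

	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		radeon_fence_unref(&fences[i]);

	return r;
}
|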