/src/sys/external/bsd/drm2/dist/include/drm/ttm/
ttm_execbuf_util.h:
    44  * @bo: refcounted buffer object pointer.
    50  struct ttm_buffer_object *bo;  (member in struct:ttm_validate_buffer)
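
A minimal sketch of putting one BO on an execbuf validation list through the member shown above, assuming the ttm_validate_buffer layout of this kernel vintage (head, bo, num_shared); add_to_validate_list is a hypothetical helper, not part of the header:

    #include <drm/ttm/ttm_execbuf_util.h>

    static void add_to_validate_list(struct ttm_buffer_object *tbo,
                                     struct ttm_validate_buffer *tv,
                                     struct list_head *list)
    {
            /* Per the comment at line 44 the pointer is refcounted: the
             * caller must hold a reference while the entry is listed. */
            tv->bo = tbo;
            tv->num_shared = 1;     /* reserve one shared fence slot */
            list_add(&tv->head, list);
    }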
|
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_mn.c:
    72   struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);  (local)
    73   struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    83   r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
    87   DRM_ERROR("(%ld) failed to wait for user bo\n", r);
    102  * We temporarily evict the BO attached to this range. This necessitates
    109  struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);  (local)
    110  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    119  amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
    130  * amdgpu_mn_register - register a BO for notifier update
    [all...]
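
The callbacks at lines 72 and 109 recover the amdgpu_bo from the mmu_interval_notifier embedded in it. A hedged sketch of the same container_of() pattern, with my_bo and my_invalidate as hypothetical stand-ins:

    #include <linux/kernel.h>        /* container_of */
    #include <linux/mmu_notifier.h>

    struct my_bo {
            struct mmu_interval_notifier notifier;  /* embedded, as in amdgpu_bo */
            /* ... the rest of the driver object ... */
    };

    static bool my_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
    {
            /* Turn the callback argument back into the enclosing object. */
            struct my_bo *bo = container_of(mni, struct my_bo, notifier);

            /* ... wait out pending work on bo, then allow the unmap ... */
            return true;
    }
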
amdgpu_gart.h:
    49  struct amdgpu_bo *bo;  (member in struct:amdgpu_gart)
|
amdgpu_bo_list.c:
    61   struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);  (local)
    63   amdgpu_bo_unref(&bo);
    102  struct amdgpu_bo *bo;  (local)
    115  bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
    118  usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
    121  amdgpu_bo_unref(&bo);
    126  amdgpu_bo_unref(&bo);
    138  entry->tv.bo = &bo->tbo;
    161  struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);  (local)
    166  struct amdgpu_bo *bo = ttm_to_amdgpu_bo(array[i].tv.bo);  (local)
    221  struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);  (local)
    [all...]
amdgpu_dma_buf.c:
    49   * @obj: GEM BO
    51   * Sets up an in-kernel virtual mapping of the BO's memory.
    58   struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  (local)
    61   ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
    62   &bo->dma_buf_vmap);
    66   return bo->dma_buf_vmap.virtual;
    71   * @obj: GEM BO
    74   * Tears down the in-kernel virtual mapping of the BO's memory.
    78   struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  (local)
    198  struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  (local)
    242  struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  (local)
    272  struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  (local)
    310  struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  (local)
    334  struct amdgpu_bo *bo = gem_to_amdgpu_bo(gem);  (local)
    385  struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);  (local)
    416  struct amdgpu_bo *bo;  (local)
    [all...]
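
A sketch of the vmap step at lines 61-66, under the TTM API of this vintage (ttm_buffer_object still carries num_pages): map the whole BO and return its kernel virtual address. map_whole_bo is a hypothetical helper, not part of the driver.

    #include <linux/err.h>
    #include <drm/ttm/ttm_bo_api.h>

    static void *map_whole_bo(struct ttm_buffer_object *tbo,
                              struct ttm_bo_kmap_obj *map)
    {
            bool is_iomem;
            int ret;

            ret = ttm_bo_kmap(tbo, 0, tbo->num_pages, map);
            if (ret)
                    return ERR_PTR(ret);
            /* The teardown side pairs this with ttm_bo_kunmap(map). */
            return ttm_kmap_obj_virtual(map, &is_iomem);
    }
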
amdgpu_ttm.h:
    80   struct ttm_buffer_object *bo;  (member in struct:amdgpu_copy_mem)
    92   u64 amdgpu_vram_mgr_bo_visible_size(struct amdgpu_bo *bo);
    113  int amdgpu_fill_buffer(struct amdgpu_bo *bo,
    124  int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo);
    128  int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages);
    131  static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo,
|
/src/sys/external/bsd/drm2/dist/drm/nouveau/
nv10_fence.h:
    17  struct nouveau_bo *bo;  (member in struct:nv10_fence_priv)
|
nouveau_bo.h:
    19  struct ttm_buffer_object bo;  (member in struct:nouveau_bo)
    52  nouveau_bo(struct ttm_buffer_object *bo)
    54  return container_of(bo, struct nouveau_bo, bo);
    67  ttm_bo_get(&ref->bo);
    68  *pnvbo = nouveau_bo(&ref->bo);
    73  ttm_bo_put(&prev->bo);
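
Lines 67-73 show the reference-swap pattern of a nouveau_bo_ref()-style helper: take a reference on the new object before dropping the old one, through the embedded ttm_buffer_object. A minimal sketch of the same pattern (swap_nvbo_ref is a hypothetical name):

    static void swap_nvbo_ref(struct nouveau_bo *next, struct nouveau_bo **slot)
    {
            struct nouveau_bo *prev = *slot;

            if (next)
                    ttm_bo_get(&next->bo);  /* ref the embedded TTM object */
            *slot = next;
            if (prev)
                    ttm_bo_put(&prev->bo);  /* may free prev */
    }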
|
nouveau_fence.h:
    95  struct nouveau_bo *bo;  (member in struct:nv84_fence_priv)
|
/src/sys/external/bsd/drm2/dist/drm/qxl/
qxl_prime.c:
    38  struct qxl_bo *bo = gem_to_qxl_bo(obj);  (local)
    40  return qxl_bo_pin(bo);
    45  struct qxl_bo *bo = gem_to_qxl_bo(obj);  (local)
    47  qxl_bo_unpin(bo);
    64  struct qxl_bo *bo = gem_to_qxl_bo(obj);  (local)
    68  ret = qxl_bo_kmap(bo, &ptr);
    77  struct qxl_bo *bo = gem_to_qxl_bo(obj);  (local)
    79  qxl_bo_kunmap(bo);
|
qxl_debugfs.c:
    62  struct qxl_bo *bo;  (local)
    64  list_for_each_entry(bo, &qdev->gem.objects, list) {
    69  fobj = rcu_dereference(bo->tbo.base.resv->fence);
    74  (unsigned long)bo->tbo.base.size,
    75  bo->pin_count, rel);
|
qxl_object.c:
    37   struct qxl_bo *bo;  (local)
    40   bo = to_qxl_bo(tbo);
    41   qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
    43   qxl_surface_evict(qdev, bo, false);
    44   WARN_ON_ONCE(bo->map_count > 0);
    46   list_del_init(&bo->list);
    48   drm_gem_object_release(&bo->tbo.base);
    49   kfree(bo);
    52   bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
    54   if (bo->destroy == &qxl_ttm_bo_destroy)
    108  struct qxl_bo *bo;  (local)
    321  struct qxl_bo *bo, *n;  (local)
    [all...]
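
Lines 52-54 show the ownership test: a driver recognizes its own TTM objects by comparing the destroy callback it installed at creation time. A sketch of the same check under hypothetical names (my_bo_destroy is assumed to be the destroy function the driver passed in when creating the BO):

    /* destroy callback installed elsewhere by the driver at BO creation */
    void my_bo_destroy(struct ttm_buffer_object *tbo);

    static bool is_my_bo(struct ttm_buffer_object *tbo)
    {
            return tbo->destroy == &my_bo_destroy;
    }
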
/src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_mn.c:
    57  struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);  (local)
    61  if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
    67  r = radeon_bo_reserve(bo, true);
    69  DRM_ERROR("(%ld) failed to reserve user bo\n", r);
    73  r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
    76  DRM_ERROR("(%ld) failed to wait for user bo\n", r);
    78  radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
    79  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    [all...]
radeon_prime.c:
    41   struct radeon_bo *bo = gem_to_radeon_bo(obj);  (local)
    42   int npages = bo->tbo.num_pages;
    44   return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
    49   struct radeon_bo *bo = gem_to_radeon_bo(obj);  (local)
    52   ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
    53   &bo->dma_buf_vmap);
    57   return bo->dma_buf_vmap.virtual;
    62   struct radeon_bo *bo = gem_to_radeon_bo(obj);  (local)
    64   ttm_bo_kunmap(&bo->dma_buf_vmap);
    73   struct radeon_bo *bo;  (local)
    93   struct radeon_bo *bo = gem_to_radeon_bo(obj);  (local)
    111  struct radeon_bo *bo = gem_to_radeon_bo(obj);  (local)
    128  struct radeon_bo *bo = gem_to_radeon_bo(gobj);  (local)
    [all...]
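
A sketch of the get_sg_table step at lines 41-44: hand a dma-buf importer a scatter/gather table built from the TTM page array, using the two-argument drm_prime_pages_to_sg() of this kernel vintage (later kernels add a drm_device argument). export_bo_pages is a hypothetical name.

    #include <drm/drm_prime.h>
    #include <drm/ttm/ttm_bo_api.h>

    static struct sg_table *export_bo_pages(struct ttm_buffer_object *tbo)
    {
            int npages = tbo->num_pages;

            return drm_prime_pages_to_sg(tbo->ttm->pages, npages);
    }
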
/src/sys/external/bsd/drm2/dist/drm/ttm/
ttm_execbuf_util.c:
    45   struct ttm_buffer_object *bo = entry->bo;  (local)
    47   dma_resv_unlock(bo->base.resv);
    61   struct ttm_buffer_object *bo = entry->bo;  (local)
    63   ttm_bo_move_to_lru_tail(bo, NULL);
    64   dma_resv_unlock(bo->base.resv);
    99   struct ttm_buffer_object *bo = entry->bo;  (local)
    101  ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
    172  struct ttm_buffer_object *bo = entry->bo;  (local)
    [all...]
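
A hedged sketch of the usual caller sequence for these helpers, assuming the four-argument ttm_eu_reserve_buffers() of this vintage: reserve every BO on a validation list, do the work, then fence (which also unlocks), or back off on error. submit is a hypothetical name.

    #include <drm/ttm/ttm_execbuf_util.h>

    static int submit(struct ww_acquire_ctx *ticket, struct list_head *list,
                      struct dma_fence *fence)
    {
            int ret;

            ret = ttm_eu_reserve_buffers(ticket, list, true, NULL);
            if (ret)
                    return ret;     /* nothing left reserved on failure */

            /* ... validate BOs, build and submit the command stream;
             * on error call ttm_eu_backoff_reservation(ticket, list) ... */

            /* Attaches the fence to every reservation and unlocks. */
            ttm_eu_fence_buffer_objects(ticket, list, fence);
            return 0;
    }
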
ttm_bo_vm.c:
    50   static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
    56   if (likely(!bo->moving))
    62   if (dma_fence_is_signaled(bo->moving))
    74   ttm_bo_get(bo);
    76   (void) dma_fence_wait(bo->moving, true);
    77   dma_resv_unlock(bo->base.resv);
    78   ttm_bo_put(bo);
    85   err = dma_fence_wait(bo->moving, true);
    93   dma_fence_put(bo->moving);
    94   bo->moving = NULL;
    187  struct ttm_buffer_object *bo = vma->vm_private_data;  (local)
    334  struct ttm_buffer_object *bo = vma->vm_private_data;  (local)
    354  struct ttm_buffer_object *bo = vma->vm_private_data;  (local)
    364  struct ttm_buffer_object *bo = vma->vm_private_data;  (local)
    414  struct ttm_buffer_object *bo = vma->vm_private_data;  (local)
    461  struct ttm_buffer_object *bo = NULL;  (local)
    506  struct ttm_buffer_object *bo;  (local)
    [all...]

/src/sys/external/bsd/drm2/dist/drm/virtio/
virtgpu_object.c:
    71   struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);  (local)
    72   struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
    74   if (bo->pages)
    75   virtio_gpu_object_detach(vgdev, bo);
    76   if (bo->created)
    77   virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
    78   virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
    100  struct virtio_gpu_object *bo;  (local)
    102  bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    117  struct virtio_gpu_object *bo;  (local)
    [all...]
virtgpu_plane.c:
    115  struct virtio_gpu_object *bo =  (local)
    128  virtio_gpu_array_add_obj(objs, &bo->base.base);
    140  struct virtio_gpu_object *bo;  (local)
    164  bo = gem_to_virtio_gpu_obj(plane->state->fb->obj[0]);
    165  if (bo->dumb)
    174  bo->hw_res_handle,
    182  bo->hw_res_handle,
    189  virtio_gpu_cmd_resource_flush(vgdev, bo->hw_res_handle,
    204  struct virtio_gpu_object *bo;  (local)
    210  bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
    242  struct virtio_gpu_object *bo = NULL;  (local)
    [all...]

/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/engine/gr/
nouveau_nvkm_engine_gr_ctxgp100.c:
    60  u32 bo = ao + size;  (local)
    86  mmio_wr32(info, o + 0xf4, bo);
    88  bo += grctx->attrib_nr_max * gr->ppc_tpc_max;
|
nouveau_nvkm_engine_gr_ctxgp102.c:
    56  u32 bo = ao + size;  (local)
    85  mmio_wr32(info, o + 0xf4, bo);
    87  bo += gs;
|
nouveau_nvkm_engine_gr_ctxgf108.c:
    752  u32 bo = 0;  (local)
    753  u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
    767  mmio_skip(info, o + 0x20, (t << 28) | (b << 16) | ++bo);
    768  mmio_wr32(info, o + 0x20, (t << 28) | (b << 16) | --bo);
    769  bo += grctx->attrib_nr_max;
|
nouveau_nvkm_engine_gr_ctxgf117.c:
    260  u32 bo = 0;  (local)
    261  u32 ao = bo + grctx->attrib_nr_max * gr->tpc_total;
    277  mmio_skip(info, o + 0xc0, (t << 28) | (b << 16) | ++bo);
    278  mmio_wr32(info, o + 0xc0, (t << 28) | (b << 16) | --bo);
    279  bo += grctx->attrib_nr_max * gr->ppc_tpc_nr[gpc][ppc];
|
nouveau_nvkm_engine_gr_ctxgv100.c:
    77   u32 bo = ao + size;  (local)
    103  mmio_wr32(info, o + 0xf4, bo);
    105  bo += gs;
|
/src/usr.sbin/extattrctl/
extattrctl.c:
    169  const char *bo;  (local)
    197  bo = needswap ? "big-endian" : "little-endian";
    199  bo = needswap ? "little-endian" : "big-endian";
    203  argv[0], rw32(uef.uef_version), rw32(uef.uef_size), bo);
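
Here bo is a human-readable byte-order label, not a buffer object: on a little-endian host, a backing file that needs swapping is big-endian, and vice versa, which is why lines 197 and 199 mirror each other. A sketch of the same selection as a standalone helper (byteorder_label is a hypothetical name; the real code chooses between the two assignments at compile time):

    #include <sys/endian.h>

    static const char *
    byteorder_label(int needswap)
    {
    #if BYTE_ORDER == LITTLE_ENDIAN
            return needswap ? "big-endian" : "little-endian";
    #else
            return needswap ? "little-endian" : "big-endian";
    #endif
    }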
|
/src/sys/external/bsd/drm2/ttm/
ttm_bo_vm.c:
    83   struct ttm_buffer_object *const bo = container_of(uobj,  (local)
    86   (void)ttm_bo_get(bo);
    92   struct ttm_buffer_object *bo = container_of(uobj,  (local)
    95   ttm_bo_put(bo);
    99   ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
    103  if (__predict_true(!bo->moving))
    109  if (dma_fence_is_signaled(bo->moving))
    119  ttm_bo_get(bo);
    121  (void) dma_fence_wait(bo->moving, true);
    122  dma_resv_unlock(bo->base.resv);
    173  struct ttm_buffer_object *const bo = container_of(uobj,  (local)
    316  struct ttm_buffer_object *const bo = container_of(uobj,  (local)
    354  struct ttm_buffer_object *bo;  (local)
    410  struct ttm_buffer_object *bo = NULL;  (local)
    [all...]