/src/sys/external/bsd/drm2/dist/drm/qxl/

qxl_object.h
     36  r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
     39  struct drm_device *ddev = bo->tbo.base.dev;
     50  ttm_bo_unreserve(&bo->tbo);
     55  return bo->tbo.offset;
     60  return bo->tbo.num_pages << PAGE_SHIFT;
     65  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);
     73  r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
     76  struct drm_device *ddev = bo->tbo.base.dev;
     84  *mem_type = bo->tbo.mem.mem_type;
     86  r = ttm_bo_wait(&bo->tbo, true, no_wait)
    [all...]

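The qxl_object.h hits are qxl's thin inline wrappers around TTM. A minimal sketch of that wrapper pattern, assuming the ~Linux 5.6 TTM API this tree tracks; the *_sketch names are illustrative, not the in-tree ones:

	/* Reserve (lock) the underlying TTM bo: interruptible wait,
	 * optional no-wait mode, no ww_acquire_ctx. */
	static inline int qxl_bo_reserve_sketch(struct qxl_bo *bo, bool no_wait)
	{
		return ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
	}

	static inline void qxl_bo_unreserve_sketch(struct qxl_bo *bo)
	{
		ttm_bo_unreserve(&bo->tbo);
	}

	/* TTM tracks the backing store in pages; callers want bytes. */
	static inline unsigned long qxl_bo_size_sketch(struct qxl_bo *bo)
	{
		return bo->tbo.num_pages << PAGE_SHIFT;
	}
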
qxl_object.c
     35  static void qxl_ttm_bo_destroy(struct ttm_buffer_object *tbo)
     40  bo = to_qxl_bo(tbo);
     41  qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
     48  drm_gem_object_release(&bo->tbo.base);
     67  if (qbo->tbo.base.size <= PAGE_SIZE)
    121  r = drm_gem_object_init(&qdev->ddev, &bo->tbo.base, size);
    126  bo->tbo.base.funcs = &qxl_object_funcs;
    137  r = ttm_bo_init(&qdev->mman.bdev, &bo->tbo, size, type,
    162  r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap)
    [all...]

qxl_gem.c
     40  struct ttm_buffer_object *tbo;    (local in function qxl_gem_object_free)
     46  tbo = &qobj->tbo;
     47  ttm_bo_put(tbo);
     71  *obj = &qbo->tbo.base;

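The qxl_gem.c hits show GEM's free hook deferring to TTM's reference count; a sketch of that shape, assuming TTM's destroy callback (qxl_ttm_bo_destroy above) does the actual teardown:

	static void qxl_gem_object_free_sketch(struct drm_gem_object *gobj)
	{
		struct qxl_bo *qobj = gem_to_qxl_bo(gobj);

		/* Dropping the last TTM reference frees the bo through
		 * qxl_ttm_bo_destroy(); GEM itself owns nothing extra here. */
		ttm_bo_put(&qobj->tbo);
	}
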
qxl_debugfs.c
     69  fobj = rcu_dereference(bo->tbo.base.resv->fence);
     74  (unsigned long)bo->tbo.base.size,

qxl_drv.h
     77  struct ttm_buffer_object tbo;    (member in struct qxl_bo)
     81  /* Protected by tbo.reserved */
     99  #define gem_to_qxl_bo(gobj) container_of((gobj), struct qxl_bo, tbo.base)
    100  #define to_qxl_bo(tobj) container_of((tobj), struct qxl_bo, tbo)
    313  (bo->tbo.mem.mem_type == TTM_PL_VRAM)
    316  WARN_ON_ONCE((bo->tbo.offset & slot->gpu_offset) != slot->gpu_offset);
    318  /* TODO - need to hold one of the locks to read tbo.offset */
    319  return slot->high_bits | (bo->tbo.offset - slot->gpu_offset + offset);

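qxl_drv.h is where the member itself lives: struct qxl_bo embeds a struct ttm_buffer_object as tbo, and since the GEM object moved into TTM (tbo.base), both back-pointers are plain container_of(). A sketch of the embedding, with illustrative names:

	#include <linux/kernel.h>		/* container_of() */
	#include <drm/drm_gem.h>
	#include <drm/ttm/ttm_bo_api.h>

	struct qxl_bo_sketch {			/* stand-in for struct qxl_bo */
		struct ttm_buffer_object tbo;	/* tbo.base is the GEM object */
		/* driver-private fields follow */
	};

	/* TTM callbacks hand back &bo->tbo; recover the wrapper. */
	static inline struct qxl_bo_sketch *
	to_qxl_bo_sketch(struct ttm_buffer_object *tobj)
	{
		return container_of(tobj, struct qxl_bo_sketch, tbo);
	}

	/* GEM callbacks hand back &bo->tbo.base; same trick, one member deeper. */
	static inline struct qxl_bo_sketch *
	gem_to_qxl_bo_sketch(struct drm_gem_object *gobj)
	{
		return container_of(gobj, struct qxl_bo_sketch, tbo.base);
	}
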
/src/sys/external/bsd/drm2/dist/drm/radeon/

radeon_mn.c
     61  if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
     73  r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
     79  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

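These hits are radeon's MMU-notifier invalidate path: skip bos whose ttm is not bound, wait out every fence on the reservation object, then revalidate into the placement the caller set up. A sketch, assuming the pre-5.9 dma_resv_*_rcu API used throughout this tree:

	static void radeon_mn_invalidate_sketch(struct radeon_bo *bo)
	{
		struct ttm_operation_ctx ctx = { false, false };
		long r;

		/* only bos actually bound through the GPU's TT need flushing */
		if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
			return;

		/* wait for every fence, shared and exclusive, uninterruptibly */
		r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
					      MAX_SCHEDULE_TIMEOUT);
		if (r <= 0)
			DRM_ERROR("(%ld) failed to wait for user bo\n", r);

		/* the caller switched bo->placement to system RAM beforehand */
		if (ttm_bo_validate(&bo->tbo, &bo->placement, &ctx))
			DRM_ERROR("failed to validate user bo\n");
	}
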
radeon_prime.c
     42  int npages = bo->tbo.num_pages;
     44  return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
     52  ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
     88  return &bo->tbo.base;
    129  if (radeon_ttm_tt_has_userptr(bo->tbo.ttm))

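The radeon_prime.c hits export the TTM backing pages for dma-buf sharing; a sketch, assuming the two-argument drm_prime_pages_to_sg() of this kernel generation:

	static struct sg_table *radeon_prime_get_sg_sketch(struct radeon_bo *bo)
	{
		int npages = bo->tbo.num_pages;

		/* Wrap the ttm_tt page array in a scatter/gather table for
		 * the importer; the pages themselves stay owned by TTM. */
		return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	}
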
radeon_object.h
     70  r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
     81  ttm_bo_unreserve(&bo->tbo);
     95  return bo->tbo.offset;
    100  return bo->tbo.num_pages << PAGE_SHIFT;
    105  return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
    110  return (bo->tbo.mem.page_alignment << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
    121  return drm_vma_node_offset_addr(&bo->tbo.base.vma_node);

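radeon's wrappers add one wrinkle over qxl's: the GPU page size (RADEON_GPU_PAGE_SIZE, 4096) is fixed, while PAGE_SIZE is whatever the host uses, so sizes are converted explicitly. A sketch of the conversion helpers, with illustrative names:

	static inline unsigned long radeon_bo_size_sketch(struct radeon_bo *bo)
	{
		return bo->tbo.num_pages << PAGE_SHIFT;		/* bytes */
	}

	/* On a 16K-page host one CPU page spans four 4K GPU pages, so the
	 * two page counts differ; GART code wants GPU pages. */
	static inline unsigned radeon_bo_ngpu_pages_sketch(struct radeon_bo *bo)
	{
		return (bo->tbo.num_pages << PAGE_SHIFT) / RADEON_GPU_PAGE_SIZE;
	}
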
radeon_object.c
     64  u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
     82  static void radeon_ttm_bo_destroy(struct ttm_buffer_object *tbo)
     86  bo = container_of(tbo, struct radeon_bo, tbo);
     88  radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
     95  if (bo->tbo.base.import_attach)
     96  drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
     97  drm_gem_object_release(&bo->tbo.base);
    219  drm_gem_private_object_init(rdev->ddev, &bo->tbo.base, size)
    326  struct ttm_buffer_object *tbo;    (local in function radeon_bo_unref)
    [all...]

radeon_gem.c
     94  *obj = &robj->tbo.base;
    127  r = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
    343  r = radeon_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
    370  r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    444  if (radeon_ttm_tt_has_userptr(robj->tbo.ttm)) {
    476  r = dma_resv_test_signaled_rcu(robj->tbo.base.resv, true);
    482  cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
    505  ret = dma_resv_wait_timeout_rcu(robj->tbo.base.resv, true, true, 30 * HZ);
    512  cur_placement = READ_ONCE(robj->tbo.mem.mem_type);
    583  tv.bo = &bo_va->bo->tbo;
    [all...]

radeon_benchmark.c
    130  dobj->tbo.base.resv);
    141  dobj->tbo.base.resv);

/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/

amdgpu_object.c
     71  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     73  if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
     77  } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
     82  static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
     84  struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
     85  struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
     92  if (bo->tbo.base.import_attach)
     93  drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
     94  drm_gem_object_release(&bo->tbo.base)
    860  struct ttm_buffer_object *tbo;    (local in function amdgpu_bo_unref)
    [all...]

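The amdgpu_bo_destroy() hits show the common TTM destroy-callback shape: recover the wrapper from the embedded tbo, undo the prime import if there was one, release the GEM object, free. A trimmed sketch (the amdgpu-specific accounting at lines 71-77 is elided):

	static void amdgpu_bo_destroy_sketch(struct ttm_buffer_object *tbo)
	{
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);

		/* Imported dma-bufs carry an sg table and an attachment
		 * that must be dropped before the GEM object goes away. */
		if (bo->tbo.base.import_attach)
			drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);

		drm_gem_object_release(&bo->tbo.base);
		kfree(bo);
	}
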
amdgpu_object.h
     86  /* Protected by tbo.reserved */
     91  struct ttm_buffer_object tbo;    (member in struct amdgpu_bo)
    119  static inline struct amdgpu_bo *ttm_to_amdgpu_bo(struct ttm_buffer_object *tbo)
    121  return container_of(tbo, struct amdgpu_bo, tbo);
    162  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    165  r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
    176  ttm_bo_unreserve(&bo->tbo);
    181  return bo->tbo.num_pages << PAGE_SHIFT;
    186  return (bo->tbo.num_pages << PAGE_SHIFT) / AMDGPU_GPU_PAGE_SIZE
    [all...]

amdgpu_dma_buf.c
     61  ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
    108  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    126  if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
    199  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    221  r = __dma_resv_make_exclusive(bo->tbo.base.resv);
    243  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    280  sgt = drm_prime_pages_to_sg(bo->tbo.ttm->pages, bo->tbo.num_pages);
    335  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev)
    [all...]

amdgpu_mn.c
     73  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     83  r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
    110  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

amdgpu_gtt_mgr.c
     40  struct ttm_buffer_object *tbo;    (member in struct amdgpu_gtt_node)
    180  * @tbo: TTM BO we need this range for
    187  struct ttm_buffer_object *tbo,
    231  * @tbo: TTM BO we need this range for
    238  struct ttm_buffer_object *tbo,
    247  if ((&tbo->mem == mem || tbo->mem.mem_type != TTM_PL_TT) &&
    263  node->tbo = tbo;
    267  r = amdgpu_gtt_mgr_alloc(man, tbo, place, mem)
    [all...]

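Here tbo appears as a pointer member, not an embedded object: the GTT manager's per-allocation node remembers which bo it serves so a real GART address can be assigned later, at bind time. A sketch of the node, matching the hit at line 40:

	#include <drm/drm_mm.h>

	struct amdgpu_gtt_node_sketch {
		struct drm_mm_node node;	/* GTT address-space range */
		struct ttm_buffer_object *tbo;	/* bo occupying that range */
	};
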
amdgpu_gem.c
     95  *obj = &bo->tbo.base;
    132  struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
    143  mm = amdgpu_ttm_tt_get_usermm(abo->tbo.ttm);
    152  abo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
    173  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    187  tv.bo = &bo->tbo;
    270  resv = vm->root.base.bo->tbo.base.resv;
    336  r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
    347  r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages)
    [all...]

amdgpu_amdkfd_gpuvm.c
    206  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    226  * NOTE: Must be called with BO reserved i.e. bo->tbo.resv->lock held.
    231  struct dma_resv *resv = bo->tbo.base.resv;
    290  if (WARN(amdgpu_ttm_tt_get_usermm(bo->tbo.ttm),
    296  ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    323  struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
    359  struct amdgpu_device *adev = amdgpu_ttm_adev(pd->tbo.bdev);
    371  struct amdgpu_device *bo_adev = amdgpu_ttm_adev(mem->bo->tbo.bdev);
    423  unsigned long bo_size = bo->tbo.mem.size;
    494  entry->bo = &bo->tbo;
    [all...]

amdgpu_gmc.c
     53  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
     56  switch (bo->tbo.mem.mem_type) {
     58  ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
     72  *flags = amdgpu_ttm_tt_pde_flags(bo->tbo.ttm, &bo->tbo.mem);
     82  struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    134  * @tbo: TTM BO which needs the address, must be in GTT domain

amdgpu_gem.h
     36  #define gem_to_amdgpu_bo(gobj) container_of((gobj), struct amdgpu_bo, tbo.base)

amdgpu_vm.c
    239  if (bo->tbo.type == ttm_bo_type_kernel)
    338  if (bo->tbo.base.resv != vm->root.base.bo->tbo.base.resv)
    342  if (bo->tbo.type == ttm_bo_type_kernel && bo->parent)
    348  amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
    596  entry->tv.bo = &vm->root.base.bo->tbo;
    628  if (abo->tbo.base.resv == vm->root.base.bo->tbo.base.resv)
    663  ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
    665  ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
    [all...]

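The hits at lines 663-665 are the per-VM LRU bump: every bo the VM owns, plus its shadow, is moved to the LRU tail through one ttm_lru_bulk_move so the whole working set stays together and can be spliced cheaply. A sketch, assuming the caller holds the global LRU spinlock as TTM requires:

	static void amdgpu_vm_lru_bump_sketch(struct amdgpu_vm *vm,
					      struct amdgpu_bo *bo)
	{
		/* caller holds ttm_bo_glob.lru_lock */
		ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
		if (bo->shadow)
			ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
						&vm->lru_bulk_move);
	}
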
amdgpu_benchmark.c
    106  r = amdgpu_ttm_alloc_gart(&sobj->tbo);
    125  r = amdgpu_ttm_alloc_gart(&dobj->tbo);

amdgpu_csa.c
     82  csa_tv.bo = &bo->tbo;

/src/sys/external/bsd/drm2/amdgpu/

amdgpufb.c
    169  const unsigned num_pages __diagused = rbo->tbo.num_pages;
    174  KASSERT(rbo->tbo.mem.bus.is_iomem);
    176  if (ISSET(rbo->tbo.mem.placement, TTM_PL_FLAG_WC))
    179  return bus_space_mmap(rbo->tbo.bdev->memt, rbo->tbo.mem.bus.base,
    180  rbo->tbo.mem.bus.offset + offset, prot, flags);

/src/sys/external/bsd/drm2/radeon/

radeondrmkmsfb.c
    174  const unsigned num_pages __diagused = rbo->tbo.num_pages;
    177  KASSERT(rbo->tbo.mem.bus.is_iomem);
    179  if (ISSET(rbo->tbo.mem.placement, TTM_PL_FLAG_WC))
    182  return bus_space_mmap(rbo->tbo.bdev->memt,
    183  rbo->tbo.mem.bus.base, rbo->tbo.mem.bus.offset + offset,

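The two NetBSD framebuffer wrappers (amdgpufb.c above and radeondrmkmsfb.c here) are near-identical mmap glue: assert that the pinned bo's aperture is iomem, then hand the bus-space offset to bus_space_mmap(). A sketch of the radeon variant; the prefetchable-flag choice for write-combined placements is an assumption about the elided lines:

	static paddr_t
	radeonfb_mmap_sketch(struct radeon_bo *rbo, off_t offset, int prot)
	{
		int flags = 0;

		KASSERT(rbo->tbo.mem.bus.is_iomem);

		/* assumed: WC placement becomes a prefetchable mapping */
		if (ISSET(rbo->tbo.mem.placement, TTM_PL_FLAG_WC))
			flags |= BUS_SPACE_MAP_PREFETCHABLE;

		return bus_space_mmap(rbo->tbo.bdev->memt,
		    rbo->tbo.mem.bus.base, rbo->tbo.mem.bus.offset + offset,
		    prot, flags);
	}
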