/src/sys/external/bsd/drm2/dist/drm/qxl/
qxl_object.h
    32 static inline int qxl_bo_reserve(struct qxl_bo *bo, bool no_wait)
    36         r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL);
    39                 struct drm_device *ddev = bo->tbo.base.dev;
    41                 dev_err(ddev->dev, "%p reserve failed\n", bo);
    48 static inline void qxl_bo_unreserve(struct qxl_bo *bo)
    50         ttm_bo_unreserve(&bo->tbo);
    53 static inline u64 qxl_bo_gpu_offset(struct qxl_bo *bo)
    55         return bo->tbo.offset;
    58 static inline unsigned long qxl_bo_size(struct qxl_bo *bo)
    60         return bo->tbo.num_pages << PAGE_SHIFT;
    [all...]
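The reserve/unreserve helpers above bracket every access that may change a BO's state. A minimal usage sketch, assuming only the helpers shown in this header (the caller itself is hypothetical):

    /* Reserve a qxl BO, query its size, then unreserve. */
    static int example_query_size(struct qxl_bo *bo, unsigned long *sizep)
    {
        int r;

        r = qxl_bo_reserve(bo, false); /* false: wait rather than fail */
        if (r)
            return r;                  /* e.g. -ERESTARTSYS on a signal */
        *sizep = qxl_bo_size(bo);      /* num_pages << PAGE_SHIFT */
        qxl_bo_unreserve(bo);
        return 0;
    }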
qxl_object.c
    37         struct qxl_bo *bo;      /* local in function:qxl_ttm_bo_destroy */
    40         bo = to_qxl_bo(tbo);
    41         qdev = (struct qxl_device *)bo->tbo.base.dev->dev_private;
    43         qxl_surface_evict(qdev, bo, false);
    44         WARN_ON_ONCE(bo->map_count > 0);
    46         list_del_init(&bo->list);
    48         drm_gem_object_release(&bo->tbo.base);
    49         kfree(bo);
    52 bool qxl_ttm_bo_is_qxl_bo(struct ttm_buffer_object *bo)
    54         if (bo->destroy == &qxl_ttm_bo_destroy)
    108        struct qxl_bo *bo;      /* local in function:qxl_bo_create */
    321        struct qxl_bo *bo, *n;  /* local in function:qxl_bo_force_delete */
    [all...]
qxl_prime.c
    38         struct qxl_bo *bo = gem_to_qxl_bo(obj);  /* local in function:qxl_gem_prime_pin */
    40         return qxl_bo_pin(bo);
    45         struct qxl_bo *bo = gem_to_qxl_bo(obj);  /* local in function:qxl_gem_prime_unpin */
    47         qxl_bo_unpin(bo);
    64         struct qxl_bo *bo = gem_to_qxl_bo(obj);  /* local in function:qxl_gem_prime_vmap */
    68         ret = qxl_bo_kmap(bo, &ptr);
    77         struct qxl_bo *bo = gem_to_qxl_bo(obj);  /* local in function:qxl_gem_prime_vunmap */
    79         qxl_bo_kunmap(bo);
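qxl's prime hooks map the BO through qxl_bo_kmap() and hand the kernel virtual address to the dma-buf importer. A condensed sketch of that vmap path (the function name is hypothetical; the calls are the ones shown above):

    static void *example_prime_vmap(struct drm_gem_object *obj)
    {
        struct qxl_bo *bo = gem_to_qxl_bo(obj);
        void *ptr;
        int ret;

        ret = qxl_bo_kmap(bo, &ptr);   /* maps the whole BO */
        if (ret < 0)
            return ERR_PTR(ret);
        return ptr;                    /* undone later by qxl_bo_kunmap() */
    }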
qxl_release.c
    45 /* put an alloc/dealloc surface cmd into one bo and round up to 128 */
    169        struct qxl_bo *bo;      /* local in function:qxl_release_free_list */
    173        bo = to_qxl_bo(entry->tv.bo);
    174        qxl_bo_unref(&bo);
    207                             struct qxl_bo **bo)
    209        /* pin releases bo's they are too messy to evict */
    211                             QXL_GEM_DOMAIN_VRAM, NULL, bo);
    214 int qxl_release_list_add(struct qxl_release *release, struct qxl_bo *bo)
    219        if (entry->tv.bo == &bo->tbo)
    273        struct qxl_bo *bo = to_qxl_bo(entry->tv.bo);  /* local in function:qxl_release_reserve_list */
    301        struct qxl_bo *bo;      /* local in function:qxl_alloc_surface_release_reserved */
    329        struct qxl_bo *bo;      /* local in function:qxl_alloc_release_reserved */
    414        struct qxl_bo *bo = release->release_bo;      /* local in function:qxl_release_map */
    427        struct qxl_bo *bo = release->release_bo;      /* local in function:qxl_release_unmap */
    436        struct ttm_buffer_object *bo;  /* local in function:qxl_release_fence_buffer_objects */
    [all...]
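qxl_release_map()/qxl_release_unmap() (lines 414/427) give short-lived CPU access to the release BO so a command can be written into it. A sketch of that bracket; struct qxl_drawable and QXL_DRAW_NOP are assumptions borrowed from the wider qxl driver, not from the lines above:

    static void example_fill_cmd(struct qxl_device *qdev,
                                 struct qxl_release *release)
    {
        /* The command lives at the start of the release BO. */
        struct qxl_drawable *draw =
            (struct qxl_drawable *)qxl_release_map(qdev, release);

        draw->type = QXL_DRAW_NOP;     /* hypothetical command */
        qxl_release_unmap(qdev, release, &draw->release_info);
    }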
/src/sys/external/bsd/drm2/dist/drm/radeon/
radeon_mn.c
    57         struct radeon_bo *bo = container_of(mn, struct radeon_bo, notifier);  /* local in function:radeon_mn_invalidate */
    61         if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
    67         r = radeon_bo_reserve(bo, true);
    69                 DRM_ERROR("(%ld) failed to reserve user bo\n", r);
    73         r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
    76                 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
    78         radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
    79         r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
    [all...]
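The notifier callback above quiesces and evicts a userptr BO when its backing pages are about to be invalidated. The sequence, condensed into one sketch (ctx and the error handling are simplified; the calls appear in this file or in radeon_object.h):

    static void example_evict_userptr(struct radeon_bo *bo)
    {
        struct ttm_operation_ctx ctx = { false, false };
        long r;

        if (radeon_bo_reserve(bo, true))
            return;

        /* Wait for all pending GPU work on the BO's reservation object. */
        r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
                                      MAX_SCHEDULE_TIMEOUT);
        if (r <= 0)
            DRM_ERROR("(%ld) failed to wait for user bo\n", r);

        /* Move the BO to system memory so the user pages can be released. */
        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
        if (ttm_bo_validate(&bo->tbo, &bo->placement, &ctx))
            DRM_ERROR("failed to validate user bo\n");

        radeon_bo_unreserve(bo);
    }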
radeon_prime.c
    41         struct radeon_bo *bo = gem_to_radeon_bo(obj);  /* local in function:radeon_gem_prime_get_sg_table */
    42         int npages = bo->tbo.num_pages;
    44         return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
    49         struct radeon_bo *bo = gem_to_radeon_bo(obj);  /* local in function:radeon_gem_prime_vmap */
    52         ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
    53                           &bo->dma_buf_vmap);
    57         return bo->dma_buf_vmap.virtual;
    62         struct radeon_bo *bo = gem_to_radeon_bo(obj);  /* local in function:radeon_gem_prime_vunmap */
    64         ttm_bo_kunmap(&bo->dma_buf_vmap);
    73         struct radeon_bo *bo;  /* local in function:radeon_gem_prime_import_sg_table */
    93         struct radeon_bo *bo = gem_to_radeon_bo(obj);  /* local in function:radeon_gem_prime_pin */
    111        struct radeon_bo *bo = gem_to_radeon_bo(obj);  /* local in function:radeon_gem_prime_unpin */
    128        struct radeon_bo *bo = gem_to_radeon_bo(gobj); /* local in function:radeon_gem_prime_export */
    [all...]
radeon_object.c
    53 static void radeon_bo_clear_surface_reg(struct radeon_bo *bo);
    56  * To exclude mutual BO access we rely on bo_reserve exclusion, as all
    60 static void radeon_update_memory_usage(struct radeon_bo *bo,
    63         struct radeon_device *rdev = bo->rdev;
    64         u64 size = (u64)bo->tbo.num_pages << PAGE_SHIFT;
    84         struct radeon_bo *bo;  /* local in function:radeon_ttm_bo_destroy */
    86         bo = container_of(tbo, struct radeon_bo, tbo);
    88         radeon_update_memory_usage(bo, bo->tbo.mem.mem_type, -1);
    90         mutex_lock(&bo->rdev->gem.mutex);
    196        struct radeon_bo *bo;  /* local in function:radeon_bo_create */
    444        struct radeon_bo *bo, *n;  /* local in function:radeon_bo_force_delete */
    566        struct radeon_bo *bo = lobj->robj;  /* local in function:radeon_bo_list_validate */
    [all...]
radeon_object.h
    58  * radeon_bo_reserve - reserve bo
    59  * @bo: bo structure
    66 static inline int radeon_bo_reserve(struct radeon_bo *bo, bool no_intr)
    70         r = ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
    73                 dev_err(bo->rdev->dev, "%p reserve failed\n", bo);
    79 static inline void radeon_bo_unreserve(struct radeon_bo *bo)
    81         ttm_bo_unreserve(&bo->tbo);
    85  * radeon_bo_gpu_offset - return GPU offset of bo
    [all...]
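radeon_bo_reserve() treats -ERESTARTSYS as an expected outcome of an interruptible wait, which is why the helper only prints dev_err() for other errors. A sketch of a caller honoring that convention (the function itself is hypothetical):

    static int example_with_reserved(struct radeon_bo *bo)
    {
        int r;

        r = radeon_bo_reserve(bo, false);  /* false: interruptible */
        if (r)
            return r;  /* -ERESTARTSYS simply restarts the syscall */

        /* ... placement or mapping changes go here ... */

        radeon_bo_unreserve(bo);
        return 0;
    }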
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/
amdgpu_mn.c
    72         struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);  /* local in function:amdgpu_mn_invalidate_gfx */
    73         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    83         r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, true, false,
    87                 DRM_ERROR("(%ld) failed to wait for user bo\n", r);
    102  * We temporarily evict the BO attached to this range. This necessitates
    109        struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier);  /* local in function:amdgpu_mn_invalidate_hsa */
    110        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    119        amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm);
    130  * amdgpu_mn_register - register a BO for notifier update
    [all...]
amdgpu_object.c
    62  * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
    64  * @bo: &amdgpu_bo buffer object
    66  * This function is called when a BO stops being pinned, and updates the
    69 static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
    71         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    73         if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
    74                 atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
    75                 atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
    77         } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
    78                 atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
    85         struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);  /* local in function:amdgpu_bo_destroy */
    530        struct amdgpu_bo *bo;  /* local in function:amdgpu_bo_do_create */
    [all...]
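A condensed restatement of the unpin accounting above: the counters are atomic64_t, so concurrent pin/unpin paths need no extra lock (the VRAM-visible counter from line 75 is elided here):

    static void example_account_unpin(struct amdgpu_bo *bo)
    {
        struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);

        if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
            atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
        else if (bo->tbo.mem.mem_type == TTM_PL_TT)
            atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
    }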
amdgpu_mn.h
    36 int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr);
    37 void amdgpu_mn_unregister(struct amdgpu_bo *bo);
    39 static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr)
    45 static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {}
amdgpu_object.h
    53 /* bo virtual addresses in a vm */
    65 /* User space allocated BO in a VM */
    69 /* protected by bo being reserved */
    152  * amdgpu_bo_reserve - reserve bo
    153  * @bo: bo structure
    160 static inline int amdgpu_bo_reserve(struct amdgpu_bo *bo, bool no_intr)
    162         struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
    165         r = __ttm_bo_reserve(&bo->tbo, !no_intr, false, NULL);
    168                 dev_err(adev->dev, "%p reserve failed\n", bo);
    [all...]
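The same reserve bracket as in the radeon header, with amdgpu naming. A sketch, assuming amdgpu_bo_unreserve() from the same header:

    static int example_touch_bo(struct amdgpu_bo *bo)
    {
        int r;

        r = amdgpu_bo_reserve(bo, true);  /* true: not interruptible */
        if (r)
            return r;

        /* ... the BO may be validated, pinned or mapped here ... */

        amdgpu_bo_unreserve(bo);
        return 0;
    }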
amdgpu_dma_buf.c
    49  * @obj: GEM BO
    51  * Sets up an in-kernel virtual mapping of the BO's memory.
    58         struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  /* local in function:amdgpu_gem_prime_vmap */
    61         ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
    62                           &bo->dma_buf_vmap);
    66         return bo->dma_buf_vmap.virtual;
    71  * @obj: GEM BO
    74  * Tears down the in-kernel virtual mapping of the BO's memory.
    78         struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  /* local in function:amdgpu_gem_prime_vunmap */
    198        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  /* local in function:amdgpu_dma_buf_attach */
    242        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  /* local in function:amdgpu_dma_buf_detach */
    272        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  /* local in function:amdgpu_dma_buf_map */
    310        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);  /* local in function:amdgpu_dma_buf_unmap */
    334        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gem);  /* local in function:amdgpu_dma_buf_begin_cpu_access */
    385        struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); /* local in function:amdgpu_gem_prime_export */
    416        struct amdgpu_bo *bo;  /* local in function:amdgpu_dma_buf_create_obj */
    [all...]
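The vmap hook above maps the whole BO through TTM and caches the mapping in dma_buf_vmap. Condensed from the lines shown:

    static void *example_prime_vmap(struct drm_gem_object *obj)
    {
        struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
        int ret;

        ret = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages,
                          &bo->dma_buf_vmap);
        if (ret)
            return ERR_PTR(ret);

        return bo->dma_buf_vmap.virtual;  /* kernel virtual address */
    }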
amdgpu_csa.h
    34 int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
    37                      struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
    39 void amdgpu_free_static_csa(struct amdgpu_bo **bo);
amdgpu_csa.c
    42 int amdgpu_allocate_static_csa(struct amdgpu_device *adev, struct amdgpu_bo **bo,
    49                                 domain, bo,
    51         if (!*bo)
    59 void amdgpu_free_static_csa(struct amdgpu_bo **bo)
    61         amdgpu_bo_free_kernel(bo, NULL, NULL);
    71                   struct amdgpu_bo *bo, struct amdgpu_bo_va **bo_va,
    82         csa_tv.bo = &bo->tbo;
    94         *bo_va = amdgpu_vm_bo_add(adev, vm, bo);
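The CSA allocate/free pair is symmetric; a driver-init sketch using the prototypes from amdgpu_csa.h (the domain and AMDGPU_CSA_SIZE values are assumptions from the amdgpu driver, not from the lines above):

    static int example_csa_init(struct amdgpu_device *adev,
                                struct amdgpu_bo **csa)
    {
        return amdgpu_allocate_static_csa(adev, csa, AMDGPU_GEM_DOMAIN_GTT,
                                          AMDGPU_CSA_SIZE);
    }

    static void example_csa_fini(struct amdgpu_bo **csa)
    {
        amdgpu_free_static_csa(csa);  /* frees the kernel BO and NULLs *csa */
    }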
/src/sys/external/bsd/drm2/dist/drm/virtio/
virtgpu_object.c
    71         struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);  /* local in function:virtio_gpu_free_object */
    72         struct virtio_gpu_device *vgdev = bo->base.base.dev->dev_private;
    74         if (bo->pages)
    75                 virtio_gpu_object_detach(vgdev, bo);
    76         if (bo->created)
    77                 virtio_gpu_cmd_unref_resource(vgdev, bo->hw_res_handle);
    78         virtio_gpu_resource_id_put(vgdev, bo->hw_res_handle);
    100        struct virtio_gpu_object *bo;  /* local in function:virtio_gpu_create_object */
    102        bo = kzalloc(sizeof(*bo), GFP_KERNEL);
    117        struct virtio_gpu_object *bo;  /* local in function:virtio_gpu_object_create */
    [all...]
/src/sys/external/bsd/drm2/dist/drm/ttm/
ttm_bo.c
    65  * ttm_global_mutex - protecting the global BO state
    86 static void ttm_bo_default_destroy(struct ttm_buffer_object *bo)
    88         kfree(bo);
    120 static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo,
    127                bo, bo->mem.num_pages, bo->mem.size >> 10,
    128                bo->mem.size >> 20);
    136                ttm_mem_type_debug(bo->bdev, &p, mem_type);
    176        struct ttm_buffer_object *bo  /* local in function:ttm_bo_release_list */
    635        struct ttm_buffer_object *bo;  /* local in function:ttm_bo_delayed_delete */
    677        struct ttm_buffer_object *bo =  /* local in function:ttm_bo_release */
    857        struct ttm_buffer_object *bo = NULL, *busy_bo = NULL;  /* local in function:ttm_mem_evict_first */
    1478       struct ttm_buffer_object *bo;  /* local in function:ttm_bo_create */
    1914       struct ttm_buffer_object *bo;  /* local in function:ttm_bo_swapout */
    [all...]
ttm_bo_vm.c
    50 static vm_fault_t ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo,
    56         if (likely(!bo->moving))
    62         if (dma_fence_is_signaled(bo->moving))
    74                 ttm_bo_get(bo);
    76                 (void) dma_fence_wait(bo->moving, true);
    77                 dma_resv_unlock(bo->base.resv);
    78                 ttm_bo_put(bo);
    85         err = dma_fence_wait(bo->moving, true);
    93         dma_fence_put(bo->moving);
    94         bo->moving = NULL;
    187        struct ttm_buffer_object *bo = vma->vm_private_data;  /* local in function:ttm_bo_vm_fault_reserved */
    334        struct ttm_buffer_object *bo = vma->vm_private_data;  /* local in function:ttm_bo_vm_fault */
    354        struct ttm_buffer_object *bo = vma->vm_private_data;  /* local in function:ttm_bo_vm_open */
    364        struct ttm_buffer_object *bo = vma->vm_private_data;  /* local in function:ttm_bo_vm_close */
    414        struct ttm_buffer_object *bo = vma->vm_private_data;  /* local in function:ttm_bo_vm_access */
    461        struct ttm_buffer_object *bo = NULL;  /* local in function:ttm_bo_vm_lookup */
    506        struct ttm_buffer_object *bo;  /* local in function:ttm_bo_mmap */
    [all...]
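ttm_bo_vm_fault_idle() waits out a pending move before the fault handler installs mappings. Its fence handling, reduced to one sketch (the caller must hold the BO's reservation; the no-wait retry path from lines 74-78 is elided):

    static int example_wait_moving(struct ttm_buffer_object *bo)
    {
        int err;

        if (!bo->moving)
            return 0;                   /* no move in flight */

        if (!dma_fence_is_signaled(bo->moving)) {
            err = dma_fence_wait(bo->moving, true);  /* interruptible */
            if (err)
                return err;
        }

        dma_fence_put(bo->moving);      /* move finished: drop the fence */
        bo->moving = NULL;
        return 0;
    }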
ttm_execbuf_util.c
    45         struct ttm_buffer_object *bo = entry->bo;  /* local in function:ttm_eu_backoff_reservation_reverse */
    47         dma_resv_unlock(bo->base.resv);
    61         struct ttm_buffer_object *bo = entry->bo;  /* local in function:ttm_eu_backoff_reservation */
    63         ttm_bo_move_to_lru_tail(bo, NULL);
    64         dma_resv_unlock(bo->base.resv);
    99         struct ttm_buffer_object *bo = entry->bo;  /* local in function:ttm_eu_reserve_buffers */
    101        ret = __ttm_bo_reserve(bo, intr, (ticket == NULL), ticket);
    172        struct ttm_buffer_object *bo = entry->bo;  /* local in function:ttm_eu_fence_buffer_objects */
    [all...]
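These helpers implement the classic execbuf flow: reserve every BO on a validate list under one ww_acquire ticket, submit the work, then fence and unreserve them all. A sketch, with the argument list following the TTM vintage in this tree; ttm_eu_backoff_reservation() is the matching cleanup if submission fails between the two calls:

    static int example_submit(struct ww_acquire_ctx *ticket,
                              struct list_head *validate_list,
                              struct dma_fence *fence)
    {
        int ret;

        ret = ttm_eu_reserve_buffers(ticket, validate_list,
                                     true /* intr */, NULL /* dups */,
                                     true /* del_lru */);
        if (ret)
            return ret;

        /* ... queue the GPU work that uses the reserved BOs ... */

        ttm_eu_fence_buffer_objects(ticket, validate_list, fence);
        return 0;
    }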
ttm_bo_util.c
    50         struct ttm_buffer_object *bo;  /* member in struct:ttm_transfer_obj */
    58 void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
    60         ttm_bo_mem_put(bo, &bo->mem);
    63 int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
    67         struct ttm_tt *ttm = bo->ttm;
    68         struct ttm_mem_reg *old_mem = &bo->mem;
    72         ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);
    81         ttm_bo_free_old_node(bo);
    126        struct ttm_buffer_object *bo;  /* local in function:ttm_mem_io_evict */
    763        struct ttm_buffer_object *bo = map->bo;  /* local in function:ttm_bo_kunmap */
    [all...]
/src/sys/external/bsd/drm2/dist/include/drm/ttm/
ttm_bo_driver.h
    84  * @bo: Pointer to the buffer object we're allocating space for.
    109                     struct ttm_buffer_object *bo,
    229  * @bo: The buffer object to create the ttm for.
    237        struct ttm_tt *(*ttm_tt_create)(struct ttm_buffer_object *bo,
    289  * @bo: the buffer object to be evicted
    292  * Check with the driver if it is valuable to evict a BO to make room
    295        bool (*eviction_valuable)(struct ttm_buffer_object *bo,
    300  * @bo: the buffer object to be evicted
    302  * Return the bo flags for a buffer which is not mapped to the hardware.
    304  * finished, they'll end up in bo->mem.flags
    [all...]
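A driver fills struct ttm_bo_driver with hooks like those declared above. A sketch of the eviction_valuable hook, simply deferring to TTM's default test (a real driver would layer its own veto, e.g. for pinned BOs, on top):

    static bool example_eviction_valuable(struct ttm_buffer_object *bo,
                                          const struct ttm_place *place)
    {
        /* Defer to TTM's default placement-overlap test. */
        return ttm_bo_eviction_valuable(bo, place);
    }

    static struct ttm_bo_driver example_driver = {
        .eviction_valuable = example_eviction_valuable,
        /* .ttm_tt_create, .evict_flags, ... as declared above */
    };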
/src/sys/external/bsd/drm2/ttm/
ttm_bo_vm.c
    83         struct ttm_buffer_object *const bo = container_of(uobj,  /* local in function:ttm_bo_uvm_reference */
    86         (void)ttm_bo_get(bo);
    92         struct ttm_buffer_object *bo = container_of(uobj,  /* local in function:ttm_bo_uvm_detach */
    95         ttm_bo_put(bo);
    99 ttm_bo_vm_fault_idle(struct ttm_buffer_object *bo, struct uvm_faultinfo *vmf)
    103        if (__predict_true(!bo->moving))
    109        if (dma_fence_is_signaled(bo->moving))
    119                ttm_bo_get(bo);
    121                (void) dma_fence_wait(bo->moving, true);
    122                dma_resv_unlock(bo->base.resv);
    173        struct ttm_buffer_object *const bo = container_of(uobj,  /* local in function:ttm_bo_uvm_fault_reserved */
    316        struct ttm_buffer_object *const bo = container_of(uobj,  /* local in function:ttm_bo_uvm_fault */
    354        struct ttm_buffer_object *bo;  /* local in function:ttm_bo_mmap_object */
    410        struct ttm_buffer_object *bo = NULL;  /* local in function:ttm_bo_uvm_lookup */
    [all...]
/src/sys/external/bsd/drm2/dist/drm/vmwgfx/
vmwgfx_bo.c
    56  * @bo: Pointer to the TTM buffer object.
    61 vmw_buffer_object(struct ttm_buffer_object *bo)
    63         return container_of(bo, struct vmw_buffer_object, base);
    71  * @bo: Pointer to the TTM buffer object.
    76 vmw_user_buffer_object(struct ttm_buffer_object *bo)
    78         struct vmw_buffer_object *vmw_bo = vmw_buffer_object(bo);
    100        struct ttm_buffer_object *bo = &buf->base;  /* local in function:vmw_bo_pin_in_placement */
    110        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
    115        ret = ttm_bo_mem_compat(placement, &bo->mem,
    118                ret = ttm_bo_validate(bo, placement, &ctx);
    149        struct ttm_buffer_object *bo = &buf->base;  /* local in function:vmw_bo_pin_in_vram_or_gmr */
    224        struct ttm_buffer_object *bo = &buf->base;  /* local in function:vmw_bo_pin_in_start_of_vram */
    293        struct ttm_buffer_object *bo = &buf->base;  /* local in function:vmw_bo_unpin */
    345        struct ttm_buffer_object *bo = &vbo->base;  /* local in function:vmw_bo_pin_reserved */
    394        struct ttm_buffer_object *bo = &vbo->base;  /* local in function:vmw_bo_map_and_cache */
    702        struct ttm_buffer_object *bo = &user_bo->vbo.base;  /* local in function:vmw_user_bo_synccpu_grab */
    [all...]
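vmw_bo_pin_in_placement() (line 100) shows the pin idiom: reserve, skip validation when the BO already satisfies the placement, otherwise validate it in. Reduced to a sketch (vmwgfx's pin-count bookkeeping is elided):

    static int example_pin(struct ttm_buffer_object *bo,
                           struct ttm_placement *placement,
                           bool interruptible)
    {
        struct ttm_operation_ctx ctx = { interruptible, false };
        uint32_t new_flags;
        int ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (ret)
            return ret;

        if (ttm_bo_mem_compat(placement, &bo->mem, &new_flags))
            ret = 0;                    /* already in an acceptable place */
        else
            ret = ttm_bo_validate(bo, placement, &ctx);

        ttm_bo_unreserve(bo);
        return ret;
    }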
/src/sys/external/bsd/drm2/dist/drm/nouveau/
nv10_fence.h
    17         struct nouveau_bo *bo;  /* member in struct:nv10_fence_priv */
nouveau_nv50_fence.c
    45         struct ttm_mem_reg *reg = &priv->bo->bo.mem;
    89                              0, 0x0000, NULL, NULL, &priv->bo);
    91                 ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
    93                         ret = nouveau_bo_map(priv->bo);
    95                         nouveau_bo_unpin(priv->bo);
    98                 nouveau_bo_ref(NULL, &priv->bo);
    106        nouveau_bo_wr32(priv->bo, 0x000, 0x00000000);
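The create/pin/map chain above unwinds in reverse on failure: a failed map unpins, and any failure drops the reference. Condensed into a sketch using the nv10_fence_priv type from nv10_fence.h:

    static int example_setup(struct nv10_fence_priv *priv)
    {
        int ret;

        ret = nouveau_bo_pin(priv->bo, TTM_PL_FLAG_VRAM, false);
        if (ret == 0) {
            ret = nouveau_bo_map(priv->bo);
            if (ret)
                nouveau_bo_unpin(priv->bo);   /* undo the pin */
        }
        if (ret)
            nouveau_bo_ref(NULL, &priv->bo);  /* drop the BO on any failure */
        return ret;
    }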