/src/sys/external/bsd/drm2/dist/drm/radeon/
  radeon_rs400.c
    243  u32 *gtt = rdev->gart.ptr;  local in function:rs400_gart_set_page
    244  gtt[i] = cpu_to_le32(lower_32_bits(entry));
  radeon_ttm.c
    166   * first, but only set GTT as busy placement, so this
    167   * BO will be evicted to GTT rather than causing other
    519  struct radeon_ttm_tt *gtt = (void *)ttm;  local in function:radeon_ttm_tt_pin_userptr
    525  int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
    532  if (curproc->p_vmspace != gtt->usermm)
    535  if (current->mm != gtt->usermm)
    539  if (gtt->userflags & RADEON_GEM_USERPTR_ANONONLY) {
    542  unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
    548  vm_map_lock_read(&gtt->usermm->vm_map);
    549  ok = uvm_map_lookup_entry(&gtt->usermm->vm_map
    662  struct radeon_ttm_tt *gtt = (void *)ttm;  local in function:radeon_ttm_tt_unpin_userptr
    699  struct radeon_ttm_tt *gtt = (void*)ttm;  local in function:radeon_ttm_backend_bind
    728  struct radeon_ttm_tt *gtt = (void *)ttm;  local in function:radeon_ttm_backend_unbind
    740  struct radeon_ttm_tt *gtt = (void *)ttm;  local in function:radeon_ttm_backend_destroy
    756  struct radeon_ttm_tt *gtt;  local in function:radeon_ttm_tt_create
    789  struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);  local in function:radeon_ttm_tt_populate
    852  struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);  local in function:radeon_ttm_tt_unpopulate
    898  struct radeon_ttm_tt *gtt = container_of(ttm, struct radeon_ttm_tt,  local in function:radeon_ttm_tt_swapout
    918  struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);  local in function:radeon_ttm_tt_set_userptr
    935  struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);  local in function:radeon_ttm_tt_has_userptr
    945  struct radeon_ttm_tt *gtt = radeon_ttm_tt_to_gtt(ttm);  local in function:radeon_ttm_tt_is_readonly
    [all...]
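Nearly every radeon_ttm.c hit above is the same idiom: a driver-private struct radeon_ttm_tt embeds the generic TTM object as its first member, so the struct ttm_tt pointer handed to the callbacks can be cast back to the wrapper, and the userptr bookkeeping (userptr, usermm, userflags) hangs off that wrapper. A minimal sketch of the idiom, with a reduced field set and illustrative names rather than the driver's exact layout (assumes TTM and radeon UAPI headers for ttm_dma_tt/ttm_tt and RADEON_GEM_USERPTR_READONLY):

    /*
     * Reduced sketch only: the generic TTM object sits first, so
     * (void *)ttm recovers the wrapper.  Fields beyond userptr/usermm/
     * userflags are omitted; names here are illustrative.
     */
    struct example_ttm_tt {
            struct ttm_dma_tt ttm;          /* must be the first member */
            uint64_t          userptr;      /* start of the user mapping */
            uint32_t          userflags;    /* RADEON_GEM_USERPTR_* flags */
            struct vmspace   *usermm;       /* owning address space; struct mm_struct * under Linux */
    };

    static bool example_tt_is_readonly(struct ttm_tt *ttm)
    {
            struct example_ttm_tt *gtt = (void *)ttm;  /* cast back to the wrapper */

            if (gtt == NULL)
                    return false;
            return (gtt->userflags & RADEON_GEM_USERPTR_READONLY) != 0;
    }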
  radeon_r100.c
    706  u32 *gtt = rdev->gart.ptr;  local in function:r100_pci_gart_set_page
    707  gtt[i] = cpu_to_le32(lower_32_bits(entry));
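The rs400_gart_set_page and r100_pci_gart_set_page hits above share one pattern: the GART page table is CPU-mapped at rdev->gart.ptr and each 32-bit entry is stored little-endian. A minimal sketch of that write, assuming Linux-style kernel helpers (lower_32_bits, cpu_to_le32); the function name and parameters are illustrative, not the drivers' exact signatures:

    #include <linux/kernel.h>       /* u32, lower_32_bits() */
    #include <asm/byteorder.h>      /* cpu_to_le32() */

    /*
     * Illustrative only: store one GART entry in the CPU-visible table,
     * in the style of rs400_gart_set_page()/r100_pci_gart_set_page().
     */
    static void example_gart_set_page(void *gart_ptr, unsigned int i, uint64_t entry)
    {
            u32 *gtt = gart_ptr;    /* CPU mapping of the GART table */

            /* The GART hardware reads 32-bit little-endian page addresses. */
            gtt[i] = cpu_to_le32(lower_32_bits(entry));
    }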
  radeon.h
    473  struct dentry *gtt;  member in struct:radeon_mman
/src/sys/external/bsd/drm2/dist/drm/qxl/
  qxl_ttm.c
    168  struct qxl_ttm_tt *gtt = (void *)ttm;  local in function:qxl_ttm_backend_bind
    170  gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT);
    187  struct qxl_ttm_tt *gtt = (void *)ttm;  local in function:qxl_ttm_backend_destroy
    189  ttm_tt_fini(&gtt->ttm);
    190  kfree(gtt);
    203  struct qxl_ttm_tt *gtt;  local in function:qxl_ttm_tt_create
    206  gtt = kzalloc(sizeof(struct qxl_ttm_tt), GFP_KERNEL);
    207  if (gtt == NULL)
    209  gtt->ttm.func = &qxl_backend_func;
    210  gtt->qdev = qdev
    [all...]
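The line-170 hit is the usual TTM bind arithmetic: the placement start handed to the backend is counted in pages, while the driver keeps a byte offset, hence the shift by PAGE_SHIFT. A tiny sketch of just that conversion; the helper name is invented for the example, and it assumes the TTM headers of this era for struct ttm_mem_reg:

    /*
     * Illustrative helper: convert a TTM placement start (in pages) into
     * a byte offset into the GTT aperture, as qxl_ttm_backend_bind() does.
     */
    static unsigned long example_gtt_byte_offset(const struct ttm_mem_reg *bo_mem)
    {
            /* bo_mem->start is a page index within the placement. */
            return (unsigned long)(bo_mem->start << PAGE_SHIFT);
    }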
/src/sys/external/bsd/drm2/dist/drm/i915/gem/ |
  i915_gem_object_types.h
    44    * of pages before to binding them into the GTT, and put_pages() is
    50    * pages to a different memory domain within the GTT). put_pages()
    246   * The gtt page sizes we are allowed to use given the
    255   * The actual gtt page size usage. Since we can have
    258   * struct also lives in each vma, therefore the gtt
    261  unsigned int gtt;  member in struct:drm_i915_gem_object::__anonbfddbbad0408::i915_page_sizes
/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/ |
  huge_pages.c
    355  if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
    356  pr_err("unsupported page_sizes.gtt=%u, supported=%u\n",
    357  vma->page_sizes.gtt & ~supported, supported);
    373  if (obj->mm.page_sizes.gtt) {
    374  pr_err("obj->page_sizes.gtt(%u) should never be set\n",
    375  obj->mm.page_sizes.gtt);
    514  if (vma->page_sizes.gtt != page_size) {
    515  pr_err("%s page_sizes.gtt=%u, expected=%u\n",
    516  __func__, vma->page_sizes.gtt,
    603  if (vma->page_sizes.gtt != page_size)
    819  unsigned int gtt;  member in struct:igt_mock_ppgtt_64K::object_info
    [all...]
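These i915 hits treat page_sizes.gtt as a bitmask of page sizes actually used for the GTT binding, and the selftest checks it against the device's supported mask (the "& ~supported" test at line 357). A minimal sketch of that subset check; the mask constants and helper name are illustrative stand-ins, chosen to mirror the usual BIT(page_shift) encoding rather than quoting i915's exact definitions:

    /* Illustrative page-size bits in a BIT(page_shift) encoding. */
    #define EXAMPLE_PAGE_SIZE_4K    (1u << 12)
    #define EXAMPLE_PAGE_SIZE_64K   (1u << 16)
    #define EXAMPLE_PAGE_SIZE_2M    (1u << 21)

    /* A binding is acceptable only if every page size it used is supported. */
    static bool example_page_sizes_ok(unsigned int gtt_sizes, unsigned int supported)
    {
            return (gtt_sizes & ~supported) == 0;
    }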
/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/ |
  amdgpu_ttm.c
    110   /* GTT memory */
    196   * first, but only set GTT as busy placement, so this
    197   * BO will be evicted to GTT rather than causing other
    207   /* Move to GTT memory */
    269   * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
    509   /* create space/pages for new_mem in GTT space */
    521   pr_err("Failed to find GTT space for blit from VRAM\n");
    531   /* Bind the memory to the GTT space */
    537   /* blit VRAM to GTT */
    565   /* make space in GTT for old_mem buffer *
    830   struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_get_user_pages
    940   struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_get_user_pages_done
    1034  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_unpin_userptr
    1072  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_gart_bind
    1115  struct amdgpu_ttm_tt *gtt = (void*)ttm;  local in function:amdgpu_ttm_backend_bind
    1162  struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;  local in function:amdgpu_ttm_alloc_gart
    1244  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_backend_unbind
    1264  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_backend_destroy
    1291  struct amdgpu_ttm_tt *gtt;  local in function:amdgpu_ttm_tt_create
    1320  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_populate
    1388  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_unpopulate
    1433  struct amdgpu_ttm_tt *gtt = container_of(ttm, struct amdgpu_ttm_tt,  local in function:amdgpu_ttm_tt_swapout
    1490  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_get_usermm
    1513  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_affect_userptr
    1534  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_is_userptr
    1547  struct amdgpu_ttm_tt *gtt = (void *)ttm;  local in function:amdgpu_ttm_tt_is_readonly
    2178  struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;  local in function:amdgpu_map_buffer
    [all...]
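Like the radeon list above, almost all of these are the embedded-ttm_tt cast; the userptr-specific helpers (amdgpu_ttm_tt_affect_userptr, _is_userptr, _is_readonly) then consult fields of that wrapper. A sketch of the range-overlap test such an "affect_userptr" helper has to perform on MMU-notifier invalidation, with an invented reduced struct and parameter list rather than amdgpu's real signature (assumes kernel context for bool and PAGE_SIZE):

    /* Reduced stand-in for the userptr bookkeeping kept in the TT wrapper. */
    struct example_userptr_tt {
            unsigned long userptr;      /* start of the user mapping */
            unsigned long num_pages;    /* pages backing the object */
    };

    /* Does the invalidated range [start, end) overlap the pages this TT covers? */
    static bool example_tt_affect_userptr(const struct example_userptr_tt *gtt,
                                          unsigned long start, unsigned long end)
    {
            unsigned long tt_start = gtt->userptr;
            unsigned long tt_end = tt_start + gtt->num_pages * PAGE_SIZE;

            return start < tt_end && end > tt_start;
    }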
/src/sys/external/bsd/drm2/dist/drm/i915/gvt/ |
  gtt.c
    1     /* $NetBSD: gtt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $ */
    4     * GTT virtualization
    39    __KERNEL_RCSID(0, "$NetBSD: gtt.c,v 1.2 2021/12/18 23:45:31 riastradh Exp $");
    363   #define GTT_SPTE_FLAG_64K_SPLITED BIT(52) /* splited 64K gtt entry */
    558   struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
    585   struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
    607   struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
    619   struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
    630   struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
    640   struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops
    1529  struct intel_gvt_gtt *gtt = &gvt->gtt;  local in function:ppgtt_allocate_oos_page
    1783  struct intel_gvt_gtt *gtt = &gvt->gtt;  local in function:invalidate_ppgtt_mm
    1813  struct intel_gvt_gtt *gtt = &gvt->gtt;  local in function:shadow_ppgtt_mm
    2355  struct intel_vgpu_gtt *gtt = &vgpu->gtt;  local in function:alloc_scratch_pages
    2464  struct intel_vgpu_gtt *gtt = &vgpu->gtt;  local in function:intel_vgpu_init_gtt
    2538  struct intel_gvt_gtt *gtt = &gvt->gtt;  local in function:clean_spt_oos
    2555  struct intel_gvt_gtt *gtt = &gvt->gtt;  local in function:setup_spt_oos
    [all...]
  gvt.h
    43    #include "gtt.h"
    192   struct intel_vgpu_gtt gtt;  member in struct:intel_vgpu
    320   struct intel_gvt_gtt gtt;  member in struct:intel_gvt
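The gtt.c hits show the two levels of state the gvt.h members declare: device-wide GTT state in struct intel_gvt (including the pte_ops table fetched over and over as mm->vgpu->gvt->gtt.pte_ops) and per-vGPU GTT state in struct intel_vgpu. A heavily reduced sketch of that ops-table indirection; every structure body below is invented for illustration and is not the real GVT layout:

    #include <stdbool.h>
    #include <stdint.h>

    /* Minimal GTT entry stand-in. */
    struct example_gtt_entry {
            uint64_t val64;
    };

    /* Per-device table of PTE accessors (one per hardware generation). */
    struct example_gtt_pte_ops {
            bool (*test_present)(const struct example_gtt_entry *e);
    };

    struct example_gvt {
            struct {
                    const struct example_gtt_pte_ops *pte_ops;  /* device-wide */
            } gtt;
    };

    struct example_vgpu {
            struct example_gvt *gvt;    /* backpointer to the device */
    };

    /* Every shadow page-table routine reaches the accessors the same way. */
    static bool example_entry_present(const struct example_vgpu *vgpu,
                                      const struct example_gtt_entry *e)
    {
            const struct example_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;

            return ops->test_present(e);
    }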
/src/sys/external/bsd/drm2/dist/include/uapi/drm/ |
  amdgpu_drm.h
    117  /* Flag that USWC attributes should be used for GTT */
    121  /* Flag that create shadow bo(GTT) while allocating vram bo */
    715  /* the used GTT size */
    719  /* Query information about VRAM and GTT domains */
    729  /* Query memory about VRAM and GTT domains */
    914  struct drm_amdgpu_heap_info gtt;  member in struct:drm_amdgpu_memory_info
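The line-914 hit is the userspace side of the picture: drm_amdgpu_memory_info carries one drm_amdgpu_heap_info per domain, and the GTT figures are read out of its gtt member after a memory-info query. A sketch of just that member access, assuming the usual heap_info field names (total_heap_size, heap_usage); how the struct gets filled (the ioctl plumbing) is deliberately left out:

    #include <stdio.h>
    #include <drm/amdgpu_drm.h>     /* struct drm_amdgpu_memory_info */

    /* Print the GTT heap figures from an already-filled memory_info. */
    static void example_print_gtt_heap(const struct drm_amdgpu_memory_info *mi)
    {
            printf("GTT: total %llu bytes, in use %llu bytes\n",
                (unsigned long long)mi->gtt.total_heap_size,
                (unsigned long long)mi->gtt.heap_usage);
    }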