/src/sys/external/bsd/drm2/dist/drm/i915/
i915_vma.h
     54 static inline bool i915_vma_is_active(struct i915_vma *vma)
     56 return !i915_active_is_idle(&vma->active);
     59 int __must_check __i915_vma_move_to_active(struct i915_vma *vma,
     61 int __must_check i915_vma_move_to_active(struct i915_vma *vma,
     74 static inline bool i915_vma_is_ggtt(const struct i915_vma *vma)
     76 return test_bit(I915_VMA_GGTT_BIT, __i915_vma_flags_const(vma));
     79 static inline bool i915_vma_has_ggtt_write(const struct i915_vma *vma)
     81 return test_bit(I915_VMA_GGTT_WRITE_BIT, __i915_vma_flags_const(vma));
     84 static inline void i915_vma_set_ggtt_write(struct i915_vma *vma)
     86 GEM_BUG_ON(!i915_vma_is_ggtt(vma));
    [all...]
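The i915_vma.h hits above are all flag predicates built on test_bit() over the VMA's flags word. Below is a minimal, self-contained user-space sketch of that pattern; struct mock_vma, the bit indices, and the single-word test_bit() are hypothetical stand-ins for the driver's real definitions, not the i915 API.

#include <stdbool.h>
#include <stdio.h>

#define VMA_GGTT_BIT		0	/* stands in for I915_VMA_GGTT_BIT */
#define VMA_GGTT_WRITE_BIT	1	/* stands in for I915_VMA_GGTT_WRITE_BIT */

struct mock_vma {
	unsigned long flags;		/* stands in for __i915_vma_flags(vma) */
};

/* simplified single-word test_bit(); the kernel's works on bitmap arrays */
static bool test_bit(unsigned nr, const unsigned long *addr)
{
	return (*addr >> nr) & 1UL;
}

static bool vma_is_ggtt(const struct mock_vma *vma)
{
	return test_bit(VMA_GGTT_BIT, &vma->flags);
}

static bool vma_has_ggtt_write(const struct mock_vma *vma)
{
	return test_bit(VMA_GGTT_WRITE_BIT, &vma->flags);
}

int main(void)
{
	struct mock_vma vma = { .flags = 1UL << VMA_GGTT_BIT };

	printf("ggtt=%d write=%d\n", vma_is_ggtt(&vma), vma_has_ggtt_write(&vma));
	return 0;
}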
i915_vma.c
     58 void i915_vma_free(struct i915_vma *vma)
     60 mutex_destroy(&vma->pages_mutex);
     61 return kmem_cache_free(global.slab_vmas, vma);
     68 static void vma_print_allocator(struct i915_vma *vma, const char *reason)
     74 if (!vma->node.stack) {
     75 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: unknown owner\n",
     76 vma->node.start, vma->node.size, reason);
     80 nr_entries = stack_depot_fetch(vma->node.stack, &entries);
     82 DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n"
    130 const struct i915_vma *vma = vn;	local in function:compare_vma_key
    159 struct i915_vma *vma;	local in function:vma_create
    342 struct i915_vma *vma;	local in function:i915_vma_instance
    361 struct i915_vma *vma;	member in struct:i915_vma_work
    370 struct i915_vma *vma = vw->vma;	local in function:__vma_bind
    564 struct i915_vma *vma;	local in function:i915_vma_unpin_and_release
   1112 struct i915_vma *vma = container_of(ref, typeof(*vma), ref);	local in function:i915_vma_release
   1141 struct i915_vma *vma, *next;	local in function:i915_vma_parked
    [all...]
i915_gem_evict.c
     51 * Not everything in the GGTT is tracked via vma (otherwise we
     62 struct i915_vma *vma,
     66 if (i915_vma_is_pinned(vma))
     69 list_add(&vma->evict_link, unwind);
     70 return drm_mm_scan_add_block(scan, &vma->node);
     87 * This function is used by the object/vma binding code.
    105 struct i915_vma *vma, *next;	local in function:i915_gem_evict_something
    139 list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
    155 if (i915_vma_is_active(vma)) {
    156 if (vma == active)
    265 struct i915_vma *vma, *next;	local in function:i915_gem_evict_for_node
    386 struct i915_vma *vma, *vn;	local in function:i915_gem_evict_vm
    [all...]
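Lines 66-70 show the eviction scan's bookkeeping: each unpinned VMA is pushed onto a local unwind list as it enters the drm_mm scan, so that a failed scan can restore every node it touched. A self-contained sketch of that roll-back idiom, with the list and the scan membership mocked (struct mock_vma and mark_free() are illustrative, not the driver's types):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct mock_vma {
	bool pinned;
	bool in_scan;			/* stands in for drm_mm scan membership */
	struct mock_vma *evict_link;	/* singly linked unwind list */
};

static bool mark_free(struct mock_vma *vma, struct mock_vma **unwind)
{
	if (vma->pinned)		/* pinned VMAs can never be evicted */
		return false;
	vma->evict_link = *unwind;	/* remember for possible roll-back */
	*unwind = vma;
	vma->in_scan = true;		/* stands in for drm_mm_scan_add_block() */
	return true;
}

int main(void)
{
	struct mock_vma vmas[3] = {
		{ .pinned = false }, { .pinned = true }, { .pinned = false },
	};
	struct mock_vma *unwind = NULL;

	for (int i = 0; i < 3; i++)
		mark_free(&vmas[i], &unwind);

	/* scan failed: roll back exactly the VMAs we marked, nothing else */
	for (struct mock_vma *vma = unwind; vma; vma = vma->evict_link)
		vma->in_scan = false;	/* stands in for drm_mm_scan_remove_block() */

	printf("pinned vma untouched: in_scan=%d\n", vmas[1].in_scan);
	return 0;
}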
i915_gem_fence_reg.c
     79 struct i915_vma *vma)
     97 if (vma) {
     98 unsigned int stride = i915_gem_object_get_stride(vma->obj);
    100 GEM_BUG_ON(!i915_vma_is_map_and_fenceable(vma));
    101 GEM_BUG_ON(!IS_ALIGNED(vma->node.start, I965_FENCE_PAGE));
    102 GEM_BUG_ON(!IS_ALIGNED(vma->fence_size, I965_FENCE_PAGE));
    105 val = (vma->node.start + vma->fence_size - I965_FENCE_PAGE) << 32;
    106 val |= vma->node.start;
    108 if (i915_gem_object_get_tiling(vma->obj) == I915_TILING_Y
    510 struct i915_vma *vma = READ_ONCE(reg->vma);	local in function:i915_gem_restore_fences
    [all...]
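Lines 105-106 pack the i965 fence register: the address of the last fence page goes in the upper 32 bits, the start address in the lower 32. The same arithmetic as a runnable sketch; the tiling and valid bits visible at line 108 are omitted, and the example addresses are arbitrary.

#include <stdint.h>
#include <stdio.h>

#define I965_FENCE_PAGE 4096ULL	/* fence granularity on gen4+ */

static uint64_t i965_fence_val(uint64_t start, uint64_t fence_size)
{
	uint64_t val;

	/* upper 32 bits: address of the last page covered by the fence */
	val = (start + fence_size - I965_FENCE_PAGE) << 32;
	/* lower 32 bits: start address (page aligned per the GEM_BUG_ONs) */
	val |= start;
	return val;
}

int main(void)
{
	/* e.g. a 64 KiB fenceable region at GGTT offset 0x100000 */
	printf("fence reg = 0x%016llx\n",
	    (unsigned long long)i965_fence_val(0x100000, 0x10000));
	return 0;
}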
i915_mm.c
     88 * @vma: user vma to map to
     96 int remap_io_mapping(struct vm_area_struct *vma,
    104 GEM_BUG_ON((vma->vm_flags & EXPECTED_FLAGS) != EXPECTED_FLAGS);
    107 r.mm = vma->vm_mm;
    110 (pgprot_val(vma->vm_page_prot) & ~_PAGE_CACHE_MASK));
    114 zap_vma_ptes(vma, addr, (r.pfn - pfn) << PAGE_SHIFT);
    123 * @vma: user vma to map to
    131 int remap_io_sg(struct vm_area_struct *vma,
    [all...]
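Line 114 is the interesting part of remap_io_mapping(): on failure it zaps only the (r.pfn - pfn) pages that were already inserted. A hedged user-space sketch of that partial-unwind pattern; map_page() and zap_pages() are hypothetical stand-ins, not kernel APIs.

#include <stdio.h>

#define PAGE_SHIFT 12

static int map_page(unsigned long pfn)
{
	return pfn == 0x1043 ? -1 : 0;	/* simulate a failure partway through */
}

static void zap_pages(unsigned long addr, unsigned long bytes)
{
	printf("zap %#lx..%#lx\n", addr, addr + bytes);
}

static int remap_range(unsigned long addr, unsigned long pfn, unsigned long n)
{
	unsigned long cur = pfn;

	while (cur < pfn + n) {
		if (map_page(cur) < 0) {
			/* roll back only the pages already mapped */
			zap_pages(addr, (cur - pfn) << PAGE_SHIFT);
			return -1;
		}
		cur++;
	}
	return 0;
}

int main(void)
{
	int ret = remap_range(0x7f0000000000UL, 0x1040, 8);

	printf("ret=%d\n", ret);
	return 0;
}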
/src/sys/external/bsd/drm2/dist/drm/nouveau/
nouveau_vmm.c
     34 nouveau_vma_unmap(struct nouveau_vma *vma)
     36 if (vma->mem) {
     37 nvif_vmm_unmap(&vma->vmm->vmm, vma->addr);
     38 vma->mem = NULL;
     43 nouveau_vma_map(struct nouveau_vma *vma, struct nouveau_mem *mem)
     45 struct nvif_vma tmp = { .addr = vma->addr };
     46 int ret = nouveau_mem_map(mem, &vma->vmm->vmm, &tmp);
     49 vma->mem = mem;
     56 struct nouveau_vma *vma;	local in function:nouveau_vma_find
     69 struct nouveau_vma *vma = *pvma;	local in function:nouveau_vma_del
     86 struct nouveau_vma *vma;	local in function:nouveau_vma_new
    [all...]
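nouveau_vma_map()/nouveau_vma_unmap() above use vma->mem both as the backing-memory pointer and as the "currently mapped" flag, which makes unmap idempotent. A small sketch of that idiom with mocked types; nvif_vmm_unmap() and nouveau_mem_map() are replaced by comments and an assumed-successful stub.

#include <stddef.h>
#include <stdio.h>

struct mock_mem { int id; };
struct mock_vma {
	unsigned long addr;
	struct mock_mem *mem;	/* non-NULL iff currently mapped */
};

static int vma_map(struct mock_vma *vma, struct mock_mem *mem)
{
	/* stands in for nouveau_mem_map(); assume it succeeded */
	vma->mem = mem;		/* record the mapping only after success */
	return 0;
}

static void vma_unmap(struct mock_vma *vma)
{
	if (vma->mem) {		/* idempotent: safe to call when unmapped */
		/* stands in for nvif_vmm_unmap(&vma->vmm->vmm, vma->addr) */
		vma->mem = NULL;
	}
}

int main(void)
{
	struct mock_mem mem = { .id = 1 };
	struct mock_vma vma = { .addr = 0x1000, .mem = NULL };

	vma_map(&vma, &mem);
	vma_unmap(&vma);
	vma_unmap(&vma);	/* second unmap is a no-op */
	printf("mapped=%d\n", vma.mem != NULL);
	return 0;
}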
/src/sys/arch/evbppc/conf/
Makefile.explora.inc
     10 	echo ${OBJCOPY} --adjust-vma 0x81000000 $@ $@.img ; \
     11 	${OBJCOPY} --adjust-vma 0x81000000 $@ $@.img
/src/sys/external/bsd/drm2/dist/drm/
drm_vm.c
     67 struct vm_area_struct *vma;	member in struct:drm_vma_entry
     71 static void drm_vm_open(struct vm_area_struct *vma);
     72 static void drm_vm_close(struct vm_area_struct *vma);
     75 struct vm_area_struct *vma)
     77 pgprot_t tmp = vm_get_page_prot(vma->vm_flags);
     89 if (efi_range_is_wc(vma->vm_start, vma->vm_end -
     90 vma->vm_start))
    100 static pgprot_t drm_dma_prot(uint32_t map_type, struct vm_area_struct *vma)
    102 pgprot_t tmp = vm_get_page_prot(vma->vm_flags)
    123 struct vm_area_struct *vma = vmf->vma;	local in function:drm_vm_fault
    212 struct vm_area_struct *vma = vmf->vma;	local in function:drm_vm_shm_fault
    314 struct vm_area_struct *vma = vmf->vma;	local in function:drm_vm_dma_fault
    349 struct vm_area_struct *vma = vmf->vma;	local in function:drm_vm_sg_fault
    670 struct drm_vma_entry *vma, *vma_temp;	local in function:drm_legacy_vma_flush
    [all...]
/src/sys/dev/mca/
mcabusprint.c
     11 mcabusprint(void *vma, const char *pnp)
     14 struct mcabus_attach_args *ma = vma;
/src/sys/external/bsd/drm2/dist/drm/i915/gt/
intel_ring.c
     31 struct i915_vma *vma = ring->vma;	local in function:intel_ring_pin
     42 flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
     44 if (vma->obj->stolen)
     49 ret = i915_vma_pin(vma, 0, 0, flags);
     53 if (i915_vma_is_map_and_fenceable(vma))
     54 addr = (void __force *)i915_vma_pin_iomap(vma);
     56 addr = i915_gem_object_pin_map(vma->obj,
     57 i915_coherent_map_type(vma->vm->i915));
     63 i915_vma_make_unshrinkable(vma);
     89 struct i915_vma *vma = ring->vma;	local in function:intel_ring_unpin
    109 struct i915_vma *vma;	local in function:create_ring_vma
    142 struct i915_vma *vma;	local in function:intel_engine_create_ring
    [all...]
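Lines 53-57 choose the ring's CPU mapping: a map-and-fenceable VMA is mapped through the GGTT aperture (i915_vma_pin_iomap()), anything else through a regular kernel mapping of the object. A sketch of that selection with both mapping helpers mocked; the string return values are purely for demonstration.

#include <stdbool.h>
#include <stdio.h>

struct mock_vma { bool map_and_fenceable; };

static void *pin_iomap(struct mock_vma *vma)	/* stands in for i915_vma_pin_iomap() */
{
	(void)vma;
	return (void *)"iomap";
}

static void *pin_cpumap(struct mock_vma *vma)	/* stands in for i915_gem_object_pin_map() */
{
	(void)vma;
	return (void *)"cpumap";
}

static void *ring_map(struct mock_vma *vma)
{
	/* GGTT-visible, fenceable VMAs go through the aperture ... */
	if (vma->map_and_fenceable)
		return pin_iomap(vma);
	/* ... everything else gets a plain kernel CPU mapping */
	return pin_cpumap(vma);
}

int main(void)
{
	struct mock_vma a = { .map_and_fenceable = true };
	struct mock_vma b = { .map_and_fenceable = false };

	printf("%s / %s\n", (char *)ring_map(&a), (char *)ring_map(&b));
	return 0;
}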
intel_ppgtt.c
    168 static int ppgtt_bind_vma(struct i915_vma *vma,
    176 err = vma->vm->allocate_va_range(vma->vm,
    177 vma->node.start, vma->size);
    181 set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
    186 if (i915_gem_object_is_readonly(vma->obj))
    189 GEM_BUG_ON(!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)));
    190 vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags)
    [all...]
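ppgtt_bind_vma() allocates the VA range only on the first bind (tracked by I915_VMA_ALLOC_BIT, lines 176-181) and, per line 186, withholds the writable PTE flag from read-only objects. A sketch with the flag, the page-table ops, and the PTE bits all mocked; the flag values are invented for illustration.

#include <stdbool.h>
#include <stdio.h>

struct mock_vma {
	bool allocated;		/* stands in for I915_VMA_ALLOC_BIT */
	bool readonly;		/* stands in for i915_gem_object_is_readonly() */
};

static int bind_vma(struct mock_vma *vma, unsigned *pte_flags)
{
	if (!vma->allocated) {
		/* stands in for vm->allocate_va_range(vm, start, size) */
		vma->allocated = true;
	}

	*pte_flags = 0x2;	/* hypothetical PTE_READ */
	if (!vma->readonly)
		*pte_flags |= 0x1;	/* hypothetical PTE_WRITE */

	/* stands in for vm->insert_entries(vm, vma, cache_level, pte_flags) */
	return 0;
}

int main(void)
{
	struct mock_vma vma = { .readonly = true };
	unsigned flags;

	bind_vma(&vma, &flags);
	printf("allocated=%d pte_flags=%#x\n", vma.allocated, flags);
	return 0;
}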
/src/sys/external/bsd/drm2/dist/drm/i915/selftests/
i915_gem_gtt.c
    334 struct i915_vma *vma;	local in function:close_object_list
    336 vma = i915_vma_instance(obj, vm, NULL);
    337 if (!IS_ERR(vma))
    338 ignored = i915_vma_unbind(vma);
    339 /* Only ppgtt vma may be closed before the object is freed */
    340 if (!IS_ERR(vma) && !i915_vma_is_ggtt(vma))
    341 i915_vma_close(vma);
    358 struct i915_vma *vma;	local in function:fill_hole
    362 /* Try binding many VMA working inwards from either edge *
    577 struct i915_vma *vma;	local in function:walk_hole
    647 struct i915_vma *vma;	local in function:pot_hole
    733 struct i915_vma *vma;	local in function:drunk_hole
    839 struct i915_vma *vma;	local in function:__shrink_hole
    943 struct i915_vma *vma;	local in function:shrink_boom
   1319 struct i915_vma *vma;	local in function:igt_gtt_reserve
   1371 struct i915_vma *vma;	local in function:igt_gtt_reserve
   1421 struct i915_vma *vma;	local in function:igt_gtt_reserve
   1535 struct i915_vma *vma;	local in function:igt_gtt_insert
   1581 struct i915_vma *vma;	local in function:igt_gtt_insert
   1600 struct i915_vma *vma;	local in function:igt_gtt_insert
   1644 struct i915_vma *vma;	local in function:igt_gtt_insert
   1789 struct i915_vma *vma;	local in function:igt_cs_tlb
    [all...]
i915_vma.c
     41 static bool assert_vma(struct i915_vma *vma,
     47 if (vma->vm != rcu_access_pointer(ctx->vm)) {
     48 pr_err("VMA created with wrong VM\n");
     52 if (vma->size != obj->base.size) {
     53 pr_err("VMA created with wrong size, found %llu, expected %zu\n",
     54 vma->size, obj->base.size);
     58 if (vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL) {
     59 pr_err("VMA created with wrong type [%d]\n",
     60 vma->ggtt_view.type);
     72 struct i915_vma *vma;	local in function:checked_vma_instance
    123 struct i915_vma *vma;	local in function:create_vmas
    316 struct i915_vma *vma;	local in function:igt_vma_pin1
    538 struct i915_vma *vma;	local in function:igt_vma_rotate_remap
    717 struct i915_vma *vma;	local in function:igt_vma_partial
    895 struct i915_vma *vma;	local in function:igt_vma_remapped_gtt
    [all...]
/src/sys/external/bsd/drm2/dist/drm/i915/gem/selftests/
igt_gem_utils.c
     45 igt_emit_store_dw(struct i915_vma *vma,
     51 const int gen = INTEL_GEN(vma->vm->i915);
     58 obj = i915_gem_object_create_internal(vma->vm->i915, size);
     68 GEM_BUG_ON(offset + (count - 1) * PAGE_SIZE > vma->node.size);
     69 offset += vma->node.start;
     93 intel_gt_chipset_flush(vma->vm->gt);
     95 vma = i915_vma_instance(obj, vma->vm, NULL);
     96 if (IS_ERR(vma)) {
     97 err = PTR_ERR(vma);
    [all...]
igt_gem_utils.h
     25 igt_emit_store_dw(struct i915_vma *vma,
     31 struct i915_vma *vma, u64 offset,
huge_pages.c
    337 static int igt_check_page_sizes(struct i915_vma *vma)
    339 struct drm_i915_private *i915 = vma->vm->i915;
    341 struct drm_i915_gem_object *obj = vma->obj;
    345 err = i915_vma_sync(vma);
    349 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.sg)) {
    351 vma->page_sizes.sg & ~supported, supported);
    355 if (!HAS_PAGE_SIZES(i915, vma->page_sizes.gtt)) {
    357 vma->page_sizes.gtt & ~supported, supported);
    361 if (vma->page_sizes.phys != obj->mm.page_sizes.phys) {
    362 pr_err("vma->page_sizes.phys(%u) != obj->mm.page_sizes.phys(%u)\n"
    388 struct i915_vma *vma;	local in function:igt_mock_exhaust_device_supported_pages
    469 struct i915_vma *vma;	local in function:igt_mock_memory_region_huge_pages
    568 struct i915_vma *vma;	local in function:igt_mock_ppgtt_misaligned_dma
    678 struct i915_vma *vma;	local in function:close_object_list
    705 struct i915_vma *vma;	local in function:igt_mock_ppgtt_huge_fill
    875 struct i915_vma *vma;	local in function:igt_mock_ppgtt_64K
   1069 struct i915_vma *vma;	local in function:__igt_write_huge
   1559 struct i915_vma *vma;	local in function:igt_ppgtt_pin_update
   1698 struct i915_vma *vma;	local in function:igt_tmpfs_fallback
   1758 struct i915_vma *vma;	local in function:igt_shrink_thp
    [all...]
/src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_client_blt.c
     19 struct i915_vma *vma;	member in struct:i915_sleeve
     25 static int vma_set_pages(struct i915_vma *vma)
     27 struct i915_sleeve *sleeve = vma->private;
     29 vma->pages = sleeve->pages;
     30 vma->page_sizes = sleeve->page_sizes;
     35 static void vma_clear_pages(struct i915_vma *vma)
     37 GEM_BUG_ON(!vma->pages);
     38 vma->pages = NULL;
     41 static int vma_bind(struct i915_vma *vma,
     45 return vma->vm->vma_ops.bind_vma(vma, cache_level, flags)
     66 struct i915_vma *vma;	local in function:create_sleeve
    164 struct i915_vma *vma = w->sleeve->vma;	local in function:clear_pages_worker
    [all...]
i915_gem_object_blt.h
     21 struct i915_vma *vma,
     28 int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq);
     29 void intel_emit_vma_release(struct intel_context *ce, struct i915_vma *vma);
i915_gem_object_blt.c
     21 struct i915_vma *vma,
     38 count = div_u64(round_up(vma->size, block_size), block_size);
     53 rem = vma->size;
     54 offset = vma->node.start;
    110 int intel_emit_vma_mark_active(struct i915_vma *vma, struct i915_request *rq)
    114 i915_vma_lock(vma);
    115 err = i915_request_await_object(rq, vma->obj, false);
    117 err = i915_vma_move_to_active(vma, rq, 0);
    118 i915_vma_unlock(vma);
    122 return intel_engine_pool_mark_active(vma->private, rq)
    138 struct i915_vma *vma;	local in function:i915_gem_object_fill_blt
    325 struct i915_vma *vma[2], *batch;	local in function:i915_gem_object_copy_blt
    [all...]
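Line 38 sizes the blit job: count = div_u64(round_up(vma->size, block_size), block_size), after which lines 53-54 walk the object block by block. The same chunking arithmetic, standalone; the 8 MiB block size and the object size are arbitrary examples, not the driver's values.

#include <stdint.h>
#include <stdio.h>

static uint64_t round_up_u64(uint64_t x, uint64_t to)
{
	return (x + to - 1) / to * to;	/* assumes to != 0 */
}

int main(void)
{
	const uint64_t block_size = 8ull << 20;	/* e.g. 8 MiB per blit */
	uint64_t size = (21ull << 20) + 4096;	/* a 21 MiB + 1 page object */
	uint64_t count = round_up_u64(size, block_size) / block_size;
	uint64_t rem = size, offset = 0;

	printf("%llu blocks\n", (unsigned long long)count);
	while (rem) {
		uint64_t len = rem < block_size ? rem : block_size;

		printf("blit [%#llx, +%#llx)\n",
		    (unsigned long long)offset, (unsigned long long)len);
		offset += len;
		rem -= len;
	}
	return 0;
}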
i915_gem_object.c
     66 spin_lock_init(&obj->vma.lock);
     67 INIT_LIST_HEAD(&obj->vma.list);
    150 struct i915_vma *vma;	local in function:i915_gem_close_object
    154 * vma, in the same fd namespace, by virtue of flink/open.
    158 vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
    159 if (vma) {
    160 GEM_BUG_ON(vma->obj != obj);
    161 GEM_BUG_ON(!atomic_read(&vma->open_count));
    162 if (atomic_dec_and_test(&vma->open_count) &&
    163 !i915_vma_is_ggtt(vma))
    206 struct i915_vma *vma;	local in function:__i915_gem_free_objects
    339 struct i915_vma *vma;	local in function:i915_gem_object_flush_write_domain
    [all...]
i915_gem_tiling.c
    166 static bool i915_vma_fence_prepare(struct i915_vma *vma,
    169 struct drm_i915_private *i915 = vma->vm->i915;
    172 if (!i915_vma_is_map_and_fenceable(vma))
    175 size = i915_gem_fence_size(i915, vma->size, tiling_mode, stride);
    176 if (vma->node.size < size)
    179 alignment = i915_gem_fence_alignment(i915, vma->size, tiling_mode, stride);
    180 if (!IS_ALIGNED(vma->node.start, alignment))
    192 struct i915_vma *vma;	local in function:i915_gem_object_fence_prepare
    199 for_each_ggtt_vma(vma, obj) {
    200 if (i915_vma_fence_prepare(vma, tiling_mode, stride)
    217 struct i915_vma *vma;	local in function:i915_gem_object_set_tiling
    [all...]
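i915_vma_fence_prepare() (lines 172-180) accepts a new tiling mode only if the VMA's node is big enough for the required fence and its start is aligned to the fence alignment. A sketch of those two checks; the concrete size and alignment values are illustrative stand-ins for i915_gem_fence_size()/i915_gem_fence_alignment().

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* assumes a is a power of two, as fence alignments are */
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

struct mock_node { uint64_t start, size; };

static bool fence_prepare(const struct mock_node *node,
    uint64_t fence_size, uint64_t fence_alignment)
{
	if (node->size < fence_size)	/* node must cover the whole fence */
		return false;
	if (!IS_ALIGNED(node->start, fence_alignment))
		return false;
	return true;
}

int main(void)
{
	struct mock_node node = { .start = 0x40000, .size = 0x80000 };

	/* e.g. a tiled object needing a 512 KiB fence aligned to 256 KiB */
	printf("ok=%d\n", fence_prepare(&node, 0x80000, 0x40000));
	return 0;
}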
/src/sys/external/bsd/drm2/dist/drm/nouveau/nvkm/subdev/mmu/
nouveau_nvkm_subdev_mmu_vmm.c
    757 struct nvkm_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);	local in function:nvkm_vma_new
    758 if (vma) {
    759 vma->addr = addr;
    760 vma->size = size;
    761 vma->page = NVKM_VMA_PAGE_NONE;
    762 vma->refd = NVKM_VMA_PAGE_NONE;
    764 return vma;
    768 nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
    772 BUG_ON(vma->size == tail)
   1081 struct nvkm_vma *vma;	local in function:nvkm_vmm_dump
   1090 struct nvkm_vma *vma;	local in function:nvkm_vmm_dtor
   1102 struct nvkm_vma *vma = rb_entry(node, typeof(*vma), tree);	local in function:nvkm_vmm_dtor
   1148 struct nvkm_vma *vma;	local in function:nvkm_vmm_ctor_managed
   1169 struct nvkm_vma *vma;	local in function:nvkm_vmm_ctor
   1308 struct nvkm_vma *vma = nvkm_vmm_node_search(vmm, addr);	local in function:nvkm_vmm_pfn_unmap
   1346 struct nvkm_vma *vma, *tmp;	local in function:nvkm_vmm_pfn_map
   1760 struct nvkm_vma *vma = *pvma;	local in function:nvkm_vmm_put
   1777 struct nvkm_vma *vma = NULL, *tmp;	local in function:nvkm_vmm_get_locked
    [all...]
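nvkm_vma_tail() (line 768) splits the last `tail` bytes of a VMA into a freshly allocated node; the BUG_ON at line 772 forbids a degenerate split. A standalone sketch of that split, using malloc in place of the kernel allocator and assert in place of BUG_ON.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct mock_vma { uint64_t addr, size; };

static struct mock_vma *vma_tail(struct mock_vma *vma, uint64_t tail)
{
	struct mock_vma *new;

	assert(vma->size > tail);	/* mirrors BUG_ON(vma->size == tail) */

	new = malloc(sizeof(*new));
	if (!new)
		return NULL;
	vma->size -= tail;			/* head keeps the leading part... */
	new->addr = vma->addr + vma->size;	/* ...tail starts right after it */
	new->size = tail;
	return new;
}

int main(void)
{
	struct mock_vma vma = { .addr = 0x10000, .size = 0x8000 };
	struct mock_vma *tail = vma_tail(&vma, 0x2000);

	printf("head %#llx+%#llx, tail %#llx+%#llx\n",
	    (unsigned long long)vma.addr, (unsigned long long)vma.size,
	    (unsigned long long)tail->addr, (unsigned long long)tail->size);
	free(tail);
	return 0;
}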
nouveau_nvkm_subdev_mmu_uvmm.c
    121 struct nvkm_vma *vma;	local in function:nvkm_uvmm_mthd_unmap
    131 vma = nvkm_vmm_node_search(vmm, addr);
    132 if (ret = -ENOENT, !vma || vma->addr != addr) {
    134 addr, vma ? vma->addr : ~(u64)0);
    138 if (ret = -ENOENT, (!vma->user && !client->super) || vma->busy) {
    140 vma->user, !client->super, vma->busy)
    165 struct nvkm_vma *vma;	local in function:nvkm_uvmm_mthd_map
    243 struct nvkm_vma *vma;	local in function:nvkm_uvmm_mthd_put
    281 struct nvkm_vma *vma;	local in function:nvkm_uvmm_mthd_get
    [all...]
/src/sys/external/bsd/drm2/dist/drm/vmwgfx/
vmwgfx_ttm_glue.c
     35 int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
     46 int ret = ttm_bo_mmap(filp, vma, &dev_priv->bdev);
     51 vma->vm_ops = &vmw_vm_ops;
     54 if ((vma->vm_flags & (VM_SHARED | VM_MAYWRITE)) != VM_MAYWRITE)
     55 vma->vm_flags = (vma->vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;
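Line 55 swaps VM_MIXEDMAP for VM_PFNMAP on the freshly mmap()ed range while preserving every other flag. The clear-then-set bitmask idiom, standalone; the flag values below are illustrative constants, not taken from any kernel header.

#include <stdio.h>

#define VM_MIXEDMAP 0x10000000UL	/* illustrative value */
#define VM_PFNMAP   0x00000400UL	/* illustrative value */

int main(void)
{
	unsigned long vm_flags = VM_MIXEDMAP | 0x3UL;	/* plus some other flags */

	/* drop MIXEDMAP, force PFNMAP, preserve everything else */
	vm_flags = (vm_flags & ~VM_MIXEDMAP) | VM_PFNMAP;

	printf("flags=%#lx\n", vm_flags);
	return 0;
}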
/src/sys/external/bsd/drm2/dist/drm/ttm/
ttm_bo_vm.c
     75 up_read(&vmf->vma->vm_mm->mmap_sem);
    146 up_read(&vmf->vma->vm_mm->mmap_sem);
    186 struct vm_area_struct *vma = vmf->vma;	local in function:ttm_bo_vm_fault_reserved
    187 struct ttm_buffer_object *bo = vma->vm_private_data;
    247 page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) +
    248 vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node);
    249 page_last = vma_pages(vma) + vma->vm_pgoff -
    298 * the value of @vma->vm_page_prot in the caching- an
    332 struct vm_area_struct *vma = vmf->vma;	local in function:ttm_bo_vm_fault
    [all...]
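Lines 247-249 convert the faulting user address into a page index within the buffer object: offset within the mapping, plus the mmap page offset, minus the object's base in the vma manager. The same arithmetic as a runnable check; drm_vma_node_start() and the address values are mocked constants.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
	uint64_t vm_start = 0x7f0000000000ULL;	/* start of the user mapping */
	uint64_t vm_pgoff = 0x100;		/* mmap offset, in pages */
	uint64_t node_start = 0x100;		/* drm_vma_node_start(&bo->base.vma_node) */
	uint64_t vma_pages = 16;		/* length of the mapping, in pages */
	uint64_t address = vm_start + (5ULL << PAGE_SHIFT);	/* faulting address */

	uint64_t page_offset = ((address - vm_start) >> PAGE_SHIFT)
	    + vm_pgoff - node_start;
	uint64_t page_last = vma_pages + vm_pgoff - node_start;

	printf("page_offset=%llu page_last=%llu\n",
	    (unsigned long long)page_offset, (unsigned long long)page_last);
	return 0;
}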