/src/sys/external/bsd/drm2/dist/drm/i915/selftests/

mock_gtt.c
    32: static void mock_insert_page(struct i915_address_space *vm,
    40: static void mock_insert_entries(struct i915_address_space *vm,
    59: static void mock_cleanup(struct i915_address_space *vm)
    63: static void mock_clear_range(struct i915_address_space *vm,
    76: ppgtt->vm.gt = &i915->gt;
    77: ppgtt->vm.i915 = i915;
    78: ppgtt->vm.total = round_down(U64_MAX, PAGE_SIZE);
    79: ppgtt->vm.file = ERR_PTR(-ENODEV);
    81: i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);
    83: ppgtt->vm.clear_range = mock_clear_range;
    [all...]

i915_gem_gtt.c
    164: if (!ppgtt->vm.allocate_va_range)
    175: limit = min(ppgtt->vm.total, limit);
    179: err = ppgtt->vm.allocate_va_range(&ppgtt->vm, 0, size);
    191: ppgtt->vm.clear_range(&ppgtt->vm, 0, size);
    196: err = ppgtt->vm.allocate_va_range(&ppgtt->vm,
    211: i915_vm_put(&ppgtt->vm);
    215: static int lowlevel_hole(struct i915_address_space *vm,
    1256: struct i915_address_space *vm;  (local in function exercise_mock)
    1786: struct i915_address_space *vm;  (local in function igt_cs_tlb)
    [all...]

i915_gem_evict.c
    60: obj = i915_gem_object_create_internal(ggtt->vm.i915,
    78: count, ggtt->vm.total / PAGE_SIZE);
    104: if (list_empty(&ggtt->vm.bound_list)) {
    116: list_for_each_entry(vma, &ggtt->vm.bound_list, vm_link)
    131: i915_gem_drain_freed_objects(ggtt->vm.i915);
    148: mutex_lock(&ggtt->vm.mutex);
    149: err = i915_gem_evict_something(&ggtt->vm,
    153: mutex_unlock(&ggtt->vm.mutex);
    163: mutex_lock(&ggtt->vm.mutex);
    164: err = i915_gem_evict_something(&ggtt->vm,
    [all...]

/src/sys/kern/

subr_vmem.c
    110: #define VMEM_CONDVAR_INIT(vm, wchan) cv_init(&vm->vm_cv, wchan)
    111: #define VMEM_CONDVAR_DESTROY(vm) cv_destroy(&vm->vm_cv)
    112: #define VMEM_CONDVAR_WAIT(vm) cv_wait(&vm->vm_cv, &vm->vm_lock)
    113: #define VMEM_CONDVAR_BROADCAST(vm) cv_broadcast(&vm->vm_cv)
    120: #define VMEM_CONDVAR_INIT(vm, wchan) __nothing
    616: vmem_t *vm = qc->qc_vmem;  (local in function qc_poolpage_alloc)
    629: vmem_t *vm = qc->qc_vmem;  (local in function qc_poolpage_free)
    1562: vmem_t *vm;  (local in function vmem_rehash_all)
    1692: vmem_t *vm;  (local in function vmem_whatis)
    1711: const vmem_t *vm;  (local in function vmem_printall)
    1721: const vmem_t *vm = (const void *)addr;  (local in function vmem_print)
    1796: vmem_t *vm;  (local in function main)
    [all...]
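
The subr_vmem.c hits show the condvar wrappers defined twice: lines 110-113 forward to cv_init()/cv_wait()/cv_broadcast() for the kernel build, while the block at line 120 redefines them as __nothing so the same call sites compile in the userland test build (note the main() at line 1796). A minimal userspace sketch of that dual-definition pattern, assuming hypothetical ARENA_* names and pthreads as the "real" primitives:

    #include <pthread.h>
    #include <stdio.h>

    /* Pick real primitives or stubs at compile time; call sites don't change. */
    #if defined(USE_PTHREADS)
    #define ARENA_LOCK_INIT(a)    pthread_mutex_init(&(a)->ra_lock, NULL)
    #define ARENA_CV_INIT(a)      pthread_cond_init(&(a)->ra_cv, NULL)
    #define ARENA_CV_BROADCAST(a) pthread_cond_broadcast(&(a)->ra_cv)
    #else
    /* single-threaded build: everything expands away, like __nothing */
    #define ARENA_LOCK_INIT(a)
    #define ARENA_CV_INIT(a)
    #define ARENA_CV_BROADCAST(a)
    #endif

    struct res_arena {
    #if defined(USE_PTHREADS)
        pthread_mutex_t ra_lock;
        pthread_cond_t  ra_cv;
    #endif
        int ra_free;                    /* units still available */
    };

    int
    main(void)
    {
        struct res_arena a = { .ra_free = 4 };

        ARENA_LOCK_INIT(&a);
        ARENA_CV_INIT(&a);
        a.ra_free--;
        ARENA_CV_BROADCAST(&a);         /* wakes waiters only when threaded */
        printf("free units: %d\n", a.ra_free);
        return 0;
    }

Build with -DUSE_PTHREADS -lpthread for the locked variant; without the flag the macros vanish and the program is the single-threaded analogue of the vmem self-test build.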

sys_process.c
    154: struct vmspace *vm;  (local in function process_domem)
    169: vm = p->p_vmspace;
    171: if ((l->l_flag & LW_WEXIT) || vm->vm_refcnt < 1)
    177: error = uvm_io(&vm->vm_map, uio, pax_mprotect_prot(l));
    183: uvmspace_free(vm);

/src/sys/external/bsd/drm2/dist/drm/i915/gt/

intel_gtt.c
    57: static struct page *vm_alloc_page(struct i915_address_space *vm, gfp_t gfp)
    62: if (I915_SELFTEST_ONLY(should_fail(&vm->fault_attr, 1)))
    63: i915_gem_shrink_all(vm->i915);
    65: page = stash_pop_page(&vm->free_pages);
    69: if (!vm->pt_kmap_wc)
    73: page = stash_pop_page(&vm->i915->mm.wc_stash);
    101: stash_push_pagevec(&vm->i915->mm.wc_stash, &stack);
    103: /* Push any surplus WC pages onto the local VM stash */
    105: stash_push_pagevec(&vm->free_pages, &stack);
    117: static void vm_free_pages_release(struct i915_address_space *vm,
    218: struct i915_address_space *vm =  (local in function __i915_vm_release)
    229: struct i915_address_space *vm =  (local in function i915_vm_release)
    [all...]

gen8_ppgtt.c
    35: struct drm_i915_private *i915 = ppgtt->vm.i915;
    36: struct intel_uncore *uncore = ppgtt->vm.gt->uncore;
    47: if (i915_vm_is_4lvl(&ppgtt->vm)) {
    126: gen8_pd_top_count(const struct i915_address_space *vm)
    128: unsigned int shift = __gen8_pte_shift(vm->top);
    129: return (vm->total + (1ull << shift) - 1) >> shift;
    133: gen8_pdp_for_page_index(struct i915_address_space * const vm, const u64 idx)
    135: struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(vm);
    137: if (vm->top == 2)
    140: return i915_pd_entry(ppgtt->pd, gen8_pd_index(idx, vm->top));
    705: struct i915_address_space *vm = &ppgtt->vm;  (local in function gen8_preallocate_top_level_pdp)
    [all...]
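
gen8_pd_top_count() (lines 126-129 above) is a ceiling division by a power of two: adding (1ull << shift) - 1 before the right shift rounds vm->total up to whole top-level entries. A standalone demonstration of the idiom; the shift value 39 is illustrative only, not taken from the hardware layout:

    #include <stdint.h>
    #include <stdio.h>

    /* entries needed to cover 'total' bytes at 'shift' bits per entry */
    static unsigned int
    top_count(uint64_t total, unsigned int shift)
    {
        return (unsigned int)((total + (1ull << shift) - 1) >> shift);
    }

    int
    main(void)
    {
        printf("%u\n", top_count(1ull << 39, 39));        /* exactly 1 */
        printf("%u\n", top_count((1ull << 39) + 1, 39));  /* one byte over: 2 */
        return 0;
    }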

intel_ggtt.c
    53: struct drm_i915_private *i915 = ggtt->vm.i915;
    55: i915_address_space_init(&ggtt->vm, VM_CLASS_GGTT);
    57: ggtt->vm.is_ggtt = true;
    60: ggtt->vm.has_read_only = IS_VALLEYVIEW(i915);
    63: ggtt->vm.mm.color_adjust = i915_ggtt_color_adjust;
    69: ggtt->vm.cleanup(&ggtt->vm);
    81: ggtt->vm.cleanup(&ggtt->vm);
    135: struct drm_i915_private *i915 = ggtt->vm.i915;
    416: struct i915_address_space *vm;  (member in struct insert_page)
    444: struct i915_address_space *vm;  (member in struct insert_entries)
    471: struct i915_address_space *vm;  (member in struct clear_range)
    732: struct i915_address_space *vm = vma->vm;  (local in function aliasing_gtt_unbind_vma)
    738: struct i915_address_space *vm =  (local in function aliasing_gtt_unbind_vma)
    [all...]

intel_gtt.h
    77: #define ggtt_total_entries(ggtt) ((ggtt)->vm.total >> PAGE_SHIFT)
    279: * Since the vm may be shared between multiple contexts, we count how
    317: int (*allocate_va_range)(struct i915_address_space *vm,
    319: void (*clear_range)(struct i915_address_space *vm,
    321: void (*insert_page)(struct i915_address_space *vm,
    326: void (*insert_entries)(struct i915_address_space *vm,
    330: void (*cleanup)(struct i915_address_space *vm);
    347: struct i915_address_space vm;  (member in struct i915_ggtt)
    409: struct i915_address_space vm;  (member in struct i915_ppgtt)
    414: #define i915_is_ggtt(vm) ((vm)->is_ggtt)
    [all...]
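
The declarations at lines 317-330 make the address-space operations a table of function pointers, which is exactly what lets the mock_gtt.c selftest at the top of this listing plug in no-op backends. A self-contained sketch of that ops-table pattern; the toy_* names are hypothetical stand-ins, not driver API:

    #include <stdint.h>
    #include <stdio.h>

    struct toy_space {
        uint64_t total;
        void (*clear_range)(struct toy_space *, uint64_t start, uint64_t len);
        void (*cleanup)(struct toy_space *);
    };

    static void
    toy_clear_range(struct toy_space *s, uint64_t start, uint64_t len)
    {
        (void)s;
        printf("clear [%#llx, %#llx)\n", (unsigned long long)start,
            (unsigned long long)(start + len));
    }

    static void
    toy_cleanup(struct toy_space *s)
    {
        printf("cleanup, total=%#llx\n", (unsigned long long)s->total);
    }

    int
    main(void)
    {
        struct toy_space s = { .total = 1ull << 32 };

        /* a mock backend fills the same slots a real one would */
        s.clear_range = toy_clear_range;
        s.cleanup = toy_cleanup;

        s.clear_range(&s, 0, s.total);  /* callers never name the backend */
        s.cleanup(&s);
        return 0;
    }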

intel_ppgtt.c
    18: struct i915_page_table *alloc_pt(struct i915_address_space *vm)
    26: if (unlikely(setup_page_dma(vm, &pt->base))) {
    47: struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
    55: if (unlikely(setup_page_dma(vm, px_base(pd)))) {
    64: void free_pd(struct i915_address_space *vm, struct i915_page_dma *pd)
    66: cleanup_page_dma(vm, pd);
    163: trace_i915_ppgtt_create(&ppgtt->vm);
    176: err = vma->vm->allocate_va_range(vma->vm,
    190: vma->vm->insert_entries(vma->vm, vma, cache_level, pte_flags);
    [all...]

gen6_ppgtt.c
    89: static void gen6_ppgtt_clear_range(struct i915_address_space *vm,
    92: struct gen6_ppgtt * const ppgtt = to_gen6_ppgtt(i915_vm_to_ppgtt(vm));
    94: const gen6_pte_t scratch_pte = vm->scratch[0].encode;
    105: GEM_BUG_ON(px_base(pt) == px_base(&vm->scratch[1]));
    128: static void gen6_ppgtt_insert_entries(struct i915_address_space *vm,
    133: struct i915_ppgtt *ppgtt = i915_vm_to_ppgtt(vm);
    138: const u32 pte_encode = vm->pte_encode(0, cache_level, flags);
    142: GEM_BUG_ON(pd->entry[act_pt] == &vm->scratch[1]);
    210: gen6_ggtt_invalidate(ppgtt->base.vm.gt->ggtt);
    216: static int gen6_alloc_va_range(struct i915_address_space *vm,
    275: struct i915_address_space * const vm = &ppgtt->base.vm;  (local in function gen6_ppgtt_init_scratch)
    [all...]

/src/sys/external/bsd/drm2/include/linux/sched/

mm.h
    39: mmgrab(struct vmspace *vm)
    41: uvmspace_addref(vm);
    45: mmdrop(struct vmspace *vm)
    47: uvmspace_free(vm);
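
This header is a four-line shim: Linux's mmgrab()/mmdrop() become NetBSD's uvmspace_addref()/uvmspace_free(), so ported drm code keeps its spelling while the reference actually lands on the vmspace. A userspace sketch of the same idea over a stub refcount (every name below is a stand-in, not the kernel API):

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct vmspace_stub {
        atomic_int refcnt;
    };

    static void
    uvmspace_addref_stub(struct vmspace_stub *vm)
    {
        atomic_fetch_add(&vm->refcnt, 1);
    }

    static void
    uvmspace_free_stub(struct vmspace_stub *vm)
    {
        if (atomic_fetch_sub(&vm->refcnt, 1) == 1)
            free(vm);                   /* last reference: reclaim */
    }

    /* the shim layer: Linux names, native semantics */
    static inline void mmgrab(struct vmspace_stub *vm) { uvmspace_addref_stub(vm); }
    static inline void mmdrop(struct vmspace_stub *vm) { uvmspace_free_stub(vm); }

    int
    main(void)
    {
        struct vmspace_stub *vm = malloc(sizeof(*vm));

        atomic_init(&vm->refcnt, 1);
        mmgrab(vm);                     /* ported code keeps its spelling */
        mmdrop(vm);
        mmdrop(vm);                     /* final reference dropped */
        return 0;
    }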

/src/sys/external/bsd/drm2/dist/drm/radeon/

radeon_vm.c
    44: * for the entire GPU, there are multiple VM page tables active
    45: * at any given time. The VM page tables can contain a mix
    49: * Each VM has an ID associated with it and there is a page table
    85: * radeon_vm_manager_init - init the vm manager
    89: * Init the vm manager (cayman+).
    107: * radeon_vm_manager_fini - tear down the vm manager
    111: * Tear down the VM manager (cayman+).
    127: * radeon_vm_get_bos - add the vm BOs to a validation list
    129: * @vm: vm providing the BO
    459: struct radeon_vm *vm = bo_va->vm;  (local in function radeon_vm_bo_set_addr)
    923: struct radeon_vm *vm = bo_va->vm;  (local in function radeon_vm_bo_update)
    1129: struct radeon_vm *vm = bo_va->vm;  (local in function radeon_vm_bo_rmv)
    [all...]

/src/usr.sbin/videomode/

videomode.c
    114: struct grfvideo_mode vm;  (local in function dump_mode)
    126: vm.mode_num = 0;
    127: if (ioctl(grffd, GRFGETVMODE, &vm) == 0)
    128: dump_vm(&vm);
    136: vm.mode_num = m;
    137: if (ioctl(grffd, GRFGETVMODE, &vm) == -1)
    139: dump_vm(&vm);
    155: dump_vm(struct grfvideo_mode *vm)
    157: (void)printf("%d: %s\n", vm->mode_num, vm->mode_descr);
    [all...]

/src/sys/external/bsd/drm2/dist/drm/amd/amdgpu/

amdgpu_vm.c
    50: * for the entire GPU, there are multiple VM page tables active
    51: * at any given time. The VM page tables can contain a mix
    55: * Each VM has an ID associated with it and there is a page table
    92: * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS
    96: static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm)
    98: mutex_lock(&vm->eviction_lock);
    99: vm->saved_flags = memalloc_nofs_save();
    102: static inline int amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm)
    104: if (mutex_trylock(&vm->eviction_lock)) {
    105: vm->saved_flags = memalloc_nofs_save();
    235: struct amdgpu_vm *vm = vm_bo->vm;  (local in function amdgpu_vm_bo_evicted)
    626: struct amdgpu_vm *vm = bo_base->vm;  (local in function amdgpu_vm_del_from_lru_notify)
    1778: struct amdgpu_vm *vm = bo_va->base.vm;  (local in function amdgpu_vm_bo_update)
    2176: struct amdgpu_vm *vm = bo_va->base.vm;  (local in function amdgpu_vm_bo_insert_map)
    2217: struct amdgpu_vm *vm = bo_va->base.vm;  (local in function amdgpu_vm_bo_map)
    2339: struct amdgpu_vm *vm = bo_va->base.vm;  (local in function amdgpu_vm_bo_unmap)
    2542: struct amdgpu_vm *vm = bo_va->base.vm;  (local in function amdgpu_vm_bo_rmv)
    2643: struct amdgpu_vm *vm = bo_base->vm;  (local in function amdgpu_vm_bo_invalidate)
    3287: struct amdgpu_vm *vm;  (local in function amdgpu_vm_get_task_info)
    3343: struct amdgpu_vm *vm;  (local in function amdgpu_vm_handle_fault)
    [all...]

amdgpu_vm.h
    100: /* How to program VM fault handling */
    137: /* base structure for tracking BO usage in a VM */
    140: struct amdgpu_vm *vm;  (member in struct amdgpu_vm_bo_base)
    191: * Encapsulate some VM table update parameters to reduce
    203: * @vm: optional amdgpu_vm we do this update for
    205: struct amdgpu_vm *vm;  (member in struct amdgpu_vm_update_params)
    250: * use vm_eviction_lock/unlock(vm)
    262: /* per VM BOs moved, but not yet updated in the PT */
    265: /* All BOs of this VM not currently in the state machine */
    288: /* dedicated to vm */
    [all...]

/src/tests/bin/sh/

t_patterns.sh
    726: vm()  (function)
    736: vm abc \# a bc; vm aaab \# a aab; vm aaab \## 'a*a' b # 3
    737: vm aaab % ab aa; vm xawab %% 'a*ab' x; vm abcd \# xyz abcd
    738: vm file.c % .c 'f le' IFS=i ; vm file.c % .c file IFS=i Q
    739: vm file.c % ?c file ; vm file.c % '"?c"' file.c # 9 1
    [all...]

/src/lib/libkvm/

kvm_m68k.c
    88: struct vmstate *vm;  (local in function _kvm_initvtop)
    90: vm = (struct vmstate *)_kvm_malloc(kd, sizeof (*vm));
    91: if (vm == 0)
    94: kd->vmst = vm;
    107: vm->ops = nop->ops;
    112: for (vm->pgshift = 0; (1 << vm->pgshift) < h->page_size; vm->pgshift++)
    114: if ((1 << vm->pgshift) != h->page_size)
    [all...]
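
The loop at line 112 derives the page shift from the crash-dump header's page size, and line 114 rejects sizes that are not powers of two (the probed shift would overshoot). The same probe as a standalone function:

    #include <stdio.h>

    static int
    page_shift(unsigned int page_size)
    {
        int shift;

        for (shift = 0; (1u << shift) < page_size; shift++)
            continue;
        if ((1u << shift) != page_size)
            return -1;                  /* not a power of two: bad header */
        return shift;
    }

    int
    main(void)
    {
        printf("%d\n", page_shift(4096));   /* 12 */
        printf("%d\n", page_shift(8192));   /* 13 */
        printf("%d\n", page_shift(4000));   /* -1 */
        return 0;
    }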

kvm_vax.c
    38: * vm code will one day obsolete this module. Furthermore, I hope it
    78: struct vmstate *vm;  (local in function _kvm_initvtop)
    82: vm = (struct vmstate *)_kvm_malloc(kd, sizeof(*vm));
    83: if (vm == 0)
    86: kd->vmst = vm;
    98: vm->end = (u_long)nl[0].n_value;
    107: * mapping information in kd->vm. Returns the result in pa, and returns

/src/sys/uvm/

uvm_unix.c
    44: * uvm_unix.c: traditional sbrk/grow interface to vm.
    74: struct vmspace *vm = p->p_vmspace;  (local in function sys_obreak)
    79: obreak = (vaddr_t)vm->vm_daddr;
    88: obreak = round_page(obreak + ptoa(vm->vm_dsize));
    105: error = uvm_map(&vm->vm_map, &obreak, nbreak - obreak, NULL,
    119: vm->vm_dsize += atop(nbreak - obreak);
    121: uvm_deallocate(&vm->vm_map, nbreak, obreak - nbreak);
    122: vm->vm_dsize -= atop(obreak - nbreak);
    136: struct vmspace *vm = p->p_vmspace;  (local in function uvm_grow)
    143: if (sp < (vaddr_t)vm->vm_minsaddr)
    [all...]
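
sys_obreak() works in whole pages: the current break is vm_daddr plus ptoa(vm_dsize), rounded up, and growing or shrinking adjusts vm_dsize by atop() of the byte delta. A runnable sketch of that arithmetic; PAGE_SHIFT and the atop/ptoa/round_page macros below are local mirrors of UVM's definitions for a 4 KiB page, not the real headers:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT    12
    #define PAGE_SIZE     (1u << PAGE_SHIFT)
    #define PAGE_MASK     (PAGE_SIZE - 1)
    #define atop(x)       ((x) >> PAGE_SHIFT)
    #define ptoa(x)       ((uintptr_t)(x) << PAGE_SHIFT)
    #define round_page(x) (((x) + PAGE_MASK) & ~(uintptr_t)PAGE_MASK)

    int
    main(void)
    {
        uintptr_t daddr = 0x20000000;   /* start of the data segment */
        size_t dsize = 16;              /* current size, in pages */
        uintptr_t obreak = round_page(daddr + ptoa(dsize));
        uintptr_t nbreak = round_page(obreak + 12345); /* requested break */

        if (nbreak > obreak)
            dsize += atop(nbreak - obreak);     /* grow: map the gap */
        else if (nbreak < obreak)
            dsize -= atop(obreak - nbreak);     /* shrink: unmap the tail */

        printf("new dsize: %zu pages\n", dsize);        /* 20 */
        return 0;
    }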

/src/sys/external/bsd/drm2/dist/drm/i915/

i915_gem_evict.c
    75: * @vm: address space to evict from
    97: i915_gem_evict_something(struct i915_address_space *vm,
    111: lockdep_assert_held(&vm->mutex);
    112: trace_i915_gem_evict(vm, min_size, alignment, flags);
    130: drm_mm_scan_init_with_range(&scan, &vm->mm,
    134: intel_gt_retire_requests(vm->gt);
    139: list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
    167: list_move_tail(&vma->vm_link, &vm->bound_list);
    188: if (!i915_is_ggtt(vm) || flags & PIN_NONBLOCK)
    207: ret = ggtt_flush(vm->gt);
    [all...]
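
i915_gem_evict_something() drives the drm_mm scan helpers over vm->bound_list: candidates are accumulated oldest-first until they would free enough room, and only then is the collected set unbound. The toy below keeps just the size accounting of that two-phase scan; the real code also checks alignment and placement constraints:

    #include <stddef.h>
    #include <stdio.h>

    struct toy_vma {
        size_t size;
        int evicted;
    };

    static long
    evict_something(struct toy_vma *bound, size_t n, size_t min_size)
    {
        size_t found = 0, i, end;

        /* phase 1: collect candidates until the request would fit */
        for (i = 0; i < n && found < min_size; i++)
            found += bound[i].size;
        if (found < min_size)
            return -1;                  /* nothing worth stealing */

        /* phase 2: evict exactly the collected set */
        end = i;
        for (i = 0; i < end; i++)
            bound[i].evicted = 1;
        return (long)end;
    }

    int
    main(void)
    {
        struct toy_vma bound[] = { { 4096, 0 }, { 8192, 0 }, { 4096, 0 } };

        printf("evicted %ld nodes\n", evict_something(bound, 3, 10000)); /* 2 */
        return 0;
    }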

i915_gem_gtt.c
    91: if (intel_gt_retire_requests_timeout(ggtt->vm.gt,
    104: * @vm: the &struct i915_address_space
    127: int i915_gem_gtt_reserve(struct i915_address_space *vm,
    137: GEM_BUG_ON(range_overflows(offset, size, vm->total));
    138: GEM_BUG_ON(vm == &vm->i915->ggtt.alias->vm);
    145: err = drm_mm_reserve_node(&vm->mm, node);
    152: err = i915_gem_evict_for_node(vm, node, flags);
    154: err = drm_mm_reserve_node(&vm->mm, node);
    [all...]
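
i915_gem_gtt_reserve() attempts the exact placement first (the drm_mm_reserve_node() at line 145) and only on failure evicts whatever occupies the range, then retries once (lines 152-154). A sketch of that flow with hypothetical try_reserve()/evict_for_range() stubs standing in for the drm and eviction calls:

    #include <errno.h>
    #include <stdio.h>

    static int occupied = 1;            /* toy state: range starts out in use */

    static int
    try_reserve(unsigned long long offset, unsigned long long size)
    {
        (void)offset; (void)size;
        return occupied ? -ENOSPC : 0;
    }

    static int
    evict_for_range(unsigned long long offset, unsigned long long size)
    {
        (void)offset; (void)size;
        occupied = 0;                   /* pretend the occupant was unbound */
        return 0;
    }

    static int
    gtt_reserve(unsigned long long offset, unsigned long long size)
    {
        int err;

        err = try_reserve(offset, size);
        if (err != -ENOSPC)
            return err;                 /* success, or a hard failure */

        err = evict_for_range(offset, size);
        if (err)
            return err;                 /* could not clear the range */

        return try_reserve(offset, size);   /* second and final attempt */
    }

    int
    main(void)
    {
        printf("reserve: %d\n", gtt_reserve(0x100000, 4096));   /* 0 */
        return 0;
    }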

i915_vma.c
    112: struct i915_address_space *vm;  (member in struct i915_vma_key)
    121: long cmp = i915_vma_compare(__UNCONST(a), b->vm,
    132: long cmp = i915_vma_compare(__UNCONST(vma), key->vm, key->view);
    156: struct i915_address_space *vm,
    163: GEM_BUG_ON(vm == &vm->gt->ggtt->alias->vm);
    171: vma->vm = i915_vm_get(vm);
    172: vma->ops = &vm->vma_ops;
    1052: struct i915_address_space *vm = vma->vm;  (local in function i915_ggtt_pin)
    1146: struct i915_address_space *vm = vma->vm;  (local in function i915_vma_parked)
    1358: struct i915_address_space *vm = vma->vm;  (local in function i915_vma_unbind)
    [all...]

/src/sys/external/bsd/drm2/include/

i915_trace.h
    76: "struct i915_address_space *"/*vm*/,
    81: trace_i915_gem_evict(struct i915_address_space *vm,
    85: vm->i915->drm.primary->index, vm, size, align, flags);
    90: "struct i915_address_space *"/*vm*/,
    96: trace_i915_gem_evict_node(struct i915_address_space *vm,
    100: vm->i915->drm.primary->index, vm,
    107: "struct i915_address_space *"/*vm*/);
    109: trace_i915_gem_evict_vm(struct i915_address_space *vm)
    [all...]

/src/sys/external/bsd/drm2/dist/drm/i915/gem/

i915_gem_context.h
    154: return rcu_dereference_protected(ctx->vm, lockdep_is_held(&ctx->mutex));
    160: struct i915_address_space *vm;  (local in function i915_gem_context_get_vm_rcu)
    163: vm = rcu_dereference(ctx->vm);
    164: if (!vm)
    165: vm = &ctx->i915->ggtt.vm;
    166: vm = i915_vm_get(vm);
    169: return vm;
    [all...]
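
i915_gem_context_get_vm_rcu() loads the context's private address space under RCU, falls back to the shared GGTT when the context has none, and takes a reference before the pointer escapes the read-side section. The toy below keeps only the fallback-then-get shape; the names are hypothetical and the RCU locking is deliberately elided:

    #include <stdatomic.h>
    #include <stdio.h>

    struct toy_vm {
        atomic_int refcnt;
        const char *name;
    };

    static struct toy_vm ggtt_vm = { 1, "ggtt" };   /* always-present default */

    static struct toy_vm *
    context_get_vm(struct toy_vm *ctx_vm)
    {
        struct toy_vm *vm = ctx_vm;

        if (vm == NULL)
            vm = &ggtt_vm;              /* no private VM: share the GGTT */
        atomic_fetch_add(&vm->refcnt, 1);   /* caller now owns a reference */
        return vm;
    }

    int
    main(void)
    {
        struct toy_vm *vm = context_get_vm(NULL);

        printf("%s refs=%d\n", vm->name, atomic_load(&vm->refcnt));
        return 0;
    }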