/src/sys/uvm/
uvm_object.h
     87 #define UVM_OBJ_IS_KERN_OBJECT(uobj) \
     88     ((uobj)->uo_refs == UVM_OBJ_KERN)
     97 #define UVM_OBJ_IS_VNODE(uobj) \
     98     ((uobj)->pgops == &uvm_vnodeops)
    100 #define UVM_OBJ_IS_DEVICE(uobj) \
    101     ((uobj)->pgops == &uvm_deviceops)
    103 #define UVM_OBJ_IS_VTEXT(uobj) \
    104     (UVM_OBJ_IS_VNODE(uobj) && uvn_text_p(uobj))
    106 #define UVM_OBJ_IS_CLEAN(uobj) \
    [all...]
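These predicates classify a uvm_object by its pgops pointer (or, for the kernel object, a sentinel reference count). A minimal sketch of how a caller might branch on them; the helper is hypothetical:

    /* Hypothetical helper: can this object's pages be paged out? */
    static bool
    obj_is_pageable(struct uvm_object *uobj)
    {
        if (UVM_OBJ_IS_KERN_OBJECT(uobj) || UVM_OBJ_IS_DEVICE(uobj))
            return false;   /* wired kernel memory or device mappings */
        return true;        /* vnode- or anon-backed memory */
    }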
uvm_object.c
    131 uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end,
    140     rw_enter(uobj->vmobjlock, RW_WRITER);
    147     error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
    154     rw_enter(uobj->vmobjlock, RW_WRITER);
    167     rw_exit(uobj->vmobjlock);
    169     rw_enter(uobj->vmobjlock, RW_WRITER);
    179     uao_dropswap(uobj, i);
    198     rw_exit(uobj->vmobjlock);
    204     uvm_obj_unwirepages(uobj, start, offset);
    261     struct uvm_object *uobj = pg->uobject;    local in function:uvm_obj_page_tag_p
    272     struct uvm_object *uobj = pg->uobject;    local in function:uvm_obj_page_set_tag
    283     struct uvm_object *uobj = pg->uobject;    local in function:uvm_obj_page_clear_tag
    [all...]
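uvm_obj_wirepages() write-locks vmobjlock, loops over pgo_get() until the range is resident (re-entering the lock, which pgo_get consumes), and rolls back with uvm_obj_unwirepages() on failure. A hedged caller sketch, assuming page-aligned byte offsets and no interest in the optional page list:

    /* Wire the first "npages" pages of uobj, use them, then unwire. */
    static int
    wire_range_example(struct uvm_object *uobj, u_int npages)
    {
        off_t end = (off_t)npages << PAGE_SHIFT;
        int error;

        error = uvm_obj_wirepages(uobj, 0, end, NULL);
        if (error != 0)
            return error;
        /* ... pages in [0, end) are now resident and wired ... */
        uvm_obj_unwirepages(uobj, 0, end);
        return 0;
    }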
uvm_device.c
    294 udv_reference(struct uvm_object *uobj)
    298     rw_enter(uobj->vmobjlock, RW_WRITER);
    299     uobj->uo_refs++;
    300     UVMHIST_LOG(maphist, "<- done (uobj=%#jx, ref = %jd)",
    301         (uintptr_t)uobj, uobj->uo_refs,0,0);
    302     rw_exit(uobj->vmobjlock);
    314 udv_detach(struct uvm_object *uobj)
    316     struct uvm_device *udv = (struct uvm_device *)uobj;
    323     rw_enter(uobj->vmobjlock, RW_WRITER);
    382     struct uvm_object *uobj = entry->object.uvm_obj;    local in function:udv_fault
    [all...]
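udv_reference() bumps uo_refs under the object lock; udv_detach() drops it the same way and destroys the device object only at zero. A schematic of the detach side, with the pager-specific teardown elided:

    static void
    pager_detach_sketch(struct uvm_object *uobj)
    {
        rw_enter(uobj->vmobjlock, RW_WRITER);
        if (--uobj->uo_refs > 0) {      /* other holders remain */
            rw_exit(uobj->vmobjlock);
            return;
        }
        rw_exit(uobj->vmobjlock);
        /* last reference gone: pager-specific teardown goes here */
    }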
uvm_vnode.c
    120 uvn_reference(struct uvm_object *uobj)
    122     vref((struct vnode *)uobj);
    135 uvn_detach(struct uvm_object *uobj)
    137     vrele((struct vnode *)uobj);
    148 uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
    150     struct vnode *vp = (struct vnode *)uobj;
    153     KASSERT(rw_write_held(uobj->vmobjlock));
    170 uvn_get(struct uvm_object *uobj, voff_t offset,
    175     struct vnode *vp = (struct vnode *)uobj;
    184     uvn_alloc_ractx(uobj);
    438     struct uvm_object *uobj = &vp->v_uobj;    local in function:uvm_vnp_setsize
    [all...]
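uvn_reference() and uvn_detach() reduce to vref()/vrele() because the uvm_object is embedded in the vnode (v_uobj), so the two pointers are interconvertible:

    struct vnode *vp = (struct vnode *)uobj;    /* same object as vp->v_uobj */
    vref(vp);       /* what uvn_reference() amounts to */
    vrele(vp);      /* what uvn_detach() amounts to */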
uvm_page_status.c
     62     struct uvm_object * const uobj __diagused = pg->uobject;
     66     KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
     86     struct uvm_object * const uobj = pg->uobject;    local in function:uvm_pagemarkdirty
     93     KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
    105     if (uobj != NULL) {
    113     if (uvm_obj_clean_p(uobj) &&
    114         uobj->pgops->pgo_markdirty != NULL) {
    115         (*uobj->pgops->pgo_markdirty)(uobj);
    128     KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
    [all...]
uvm_bio.c
     79 #define UBC_HASH(uobj, offset) \
     80     (((((u_long)(uobj)) >> 8) + (((u_long)(offset)) >> PAGE_SHIFT)) & \
     95     struct uvm_object *uobj;    /* mapped object */    member in struct:ubc_map
     96     voff_t offset;              /* offset into uobj */
    110     struct uvm_object uobj;     /* glue for uvm_map() */    member in struct:ubc_object
    181     uvm_obj_init(&ubc_object.uobj, &ubc_pager, true, UVM_OBJ_KERN);
    215         ubc_nwins << ubc_winshift, &ubc_object.uobj, 0, (vsize_t)va,
    311     struct uvm_object *uobj;    local in function:ubc_fault
    327     uvmfault_unlockall(ufi, NULL, &ubc_object.uobj);
    366     uobj = umap->uobj;
    646     struct uvm_object *uobj;    local in function:ubc_release
    [all...]
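UBC_HASH mixes the object pointer with the page number of the offset to select a window hash chain. A standalone sketch of the computation; the mask operand is truncated in the match above, so hashmask here is a stand-in for a power-of-two table mask:

    /* Sketch of the window hash; hashmask is illustrative. */
    static u_long
    ubc_hash_sketch(struct uvm_object *uobj, voff_t offset, u_long hashmask)
    {
        return ((((u_long)uobj) >> 8) +
            (((u_long)offset) >> PAGE_SHIFT)) & hashmask;
    }

Shifting the pointer right by 8 discards low bits that are identical for most allocated objects before they feed the hash.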
uvm_aobj.c
    248 uao_find_swslot(struct uvm_object *uobj, int pageidx)
    250     struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    253     KASSERT(UVM_OBJ_IS_AOBJ(uobj));
    288 uao_set_swslot(struct uvm_object *uobj, int pageidx, int slot)
    290     struct uvm_aobj *aobj = (struct uvm_aobj *)uobj;
    297     KASSERT(rw_write_held(uobj->vmobjlock) || uobj->uo_refs == 0);
    298     KASSERT(UVM_OBJ_IS_AOBJ(uobj));
    369     struct uvm_object *uobj = &aobj->u_obj;    local in function:uao_free
    371     KASSERT(UVM_OBJ_IS_AOBJ(uobj));
   1244     struct uvm_object *uobj = &aobj->u_obj;    local in function:uao_pagein_page
    [all...]
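uao_set_swslot()'s assertion states the locking rule: write-hold vmobjlock unless the object is already dead (uo_refs == 0). Usage sketch of the lookup side, where slot 0 means no swap slot is assigned:

    int slot;

    rw_enter(uobj->vmobjlock, RW_WRITER);
    slot = uao_find_swslot(uobj, pageidx);  /* 0 => page not on swap */
    rw_exit(uobj->vmobjlock);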
uvm_readahead.c
    118 ra_startio(struct uvm_object *uobj, off_t off, size_t sz)
    122     DPRINTF(("%s: uobj=%p, off=%" PRIu64 ", endoff=%" PRIu64 "\n",
    123         __func__, uobj, off, endoff));
    125     KASSERT(rw_write_held(uobj->vmobjlock));
    134     struct vm_page *pg = uvm_pagelookup(uobj, trunc_page(endoff - 1));
    161     error = (*uobj->pgops->pgo_get)(uobj, off, NULL,
    163     rw_enter(uobj->vmobjlock, RW_WRITER);
    219 uvm_ra_request(struct uvm_ractx *ra, int advice, struct uvm_object *uobj,
    223     KASSERT(rw_write_held(uobj->vmobjlock));
    [all...]
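ra_startio() illustrates the pgo_get contract seen at lines 161-163: the method is entered with vmobjlock held and consumes the lock, so the caller re-enters it to continue. The shape of the read-ahead call, with illustrative flag and count values:

    rw_enter(uobj->vmobjlock, RW_WRITER);
    error = (*uobj->pgops->pgo_get)(uobj, off, NULL, &npages, 0,
        VM_PROT_READ, 0, 0);                /* drops vmobjlock */
    rw_enter(uobj->vmobjlock, RW_WRITER);   /* re-take to continue */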
uvm_fault.c
     65  *    uobj | d/c |     | d/c |     |  V  |   +----+ |
     71  * no amap or uobj is present.   this is an error.
     75  *    I/O takes place in upper level anon and uobj is not touched.
     79  * case [2]: lower layer fault [uobj]
     80  *    2A: [read on non-NULL uobj] or [write to non-copy_on_write area]
     82  *    2B: [write to copy_on_write] or [read on NULL uobj]
     83  *    data is "promoted" from uobj to a new anon.
     84  *    if uobj is null, then we zero fill.
     88  *    MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
    111  *    - ensure source page is resident (if uobj)
    553     struct uvm_object *uobj;    local in function:uvmfault_promote
    912     struct uvm_object * const uobj =    local in function:uvm_fault_internal
    982     struct uvm_object *uobj;    local in function:uvm_fault_check
   1427     struct uvm_object *uobj;    local in function:uvm_fault_upper
   1861     struct uvm_object *uobj = ufi->entry->object.uvm_obj;    local in function:uvm_fault_lower
   1982     struct uvm_object *uobj = ufi->entry->object.uvm_obj;    local in function:uvm_fault_lower_lookup
   2121     struct uvm_object *uobj = *ruobj;    local in function:uvm_fault_lower_io
    [all...]
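A compressed sketch of the lower-layer case split described in the comments above (condition names illustrative):

    if (uobj == NULL || write_to_cow) {
        /* case 2B: promote into a new anon; zero-fill if no uobj */
    } else {
        /* case 2A: satisfy the fault directly from uobj */
    }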
uvm_fault_i.h
     73     struct uvm_object *uobj)
     76     if (uobj)
     77         rw_exit(uobj->vmobjlock);
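uvmfault_unlockall() drops everything taken in the MAPS => AMAP => UOBJ order in one call, tolerating NULL for layers that were never locked. A hedged sketch of the bail-out idiom built on it:

    /* Bail out of a fault: drop map, amap and uobj locks together. */
    if (error != 0) {
        uvmfault_unlockall(ufi, aref->ar_amap, uobj);
        return error;
    }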
uvm_loan.c
    142     struct uvm_object *uobj = ufi->entry->object.uvm_obj;    local in function:uvm_loanentry
    170     /* locked: map, amap, uobj */
    173     } else if (uobj) {
    178         uvmfault_unlockall(ufi, aref->ar_amap, uobj);
    181     /* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */
    184     KASSERT(rv > 0 || uobj == NULL ||
    185         !rw_write_held(uobj->vmobjlock));
    334  * => called with map, amap, uobj locked
    511 uvm_loanuobjchunk(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    516     rw_enter(uobj->vmobjlock, RW_WRITER);
    601     struct uvm_object *uobj = ufi->entry->object.uvm_obj;    local in function:uvm_loanuobj
    [all...]
uvm_page_array.c
     45 uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
     51     ar->ar_uobj = uobj;
    139     struct uvm_object *uobj = ar->ar_uobj;    local in function:uvm_page_array_fill
    149     KASSERT(rw_lock_held(uobj->vmobjlock));
    161         &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
    167         &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
    200     KDASSERT(pg->uobject == uobj);
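uvm_page_array_fill() batches page lookups with a gang lookup on the object's radix tree, keyed by page index, under at least a read hold of vmobjlock. A sketch of that core step, with result handling simplified:

    unsigned int npages;

    KASSERT(rw_lock_held(uobj->vmobjlock));
    npages = radix_tree_gang_lookup_node(&uobj->uo_pages,
        off >> PAGE_SHIFT, (void **)pgs, maxpages, false);
    /* pgs[0..npages-1] now hold pages at or after "off" */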
uvm_mmap.c
    129     struct uvm_object *uobj;    local in function:sys_mincore
    201     uobj = entry->object.uvm_obj;    /* lower layer */
    205     if (uobj != NULL)
    206         rw_enter(uobj->vmobjlock, RW_READER);
    225     if (uobj != NULL && pgi == 0) {
    227         pg = uvm_pagelookup(uobj,
    241     if (uobj != NULL)
    242         rw_exit(uobj->vmobjlock);
    283     struct uvm_object *uobj;    local in function:sys_mmap
    377         &advice, &uobj, &maxprot);
   1007     struct uvm_object *uobj;    local in function:uvm_mmap_dev
    [all...]
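sys_mincore() probes residency by read-locking the lower layer's object and checking whether uvm_pagelookup() finds a page; the user VA is translated to an object offset through the map entry. Sketch:

    rw_enter(uobj->vmobjlock, RW_READER);
    pg = uvm_pagelookup(uobj,
        entry->offset + (va - entry->start));   /* VA -> object offset */
    incore = (pg != NULL);
    rw_exit(uobj->vmobjlock);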
uvm_mremap.c
     46     struct uvm_object *uobj;    local in function:uvm_mapent_extend
     73     uobj = entry->object.uvm_obj;
     74     if (uobj) {
     83         if (uobj->pgops->pgo_reference)
     84             uobj->pgops->pgo_reference(uobj);
     85         reserved_entry->object.uvm_obj = uobj;
/src/sys/external/bsd/drm2/include/linux/
shmem_fs.h
     50 shmem_read_mapping_page_gfp(struct uvm_object *uobj, voff_t i, gfp_t gfp)
     55     error = uvm_obj_wirepages(uobj, i*PAGE_SIZE, (i + 1)*PAGE_SIZE, NULL);
     59     rw_enter(uobj->vmobjlock, RW_READER);
     60     vm_page = uvm_pagelookup(uobj, i*PAGE_SIZE);
     61     rw_exit(uobj->vmobjlock);
     68 shmem_read_mapping_page(struct uvm_object *uobj, voff_t i)
     70     return shmem_read_mapping_page_gfp(uobj, i, GFP_KERNEL);
     74 shmem_truncate_range(struct uvm_object *uobj, voff_t start, voff_t end)
     85     rw_enter(uobj->vmobjlock, RW_WRITER);
     86     (*uobj->pgops->pgo_put)(uobj, start, end, flags);
    [all...]
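shmem_read_mapping_page_gfp() emulates the Linux call on a uvm_object: wire the one-page range, then fetch the now-resident page under a read lock. A hypothetical caller, assuming the usual Linux-style error-pointer return convention:

    struct page *p;

    p = shmem_read_mapping_page(uobj, i);   /* i is a page index */
    if (IS_ERR(p))
        return PTR_ERR(p);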
fs.h
     43 file_inode(struct uvm_object *uobj)
     45     return uobj;
pagemap.h
     56 mapping_gfp_mask(struct uvm_object *uobj __unused)
/src/tests/rump/kernspace/
busypage.c
     45 static struct uvm_object *uobj;    variable in typeref:struct:uvm_object *
     60     rw_enter(uobj->vmobjlock, RW_READER);
     61     uvm_pagewait(testpg, uobj->vmobjlock, "tw");
     74     uobj = uao_create(1, 0);
     75     rw_enter(uobj->vmobjlock, RW_WRITER);
     76     testpg = uvm_pagealloc(uobj, 0, NULL, 0);
     77     rw_exit(uobj->vmobjlock);
     93     rw_enter(uobj->vmobjlock, RW_WRITER);
     95     rw_exit(uobj->vmobjlock);
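The test exercises the busy-page protocol: uvm_pagewait() must be entered with vmobjlock held and releases that lock before sleeping. Sketch of the wait pattern:

    rw_enter(uobj->vmobjlock, RW_READER);
    if ((pg->flags & PG_BUSY) != 0) {
        uvm_pagewait(pg, uobj->vmobjlock, "pgwait");    /* drops lock */
    } else {
        rw_exit(uobj->vmobjlock);
    }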
/src/sys/rump/librump/rumpvfs/
vm_vfs.c
     42     struct uvm_object *uobj = pgs[0]->uobject;    local in function:uvm_aio_aiodone_pages
     46     rw_enter(uobj->vmobjlock, RW_WRITER);
     63     rw_exit(uobj->vmobjlock);
     72     struct uvm_object *uobj = NULL;    local in function:uvm_aio_aiodone
     88     if (uobj == NULL) {
     89         uobj = pgs[i]->uobject;
     90         KASSERT(uobj != NULL);
     92     KASSERT(uobj == pgs[i]->uobject);
/src/usr.bin/pmap/
pmap.h
     78 #define UVM_OBJ_IS_VNODE(uobj)    ((uobj)->pgops == uvm_vnodeops)
     79 #define UVM_OBJ_IS_AOBJ(uobj)     ((uobj)->pgops == aobj_pager)
     80 #define UVM_OBJ_IS_DEVICE(uobj)   ((uobj)->pgops == uvm_deviceops)
     81 #define UVM_OBJ_IS_UBCPAGER(uobj) ((uobj)->pgops == ubc_pager)
/src/sys/compat/linux32/arch/aarch64/
linux32_exec_machdep.c
     56     struct uvm_object *uobj;    local in function:vmcmd_linux32_kuser_helper_map
     67     uobj = *e->e_sigobject;
     68     if (uobj == NULL)
     74     (*uobj->pgops->pgo_reference)(uobj);
     75     error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz), uobj, 0, 0,
     79         (*uobj->pgops->pgo_detach)(uobj);
/src/sys/external/bsd/drm2/ttm/
ttm_bus_dma.c
     99     struct uvm_object *const uobj = ttm_dma->ttm.swap_storage;    local in function:ttm_bus_dma_put
    120     KASSERT(uobj->pgops->pgo_put);
    123     rw_enter(uobj->vmobjlock, RW_WRITER);
    124     (void)(*uobj->pgops->pgo_put)(uobj, 0, size, flags);
    125     /* pgo_put unlocks uobj->vmobjlock. */
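Here pgo_put serves as a flush-everything hammer: entered with vmobjlock write-held, it consumes the lock on return (per the comment at line 125). A sketch using the conventional flag names:

    rw_enter(uobj->vmobjlock, RW_WRITER);
    (void)(*uobj->pgops->pgo_put)(uobj, 0, 0,
        PGO_ALLPAGES | PGO_CLEANIT | PGO_FREE);     /* unlocks vmobjlock */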
/src/sys/rump/librump/rumpkern/
vm.c
    174 uvm_pagealloc_strat(struct uvm_object *uobj, voff_t off, struct vm_anon *anon,
    179     KASSERT(uobj && rw_write_held(uobj->vmobjlock));
    189     pg->uobject = uobj;
    191     if (radix_tree_insert_node(&uobj->uo_pages, off >> PAGE_SHIFT,
    197     if (UVM_OBJ_IS_VNODE(uobj)) {
    198         if (uobj->uo_npages == 0) {
    199             struct vnode *vp = (struct vnode *)uobj;
    206     uobj->uo_npages++;
    218     if (!UVM_OBJ_IS_AOBJ(uobj)) {
    238     struct uvm_object *uobj = pg->uobject;    local in function:uvm_pagefree
   1148     struct uvm_object *uobj;    local in function:processpage
    [all...]
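The rump allocator binds a fresh page to its object by setting pg->uobject, inserting the page into the object's radix tree under its page index, and bumping uo_npages. The core of that step, with error handling sketched:

    pg->uobject = uobj;
    pg->offset = off;
    if (radix_tree_insert_node(&uobj->uo_pages,
        off >> PAGE_SHIFT, pg) != 0) {
        /* insertion can fail on memory shortage; undo and bail */
    }
    uobj->uo_npages++;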
/src/sys/modules/examples/fopsmapper/
fopsmapper.c
     89     struct uvm_object *uobj;    member in struct:fopsmapper_softc
    126     fo->uobj = uao_create(size, 0);
    132      * uobj on success.
    134     error = uvm_map(kernel_map, &va, fo->bufsize, fo->uobj, 0, 0,
    138         uao_detach(fo->uobj);
    144     /* Get the reference of uobj */
    145     uao_reference(fo->uobj);
    146     *uobjp = fo->uobj;
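The example module encodes UVM's ownership rules: uvm_map() consumes one reference to the backing object on success, so the aobj from uao_create() must be detached if mapping fails, and an extra uao_reference() is taken before the object is handed back. Condensed:

    uobj = uao_create(size, 0);
    error = uvm_map(kernel_map, &va, size, uobj, 0, 0,
        UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_NONE,
        UVM_ADV_RANDOM, 0));
    if (error != 0) {
        uao_detach(uobj);   /* map failed: we still own the reference */
        return error;
    }
    uao_reference(uobj);    /* extra reference for the caller's handle */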
/src/sys/external/bsd/drm2/drm/
drm_gem_vm.c
     50 drm_gem_pager_reference(struct uvm_object *uobj)
     52     struct drm_gem_object *const obj = container_of(uobj,
     59 drm_gem_pager_detach(struct uvm_object *uobj)
     61     struct drm_gem_object *const obj = container_of(uobj,
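The GEM pager hooks recover their containing object from the embedded uvm_object via container_of(); the member name below is illustrative of that embedding:

    struct drm_gem_object {
        /* ... */
        struct uvm_object gemo_uvmobj;      /* embedded UVM object */
    };

    struct drm_gem_object *obj =
        container_of(uobj, struct drm_gem_object, gemo_uvmobj);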