/src/sys/external/bsd/drm2/ttm/ |
ttm_bus_dma.c |
     99  struct uvm_object *const uobj = ttm_dma->ttm.swap_storage;    local in function:ttm_bus_dma_put
    120  KASSERT(uobj->pgops->pgo_put);
    123  rw_enter(uobj->vmobjlock, RW_WRITER);
    124  (void)(*uobj->pgops->pgo_put)(uobj, 0, size, flags);
    125  /* pgo_put unlocks uobj->vmobjlock. */
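The ttm_bus_dma.c hits illustrate the pager-put locking convention: the caller enters uobj->vmobjlock as writer, and pgo_put releases it before returning. A minimal sketch of that convention; the helper name and flag choice are illustrative, not from the source:

    #include <sys/param.h>
    #include <uvm/uvm.h>

    /* Hypothetical helper: clean and free every page of a UVM object. */
    static void
    flush_object_pages(struct uvm_object *uobj, voff_t size)
    {
            KASSERT(uobj->pgops->pgo_put != NULL);
            rw_enter(uobj->vmobjlock, RW_WRITER);
            (void)(*uobj->pgops->pgo_put)(uobj, 0, size,
                PGO_CLEANIT | PGO_FREE);
            /* No rw_exit() here: pgo_put drops vmobjlock itself. */
    }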
|
ttm_bo_vm.c |
     81  ttm_bo_uvm_reference(struct uvm_object *uobj)
     83  struct ttm_buffer_object *const bo = container_of(uobj,
     90  ttm_bo_uvm_detach(struct uvm_object *uobj)
     92  struct ttm_buffer_object *bo = container_of(uobj,
    172  struct uvm_object *const uobj = vmf->entry->object.uvm_obj;    local in function:ttm_bo_uvm_fault_reserved
    173  struct ttm_buffer_object *const bo = container_of(uobj,
    315  struct uvm_object *const uobj = vmf->entry->object.uvm_obj;    local in function:ttm_bo_uvm_fault
    316  struct ttm_buffer_object *const bo = container_of(uobj,
    321  rw_exit(uobj->vmobjlock);
|
/src/sys/uvm/ |
uvm_page_status.c |
     62  struct uvm_object * const uobj __diagused = pg->uobject;
     66  KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
     86  struct uvm_object * const uobj = pg->uobject;    local in function:uvm_pagemarkdirty
     93  KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
    105  if (uobj != NULL) {
    113  if (uvm_obj_clean_p(uobj) &&
    114  uobj->pgops->pgo_markdirty != NULL) {
    115  (*uobj->pgops->pgo_markdirty)(uobj);
    128  KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) =
    [all...]
|
uvm_anon.c |
    278  struct uvm_object *uobj;    local in function:uvm_anon_pagein
    310  uobj = pg->uobject;
    325  if (uobj) {
    326  rw_exit(uobj->vmobjlock);
|
uvm_page_array.c |
     45  uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
     51  ar->ar_uobj = uobj;
    139  struct uvm_object *uobj = ar->ar_uobj;    local in function:uvm_page_array_fill
    149  KASSERT(rw_lock_held(uobj->vmobjlock));
    161  &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
    167  &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
    200  KDASSERT(pg->uobject == uobj);
|
uvm_device.c |
    294  udv_reference(struct uvm_object *uobj)
    298  rw_enter(uobj->vmobjlock, RW_WRITER);
    299  uobj->uo_refs++;
    300  UVMHIST_LOG(maphist, "<- done (uobj=%#jx, ref = %jd)",
    301  (uintptr_t)uobj, uobj->uo_refs,0,0);
    302  rw_exit(uobj->vmobjlock);
    314  udv_detach(struct uvm_object *uobj)
    316  struct uvm_device *udv = (struct uvm_device *)uobj;
    323  rw_enter(uobj->vmobjlock, RW_WRITER)
    382  struct uvm_object *uobj = entry->object.uvm_obj;    local in function:udv_fault
    [all...]
|
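udv_reference() above shows the usual way a pager protects its reference count with vmobjlock. The same idiom in isolation; my_obj_reference is a hypothetical name, not part of the UVM API:

    static void
    my_obj_reference(struct uvm_object *uobj)
    {
            rw_enter(uobj->vmobjlock, RW_WRITER);
            uobj->uo_refs++;
            rw_exit(uobj->vmobjlock);
    }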
uvm_mremap.c |
     46  struct uvm_object *uobj;    local in function:uvm_mapent_extend
     73  uobj = entry->object.uvm_obj;
     74  if (uobj) {
     83  if (uobj->pgops->pgo_reference)
     84  uobj->pgops->pgo_reference(uobj);
     85  reserved_entry->object.uvm_obj = uobj;
|
uvm_object.c |
    131  uvm_obj_wirepages(struct uvm_object *uobj, off_t start, off_t end,
    140  rw_enter(uobj->vmobjlock, RW_WRITER);
    147  error = (*uobj->pgops->pgo_get)(uobj, offset, pgs, &npages, 0,
    154  rw_enter(uobj->vmobjlock, RW_WRITER);
    167  rw_exit(uobj->vmobjlock);
    169  rw_enter(uobj->vmobjlock, RW_WRITER);
    179  uao_dropswap(uobj, i);
    198  rw_exit(uobj->vmobjlock);
    204  uvm_obj_unwirepages(uobj, start, offset)
    261  struct uvm_object *uobj = pg->uobject;    local in function:uvm_obj_page_tag_p
    272  struct uvm_object *uobj = pg->uobject;    local in function:uvm_obj_page_set_tag
    283  struct uvm_object *uobj = pg->uobject;    local in function:uvm_obj_page_clear_tag
    [all...]
|
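uvm_obj_wirepages() as seen above wires a page range into an object, with uvm_obj_unwirepages() as the rollback. A hedged usage sketch; wire_buffer and the page count are illustrative:

    #include <sys/param.h>
    #include <uvm/uvm.h>

    /* Hypothetical caller: wire npages, use them, unwire. */
    static int
    wire_buffer(struct uvm_object *uobj, size_t npages)
    {
            int error;

            error = uvm_obj_wirepages(uobj, 0,
                (off_t)npages << PAGE_SHIFT, NULL);
            if (error)
                    return error;
            /* ... pages are resident and wired here ... */
            uvm_obj_unwirepages(uobj, 0, (off_t)npages << PAGE_SHIFT);
            return 0;
    }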
uvm_vnode.c |
    120  uvn_reference(struct uvm_object *uobj)
    122  vref((struct vnode *)uobj);
    135  uvn_detach(struct uvm_object *uobj)
    137  vrele((struct vnode *)uobj);
    148  uvn_put(struct uvm_object *uobj, voff_t offlo, voff_t offhi, int flags)
    150  struct vnode *vp = (struct vnode *)uobj;
    153  KASSERT(rw_write_held(uobj->vmobjlock));
    170  uvn_get(struct uvm_object *uobj, voff_t offset,
    175  struct vnode *vp = (struct vnode *)uobj;
    184  uvn_alloc_ractx(uobj);
    438  struct uvm_object *uobj = &vp->v_uobj;    local in function:uvm_vnp_setsize
    [all...]
|
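The uvm_vnode.c hits rely on struct vnode embedding its uvm_object (v_uobj) as the first member, so the pager casts freely between the two and maps reference counting onto vref()/vrele(). A small sketch; uvn_ref_demo is hypothetical:

    #include <sys/vnode.h>

    static void
    uvn_ref_demo(struct vnode *vp)
    {
            struct uvm_object *uobj = &vp->v_uobj;

            vref((struct vnode *)uobj);     /* what uvn_reference() does */
            vrele((struct vnode *)uobj);    /* what uvn_detach() does */
    }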
uvm_loan.c |
    142  struct uvm_object *uobj = ufi->entry->object.uvm_obj;    local in function:uvm_loanentry
    170  /* locked: map, amap, uobj */
    173  } else if (uobj) {
    178  uvmfault_unlockall(ufi, aref->ar_amap, uobj);
    181  /* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */
    184  KASSERT(rv > 0 || uobj == NULL ||
    185  !rw_write_held(uobj->vmobjlock));
    334  * => called with map, amap, uobj locked
    511  uvm_loanuobjchunk(struct uvm_object *uobj, voff_t pgoff, int orignpages,
    516  rw_enter(uobj->vmobjlock, RW_WRITER)
    601  struct uvm_object *uobj = ufi->entry->object.uvm_obj;    local in function:uvm_loanuobj
    [all...]
|
uvm_pager.c |
    329  struct uvm_object *uobj;    local in function:uvm_aio_aiodone_pages
    341  uobj = NULL;
    346  uobj = pg->uobject;
    347  slock = uobj->vmobjlock;
    371  KASSERT(swap || pg->uobject == uobj);
    393  if (write && uobj != NULL) {
|
uvm_km.c |
    454  struct uvm_object * const uobj = uvm_kernel_object;    local in function:uvm_km_pgremove
    466  rw_enter(uobj->vmobjlock, RW_WRITER);
    470  pg = uvm_pagelookup(uobj, curoff);
    472  uvm_pagewait(pg, uobj->vmobjlock, "km_pgrm");
    473  rw_enter(uobj->vmobjlock, RW_WRITER);
    483  uao_find_swslot(uobj, curoff >> PAGE_SHIFT) > 0) {
    486  uao_dropswap(uobj, curoff >> PAGE_SHIFT);
    491  rw_exit(uobj->vmobjlock);
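The uvm_km.c loop is the standard busy-page dance: uvm_pagewait() consumes vmobjlock, so the caller re-enters it and retries the lookup. The same pattern as a standalone sketch; wait_for_page and the wait message are hypothetical:

    /* Wait until the page at `off' is no longer busy, or is gone. */
    static struct vm_page *
    wait_for_page(struct uvm_object *uobj, voff_t off)
    {
            struct vm_page *pg;

            rw_enter(uobj->vmobjlock, RW_WRITER);
            while ((pg = uvm_pagelookup(uobj, off)) != NULL &&
                (pg->flags & PG_BUSY) != 0) {
                    uvm_pagewait(pg, uobj->vmobjlock, "pgwait");
                    rw_enter(uobj->vmobjlock, RW_WRITER);
            }
            return pg;      /* vmobjlock is still held on return */
    }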
|
uvm_mmap.c |
    129  struct uvm_object *uobj;    local in function:sys_mincore
    201  uobj = entry->object.uvm_obj;    /* lower layer */
    205  if (uobj != NULL)
    206  rw_enter(uobj->vmobjlock, RW_READER);
    225  if (uobj != NULL && pgi == 0) {
    227  pg = uvm_pagelookup(uobj,
    241  if (uobj != NULL)
    242  rw_exit(uobj->vmobjlock);
    283  struct uvm_object *uobj;    local in function:sys_mmap
    377  &advice, &uobj, &maxprot)
   1007  struct uvm_object *uobj;    local in function:uvm_mmap_dev
    [all...]
|
/src/tests/rump/kernspace/ |
busypage.c |
     45  static struct uvm_object *uobj;    variable in typeref:struct:uvm_object *
     60  rw_enter(uobj->vmobjlock, RW_READER);
     61  uvm_pagewait(testpg, uobj->vmobjlock, "tw");
     74  uobj = uao_create(1, 0);
     75  rw_enter(uobj->vmobjlock, RW_WRITER);
     76  testpg = uvm_pagealloc(uobj, 0, NULL, 0);
     77  rw_exit(uobj->vmobjlock);
     93  rw_enter(uobj->vmobjlock, RW_WRITER);
     95  rw_exit(uobj->vmobjlock);
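The rump test setup condenses to: create an anonymous object with uao_create() and allocate its first page under the write lock. A sketch assuming a one-page object (make_test_page is hypothetical; uvm_pagealloc() may return NULL under memory pressure):

    #include <sys/param.h>
    #include <uvm/uvm.h>

    static struct vm_page *
    make_test_page(struct uvm_object **uobjp)
    {
            struct uvm_object *uobj;
            struct vm_page *pg;

            uobj = uao_create(PAGE_SIZE, 0);
            rw_enter(uobj->vmobjlock, RW_WRITER);
            pg = uvm_pagealloc(uobj, 0, NULL, 0);
            rw_exit(uobj->vmobjlock);
            *uobjp = uobj;
            return pg;
    }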
|
/src/sys/rump/librump/rumpvfs/ |
vm_vfs.c |
     42  struct uvm_object *uobj = pgs[0]->uobject;    local in function:uvm_aio_aiodone_pages
     46  rw_enter(uobj->vmobjlock, RW_WRITER);
     63  rw_exit(uobj->vmobjlock);
     72  struct uvm_object *uobj = NULL;    local in function:uvm_aio_aiodone
     88  if (uobj == NULL) {
     89  uobj = pgs[i]->uobject;
     90  KASSERT(uobj != NULL);
     92  KASSERT(uobj == pgs[i]->uobject);
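Both rump aiodone hits depend on the invariant that every page of one I/O belongs to the same object, so the lock can be taken from pgs[0]. Sketched below; finish_pages is a hypothetical name:

    static void
    finish_pages(struct vm_page **pgs, int npages)
    {
            struct uvm_object *uobj = pgs[0]->uobject;
            int i;

            KASSERT(uobj != NULL);
            for (i = 1; i < npages; i++)
                    KASSERT(pgs[i]->uobject == uobj);

            rw_enter(uobj->vmobjlock, RW_WRITER);
            /* ... unbusy the pages, update their dirty state ... */
            rw_exit(uobj->vmobjlock);
    }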
|
/src/sys/external/bsd/drm2/drm/ |
drm_vm.c |
     60  struct uvm_object *uobj;    local in function:drm_legacy_mmap_object
     70  uobj = udv_attach(devno, prot, offset, size);
     71  if (uobj == NULL)
     74  *uobjp = uobj;
|
drm_gem_cma_helper.c |
    184  struct uvm_object *uobj = entry->object.uvm_obj;    local in function:drm_gem_cma_fault
    186  container_of(uobj, struct drm_gem_object, gemo_uvmobj);
    222  uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
    228  uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
|
/src/sys/compat/linux32/arch/aarch64/ |
linux32_exec_machdep.c |
     56  struct uvm_object *uobj;    local in function:vmcmd_linux32_kuser_helper_map
     67  uobj = *e->e_sigobject;
     68  if (uobj == NULL)
     74  (*uobj->pgops->pgo_reference)(uobj);
     75  error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz), uobj, 0, 0,
     79  (*uobj->pgops->pgo_detach)(uobj);
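The kuser-helper mapping shows the ownership rule for uvm_map(): the object reference taken via pgo_reference() is consumed by a successful uvm_map(), so it is only dropped on the error path. A hedged sketch; the function name and protection/inherit/advice values are illustrative, not taken from the source:

    static int
    map_shared_object(struct vm_map *map, struct uvm_object *uobj,
        vaddr_t *vap, vsize_t sz)
    {
            int error;

            (*uobj->pgops->pgo_reference)(uobj);
            error = uvm_map(map, vap, round_page(sz), uobj, 0, 0,
                UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
                    UVM_ADV_NORMAL, 0));
            if (error)
                    (*uobj->pgops->pgo_detach)(uobj);
            return error;
    }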
|
/src/sys/modules/examples/fopsmapper/ |
fopsmapper.c |
     89  struct uvm_object *uobj;    member in struct:fopsmapper_softc
    126  fo->uobj = uao_create(size, 0);
    132  * uobj on success.
    134  error = uvm_map(kernel_map, &va, fo->bufsize, fo->uobj, 0, 0,
    138  uao_detach(fo->uobj);
    144  /* Get the reference of uobj */
    145  uao_reference(fo->uobj);
    146  *uobjp = fo->uobj;
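fopsmapper demonstrates the reference accounting for an anonymous object shared with a mapper: uvm_map() consumes the reference created by uao_create(), and uao_reference() takes a second one for the pointer handed back. A condensed sketch under those assumptions (map_shared_buffer and the map flags are illustrative; size is assumed page-aligned):

    static int
    map_shared_buffer(size_t size, struct uvm_object **uobjp)
    {
            struct uvm_object *uobj;
            vaddr_t va = 0;
            int error;

            uobj = uao_create(size, 0);         /* ref = 1 */
            /* uvm_map() consumes that reference on success. */
            error = uvm_map(kernel_map, &va, size, uobj, 0, 0,
                UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
                    UVM_ADV_RANDOM, 0));
            if (error) {
                    uao_detach(uobj);
                    return error;
            }
            uao_reference(uobj);        /* second ref for the caller */
            *uobjp = uobj;
            return 0;
    }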
|
/src/sys/ufs/lfs/ |
ulfs_inode.c |
    176  struct uvm_object *uobj;    local in function:ulfs_balloc_range
    191  uobj = &vp->v_uobj;
    214  rw_enter(uobj->vmobjlock, RW_WRITER);
    241  rw_enter(uobj->vmobjlock, RW_WRITER);
    256  rw_exit(uobj->vmobjlock);
|
/src/sys/ufs/ufs/ |
ufs_inode.c |
    207  struct uvm_object *uobj;    local in function:ufs_balloc_range
    222  uobj = &vp->v_uobj;
    245  rw_enter(uobj->vmobjlock, RW_WRITER);
    272  rw_enter(uobj->vmobjlock, RW_WRITER);
    287  rw_exit(uobj->vmobjlock);
|
/src/sys/kern/ |
exec_subr.c |
    181  struct uvm_object *uobj;    local in function:vmcmd_map_pagedvn
    222  uobj = &vp->v_uobj;
    226  uobj, cmd->ev_offset, 0,
    230  uobj->pgops->pgo_detach(uobj);
|
/src/sys/dev/nvmm/ |
nvmm_internal.h |
     75  struct uvm_object *uobj;    member in struct:nvmm_hmapping
|
/src/sys/external/bsd/drm2/dist/drm/ttm/ |
ttm_tt.c |
    423  struct uvm_object *uobj = ttm->swap_storage;    local in function:ttm_tt_wire
    432  KASSERT(uobj != NULL);
    434  error = uvm_obj_wirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT),
    440  rw_enter(uobj->vmobjlock, RW_READER);
    442  vm_page = uvm_pagelookup(uobj, ptoa(i));
    445  rw_exit(uobj->vmobjlock);
    462  struct uvm_object *uobj = ttm->swap_storage;    local in function:ttm_tt_unwire
    469  KASSERT(uobj != NULL);
    471  uvm_obj_unwirepages(uobj, 0, (ttm->num_pages << PAGE_SHIFT));
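ttm_tt_wire() combines the two calls seen earlier: wire the whole range, then collect the struct vm_page pointers under a read lock, since wired pages stay resident. A condensed sketch; wire_and_collect is hypothetical and the array is the caller's:

    static int
    wire_and_collect(struct uvm_object *uobj, struct vm_page **pgs,
        size_t npages)
    {
            size_t i;
            int error;

            error = uvm_obj_wirepages(uobj, 0,
                (off_t)npages << PAGE_SHIFT, NULL);
            if (error)
                    return error;

            rw_enter(uobj->vmobjlock, RW_READER);
            for (i = 0; i < npages; i++)
                    pgs[i] = uvm_pagelookup(uobj, ptoa(i));
            rw_exit(uobj->vmobjlock);
            return 0;
    }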
|
/src/sys/fs/tmpfs/ |
tmpfs_subr.c |
    905  struct uvm_object *uobj = node->tn_spec.tn_reg.tn_aobj;    local in function:tmpfs_reg_resize
    933  ubc_zerorange(uobj, newsize, zerolen, UBC_VNODE_FLAGS(vp));
    944  rw_enter(uobj->vmobjlock, RW_WRITER);
    945  uao_dropswap_range(uobj, newpages, oldpages);
    946  rw_exit(uobj->vmobjlock);
|