    Searched defs:uobj (Results 1 - 25 of 106), sorted by relevance

  /src/sys/external/bsd/drm2/ttm/
ttm_bus_dma.c 99 struct uvm_object *const uobj = ttm_dma->ttm.swap_storage; local
120 KASSERT(uobj->pgops->pgo_put);
123 rw_enter(uobj->vmobjlock, RW_WRITER);
124 (void)(*uobj->pgops->pgo_put)(uobj, 0, size, flags);
125 /* pgo_put unlocks uobj->vmobjlock. */
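The ttm_bus_dma.c hits above show the standard way to flush every page of a uvm_object: take the object's vmobjlock as a writer and call the pager's pgo_put method, which drops the lock itself. A minimal sketch of that pattern, assuming a hypothetical uobj_flush() helper (the flags value is left to the caller; the real flags are not visible in the snippet):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <uvm/uvm.h>

/*
 * Hypothetical helper: push out or free the pages of a UVM object in
 * [start, end).  pgo_put is entered with vmobjlock write-held and
 * releases the lock before returning.
 */
static int
uobj_flush(struct uvm_object *uobj, voff_t start, voff_t end, int flags)
{

	KASSERT(uobj->pgops->pgo_put != NULL);

	rw_enter(uobj->vmobjlock, RW_WRITER);
	/* pgo_put unlocks uobj->vmobjlock. */
	return (*uobj->pgops->pgo_put)(uobj, start, end, flags);
}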
  /src/sys/rump/librump/rumpvfs/
vm_vfs.c 42 struct uvm_object *uobj = pgs[0]->uobject; local
46 rw_enter(uobj->vmobjlock, RW_WRITER);
63 rw_exit(uobj->vmobjlock);
72 struct uvm_object *uobj = NULL; local
88 if (uobj == NULL) {
89 uobj = pgs[i]->uobject;
90 KASSERT(uobj != NULL);
92 KASSERT(uobj == pgs[i]->uobject);
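The vm_vfs.c snippets are built around one invariant: every page in the pgs[] array handed to the I/O completion path belongs to the same uvm_object, and that object's vmobjlock brackets the per-page work. A hedged sketch of that loop, with the real per-page bookkeeping replaced by a placeholder comment (uobj_process_pages() is a hypothetical name):

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/rwlock.h>
#include <uvm/uvm.h>

/*
 * Sketch only: assert that all pages in pgs[] share one owning object
 * and walk them under that object's lock.
 */
static void
uobj_process_pages(struct vm_page **pgs, int npages)
{
	struct uvm_object *uobj = pgs[0]->uobject;
	int i;

	KASSERT(uobj != NULL);
	rw_enter(uobj->vmobjlock, RW_WRITER);
	for (i = 0; i < npages; i++) {
		KASSERT(pgs[i]->uobject == uobj);
		/* ... per-page work (e.g. dirty/clean bookkeeping) ... */
	}
	rw_exit(uobj->vmobjlock);
}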
  /src/sys/uvm/
uvm_page_status.c 62 struct uvm_object * const uobj __diagused = pg->uobject;
66 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
86 struct uvm_object * const uobj = pg->uobject; local
93 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
105 if (uobj != NULL) {
113 if (uvm_obj_clean_p(uobj) &&
114 uobj->pgops->pgo_markdirty != NULL) {
115 (*uobj->pgops->pgo_markdirty)(uobj);
128 KASSERT(uobj == NULL || ((pg->flags & PG_CLEAN) == 0) ==
    [all...]
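The uvm_page_status.c hits deal with keeping a page's dirty state consistent with its owner: when a page of a currently clean object is about to become dirty and the pager provides a pgo_markdirty hook, the hook is invoked so the pager can record the transition. A hedged sketch of just that step (uobj_note_dirty() is a hypothetical name; the caller is assumed to hold the object's vmobjlock):

#include <sys/param.h>
#include <sys/systm.h>
#include <uvm/uvm.h>

/*
 * Sketch: about to mark a page of uobj dirty.  If the object has no
 * dirty pages yet and the pager wants to know, tell it.
 * Assumes uobj->vmobjlock is held by the caller.
 */
static void
uobj_note_dirty(struct uvm_object *uobj)
{

	if (uobj == NULL)
		return;
	if (uvm_obj_clean_p(uobj) && uobj->pgops->pgo_markdirty != NULL)
		(*uobj->pgops->pgo_markdirty)(uobj);
}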
uvm_anon.c 278 struct uvm_object *uobj; local
310 uobj = pg->uobject;
325 if (uobj) {
326 rw_exit(uobj->vmobjlock);
uvm_page_array.c 45 uvm_page_array_init(struct uvm_page_array *ar, struct uvm_object *uobj,
51 ar->ar_uobj = uobj;
139 struct uvm_object *uobj = ar->ar_uobj; local
149 KASSERT(rw_lock_held(uobj->vmobjlock));
161 &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
167 &uobj->uo_pages, off >> PAGE_SHIFT, (void **)ar->ar_pages,
200 KDASSERT(pg->uobject == uobj);
uvm_device.c 294 udv_reference(struct uvm_object *uobj)
298 rw_enter(uobj->vmobjlock, RW_WRITER);
299 uobj->uo_refs++;
300 UVMHIST_LOG(maphist, "<- done (uobj=%#jx, ref = %jd)",
301 (uintptr_t)uobj, uobj->uo_refs,0,0);
302 rw_exit(uobj->vmobjlock);
314 udv_detach(struct uvm_object *uobj)
316 struct uvm_device *udv = (struct uvm_device *)uobj;
323 rw_enter(uobj->vmobjlock, RW_WRITER);
382 struct uvm_object *uobj = entry->object.uvm_obj; local
    [all...]
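The uvm_device.c results show plain reference counting on a uvm_object: uo_refs is only touched with vmobjlock held as a writer, in both udv_reference() and udv_detach(). A minimal sketch of the reference side (uobj_ref() is a hypothetical name; the UVMHIST logging from the real code is left out):

#include <sys/param.h>
#include <sys/rwlock.h>
#include <uvm/uvm.h>

/*
 * Sketch: take an additional reference on a UVM object.
 * uo_refs is protected by the object's vmobjlock.
 */
static void
uobj_ref(struct uvm_object *uobj)
{

	rw_enter(uobj->vmobjlock, RW_WRITER);
	uobj->uo_refs++;
	rw_exit(uobj->vmobjlock);
}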
uvm_mremap.c 46 struct uvm_object *uobj; local
73 uobj = entry->object.uvm_obj;
74 if (uobj) {
83 if (uobj->pgops->pgo_reference)
84 uobj->pgops->pgo_reference(uobj);
85 reserved_entry->object.uvm_obj = uobj;
  /src/tests/rump/kernspace/
busypage.c 45 static struct uvm_object *uobj; variable in typeref:struct:uvm_object
60 rw_enter(uobj->vmobjlock, RW_READER);
61 uvm_pagewait(testpg, uobj->vmobjlock, "tw");
74 uobj = uao_create(1, 0);
75 rw_enter(uobj->vmobjlock, RW_WRITER);
76 testpg = uvm_pagealloc(uobj, 0, NULL, 0);
77 rw_exit(uobj->vmobjlock);
93 rw_enter(uobj->vmobjlock, RW_WRITER);
95 rw_exit(uobj->vmobjlock);
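busypage.c is a rump kernspace test that builds the smallest useful setup around a uobj: create a one-page anonymous object with uao_create(), allocate a page into it under the write lock, and later sleep on that page with uvm_pagewait(), which is entered with vmobjlock held and drops it. A hedged sketch of the setup step (names are hypothetical):

#include <sys/param.h>
#include <sys/rwlock.h>
#include <uvm/uvm.h>

static struct uvm_object *test_uobj;
static struct vm_page *test_pg;

/*
 * Sketch: one-page anonymous object with its first page allocated.
 * uvm_pagealloc() can return NULL under memory pressure, which this
 * sketch does not handle.
 */
static void
test_setup(void)
{

	test_uobj = uao_create(PAGE_SIZE, 0);
	rw_enter(test_uobj->vmobjlock, RW_WRITER);
	test_pg = uvm_pagealloc(test_uobj, 0, NULL, 0);
	rw_exit(test_uobj->vmobjlock);
}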
  /src/sys/compat/linux32/arch/aarch64/
linux32_exec_machdep.c 56 struct uvm_object *uobj; local
67 uobj = *e->e_sigobject;
68 if (uobj == NULL)
74 (*uobj->pgops->pgo_reference)(uobj);
75 error = uvm_map(&p->p_vmspace->vm_map, &va, round_page(sz), uobj, 0, 0,
79 (*uobj->pgops->pgo_detach)(uobj);
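The linux32 sigcode mapping follows a common handoff pattern: take an extra pager reference on the shared sigobject, pass the object to uvm_map(), and drop the reference again if the mapping fails, since on success the new map entry owns it. A hedged sketch with hypothetical protection and flag choices via UVM_MAPFLAG (the real flags are cut off in the snippet; map_uobj_into() is a hypothetical name):

#include <sys/param.h>
#include <sys/proc.h>
#include <uvm/uvm.h>

/*
 * Sketch: map an existing UVM object into a process's address space.
 * The pgo_reference() is consumed by the map entry on success; on
 * failure it must be dropped again with pgo_detach().
 */
static int
map_uobj_into(struct proc *p, struct uvm_object *uobj, vaddr_t *vap,
    vsize_t sz)
{
	int error;

	(*uobj->pgops->pgo_reference)(uobj);
	error = uvm_map(&p->p_vmspace->vm_map, vap, round_page(sz), uobj,
	    0, 0, UVM_MAPFLAG(UVM_PROT_RX, UVM_PROT_RX, UVM_INH_SHARE,
	    UVM_ADV_NORMAL, 0));
	if (error)
		(*uobj->pgops->pgo_detach)(uobj);
	return error;
}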
  /src/sys/external/bsd/drm2/drm/
drm_vm.c 60 struct uvm_object *uobj; local
70 uobj = udv_attach(devno, prot, offset, size);
71 if (uobj == NULL)
74 *uobjp = uobj;
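drm_vm.c gets its uobj from udv_attach(), the device pager's attach routine, which returns NULL when the device cannot be mapped with the requested protection, offset, and size. A hedged sketch of that lookup (lookup_device_uobj() is a hypothetical wrapper; the error code is an arbitrary choice):

#include <sys/param.h>
#include <sys/errno.h>
#include <uvm/uvm.h>
#include <uvm/uvm_device.h>

/*
 * Sketch: obtain a device-pager object for a character device so it
 * can be handed to uvm_map(), converting NULL into an error code.
 */
static int
lookup_device_uobj(dev_t devno, vm_prot_t prot, voff_t offset, vsize_t size,
    struct uvm_object **uobjp)
{
	struct uvm_object *uobj;

	uobj = udv_attach(devno, prot, offset, size);
	if (uobj == NULL)
		return EINVAL;
	*uobjp = uobj;
	return 0;
}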
drm_gem_cma_helper.c 184 struct uvm_object *uobj = entry->object.uvm_obj; local
186 container_of(uobj, struct drm_gem_object, gemo_uvmobj);
222 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
228 uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, uobj);
  /src/sys/modules/examples/fopsmapper/
fopsmapper.c 89 struct uvm_object *uobj; member in struct:fopsmapper_softc
126 fo->uobj = uao_create(size, 0);
132 * uobj on success.
134 error = uvm_map(kernel_map, &va, fo->bufsize, fo->uobj, 0, 0,
138 uao_detach(fo->uobj);
144 /* Get the reference of uobj */
145 uao_reference(fo->uobj);
146 *uobjp = fo->uobj;
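fopsmapper.c, one of the example kernel modules, shows an mmap-style hook handing a uvm_object back to its caller: it backs the mapping with an anonymous object, wires the object into kernel_map with uvm_map() (which keeps the reference created by uao_create() on success), and then takes a second reference to return through *uobjp. A hedged sketch of that sequence, with hypothetical UVM_MAPFLAG protections (the real flags are truncated out of the snippet; create_shared_buffer() is a hypothetical name):

#include <sys/param.h>
#include <uvm/uvm.h>

/*
 * Sketch: anonymous object mapped into the kernel, plus an extra
 * reference returned for the caller's own mapping.  *vap is used as
 * a hint and receives the chosen kernel virtual address.
 */
static int
create_shared_buffer(vsize_t size, vaddr_t *vap, struct uvm_object **uobjp)
{
	struct uvm_object *uobj;
	int error;

	size = round_page(size);
	uobj = uao_create(size, 0);

	/* uvm_map() keeps the uao_create() reference on success. */
	error = uvm_map(kernel_map, vap, size, uobj, 0, 0,
	    UVM_MAPFLAG(UVM_PROT_RW, UVM_PROT_RW, UVM_INH_SHARE,
	    UVM_ADV_RANDOM, 0));
	if (error) {
		uao_detach(uobj);
		return error;
	}

	/* Second reference is for the caller's use of *uobjp. */
	uao_reference(uobj);
	*uobjp = uobj;
	return 0;
}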
  /src/sys/ufs/lfs/
ulfs_inode.c 176 struct uvm_object *uobj; local
191 uobj = &vp->v_uobj;
214 rw_enter(uobj->vmobjlock, RW_WRITER);
241 rw_enter(uobj->vmobjlock, RW_WRITER);
256 rw_exit(uobj->vmobjlock);
  /src/sys/ufs/ufs/
ufs_inode.c 209 struct uvm_object *uobj; local
224 uobj = &vp->v_uobj;
247 rw_enter(uobj->vmobjlock, RW_WRITER);
274 rw_enter(uobj->vmobjlock, RW_WRITER);
289 rw_exit(uobj->vmobjlock);
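Both ulfs_inode.c and ufs_inode.c take their uobj as &vp->v_uobj: every vnode embeds a uvm_object, and the code here takes its vmobjlock as a writer around its page-level work. A minimal sketch of that shape (vnode_page_work() is a hypothetical name; the actual per-page work is not shown in the snippet):

#include <sys/param.h>
#include <sys/rwlock.h>
#include <sys/vnode.h>
#include <uvm/uvm.h>

/*
 * Sketch: the vnode's embedded uvm_object, locked around whatever
 * page-level work the filesystem needs to do.
 */
static void
vnode_page_work(struct vnode *vp)
{
	struct uvm_object *uobj = &vp->v_uobj;

	rw_enter(uobj->vmobjlock, RW_WRITER);
	/* ... operate on the vnode's pages here ... */
	rw_exit(uobj->vmobjlock);
}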
