
Lines Matching defs:uobjpage

548     struct vm_page *uobjpage,
565 } else if (uobjpage != PGO_DONTCARE) {
567 opg = uobjpage;
580 KASSERT(uobjpage != NULL);
1808 struct vm_amap *amap, struct uvm_object *uobj, struct vm_page *uobjpage)
1862 struct vm_page *uobjpage;
1876 uobjpage = NULL;
1879 uobjpage = pages[flt->centeridx];
1889 * then we've got a pointer to it as "uobjpage" and we've already
1895 * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
1903 * note that uobjpage cannot be PGO_DONTCARE at this point. we now
1904 * set uobjpage to PGO_DONTCARE if we are doing a zero fill. if we
1910 uobjpage = PGO_DONTCARE;
1913 KASSERT(uobjpage != PGO_DONTCARE);
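
A note on the sentinel: PGO_DONTCARE is a distinguished non-NULL pointer
constant, so the fault code can tell "zero fill, no backing page at all"
apart from "backing page exists but has not been fetched yet" (NULL).
Here is a minimal user-space model of that three-way convention; the
struct layout and the sentinel's storage are stand-ins, not the kernel's
real definitions.

/*
 * User-space model of the PGO_DONTCARE convention (not kernel code).
 */
#include <stdio.h>

struct vm_page { int dummy; };

/* Stand-in for UVM's PGO_DONTCARE: any distinguished non-NULL value. */
static struct vm_page pgo_dontcare_storage;
#define PGO_DONTCARE (&pgo_dontcare_storage)

static void
classify(struct vm_page *uobjpage)
{
	if (uobjpage == NULL)
		printf("page must be fetched from the pager\n");
	else if (uobjpage == PGO_DONTCARE)
		printf("zero fill: no backing page at all\n");
	else
		printf("backing page already resident\n");
}

int
main(void)
{
	struct vm_page pg;

	classify(NULL);
	classify(PGO_DONTCARE);
	classify(&pg);
	return 0;
}
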
1920 * if uobjpage is not null then we do not need to do I/O to get the
1921 * uobjpage.
1923 * if uobjpage is null, then we need to unlock and ask the pager to
1928 if (uobjpage) {
1932 error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
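
When uobjpage is NULL, uvm_fault_lower_io() must drop the object lock
before sleeping in the pager, then relock and check that nothing changed
while it slept; if the revalidation fails the kernel returns ERESTART
and the whole fault is retried. The sketch below models that
unlock/IO/relock/revalidate pattern in user space, with a pthread mutex
standing in for vmobjlock and a version counter as a simplification of
the real revalidation.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	pthread_mutex_t lock;	/* stand-in for the object's vmobjlock */
	int version;		/* bumped whenever the object changes */
	bool resident;		/* is the wanted page in memory? */
};

static void
slow_pager_io(struct obj *o)
{
	/* Blocking I/O runs with the lock released (single-threaded demo). */
	o->resident = true;
}

static int
fault_lower_io(struct obj *o)
{
	int saved;

	pthread_mutex_lock(&o->lock);
	for (;;) {
		if (o->resident) {
			pthread_mutex_unlock(&o->lock);
			return 0;	/* page is here; caller maps it */
		}
		saved = o->version;
		pthread_mutex_unlock(&o->lock);	/* never sleep locked */
		slow_pager_io(o);
		pthread_mutex_lock(&o->lock);
		if (o->version != saved) {
			/*
			 * Object changed while unlocked: the caller must
			 * restart the fault (ERESTART in the kernel).
			 */
			pthread_mutex_unlock(&o->lock);
			return -1;
		}
	}
}

int
main(void)
{
	struct obj o = { PTHREAD_MUTEX_INITIALIZER, 0, false };

	printf("fault result: %d\n", fault_lower_io(&o));
	return 0;
}
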
1939 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
1948 * - at this point uobjpage cannot be NULL
1949 * - at this point uobjpage cannot be PG_RELEASED (since we checked
1951 * - at this point uobjpage could be waited on (handle later)
1952 * - uobjpage can be from a different object if tmpfs (vnode vs UAO)
1955 KASSERT(uobjpage != NULL);
1957 KASSERT(uobj == NULL || uobjpage->uobject->vmobjlock == uobj->vmobjlock);
1958 KASSERT(uobj == NULL || !UVM_OBJ_IS_CLEAN(uobjpage->uobject) ||
1959 uvm_pagegetdirty(uobjpage) == UVM_PAGE_STATUS_CLEAN);
1962 error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
1964 error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
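
This two-way dispatch is the heart of the lower-layer fault. A write to
a copy-on-write mapping must not dirty the shared object page, so the
page is first copied ("promoted") into a private anon; every other case
maps the object's page directly. The real code keeps the decision in
flt->promote, computed earlier in the fault; the toy model below folds
it into two booleans, which is a simplification.

#include <stdbool.h>
#include <stdio.h>

struct faultctx {
	bool cow;	/* mapping is copy-on-write (private amap) */
	bool write;	/* the faulting access is a write */
};

static const char *
lower_fault_action(const struct faultctx *flt)
{
	/* Writes to COW mappings must not touch the shared page. */
	if (flt->cow && flt->write)
		return "promote: copy the object page into a private anon";
	return "direct: map the object's page as-is";
}

int
main(void)
{
	struct faultctx rd = { true, false }, wr = { true, true };

	printf("read  on COW mapping -> %s\n", lower_fault_action(&rd));
	printf("write on COW mapping -> %s\n", lower_fault_action(&wr));
	return 0;
}
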
2037 UVMHIST_LOG(maphist, " got uobjpage (%#jx) "
2270 struct uvm_object *uobj, struct vm_page *uobjpage)
2281 * set "pg" to the page we want to map in (uobjpage, usually)
2286 UVM_OBJ_NEEDS_WRITEFAULT(uobjpage->uobject))
2288 pg = uobjpage; /* map in the actual object */
2290 KASSERT(uobjpage != PGO_DONTCARE);
2297 if (uobjpage->loan_count) {
2298 uvm_fault_lower_direct_loan(ufi, flt, uobj, &pg, &uobjpage);
2300 KASSERT(pg == uobjpage);
2320 struct vm_page *uobjpage = *ruobjpage;
2334 error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, uobjpage);
2340 pg = uvm_loanbreak(uobjpage);
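
uvm_loanbreak() covers the case where the object's page is on loan
(loan_count != 0): a write through a loaned page would be visible to
every loan holder, so the fault path substitutes a private copy before
allowing the write. The sketch below captures that in spirit; the real
function also swaps the new page into the object and can sleep waiting
for memory, which the model reduces to a NULL return.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page {
	int loan_count;		/* > 0: others hold read-only loans */
	unsigned char data[PAGE_SIZE];
};

static struct page *
loanbreak(struct page *loaned)
{
	struct page *copy;

	if (loaned->loan_count == 0)
		return loaned;	/* not loaned: safe to write in place */
	copy = malloc(sizeof(*copy));
	if (copy == NULL)
		return NULL;	/* the kernel would wait and retry here */
	copy->loan_count = 0;
	memcpy(copy->data, loaned->data, PAGE_SIZE);
	return copy;		/* caller maps (and writes) the copy */
}

int
main(void)
{
	struct page orig = { .loan_count = 1 };
	struct page *pg = loanbreak(&orig);

	printf("write target is the %s\n", pg == &orig ? "original" : "copy");
	if (pg != NULL && pg != &orig)
		free(pg);
	return 0;
}
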
2379 struct uvm_object *uobj, struct vm_page *uobjpage)
2402 error = uvmfault_promote(ufi, NULL, uobjpage, &anon, &flt->anon_spare);
2418 if (uobjpage != PGO_DONTCARE) {
2427 pmap_page_protect(uobjpage, VM_PROT_NONE);
2434 " promote uobjpage %#jx to anon/page %#jx/%#jx",
2435 (uintptr_t)uobjpage, (uintptr_t)anon, (uintptr_t)pg, 0);
2476 * Note: pg is either the uobjpage or the new page in the new anon.
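
Putting the promotion path together: uvmfault_promote() supplies a fresh
anon and page, the data is copied over from uobjpage, and
pmap_page_protect(uobjpage, VM_PROT_NONE) (line 2427 above) revokes
every existing mapping of the original so that later accesses fault
again and find the anon copy. Below is a user-space model of that
sequence; revoke_mappings() is a hypothetical stand-in for the pmap
call, and the structs are simplifications.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096

struct page {
	bool mapped;		/* page has live pmap translations */
	unsigned char data[PAGE_SIZE];
};

struct anon {
	struct page *pg;	/* anonymous memory owns this page */
};

static void
revoke_mappings(struct page *pg)
{
	/* Model of pmap_page_protect(pg, VM_PROT_NONE). */
	pg->mapped = false;
}

static struct anon *
promote(struct page *uobjpage)
{
	struct anon *anon = malloc(sizeof(*anon));

	if (anon == NULL)
		return NULL;
	anon->pg = malloc(sizeof(*anon->pg));
	if (anon->pg == NULL) {
		free(anon);
		return NULL;
	}
	memcpy(anon->pg->data, uobjpage->data, PAGE_SIZE);
	anon->pg->mapped = false;
	/* Old translations must go, or writes would still reach the
	 * shared object page behind the anon's back. */
	revoke_mappings(uobjpage);
	return anon;
}

int
main(void)
{
	struct page objpg = { .mapped = true };
	struct anon *anon = promote(&objpg);

	if (anon == NULL)
		return 1;
	printf("object page still mapped: %s\n", objpg.mapped ? "yes" : "no");
	free(anon->pg);
	free(anon);
	return 0;
}
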