
Lines Matching refs:pg

188 struct vm_page *pg;
194 pg = anons[lcv]->an_page;
195 if (pg && (pg->flags & PG_BUSY) == 0) {
196 uvm_pagelock(pg);
197 uvm_pagedeactivate(pg);
198 uvm_pageunlock(pg);
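
The matches at source lines 188-198 are the anon-flush path deactivating resident anon pages. A minimal sketch of that idiom, assuming a locked vm_anon, the usual UVM kernel headers (<uvm/uvm.h>), and a hypothetical helper name:

	static void
	anon_page_deactivate(struct vm_anon *anon)
	{
		struct vm_page *pg = anon->an_page;

		/*
		 * Skip pages that are busy (I/O or fault in flight); otherwise
		 * push the page toward the inactive queue under the page
		 * interlock.
		 */
		if (pg != NULL && (pg->flags & PG_BUSY) == 0) {
			uvm_pagelock(pg);
			uvm_pagedeactivate(pg);
			uvm_pageunlock(pg);
		}
	}
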
274 struct vm_page *pg;
301 pg = anon->an_page;
309 if (pg && pg->loan_count)
310 pg = uvm_anon_lockloanpg(anon);
317 if (pg) {
326 if ((pg->flags & PG_BUSY) == 0) {
337 if (pg->uobject) {
342 uvm_pagewait(pg, pg->uobject->vmobjlock, "anonget1");
348 uvm_pagewait(pg, anon->an_lock, "anonget2");
361 pg = uvm_pagealloc(NULL,
364 if (pg == NULL) {
387 error = uvm_swap_get(pg, anon->an_swslot,
437 if ((pg->flags & PG_RELEASED) != 0) {
447 uvm_pagefree(pg);
457 if ((pg->flags & PG_RELEASED) != 0) {
484 uvm_pagelock(pg);
485 uvm_pageactivate(pg);
486 uvm_pagewakeup(pg);
487 uvm_pageunlock(pg);
488 pg->flags &= ~(PG_BUSY|PG_FAKE);
489 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_UNKNOWN);
490 UVM_PAGE_OWN(pg, NULL);
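
Source lines 326-348 show how a busy anon page is waited on: the sleep uses whichever lock currently owns the page. A hedged sketch of that decision (fragment only; the relock-and-retry handling is elided):

	if ((pg->flags & PG_BUSY) != 0) {
		if (pg->uobject != NULL) {
			/* loaned page owned by an object: sleep on its lock */
			uvm_pagewait(pg, pg->uobject->vmobjlock, "anonget1");
		} else {
			/* page owned by the anon alone: sleep on the anon lock */
			uvm_pagewait(pg, anon->an_lock, "anonget2");
		}
		/* uvm_pagewait() drops the lock; the caller relocks and retries */
	}
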
555 struct vm_page *pg;
602 pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon,
604 if (pg == NULL) {
608 pg = NULL;
615 if (pg == NULL) {
638 * copy the page [pg now dirty]
648 uvm_pagecopy(opg, pg);
650 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
662 pg = anon->an_page;
663 pg->flags &= ~(PG_BUSY|PG_FAKE);
664 UVM_PAGE_OWN(pg, NULL);
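
Source lines 602-664 are the promotion path: a fresh page is allocated for the destination anon, the original page is copied into it (leaving it dirty), and the new page is unbusied. A sketch under those assumptions; the uvm_pagealloc() flags are elided in the listing, so 0 below is only a placeholder:

	pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon, 0 /* flags elided */);
	if (pg == NULL) {
		/* out of memory: back out, wait for free pages, refault */
	} else {
		uvm_pagecopy(opg, pg);	/* copy opg into pg; pg is now dirty */
		KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
		pg->flags &= ~(PG_BUSY|PG_FAKE);
		UVM_PAGE_OWN(pg, NULL);
	}
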
1026 * the entry is wired or we are fault-wiring the pg.
1332 struct vm_page *pg = anon->an_page;
1341 if (pg && pg->loan_count == 0 && (pg->flags & PG_BUSY) == 0 &&
1344 pg, anon->an_ref > 1);
1371 vaddr_t currva, struct vm_page *pg, bool readonly)
1377 KASSERT(pg->uobject == NULL);
1378 KASSERT(pg->uanon != NULL);
1379 KASSERT(rw_lock_op(pg->uanon->an_lock) == flt->upper_lock_type);
1380 KASSERT(uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
1387 if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
1388 uvm_pagelock(pg);
1389 uvm_pageenqueue(pg);
1390 uvm_pageunlock(pg);
1394 " MAPPING: n anon: pm=%#jx, va=%#jx, pg=%#jx",
1395 (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
1405 VM_PAGE_TO_PHYS(pg),
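
Source lines 1387-1390 (and again at 2072-2075) show the neighbor-page idiom: a page entered as a side effect of the fault is handed to the paging queues only if it is neither already queued nor wired. A minimal sketch of that check:

	if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
		uvm_pagelock(pg);
		uvm_pageenqueue(pg);	/* make the page visible to the pagedaemon */
		uvm_pageunlock(pg);
	}
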
1603 struct vm_page *pg;
1628 pg = anon->an_page;
1631 KASSERT((pg->flags & (PG_BUSY | PG_FAKE)) == 0);
1643 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1656 struct vm_page *pg;
1660 pg = anon->an_page;
1664 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1674 struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
1688 KASSERT(uvm_pagegetdirty(pg) != UVM_PAGE_STATUS_CLEAN);
1695 " MAPPING: anon: pm=%#jx, va=%#jx, pg=%#jx, promote=%jd",
1696 (uintptr_t)pmap, va, (uintptr_t)pg, flt->promote);
1697 if (pmap_enter(pmap, va, VM_PAGE_TO_PHYS(pg),
1724 uvm_pagelock(pg);
1725 uvm_pageenqueue(pg);
1726 uvm_pageunlock(pg);
1748 uvm_fault_upper_done(ufi, flt, anon, pg);
1766 struct vm_anon *anon, struct vm_page *pg)
1777 uvm_pagelock(pg);
1778 uvm_pagewire(pg);
1779 uvm_pageunlock(pg);
1788 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
1790 } else if (uvmpdpol_pageactivate_p(pg)) {
1796 uvm_pagelock(pg);
1797 uvm_pageactivate(pg);
1798 uvm_pageunlock(pg);
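
Source lines 1777-1798 are the upper-fault epilogue: a wiring fault wires the page and marks it dirty (so the swap slot backing it can be freed), otherwise the page is activated when the pagedaemon policy asks for it. A sketch, with the name of the wiring predicate assumed:

	if (flt->wire_paging) {		/* field name assumed */
		uvm_pagelock(pg);
		uvm_pagewire(pg);
		uvm_pageunlock(pg);
		/*
		 * A wired page cannot be paged out; dirty it so the now-stale
		 * swap slot can be released (line 1788).
		 */
		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
	} else if (uvmpdpol_pageactivate_p(pg)) {
		uvm_pagelock(pg);
		uvm_pageactivate(pg);
		uvm_pageunlock(pg);
	}
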
2056 vaddr_t currva, struct vm_page *pg)
2058 const bool readonly = uvm_pagereadonly_p(pg) || pg->loan_count > 0;
2072 if (!uvmpdpol_pageisqueued_p(pg) && pg->wire_count == 0) {
2073 uvm_pagelock(pg);
2074 uvm_pageenqueue(pg);
2075 uvm_pageunlock(pg);
2079 " MAPPING: n obj: pm=%#jx, va=%#jx, pg=%#jx",
2080 (uintptr_t)ufi->orig_map->pmap, currva, (uintptr_t)pg, 0);
2090 KASSERT((pg->flags & PG_PAGEOUT) == 0);
2091 KASSERT((pg->flags & PG_RELEASED) == 0);
2092 KASSERT(!UVM_OBJ_IS_CLEAN(pg->uobject) ||
2093 uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN);
2094 KASSERT((pg->flags & PG_BUSY) == 0);
2095 KASSERT(rw_lock_op(pg->uobject->vmobjlock) == flt->lower_lock_type);
2103 VM_PAGE_TO_PHYS(pg), mapprot, mapflags);
2122 struct vm_page *pg;
2154 pg = NULL;
2155 error = uobj->pgops->pgo_get(uobj, uoff, &pg, &gotpages,
2157 /* locked: pg(if no error) */
2194 uobj = pg->uobject;
2197 KASSERT((pg->flags & PG_BUSY) != 0);
2200 uvm_pagelock(pg);
2201 uvm_pageactivate(pg);
2202 uvm_pageunlock(pg);
2204 /* locked(locked): maps(read), amap(if !null), uobj, pg */
2205 /* locked(!locked): uobj, pg */
2213 if ((pg->flags & PG_RELEASED) != 0 ||
2225 if ((pg->flags & PG_RELEASED) == 0) {
2226 pg->flags &= ~PG_BUSY;
2227 uvm_pagelock(pg);
2228 uvm_pagewakeup(pg);
2229 uvm_pageunlock(pg);
2230 UVM_PAGE_OWN(pg, NULL);
2233 uvm_pagefree(pg);
2249 * we have the data in pg. we are holding object lock (so the page
2256 *ruobjpage = pg;
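
Source lines 2225-2233 are the back-out path after pgo_get(): if the page survived the I/O it is unbusied and any waiters are woken; if it was released while the fault slept, it is freed instead. Sketch:

	if ((pg->flags & PG_RELEASED) == 0) {
		pg->flags &= ~PG_BUSY;
		uvm_pagelock(pg);
		uvm_pagewakeup(pg);	/* wake threads blocked in uvm_pagewait() */
		uvm_pageunlock(pg);
		UVM_PAGE_OWN(pg, NULL);
	} else {
		uvm_pagefree(pg);	/* released while we slept: discard it */
	}
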
2272 struct vm_page *pg;
2281 * set "pg" to the page we want to map in (uobjpage, usually)
2288 pg = uobjpage; /* map in the actual object */
2298 uvm_fault_lower_direct_loan(ufi, flt, uobj, &pg, &uobjpage);
2300 KASSERT(pg == uobjpage);
2301 KASSERT((pg->flags & PG_BUSY) == 0);
2302 return uvm_fault_lower_enter(ufi, flt, uobj, NULL, pg);
2319 struct vm_page *pg;
2340 pg = uvm_loanbreak(uobjpage);
2341 if (pg == NULL) {
2351 *rpg = pg;
2352 *ruobjpage = pg;
2359 uvm_pagelock(pg);
2360 uvm_pagewakeup(pg);
2361 uvm_pageunlock(pg);
2362 pg->flags &= ~PG_BUSY;
2363 UVM_PAGE_OWN(pg, NULL);
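
Source lines 2340-2363 are the write-to-loaned-page case: uvm_loanbreak() replaces the shared object page with a private copy (or fails under memory pressure), and the replacement is woken and unbusied before being mapped. Sketch:

	pg = uvm_loanbreak(uobjpage);
	if (pg == NULL) {
		/* no memory for the private copy: unlock and let the fault retry */
	} else {
		uvm_pagelock(pg);
		uvm_pagewakeup(pg);
		uvm_pageunlock(pg);
		pg->flags &= ~PG_BUSY;
		UVM_PAGE_OWN(pg, NULL);
	}
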
2383 struct vm_page *pg;
2412 pg = anon->an_page;
2435 (uintptr_t)uobjpage, (uintptr_t)anon, (uintptr_t)pg, 0);
2446 (uintptr_t)anon, (uintptr_t)pg
2449 return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg);
2461 struct vm_anon *anon, struct vm_page *pg)
2464 const bool readonly = uvm_pagereadonly_p(pg);
2472 * anon(if !null), pg(if anon), unlock_uobj(if !null)
2476 * Note: pg is either the uobjpage or the new page in the new anon.
2486 * note that pg can't be PG_RELEASED or PG_BUSY since we did
2490 KASSERT((pg->flags & PG_RELEASED) == 0);
2491 KASSERT((pg->flags & PG_BUSY) == 0);
2499 " MAPPING: case2: pm=%#jx, va=%#jx, pg=%#jx, promote=%jd",
2501 (uintptr_t)pg, flt->promote);
2504 "entry=%p map=%p orig_rvaddr=%p pg=%p",
2507 (void *)ufi->orig_rvaddr, pg);
2510 VM_PAGE_TO_PHYS(pg),
2529 uvm_pagelock(pg);
2530 uvm_pageenqueue(pg);
2531 uvm_pagewakeup(pg);
2532 uvm_pageunlock(pg);
2548 uvm_fault_lower_done(ufi, flt, uobj, pg);
2563 struct uvm_object *uobj, struct vm_page *pg)
2569 uvm_pagelock(pg);
2570 uvm_pagewire(pg);
2571 uvm_pageunlock(pg);
2572 if (pg->flags & PG_AOBJ) {
2580 * use pg->uobject here. if the page is from a
2586 KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
2587 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
2588 uao_dropswap(pg->uobject, pg->offset >> PAGE_SHIFT);
2590 } else if (uvmpdpol_pageactivate_p(pg)) {
2596 uvm_pagelock(pg);
2597 uvm_pageactivate(pg);
2598 uvm_pageunlock(pg);
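
Source lines 2569-2598 are the lower-fault counterpart of the epilogue above: when an aobj page is wired it is marked dirty and its swap slot is dropped with uao_dropswap(), since a wired page cannot be paged out and the slot would otherwise go stale. Sketch, again with the wiring predicate name assumed:

	if (flt->wire_paging) {		/* field name assumed */
		uvm_pagelock(pg);
		uvm_pagewire(pg);
		uvm_pageunlock(pg);
		if (pg->flags & PG_AOBJ) {
			/* dirty the page and release its stale swap slot */
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
			uao_dropswap(pg->uobject, pg->offset >> PAGE_SHIFT);
		}
	} else if (uvmpdpol_pageactivate_p(pg)) {
		uvm_pagelock(pg);
		uvm_pageactivate(pg);
		uvm_pageunlock(pg);
	}
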
2671 struct vm_page *pg;
2725 pg = PHYS_TO_VM_PAGE(pa);
2726 if (pg) {
2727 uvm_pagelock(pg);
2728 uvm_pageunwire(pg);
2729 uvm_pageunlock(pg);