Lines Matching refs:pg
202 * => call should have already set pg's object and offset pointers
207 uvm_pageinsert_object(struct uvm_object *uobj, struct vm_page *pg)
210 KASSERT(uobj == pg->uobject);
212 KASSERT((pg->flags & PG_TABLED) == 0);
214 if ((pg->flags & PG_STAT) != 0) {
216 const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);
218 if ((pg->flags & PG_FILE) != 0) {
235 pg->flags |= PG_TABLED;
240 uvm_pageinsert_tree(struct uvm_object *uobj, struct vm_page *pg)
242 const uint64_t idx = pg->offset >> PAGE_SHIFT;
247 error = radix_tree_insert_node(&uobj->uo_pages, idx, pg);
251 if ((pg->flags & PG_CLEAN) == 0) {
252 uvm_obj_page_set_dirty(pg);
254 KASSERT(((pg->flags & PG_CLEAN) == 0) ==
255 uvm_obj_page_dirty_p(pg));
266 uvm_pageremove_object(struct uvm_object *uobj, struct vm_page *pg)
269 KASSERT(uobj == pg->uobject);
271 KASSERT(pg->flags & PG_TABLED);
273 if ((pg->flags & PG_STAT) != 0) {
275 const unsigned int status = pg->flags & (PG_CLEAN | PG_DIRTY);
277 if ((pg->flags & PG_FILE) != 0) {
295 pg->flags &= ~PG_TABLED;
296 pg->uobject = NULL;
300 uvm_pageremove_tree(struct uvm_object *uobj, struct vm_page *pg)
306 opg = radix_tree_remove_node(&uobj->uo_pages, pg->offset >> PAGE_SHIFT);
307 KASSERT(pg == opg);
675 uvm_vm_page_to_phys(const struct vm_page *pg)
678 return pg->phys_addr & ~(PAGE_SIZE - 1);
703 uvm_page_numa_lookup(struct vm_page *pg)
711 pa = VM_PAGE_TO_PHYS(pg);
719 printf("uvm_page_numa_lookup: failed, first pg=%p pa=%#"
720 PRIxPADDR "\n", pg, VM_PAGE_TO_PHYS(pg));
737 struct vm_page *pg;
794 while ((pg = LIST_FIRST(ohead)) != NULL) {
795 LIST_REMOVE(pg, pageq.list);
808 uvm_page_get_bucket(pg) == ob);
810 uvm_page_get_freelist(pg));
812 nb = uvm_page_numa_lookup(pg);
814 nb = atop(VM_PAGE_TO_PHYS(pg))
818 uvm_page_set_bucket(pg, nb);
821 nc = VM_PGCOLOR(pg);
823 LIST_INSERT_HEAD(nhead, pg, pageq.list);
1008 struct vm_page *pg;
1040 pg = LIST_FIRST(&pgb->pgb_colors[c]);
1041 if (__predict_true(pg != NULL)) {
1046 LIST_REMOVE(pg, pageq.list);
1047 KASSERT(pg->flags == PG_FREE);
1048 pg->flags = PG_BUSY | PG_CLEAN | PG_FAKE;
1064 KASSERT(uvm_page_get_bucket(pg) == b);
1069 return pg;
1089 struct vm_page *pg;
1092 if ((pg = uvm_pgflcache_alloc(ucpu, f, *trycolorp)) != NULL) {
1095 return pg;
1103 pg = uvm_pagealloc_pgb(ucpu, f, b, trycolorp, flags);
1104 if (pg != NULL) {
1105 return pg;
1137 struct vm_page *pg;
1180 pg = uvm_pagealloc_pgfl(ucpu, lcv, &color, flags);
1181 if (pg != NULL) {
1196 pg = uvm_pagealloc_pgfl(ucpu, free_list, &color, flags);
1197 if (pg != NULL) {
1221 pg = uvm_pgflcache_alloc(ucpu, lcv, color);
1222 if (pg != NULL) {
1227 pg = uvm_pagealloc_pgb(ucpu, lcv,
1229 if (pg != NULL) {
1257 KASSERT(pg->flags == (PG_BUSY|PG_CLEAN|PG_FAKE));
1261 * that pg->uobject and pg->uanon are NULL. we only need to take
1265 mutex_enter(&pg->interlock);
1267 pg->offset = off;
1268 pg->uobject = obj;
1269 pg->uanon = anon;
1270 KASSERT(uvm_page_owner_locked_p(pg, true));
1272 anon->an_page = pg;
1273 pg->flags |= PG_ANON;
1274 mutex_exit(&pg->interlock);
1280 pg->flags |= PG_FILE;
1282 pg->flags |= PG_AOBJ;
1284 uvm_pageinsert_object(obj, pg);
1285 mutex_exit(&pg->interlock);
1286 error = uvm_pageinsert_tree(obj, pg);
1288 mutex_enter(&pg->interlock);
1289 uvm_pageremove_object(obj, pg);
1290 mutex_exit(&pg->interlock);
1291 uvm_pagefree(pg);
1297 pg->owner_tag = NULL;
1299 UVM_PAGE_OWN(pg, "new alloc");
1304 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
1306 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1309 return(pg);
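The allocation path listed above is normally entered through uvm_pagealloc(), the common wrapper around uvm_pagealloc_strat(). A minimal sketch of a typical caller follows; uobj and off are hypothetical, and UVM_PGA_ZERO and uvm_wait() are assumed to behave as in stock NetBSD (they do not appear in this listing). The caller is assumed to hold the object's vmobjlock as a writer.

        struct vm_page *pg;

        rw_enter(uobj->vmobjlock, RW_WRITER);
        while ((pg = uvm_pagealloc(uobj, off, NULL, UVM_PGA_ZERO)) == NULL) {
                /* no free pages: drop the lock, wait for the pagedaemon, retry */
                rw_exit(uobj->vmobjlock);
                uvm_wait("pgalloc");
                rw_enter(uobj->vmobjlock, RW_WRITER);
        }
        /* the returned page is busy; clear PG_BUSY once it is initialized */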
1323 struct vm_page *pg __diagused;
1337 pg = radix_tree_replace_node(&uobj->uo_pages, idx, newpg);
1338 KASSERT(pg == oldpg);
1362 uvm_pagerealloc(struct vm_page *pg, struct uvm_object *newobj, voff_t newoff)
1370 if (pg->uobject) {
1371 uvm_pageremove_tree(pg->uobject, pg);
1372 uvm_pageremove_object(pg->uobject, pg);
1380 mutex_enter(&pg->interlock);
1381 pg->uobject = newobj;
1382 pg->offset = newoff;
1384 pg->flags |= PG_FILE;
1386 pg->flags |= PG_AOBJ;
1388 uvm_pageinsert_object(newobj, pg);
1389 mutex_exit(&pg->interlock);
1390 error = uvm_pageinsert_tree(newobj, pg);
1392 mutex_enter(&pg->interlock);
1393 uvm_pageremove_object(newobj, pg);
1394 mutex_exit(&pg->interlock);
1407 * => assumes all valid mappings of pg are gone
1411 uvm_pagefree(struct vm_page *pg)
1421 if (pg->uobject == (void *)0xdeadbeef &&
1422 pg->uanon == (void *)0xdeadbeef) {
1423 panic("uvm_pagefree: freeing free page %p", pg);
1427 KASSERT((pg->flags & PG_PAGEOUT) == 0);
1428 KASSERT(!(pg->flags & PG_FREE));
1429 KASSERT(pg->uobject == NULL || rw_write_held(pg->uobject->vmobjlock));
1430 KASSERT(pg->uobject != NULL || pg->uanon == NULL ||
1431 rw_write_held(pg->uanon->an_lock));
1437 if (pg->uobject != NULL) {
1438 uvm_pageremove_tree(pg->uobject, pg);
1445 if (pg->loan_count) {
1446 KASSERT(pg->wire_count == 0);
1459 uvm_pagelock(pg);
1461 if (pg->uobject != NULL) {
1462 uvm_pageremove_object(pg->uobject, pg);
1463 pg->flags &= ~(PG_FILE|PG_AOBJ);
1464 } else if (pg->uanon != NULL) {
1465 if ((pg->flags & PG_ANON) == 0) {
1466 pg->loan_count--;
1468 const unsigned status = uvm_pagegetdirty(pg);
1469 pg->flags &= ~PG_ANON;
1472 pg->uanon->an_page = NULL;
1473 pg->uanon = NULL;
1475 if (pg->pqflags & PQ_WANTED) {
1476 wakeup(pg);
1478 pg->pqflags &= ~PQ_WANTED;
1479 pg->flags &= ~(PG_BUSY|PG_RELEASED|PG_PAGER1);
1481 pg->owner_tag = NULL;
1483 KASSERT((pg->flags & PG_STAT) == 0);
1484 if (pg->loan_count) {
1485 KASSERT(pg->uobject == NULL);
1486 if (pg->uanon == NULL) {
1487 uvm_pagedequeue(pg);
1489 uvm_pageunlock(pg);
1492 } else if (pg->uobject != NULL || pg->uanon != NULL ||
1493 pg->wire_count != 0) {
1494 uvm_pagelock(pg);
1503 if (pg->uobject != NULL) {
1504 uvm_pageremove_object(pg->uobject, pg);
1505 } else if (pg->uanon != NULL) {
1506 const unsigned int status = uvm_pagegetdirty(pg);
1507 pg->uanon->an_page = NULL;
1508 pg->uanon = NULL;
1516 if (pg->wire_count) {
1517 pg->wire_count = 0;
1524 if ((pg->pqflags & PQ_WANTED) != 0) {
1525 pg->pqflags &= ~PQ_WANTED;
1526 wakeup(pg);
1532 uvm_pagedequeue(pg);
1533 uvm_pageunlock(pg);
1535 KASSERT(!uvmpdpol_pageisqueued_p(pg));
1543 pg->uobject = (void *)0xdeadbeef;
1544 pg->uanon = (void *)0xdeadbeef;
1550 bucket = uvm_page_get_bucket(pg);
1551 if (bucket == ucpu->pgflbucket && uvm_pgflcache_free(ucpu, pg)) {
1557 pgfl = &uvm.page_free[uvm_page_get_freelist(pg)];
1563 pg->flags = PG_FREE;
1564 LIST_INSERT_HEAD(&pgb->pgb_colors[VM_PGCOLOR(pg)], pg, pageq.list);
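As the KASSERTs at lines 1429-1431 show, uvm_pagefree() expects the owning object's vmobjlock (or the anon's an_lock) to be write-held, and the header comment at line 1407 assumes all valid mappings are already gone. A sketch of the usual sequence for an object-owned page; pmap_page_protect() is the conventional way to strip mappings and is an assumption here, not part of this listing.

        rw_enter(uobj->vmobjlock, RW_WRITER);
        pmap_page_protect(pg, VM_PROT_NONE);    /* remove all mappings first */
        uvm_pagefree(pg);                       /* page identity is gone after this */
        rw_exit(uobj->vmobjlock);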
1583 struct vm_page *pg;
1589 pg = pgs[i];
1590 if (pg == NULL || pg == PGO_DONTCARE) {
1594 KASSERT(uvm_page_owner_locked_p(pg, true));
1595 KASSERT(pg->flags & PG_BUSY);
1597 if (pg->flags & PG_PAGEOUT) {
1598 pg->flags &= ~PG_PAGEOUT;
1599 pg->flags |= PG_RELEASED;
1603 if (pg->flags & PG_RELEASED) {
1604 UVMHIST_LOG(ubchist, "releasing pg %#jx",
1605 (uintptr_t)pg, 0, 0, 0);
1606 KASSERT(pg->uobject != NULL ||
1607 (pg->uanon != NULL && pg->uanon->an_ref > 0));
1608 pg->flags &= ~PG_RELEASED;
1609 uvm_pagefree(pg);
1611 UVMHIST_LOG(ubchist, "unbusying pg %#jx",
1612 (uintptr_t)pg, 0, 0, 0);
1613 KASSERT((pg->flags & PG_FAKE) == 0);
1614 pg->flags &= ~PG_BUSY;
1615 uvm_pagelock(pg);
1616 uvm_pagewakeup(pg);
1617 uvm_pageunlock(pg);
1618 UVM_PAGE_OWN(pg, NULL);
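The unbusy loop above (uvm_page_unbusy() in stock NetBSD, an assumption since the definition line is not in this listing) takes an array of pages, as the pgs[i] accesses show, so the single-page case is just a one-element array. A sketch, assuming the owning object is locked as the owner check at line 1594 requires:

        struct vm_page *onepg[1] = { pg };

        uvm_page_unbusy(onepg, 1);      /* clears PG_BUSY and wakes any waiters */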
1635 uvm_pagewait(struct vm_page *pg, krwlock_t *lock, const char *wmesg)
1639 KASSERT((pg->flags & PG_BUSY) != 0);
1640 KASSERT(uvm_page_owner_locked_p(pg, false));
1642 mutex_enter(&pg->interlock);
1643 pg->pqflags |= PQ_WANTED;
1645 UVM_UNLOCK_AND_WAIT(pg, &pg->interlock, false, wmesg, 0);
1655 uvm_pagewakeup(struct vm_page *pg)
1659 KASSERT(mutex_owned(&pg->interlock));
1661 UVMHIST_LOG(ubchist, "waking pg %#jx", (uintptr_t)pg, 0, 0, 0);
1663 if ((pg->pqflags & PQ_WANTED) != 0) {
1664 wakeup(pg);
1665 pg->pqflags &= ~PQ_WANTED;
1676 uvm_pagewanted_p(struct vm_page *pg)
1679 KASSERT(uvm_page_owner_locked_p(pg, true));
1681 return (atomic_load_relaxed(&pg->pqflags) & PQ_WANTED) != 0;
1695 uvm_page_own(struct vm_page *pg, const char *tag)
1698 KASSERT((pg->flags & (PG_PAGEOUT|PG_RELEASED)) == 0);
1699 KASSERT(uvm_page_owner_locked_p(pg, true));
1703 KASSERT((pg->flags & PG_BUSY) != 0);
1704 if (pg->owner_tag) {
1706 "by proc %d.%d [%s]\n", pg,
1707 pg->owner, pg->lowner, pg->owner_tag);
1710 pg->owner = curproc->p_pid;
1711 pg->lowner = curlwp->l_lid;
1712 pg->owner_tag = tag;
1717 KASSERT((pg->flags & PG_BUSY) == 0);
1718 if (pg->owner_tag == NULL) {
1720 "page (%p)\n", pg);
1723 pg->owner_tag = NULL;
1737 struct vm_page *pg;
1741 pg = radix_tree_lookup_node(&obj->uo_pages, off >> PAGE_SHIFT);
1743 KASSERT(pg == NULL || obj->uo_npages != 0);
1744 KASSERT(pg == NULL || (pg->flags & (PG_RELEASED|PG_PAGEOUT)) == 0 ||
1745 (pg->flags & PG_BUSY) != 0);
1746 return pg;
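Together, uvm_pagelookup() and uvm_pagewait() give the usual wait-for-busy-page loop. uvm_pagewait() sleeps via UVM_UNLOCK_AND_WAIT (line 1645) and, by assumption here, releases the passed lock without re-taking it, so the caller must re-lock and re-look the page up. A sketch with a hypothetical uobj and off:

        struct vm_page *pg;

        rw_enter(uobj->vmobjlock, RW_WRITER);
        for (;;) {
                pg = uvm_pagelookup(uobj, off);
                if (pg == NULL || (pg->flags & PG_BUSY) == 0)
                        break;                  /* absent or no longer busy */
                /* drops vmobjlock (and pg->interlock) while sleeping */
                uvm_pagewait(pg, uobj->vmobjlock, "pgwait");
                rw_enter(uobj->vmobjlock, RW_WRITER);
        }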
1753 * => caller must hold pg->interlock
1757 uvm_pagewire(struct vm_page *pg)
1760 KASSERT(uvm_page_owner_locked_p(pg, true));
1761 KASSERT(mutex_owned(&pg->interlock));
1763 if ((pg->flags & PG_READAHEAD) != 0) {
1765 pg->flags &= ~PG_READAHEAD;
1768 if (pg->wire_count == 0) {
1769 uvm_pagedequeue(pg);
1772 pg->wire_count++;
1773 KASSERT(pg->wire_count > 0); /* detect wraparound */
1781 * => caller must hold pg->interlock
1785 uvm_pageunwire(struct vm_page *pg)
1788 KASSERT(uvm_page_owner_locked_p(pg, true));
1789 KASSERT(pg->wire_count != 0);
1790 KASSERT(!uvmpdpol_pageisqueued_p(pg));
1791 KASSERT(mutex_owned(&pg->interlock));
1792 pg->wire_count--;
1793 if (pg->wire_count == 0) {
1794 uvm_pageactivate(pg);
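Both uvm_pagewire() and uvm_pageunwire() insist on pg->interlock (lines 1761, 1791), which callers normally take with uvm_pagelock()/uvm_pageunlock(). A minimal sketch:

        uvm_pagelock(pg);
        uvm_pagewire(pg);       /* dequeues the page when wire_count goes 0 -> 1 */
        uvm_pageunlock(pg);

        /* ... later, when the wiring is no longer needed ... */

        uvm_pagelock(pg);
        uvm_pageunwire(pg);     /* reactivates the page when wire_count hits 0 */
        uvm_pageunlock(pg);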
1805 * => object that page belongs to must be locked (so we can adjust pg->flags)
1807 * => caller must hold pg->interlock
1811 uvm_pagedeactivate(struct vm_page *pg)
1814 KASSERT(uvm_page_owner_locked_p(pg, false));
1815 KASSERT(mutex_owned(&pg->interlock));
1816 if (pg->wire_count == 0) {
1817 KASSERT(uvmpdpol_pageisqueued_p(pg));
1818 uvmpdpol_pagedeactivate(pg);
1826 * => caller must hold pg->interlock
1830 uvm_pageactivate(struct vm_page *pg)
1833 KASSERT(uvm_page_owner_locked_p(pg, false));
1834 KASSERT(mutex_owned(&pg->interlock));
1836 if ((pg->flags & PG_READAHEAD) != 0) {
1838 pg->flags &= ~PG_READAHEAD;
1841 if (pg->wire_count == 0) {
1842 uvmpdpol_pageactivate(pg);
1850 * => caller must hold pg->interlock
1853 uvm_pagedequeue(struct vm_page *pg)
1856 KASSERT(uvm_page_owner_locked_p(pg, true));
1857 KASSERT(mutex_owned(&pg->interlock));
1858 if (uvmpdpol_pageisqueued_p(pg)) {
1859 uvmpdpol_pagedequeue(pg);
1868 * => caller must hold pg->interlock
1871 uvm_pageenqueue(struct vm_page *pg)
1874 KASSERT(uvm_page_owner_locked_p(pg, false));
1875 KASSERT(mutex_owned(&pg->interlock));
1876 if (pg->wire_count == 0 && !uvmpdpol_pageisqueued_p(pg)) {
1877 uvmpdpol_pageenqueue(pg);
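The queue operations above (deactivate, activate, dequeue, enqueue) all require the owner's lock plus pg->interlock, so a typical call site brackets them with uvm_pagelock()/uvm_pageunlock(); for example, to note recent use of a page:

        uvm_pagelock(pg);
        uvm_pageactivate(pg);   /* skipped queue-wise if the page is wired */
        uvm_pageunlock(pg);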
1885 uvm_pagelock(struct vm_page *pg)
1888 mutex_enter(&pg->interlock);
1911 pg->interlock
1914 uvm_pageunlock(struct vm_page *pg)
1917 if ((pg->pqflags & PQ_INTENT_SET) == 0 ||
1918 (pg->pqflags & PQ_INTENT_QUEUED) != 0) {
1919 mutex_exit(&pg->interlock);
1922 pg->pqflags |= PQ_INTENT_QUEUED;
1923 mutex_exit(&pg->interlock);
1924 uvmpdpol_pagerealize(pg);
1932 * => caller must hold pg->interlock
1968 * to protect pg->flags.
1972 uvm_pagezero(struct vm_page *pg)
1975 uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
1976 pmap_zero_page(VM_PAGE_TO_PHYS(pg));
1983 * to protect pg->flags.
2010 uvm_page_lookup_freelist(struct vm_page *pg)
2014 upm = uvm_physseg_find(atop(VM_PAGE_TO_PHYS(pg)), NULL);
2025 uvm_page_owner_locked_p(struct vm_page *pg, bool exclusive)
2028 if (pg->uobject != NULL) {
2030 ? rw_write_held(pg->uobject->vmobjlock)
2031 : rw_lock_held(pg->uobject->vmobjlock);
2033 if (pg->uanon != NULL) {
2035 ? rw_write_held(pg->uanon->an_lock)
2036 : rw_lock_held(pg->uanon->an_lock);
2046 uvm_pagereadonly_p(struct vm_page *pg)
2048 struct uvm_object * const uobj = pg->uobject;
2051 KASSERT(uobj != NULL || rw_lock_held(pg->uanon->an_lock));
2052 if ((pg->flags & PG_RDONLY) != 0) {
2055 if (uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_CLEAN) {
2078 struct vm_page *pg;
2084 pg = pgs[i];
2092 KASSERT(pg != NULL);
2093 KASSERT(pg != PGO_DONTCARE);
2095 pa = VM_PAGE_TO_PHYS(pg);
2121 uvm_page_printit(struct vm_page *pg, bool full,
2130 (*pr)("PAGE %p:\n", pg);
2131 snprintb(pgbuf, sizeof(pgbuf), page_flagbits, pg->flags);
2133 snprintb(pgbuf, sizeof(pgbuf), page_pqflagbits, pg->pqflags);
2136 pg->uobject, pg->uanon, (long long)pg->offset);
2138 pg->loan_count, pg->wire_count, uvm_page_get_bucket(pg),
2139 uvm_page_get_freelist(pg));
2140 (*pr)(" pa=0x%lx\n", (long)VM_PAGE_TO_PHYS(pg));
2142 if (pg->flags & PG_BUSY)
2144 pg->owner, pg->lowner, pg->owner_tag);
2155 if ((pg->flags & PG_FREE) == 0) {
2156 if (pg->flags & PG_ANON) {
2157 if (pg->uanon == NULL || pg->uanon->an_page != pg)
2159 (pg->uanon) ? pg->uanon->an_page : NULL);
2163 uobj = pg->uobject;
2166 tpg = uvm_pagelookup(uobj, pg->offset);
2176 if (pg->flags & PG_FREE) {
2177 int fl = uvm_page_get_freelist(pg);
2178 int b = uvm_page_get_bucket(pg);
2180 pgl = &pgb->pgb_colors[VM_PGCOLOR(pg)];
2183 if (tpg == pg) {
2203 struct vm_page *pg;
2216 pg = PHYS_TO_VM_PAGE(ptoa(pfn));
2219 pg, pg->flags, pg->pqflags, pg->uobject,
2220 pg->uanon);
2222 if (pg->flags & PG_BUSY)
2223 (*pr)(" %d [%s]", pg->owner, pg->owner_tag);