Lines Matching refs:pg
61 * loans are tracked by pg->loan_count. an O->A page will have both
65 * each loan of a page to the kernel bumps the pg->wire_count. the
80 * locking: to read pg->loan_count either the owner or pg->interlock
81 * must be locked. to modify pg->loan_count, both the owner of the page
82 * and pg->interlock must be locked. pg->flags is (as always) locked by
88 * from dying pg->interlock should be locked. this forces us to sometimes
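The comment fragments above (file lines 61-88) give the rule that the rest of this listing keeps applying: pg->loan_count may be read with either the owning uobject's/anon's lock or pg->interlock held, but changing it needs both. Below is a minimal sketch of that update step, assuming the usual <uvm/uvm.h> environment, that the caller already holds the owner's lock, and that uvm_pagelock()/uvm_pageunlock() take and drop pg->interlock (the raw mutex_enter(&pg->interlock) form at file lines 862-865 is the same idea); loan_bump() is a name made up for this sketch, not a function in the file.

/*
 * Sketch only, not in the source: the caller holds the owning uobject's
 * vmobjlock or the anon's an_lock, so both required locks end up held.
 */
static void
loan_bump(struct vm_page *pg)
{

        /* first loan: revoke write access so no owner mapping can
           change the page while borrowers are looking at it */
        if (pg->loan_count == 0)
                pmap_page_protect(pg, VM_PROT_READ);

        uvm_pagelock(pg);               /* pg->interlock */
        pg->loan_count++;               /* owner lock + interlock held */
        KASSERT(pg->loan_count > 0);    /* detect wrap-around */
        uvm_pageactivate(pg);           /* loaned pages stay on the active queue */
        uvm_pageunlock(pg);
}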
346 struct vm_page *pg;
359 pg = anon->an_page;
360 if (pg && (pg->flags & PG_ANON) != 0 && anon->an_ref == 1) {
361 if (pg->wire_count > 0) {
363 (uintptr_t)pg, 0, 0, 0);
369 pmap_page_protect(pg, VM_PROT_READ);
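File lines 346-369 are the loan-to-anon case of uvm_loananon(): the page only needs work when it is resident, owned by the anon (PG_ANON) and the anon has a single reference, in which case it must be write-protected before the anon becomes shared; a wired page stops the loan. The rest of that case (handing out a reference to the anon itself rather than touching pg) does not show up in a pg-only listing. A hedged sketch of just the visible check; the function name and the bool return are inventions of the sketch.

/* Sketch: caller holds anon->an_lock. */
static bool
toanon_prepare(struct vm_anon *anon)
{
        struct vm_page *pg = anon->an_page;

        if (pg && (pg->flags & PG_ANON) != 0 && anon->an_ref == 1) {
                if (pg->wire_count > 0)
                        return false;   /* wired pages are never loaned */
                /* the anon is about to be shared: drop write access */
                pmap_page_protect(pg, VM_PROT_READ);
        }
        return true;
}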
415 pg = anon->an_page;
416 if (pg->wire_count > 0) {
417 UVMHIST_LOG(loanhist, "->K wired %#jx", (uintptr_t)pg, 0, 0, 0);
418 KASSERT(pg->uobject == NULL);
422 if (pg->loan_count == 0) {
423 pmap_page_protect(pg, VM_PROT_READ);
425 uvm_pagelock(pg);
426 pg->loan_count++;
427 KASSERT(pg->loan_count > 0); /* detect wrap-around */
428 uvm_pageactivate(pg);
429 uvm_pageunlock(pg);
430 **output = pg;
434 if (pg->uobject)
435 rw_exit(pg->uobject->vmobjlock);
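File lines 415-435 are the loan-to-kernel case of uvm_loananon(): a wired anon page is refused (and, per the KASSERT, such a page cannot also belong to an object); otherwise the protect/bump/activate step runs and the page is handed back through the caller's output cursor, dropping the uobject's lock as well when the page is an O->A page. A condensed sketch reusing the hypothetical loan_bump() from above; the output type, the cursor advance and the error value are assumptions (the real parameter can also carry anons).

/* Sketch: anon->an_lock held, pg = anon->an_page is resident. */
static int
tokernel_sketch(struct vm_anon *anon, struct vm_page ***output)
{
        struct vm_page *pg = anon->an_page;

        if (pg->wire_count > 0) {
                /* a wired anon page never has a uobject as well */
                KASSERT(pg->uobject == NULL);
                return EBUSY;                   /* assumed error value */
        }
        loan_bump(pg);                          /* protect, ++loan_count, activate */
        **output = pg;
        (*output)++;                            /* assumed: advance the cursor */
        if (pg->uobject)                        /* O->A page: drop the object lock too */
                rw_exit(pg->uobject->vmobjlock);
        return 0;
}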
457 struct vm_page *pg = pgpp[i];
459 KASSERT(pg->uobject != NULL);
460 KASSERT(pg->uobject == pgpp[0]->uobject);
461 KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
462 KASSERT(rw_write_held(pg->uobject->vmobjlock));
463 KASSERT(busied == ((pg->flags & PG_BUSY) != 0));
465 if (pg->wire_count > 0) {
466 UVMHIST_LOG(loanhist, "wired %#jx", (uintptr_t)pg,
471 if (pg->loan_count == 0) {
472 pmap_page_protect(pg, VM_PROT_READ);
474 uvm_pagelock(pg);
475 pg->loan_count++;
476 KASSERT(pg->loan_count > 0); /* detect wrap-around */
477 uvm_pageactivate(pg);
478 uvm_pageunlock(pg);
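File lines 457-478 are the per-page body of uvm_loanpage(), which loans an array of resident uobject pages to the kernel: every page must belong to the same object as pgpp[0], must not be released or in pageout, must match the caller's busy expectation, and the object lock must be write-held; a wired page aborts the whole loan, anything else gets the usual protect/bump/activate. A condensed sketch of the loop, again using the hypothetical loan_bump(); the EBUSY value and the unwinding of pages already loaned are assumptions left to the caller here.

/* Sketch: pgpp[0..npages-1] are resident pages of one uobject. */
static int
loanpage_sketch(struct vm_page **pgpp, int npages, bool busied)
{
        int i;

        for (i = 0; i < npages; i++) {
                struct vm_page *pg = pgpp[i];

                KASSERT(pg->uobject != NULL);
                KASSERT(pg->uobject == pgpp[0]->uobject);
                KASSERT(!(pg->flags & (PG_RELEASED|PG_PAGEOUT)));
                KASSERT(rw_write_held(pg->uobject->vmobjlock));
                KASSERT(busied == ((pg->flags & PG_BUSY) != 0));

                if (pg->wire_count > 0)
                        return EBUSY;   /* caller must undo the loans already taken */
                loan_bump(pg);          /* protect, ++loan_count, activate */
        }
        return 0;
}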
602 struct vm_page *pg;
620 pg = NULL;
623 &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
649 &pg, &npages, 0, VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
667 uobj = pg->uobject;
676 if ((pg->flags & PG_RELEASED) != 0 ||
688 if ((pg->flags & PG_RELEASED) == 0) {
689 uvm_pagelock(pg);
690 uvm_pagewakeup(pg);
691 uvm_pageunlock(pg);
692 pg->flags &= ~PG_BUSY;
693 UVM_PAGE_OWN(pg, NULL);
701 if (pg->flags & PG_RELEASED) {
702 uvm_pagefree(pg);
714 KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
717 * at this point we have the page we want ("pg") and we have
723 if (uvm_loanpage(&pg, 1, false)) {
728 **output = pg;
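File lines 602-728 are the lookup half of uvm_loanuobj(): the page is first asked for with PGO_LOCKED (no sleeping, object stays locked); on a miss a second pgo_get with PGO_SYNCIO performs the I/O and comes back with the object unlocked and the page busy, so the object lock is retaken, a page that picked up PG_RELEASED in the meantime is freed and the lookup redone, and the surviving page is woken and unbusied before being fed to uvm_loanpage(&pg, 1, false). The sketch below condenses that flow; the offset argument, the retry label and the error handling are glue filled in around what the listing shows (the real function makes its caller refault rather than looping internally).

/* Sketch: uobj->vmobjlock write-held on entry; "offset" naming assumed. */
static struct vm_page *
loanuobj_getpage(struct uvm_object *uobj, voff_t offset)
{
        struct vm_page *pg;
        int npages, error;

again:
        npages = 1;
        pg = NULL;
        error = (*uobj->pgops->pgo_get)(uobj, offset, &pg, &npages, 0,
            VM_PROT_READ, MADV_NORMAL, PGO_LOCKED);
        if (error) {
                /* miss: do the I/O; pgo_get drops the object lock for this */
                npages = 1;
                error = (*uobj->pgops->pgo_get)(uobj, offset, &pg, &npages, 0,
                    VM_PROT_READ, MADV_NORMAL, PGO_SYNCIO);
                if (error)
                        return NULL;
                rw_enter(uobj->vmobjlock, RW_WRITER);   /* relock the object */
                if (pg->flags & PG_RELEASED) {
                        /* the pager released it while we slept: toss and retry */
                        uvm_pagefree(pg);
                        goto again;
                }
                /* done sleeping on it: wake waiters and clear PG_BUSY */
                uvm_pagelock(pg);
                uvm_pagewakeup(pg);
                uvm_pageunlock(pg);
                pg->flags &= ~PG_BUSY;
                UVM_PAGE_OWN(pg, NULL);
        }
        KASSERT(uobj->vmobjlock == pg->uobject->vmobjlock);
        return pg;      /* caller goes on to uvm_loanpage(&pg, 1, false) */
}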
741 if (pg->uanon) {
743 anon = pg->uanon;
745 uvm_pagelock(pg);
746 uvm_pagewakeup(pg);
747 uvm_pageunlock(pg);
748 pg->flags &= ~PG_BUSY;
749 UVM_PAGE_OWN(pg, NULL);
764 if (pg->wire_count > 0) {
765 UVMHIST_LOG(loanhist, "wired %#jx", (uintptr_t)pg, 0, 0, 0);
768 if (pg->loan_count == 0) {
769 pmap_page_protect(pg, VM_PROT_READ);
771 uvm_pagelock(pg);
772 pg->loan_count++;
773 KASSERT(pg->loan_count > 0); /* detect wrap-around */
774 pg->uanon = anon;
775 anon->an_page = pg;
777 uvm_pageactivate(pg);
778 uvm_pagewakeup(pg);
779 uvm_pageunlock(pg);
780 pg->flags &= ~PG_BUSY;
781 UVM_PAGE_OWN(pg, NULL);
793 uvm_pagelock(pg);
794 uvm_pagewakeup(pg);
795 uvm_pageunlock(pg);
796 pg->flags &= ~PG_BUSY;
797 UVM_PAGE_OWN(pg, NULL);
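File lines 741-797 are the loan-to-anon half of uvm_loanuobj(). A page that was loaned to an anon before simply reuses that anon (only its wakeup/unbusy shows up in the listing), a wired page is refused, and a fresh loan write-protects the page, bumps loan_count, cross-links the page and the new anon, then activates, wakes and unbusies it; the trailing wakeup/unbusy at file lines 793-797 looks like the failure path doing the same cleanup without the loan. A sketch of the fresh-anon case only; where the anon comes from and how the wired case bails out are outside this listing.

        /* Sketch: uobj->vmobjlock and anon->an_lock held, pg busy, not wired. */
        if (pg->loan_count == 0)
                pmap_page_protect(pg, VM_PROT_READ);
        uvm_pagelock(pg);
        pg->loan_count++;
        KASSERT(pg->loan_count > 0);    /* detect wrap-around */
        pg->uanon = anon;               /* O->A: page and anon now know each other */
        anon->an_page = pg;
        uvm_pageactivate(pg);
        uvm_pagewakeup(pg);             /* wake anyone waiting on PG_BUSY */
        uvm_pageunlock(pg);
        pg->flags &= ~PG_BUSY;
        UVM_PAGE_OWN(pg, NULL);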
824 struct vm_page *pg;
835 pg = uvm_pagelookup(&uvm_loanzero_object, 0);
836 if (__predict_false(pg == NULL)) {
837 while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
852 pg->flags &= ~(PG_BUSY|PG_FAKE);
853 pg->flags |= PG_RDONLY;
854 uvm_pagelock(pg);
855 uvm_pageactivate(pg);
856 uvm_pagewakeup(pg);
857 uvm_pageunlock(pg);
858 UVM_PAGE_OWN(pg, NULL);
862 mutex_enter(&pg->interlock);
863 pg->loan_count++;
864 KASSERT(pg->loan_count > 0); /* detect wrap-around */
865 mutex_exit(&pg->interlock);
867 **output = pg;
879 if (pg->uanon) {
880 anon = pg->uanon;
901 anon->an_page = pg;
902 pg->uanon = anon;
903 uvm_pagelock(pg);
904 pg->loan_count++;
905 KASSERT(pg->loan_count > 0); /* detect wrap-around */
906 uvm_pageactivate(pg);
907 uvm_pageunlock(pg);
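File lines 824-907 are uvm_loanzero(): every zero-fill loan shares a single page, page 0 of the dedicated uvm_loanzero_object. On first use it is allocated (the allocation flags are truncated in the listing; the pre-zeroed page and the wait loop below are assumptions), unbusied, marked PG_RDONLY and activated. A kernel loan then only bumps loan_count under the raw interlock mutex; an anon loan cross-links the anon and the page before bumping. A sketch of both endings; the locking context and the wait channel name are assumptions as well.

        /* Sketch: uvm_loanzero_object's vmobjlock held. */
        pg = uvm_pagelookup(&uvm_loanzero_object, 0);
        if (pg == NULL) {
                /* first use: set up the shared zero page (alloc flags assumed) */
                while ((pg = uvm_pagealloc(&uvm_loanzero_object, 0, NULL,
                    UVM_PGA_ZERO)) == NULL) {
                        rw_exit(uvm_loanzero_object.vmobjlock);
                        uvm_wait("loanzero");           /* assumed wmesg */
                        rw_enter(uvm_loanzero_object.vmobjlock, RW_WRITER);
                }
                pg->flags &= ~(PG_BUSY|PG_FAKE);
                pg->flags |= PG_RDONLY;                 /* nobody ever writes it */
                uvm_pagelock(pg);
                uvm_pageactivate(pg);
                uvm_pagewakeup(pg);
                uvm_pageunlock(pg);
                UVM_PAGE_OWN(pg, NULL);
        }

        /* loan to the kernel: just one more loan reference */
        mutex_enter(&pg->interlock);
        pg->loan_count++;
        KASSERT(pg->loan_count > 0);                    /* detect wrap-around */
        mutex_exit(&pg->interlock);
        **output = pg;

        /* a loan to an anon instead links the two before bumping: */
        anon->an_page = pg;
        pg->uanon = anon;
        uvm_pagelock(pg);
        pg->loan_count++;
        KASSERT(pg->loan_count > 0);
        uvm_pageactivate(pg);
        uvm_pageunlock(pg);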
952 struct vm_page *pg;
956 pg = *ploans++;
964 mutex_enter(&pg->interlock);
966 while (pg->uobject != NULL || pg->uanon != NULL) {
967 if (pg->uobject != NULL) {
968 slock = pg->uobject->vmobjlock;
970 slock = pg->uanon->an_lock;
976 kpause("livelock", false, 1, &pg->interlock);
991 KASSERT(pg->loan_count > 0);
992 pg->loan_count--;
993 if (pg->uobject == NULL && pg->uanon != NULL &&
994 (pg->flags & PG_ANON) == 0) {
995 KASSERT(pg->loan_count > 0);
996 pg->loan_count--;
997 pg->flags |= PG_ANON;
999 mutex_exit(&pg->interlock);
1000 if (pg->loan_count == 0 && pg->uobject == NULL &&
1001 pg->uanon == NULL) {
1002 KASSERT((pg->flags & PG_BUSY) == 0);
1003 uvm_pagefree(pg);
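File lines 952-1003 are the core of uvm_unloanpage(), which drops kernel loans: pg->interlock is taken first, then the current owner's lock (uobject or anon) must be acquired as well; since that is the reverse of the normal lock order, a failed try-lock is handled by sleeping one tick on the interlock ("livelock") and looking again, because the owner can change in the meantime. With the owner pinned (or no owner left) the loan is dropped; if the object has gone away but an anon still holds the page and PG_ANON is not yet set, ownership moves to the anon, which consumes a second loan reference and sets PG_ANON; a page with no loans and no owner left is freed. A sketch for one page; the try-lock call and the unlock bookkeeping are condensed assumptions.

        /* Sketch: pg is one entry of the ploans[] array. */
        krwlock_t *slock = NULL;

        mutex_enter(&pg->interlock);
        while (pg->uobject != NULL || pg->uanon != NULL) {
                slock = (pg->uobject != NULL) ?
                    pg->uobject->vmobjlock : pg->uanon->an_lock;
                if (rw_tryenter(slock, RW_WRITER))
                        break;                  /* owner's lock acquired */
                slock = NULL;
                /* reverse lock order: back off a tick and re-check the owner */
                kpause("livelock", false, 1, &pg->interlock);
        }

        KASSERT(pg->loan_count > 0);
        pg->loan_count--;                       /* drop the kernel loan */
        if (pg->uobject == NULL && pg->uanon != NULL &&
            (pg->flags & PG_ANON) == 0) {
                /* the object is gone: the anon becomes the owner */
                KASSERT(pg->loan_count > 0);
                pg->loan_count--;
                pg->flags |= PG_ANON;
        }
        mutex_exit(&pg->interlock);
        if (pg->loan_count == 0 && pg->uobject == NULL && pg->uanon == NULL) {
                /* nobody owns or borrows the page any more: free it */
                KASSERT((pg->flags & PG_BUSY) == 0);
                uvm_pagefree(pg);
        }
        if (slock != NULL)
                rw_exit(slock);                 /* release the owner's lock */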
1035 struct vm_page *pg;
1053 pg = uvm_pagelookup(uobj, 0);
1054 KASSERT(pg != NULL);
1056 uvm_pagelock(pg);
1057 if (pg->uanon) {
1058 uvm_pageactivate(pg);
1060 uvm_pagedequeue(pg);
1062 uvm_pageunlock(pg);
1098 struct vm_page *pg;
1105 pg = uvm_pagealloc(NULL, 0, NULL, 0);
1106 if (pg == NULL)
1117 uvm_pagecopy(uobjpage, pg); /* old -> new */
1118 pg->flags &= ~PG_FAKE;
1119 KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);
1133 uvm_pagelock2(uobjpage, pg);
1142 uvm_pagereplace(uobjpage, pg);
1149 uvm_pageactivate(pg);
1150 uvm_pageunlock2(uobjpage, pg);
1153 * done! loan is broken and "pg" is
1157 return pg;
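File lines 1098-1157 are uvm_loanbreak(), which un-shares an object page that is about to be written: a fresh page is allocated, the loaned page's contents are copied into it, the copy is marked no-longer-fake (and is dirty by construction), and with both pages' interlocks held the new page replaces the old one at its offset in the object and is activated; the old page stays with whoever borrowed it and the new page is the one the caller writes to. A sketch; allocation retry, PG_BUSY handling and the pmap work on the old page are omitted.

/* Sketch: uobj->vmobjlock write-held, uobjpage busy and loaned. */
static struct vm_page *
loanbreak_sketch(struct vm_page *uobjpage)
{
        struct vm_page *pg;

        pg = uvm_pagealloc(NULL, 0, NULL, 0);   /* not attached to any object yet */
        if (pg == NULL)
                return NULL;                    /* caller waits for memory, retries */

        uvm_pagecopy(uobjpage, pg);             /* old -> new */
        pg->flags &= ~PG_FAKE;                  /* the copy has real contents now */
        KASSERT(uvm_pagegetdirty(pg) == UVM_PAGE_STATUS_DIRTY);

        uvm_pagelock2(uobjpage, pg);            /* both pages' interlocks */
        uvm_pagereplace(uobjpage, pg);          /* new page takes the old one's slot */
        uvm_pageactivate(pg);
        uvm_pageunlock2(uobjpage, pg);

        /* done: the loan is broken and "pg" is the object's page from here on */
        return pg;
}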