Lines Matching defs:flt — occurrences of the per-fault context flt (struct uvm_faultctx) in what appears to be NetBSD's sys/uvm/uvm_fault.c; the leading number on each line is its line number in that file.
837 struct uvm_faultctx flt = {
901 error = uvm_fault_check(&ufi, &flt, &anons, maxprot);
905 error = uvm_fault_upper_lookup(&ufi, &flt, anons, pages);
909 if (pages[flt.centeridx] == PGO_DONTCARE)
910 error = uvm_fault_upper(&ufi, &flt, anons);
922 flt.startva, pages, flt.npages,
923 flt.centeridx, flt.access_type,
946 error = uvm_fault_lower(&ufi, &flt, pages);
951 if (flt.anon_spare != NULL) {
952 flt.anon_spare->an_ref--;
953 KASSERT(flt.anon_spare->an_ref == 0);
954 KASSERT(flt.anon_spare->an_lock == NULL);
955 uvm_anfree(flt.anon_spare);
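For orientation, the flt fields referenced throughout this listing imply a per-fault context roughly like the sketch below. This is a reconstruction from the matches above, not the file's authoritative definition; field order and the exact types are assumptions (krw_t for the lock types follows the RW_WRITER assignments at lines 1045-1046).

    /* Sketch of the per-fault context, reconstructed from the matches above. */
    struct uvm_faultctx {
            vm_prot_t access_type;        /* access being attempted (923) */
            vm_prot_t enter_prot;         /* protection to map with (1029) */
            vaddr_t startva;              /* first address of the range (1123) */
            int npages;                   /* pages in the range (1131) */
            int centeridx;                /* index of the faulting page (1132) */
            bool narrow;                  /* handle only the center page (1033) */
            bool wire_mapping;            /* wire the new mapping (1031) */
            bool wire_paging;             /* wire the pages themselves (1032) */
            bool cow_now;                 /* must copy-on-write now (1038/1040) */
            bool promote;                 /* promote data to a new anon (1049) */
            krw_t upper_lock_type;        /* amap lock: RW_READER/RW_WRITER (1045) */
            krw_t lower_lock_type;        /* object lock type (1046) */
            struct vm_anon *anon_spare;   /* leftover anon from a promote (951) */
    };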
973 * => initialize/adjust many members of flt.
978 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1014 if ((check_prot & flt->access_type) != flt->access_type) {
1017 ufi->entry->protection, flt->access_type, 0, 0);
1029 flt->enter_prot = ufi->entry->protection;
1031 flt->wire_mapping = true;
1032 flt->wire_paging = true;
1033 flt->narrow = true;
1036 if (flt->wire_mapping) {
1037 flt->access_type = flt->enter_prot; /* full access for wired */
1038 flt->cow_now = (check_prot & VM_PROT_WRITE) != 0;
1040 flt->cow_now = (flt->access_type & VM_PROT_WRITE) != 0;
1043 if (flt->wire_paging) {
1045 flt->upper_lock_type = RW_WRITER;
1046 flt->lower_lock_type = RW_WRITER;
1049 flt->promote = false;
1059 if (flt->cow_now || (ufi->entry->object.uvm_obj == NULL)) {
1076 flt->enter_prot &= ~VM_PROT_WRITE;
1104 (flt->access_type & VM_PROT_WRITE) != 0) {
1106 flt->narrow = true;
1116 if (flt->narrow == false) {
1123 flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
1131 flt->npages = nback + nforw + 1;
1132 flt->centeridx = nback;
1134 flt->narrow = true; /* ensure only once per-fault */
1140 flt->startva = ufi->orig_rvaddr;
1141 flt->npages = 1;
1142 flt->centeridx = 0;
1146 const voff_t eoff = flt->startva - ufi->entry->start;
1150 flt->narrow, nback, nforw, flt->startva);
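Lines 1116-1142 compute the wide/narrow fault range. A plausible reconstruction of that block, assuming the usual uvmadvice lookahead table and MIN clamping against the map entry bounds (both assumptions beyond what the listing shows):

    if (flt->narrow == false) {
            /* Wide fault: map neighbors per the entry's advice. */
            nback = MIN(uvmadvice[ufi->entry->advice].nback,
                (ufi->orig_rvaddr - ufi->entry->start) >> PAGE_SHIFT);
            flt->startva = ufi->orig_rvaddr - (nback << PAGE_SHIFT);
            /* "-1": the faulting page itself is not a forward neighbor. */
            nforw = MIN(uvmadvice[ufi->entry->advice].nforw,
                ((ufi->entry->end - ufi->orig_rvaddr) >> PAGE_SHIFT) - 1);
            flt->npages = nback + nforw + 1;
            flt->centeridx = nback;
            flt->narrow = true;     /* ensure only once per-fault */
    } else {
            /* Narrow fault: just the faulting page. */
            nback = nforw = 0;
            flt->startva = ufi->orig_rvaddr;
            flt->npages = 1;
            flt->centeridx = 0;
    }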
1168 flt->upper_lock_type = RW_WRITER;
1169 } else if ((flt->access_type & VM_PROT_WRITE) != 0) {
1173 flt->upper_lock_type = RW_WRITER;
1175 amap_lock(amap, flt->upper_lock_type);
1176 amap_lookups(&ufi->entry->aref, eoff, *ranons, flt->npages);
1178 if ((flt->access_type & VM_PROT_WRITE) != 0) {
1183 flt->lower_lock_type = RW_WRITER;
1190 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1213 flt->lower_lock_type = RW_WRITER;
1223 flt->startva += (nback << PAGE_SHIFT);
1224 flt->npages -= nback;
1225 flt->centeridx = 0;
1231 KASSERT(flt->startva <= ufi->orig_rvaddr);
1233 flt->startva + (flt->npages << PAGE_SHIFT));
1242 uvm_fault_upper_upgrade(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1248 KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
1254 if (__predict_true(flt->upper_lock_type == RW_WRITER)) {
1264 flt->upper_lock_type = RW_WRITER;
1272 KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
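Lines 1242-1272 implement a reader-to-writer upgrade of the amap lock. A minimal sketch of the pattern (the function name here is hypothetical), assuming NetBSD's rw_tryupgrade(9) and a three-argument uvmfault_unlockall() helper, which is an assumption about the unlock path:

    static inline int
    upper_upgrade_sketch(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
        struct vm_amap *amap, struct uvm_object *uobj)
    {
            KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));

            /* Fast path: we already hold the writer lock. */
            if (__predict_true(flt->upper_lock_type == RW_WRITER))
                    return 0;

            /*
             * Record the writer requirement before trying the upgrade
             * (line 1264): if rw_tryupgrade() fails we restart the
             * fault, and the retry then takes the amap lock as
             * RW_WRITER from the start instead of looping as a reader.
             */
            flt->upper_lock_type = RW_WRITER;
            if (__predict_false(!rw_tryupgrade(amap->am_lock))) {
                    uvmfault_unlockall(ufi, amap, uobj);
                    return ERESTART;
            }
            return 0;
    }

The same shape recurs for the lower (object) lock at lines 1807-1838.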
1289 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1301 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1309 currva = flt->startva;
1312 for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
1326 if (lcv == flt->centeridx) { /* save center for later! */
1343 uvm_fault_upper_neighbor(ufi, flt, currva,
1354 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1370 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1379 KASSERT(rw_lock_op(pg->uanon->an_lock) == flt->upper_lock_type);
1406 readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
1407 flt->enter_prot,
1408 PMAP_CANFAIL | (flt->wire_mapping ? PMAP_WIRED : 0));
1422 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1426 struct vm_anon * const anon = anons[flt->centeridx];
1432 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1470 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1488 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1491 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1498 error = uvm_fault_upper_loan(ufi, flt, anon, &uobj);
1516 if (flt->cow_now && anon->an_ref > 1) {
1517 flt->promote = true;
1518 error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
1520 error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
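The branch at lines 1516-1520 is the upper-layer copy-on-write decision. Restated as a sketch with the reasoning spelled out in comments (the an_ref semantics are as suggested by the KASSERTs at lines 952-953):

    /*
     * A write fault on an anon with more than one reference must not
     * modify the shared page in place: promote instead, copying the
     * data into a fresh anon private to this fault.  A sole owner
     * (an_ref == 1) can take the direct, copy-free path.
     */
    if (flt->cow_now && anon->an_ref > 1) {
            flt->promote = true;
            error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
    } else {
            error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
    }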
1528 * 1. if not cow'ing now, simply adjust flt->enter_prot.
1534 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1541 if (!flt->cow_now) {
1548 flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
1567 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1598 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1610 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1619 &flt->anon_spare);
1643 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1652 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1662 flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
1664 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1673 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1683 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1687 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1696 (uintptr_t)pmap, va, (uintptr_t)pg, flt->promote);
1698 flt->enter_prot, flt->access_type | PMAP_CANFAIL |
1699 (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
1748 uvm_fault_upper_done(ufi, flt, anon, pg);
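Lines 1696-1699 enter the mapping with PMAP_CANFAIL, so pmap-level resource exhaustion is survivable. A sketch of the failure handling, assuming the usual wait-and-restart response (uvm_wait() and the wchan string are assumptions here):

    if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
        VM_PAGE_TO_PHYS(pg), flt->enter_prot,
        flt->access_type | PMAP_CANFAIL |
        (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
            /*
             * The pmap ran out of resources.  Nothing to undo: losing
             * the mapping is harmless, the fault will simply recur.
             * Drop all locks, wait for free memory, and restart.
             */
            uvmfault_unlockall(ufi, amap, uobj);
            uvm_wait("flt_pmfail");
            return ERESTART;
    }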
1765 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1768 const bool wire_paging = flt->wire_paging;
1807 uvm_fault_lower_upgrade(struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1814 KASSERT(flt->lower_lock_type == rw_lock_op(uobj->vmobjlock));
1820 if (__predict_true(flt->lower_lock_type == RW_WRITER)) {
1830 flt->lower_lock_type = RW_WRITER;
1838 KASSERT(flt->lower_lock_type == rw_lock_op(uobj->vmobjlock));
1857 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
1878 uvm_fault_lower_lookup(ufi, flt, pages);
1879 uobjpage = pages[flt->centeridx];
1898 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1900 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1911 flt->promote = true; /* always need anon here */
1914 flt->promote = flt->cow_now && UVM_ET_ISCOPYONWRITE(ufi->entry);
1917 flt->promote, (uobj == NULL), 0,0);
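Lines 1911-1914 decide whether the lower-layer data must be promoted to an anon. Sketched with both cases, assuming the enclosing if/else shape:

    if (uobj == NULL) {
            /*
             * Zero fill: no backing object, so the new page can only
             * live in an anon.
             */
            flt->promote = true;    /* always need anon here */
    } else {
            /*
             * Backed by an object: promote only for a write fault on
             * a copy-on-write mapping.
             */
            flt->promote = flt->cow_now && UVM_ET_ISCOPYONWRITE(ufi->entry);
    }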
1932 error = uvm_fault_lower_io(ufi, flt, &uobj, &uobjpage);
1942 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1944 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
1961 if (!flt->promote) {
1962 error = uvm_fault_lower_direct(ufi, flt, uobj, uobjpage);
1964 error = uvm_fault_lower_promote(ufi, flt, uobj, uobjpage);
1979 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
1988 rw_enter(uobj->vmobjlock, flt->lower_lock_type);
1995 gotpages = flt->npages;
1997 ufi->entry->offset + flt->startva - ufi->entry->start,
1998 pages, &gotpages, flt->centeridx,
1999 flt->access_type & MASK(ufi->entry), ufi->entry->advice,
2002 KASSERT(rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
2009 pages[flt->centeridx] = NULL;
2014 currva = flt->startva;
2015 for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
2036 if (lcv == flt->centeridx) {
2040 uvm_fault_lower_neighbor(ufi, flt, currva, curpg);
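The lookup at lines 1995-2002 asks the pager only for pages that are already resident. A sketch of the call, assuming the standard pgo_get signature and the PGO_LOCKED flag (a no-sleep lookup with the object lock held); absent pages come back NULL, and only the center page triggers real I/O later:

    /* Ask for the whole neighborhood, but only pages already in core. */
    gotpages = flt->npages;
    (void) uobj->pgops->pgo_get(uobj,
        ufi->entry->offset + flt->startva - ufi->entry->start,
        pages, &gotpages, flt->centeridx,
        flt->access_type & MASK(ufi->entry), ufi->entry->advice,
        PGO_LOCKED);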
2055 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
2095 KASSERT(rw_lock_op(pg->uobject->vmobjlock) == flt->lower_lock_type);
2098 readonly ? (flt->enter_prot & ~VM_PROT_WRITE) :
2099 flt->enter_prot & MASK(ufi->entry);
2101 PMAP_CANFAIL | (flt->wire_mapping ? (mapprot | PMAP_WIRED) : 0);
2117 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2133 access_type = flt->access_type & MASK(ufi->entry);
2137 KASSERT(rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
2140 error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, NULL);
2191 amap_lock(amap, flt->upper_lock_type);
2196 rw_enter(uobj->vmobjlock, flt->lower_lock_type);
2198 KASSERT(flt->lower_lock_type == RW_WRITER);
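After dropping everything for I/O, lines 2191-2196 reacquire the locks in the order the listing shows, amap before object, which is what keeps the upper/lower lock pair deadlock-free. A minimal sketch:

    /*
     * Relock in the canonical order: amap (upper layer) first, then
     * the object (lower layer).  Taking them in the opposite order
     * could deadlock against another fault holding the amap lock.
     */
    amap_lock(amap, flt->upper_lock_type);
    rw_enter(uobj->vmobjlock, flt->lower_lock_type);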
2263 * 1. adjust flt->enter_prot.
2269 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2287 flt->enter_prot &= ~VM_PROT_WRITE;
2298 uvm_fault_lower_direct_loan(ufi, flt, uobj, &pg, &uobjpage);
2302 return uvm_fault_lower_enter(ufi, flt, uobj, NULL, pg);
2308 * 1. if not cow'ing, adjust flt->enter_prot.
2314 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2324 if (!flt->cow_now) {
2327 flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;
2334 error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, uobjpage);
2378 struct uvm_faultinfo *ufi, struct uvm_faultctx *flt,
2390 error = uvm_fault_upper_upgrade(ufi, flt, amap, uobj);
2396 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
2402 error = uvmfault_promote(ufi, NULL, uobjpage, &anon, &flt->anon_spare);
2449 return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg);
2459 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
2480 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
2482 rw_lock_op(uobj->vmobjlock) == flt->lower_lock_type);
2501 (uintptr_t)pg, flt->promote);
2502 KASSERTMSG((flt->access_type & VM_PROT_WRITE) == 0 || !readonly,
2505 flt->promote, flt->cow_now, flt->access_type, flt->enter_prot,
2508 KASSERT((flt->access_type & VM_PROT_WRITE) == 0 || !readonly);
2511 readonly ? flt->enter_prot & ~VM_PROT_WRITE : flt->enter_prot,
2512 flt->access_type | PMAP_CANFAIL |
2513 (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
2548 uvm_fault_lower_done(ufi, flt, uobj, pg);
2562 struct uvm_faultinfo *ufi, const struct uvm_faultctx *flt,
2568 if (flt->wire_paging) {
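Finally, lines 2562-2568 (and the upper-layer twin at 1765-1768) record the page's new state once the fault has succeeded. A sketch of the two outcomes, assuming the uvm_pagelock()/uvm_pageunlock() interlock of recent NetBSD (older code used the global page-queue lock):

    if (flt->wire_paging) {
            /* Wired fault: pin the page so the pagedaemon leaves it alone. */
            uvm_pagelock(pg);
            uvm_pagewire(pg);
            uvm_pageunlock(pg);
    } else {
            /* Normal fault: the page was just used, keep it on the
             * active queue. */
            uvm_pagelock(pg);
            uvm_pageactivate(pg);
            uvm_pageunlock(pg);
    }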