
Lines Matching defs:anon

73  *   case [1]: upper layer fault [anon active]
74 * 1A: [read] or [write with anon->an_ref == 1]
75 * I/O takes place in upper level anon and uobj is not touched.
76 * 1B: [write with anon->an_ref > 1]
77 * new anon is alloc'd and data is copied off ["COW"]
83 * data is "promoted" from uobj to a new anon.
88 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
101 * - at the same time check pmap for unmapped areas and anon for pages
106 * - ensure source anon is resident in RAM
107 * - if case 1B alloc new anon and copy from source
112 * - if case 2B alloc new anon and copy from source (could be zero
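
The case split above comes down to two questions: which layer holds the data, and whether a write fault needs a private copy. A minimal standalone sketch of that classification, using simplified stand-in types rather than the kernel's (struct toy_anon and the enum names here are hypothetical):

    #include <stdbool.h>

    struct toy_anon { int an_ref; };    /* stand-in for struct vm_anon */

    enum fault_case { CASE_1A, CASE_1B, CASE_2A, CASE_2B };

    /* upper = an anon covers the faulting address; cow = copy-on-write area */
    static enum fault_case
    classify(bool upper, bool write, bool cow, const struct toy_anon *anon)
    {
        if (upper)  /* case 1: data lives in the anon layer */
            return (write && anon->an_ref > 1) ? CASE_1B : CASE_1A;
        /* case 2: data lives in the backing object (or is zero fill) */
        return (write && cow) ? CASE_2B : CASE_2A;
    }
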
256 * uvmfault_anonget: get data in an anon into a non-busy, non-released
257 * page in that anon.
259 * => Map, amap and thus anon should be locked by caller.
266 * by the anon): if successful, return with the owning object locked.
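
In practice the contract means a caller can dispatch purely on the return value. A condensed sketch modeled on the caller in uvm_fault_upper (not a verbatim copy):

    error = uvmfault_anonget(ufi, amap, anon);
    switch (error) {
    case 0:
        break;              /* everything still locked; page is resident */
    case ERESTART:
        return ERESTART;    /* locks were dropped; redo the fault */
    case EAGAIN:
        kpause("fltagain1", false, hz/2, NULL);
        return ERESTART;    /* transient failure; back off, then refault */
    default:
        return error;       /* hard I/O error; everything already unlocked */
    }
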
272 struct vm_anon *anon)
279 KASSERT(rw_lock_held(anon->an_lock));
280 KASSERT(anon->an_lock == amap->am_lock);
284 if (anon->an_page) {
292 * Loop until we get the anon data, or fail.
301 pg = anon->an_page;
304 * If there is a resident page and it is loaned, then anon
310 pg = uvm_anon_lockloanpg(anon);
316 lock_type = rw_lock_op(anon->an_lock);
344 /* Owner of page is anon. */
346 UVMHIST_LOG(maphist, " unlock+wait on anon",0,
348 uvm_pagewait(pg, anon->an_lock, "anonget2");
363 anon, ufi != NULL ? UVM_FLAG_COLORMATCH : 0);
387 error = uvm_swap_get(pg, anon->an_swslot,
401 * Re-lock the map and anon.
406 rw_enter(anon->an_lock, lock_type);
414 * 1) Page was released during I/O: free anon and ReFault.
417 * case (i.e. drop anon lock if not locked).
426 * Remove the swap slot from the anon and
427 * mark the anon as having no real slot.
432 if (anon->an_swslot > 0) {
433 uvm_swap_markbad(anon->an_swslot, 1);
435 anon->an_swslot = SWSLOT_BAD;
452 rw_exit(anon->an_lock);
459 KASSERT(anon->an_ref == 0);
468 uvm_anon_release(anon);
502 rw_exit(anon->an_lock);
510 * the anon on us.
514 ufi->orig_rvaddr - ufi->entry->start) != anon) {
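
The fragments above follow the usual sleep-and-revalidate discipline: every sleep (page wait or swap I/O) drops the locks, and after relocking the code must confirm the amap slot still names the same anon before trusting any earlier state. A condensed sketch of that step (not the exact control flow; we_own tracks whether we allocated the page ourselves):

    locked = uvmfault_relock(ufi);      /* re-take the map locks */
    if (locked || we_own)
        rw_enter(anon->an_lock, lock_type);
    if (locked && amap_lookup(&ufi->entry->aref,
        ufi->orig_rvaddr - ufi->entry->start) != anon) {
        /* another thread replaced the anon: unlock and refault */
        uvmfault_unlockall(ufi, amap, NULL);
        return ERESTART;
    }
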
532 * uvmfault_promote: promote data to a new anon. used for 1B and 2B.
534 * 1. allocate an anon and a page.
539 * => on success, return a new locked anon via 'nanon'.
549 struct vm_anon **nanon, /* OUT: allocated anon */
554 struct vm_anon *anon;
561 /* anon COW */
586 anon = *spare;
589 anon = uvm_analloc();
591 if (anon) {
594 * The new anon is locked.
600 KASSERT(anon->an_lock == NULL);
601 anon->an_lock = amap->am_lock;
602 pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon,
605 anon->an_lock = NULL;
616 /* save anon for the next try. */
617 if (anon != NULL) {
618 *spare = anon;
652 amap_add(&ufi->entry->aref, ufi->orig_rvaddr - ufi->entry->start, anon,
662 pg = anon->an_page;
666 *nanon = anon;
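
The '*spare' fragments implement a retry-friendly allocation: an anon allocated on a failed attempt is parked in the fault context so the refault does not pay for the allocation twice. A condensed sketch with the locking and error paths trimmed:

    if (*spare != NULL) {
        anon = *spare;              /* reuse the anon from a failed try */
        *spare = NULL;
    } else {
        anon = uvm_analloc();       /* may fail under memory pressure */
    }
    if (anon != NULL) {
        anon->an_lock = amap->am_lock;  /* new anon shares the amap lock */
        pg = uvm_pagealloc(NULL, ufi->orig_rvaddr, anon,
            UVM_FLAG_COLORMATCH);
    }
    if (anon == NULL || pg == NULL) {
        if (anon != NULL) {
            anon->an_lock = NULL;
            *spare = anon;          /* park it for the next attempt */
        }
        /* the real code unlocks everything, may wait for memory, and
           returns ERESTART (refault) or ENOMEM */
        return ERESTART;
    }
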
1281 * 2. check if anon exists. if not, page is lower.
1282 * 3. if anon exists, enter h/w mapping for neighbors.
1314 * unmapped or center page. check if any anon at this level.
1331 struct vm_anon *anon = anons[lcv];
1332 struct vm_page *pg = anon->an_page;
1334 KASSERT(anon->an_lock == amap->am_lock);
1344 pg, anon->an_ref > 1);
1355 /* (shadowed == true) if there is an anon at the faulting address */
1365 * => called with amap and anon locked.
1375 /* locked: amap, anon */
1394 " MAPPING: n anon: pm=%#jx, va=%#jx, pg=%#jx",
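
The neighbor scan opportunistically maps anon pages that happen to be resident around the faulting address, so nearby accesses do not each take their own fault; skipping busy and loaned pages keeps it purely opportunistic. A condensed sketch of the loop:

    for (lcv = 0; lcv < flt->npages; lcv++, currva += PAGE_SIZE) {
        struct vm_anon *anon = anons[lcv];

        if (lcv == flt->centeridx || anon == NULL ||
            anon->an_page == NULL)
            continue;   /* center is handled later; nothing resident here */
        /* ignore loaned and busy pages; map the rest read-only */
        if (anon->an_page->loan_count == 0 &&
            (anon->an_page->flags & PG_BUSY) == 0)
            uvm_fault_upper_neighbor(ufi, flt, currva,
                anon->an_page, anon->an_ref > 1);
    }
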
1414 * 1. acquire anon lock.
1415 * 2. get anon. let uvmfault_anonget do the dirty work.
1426 struct vm_anon * const anon = anons[flt->centeridx];
1431 /* locked: maps(read), amap, anon */
1433 KASSERT(anon->an_lock == amap->am_lock);
1436 * handle case 1: fault on an anon in our amap
1439 UVMHIST_LOG(maphist, " case 1 fault: anon=%#jx",
1440 (uintptr_t)anon, 0, 0, 0);
1444 * have the anon's memory resident. ensure that now.
1451 * also, if it is OK, then the anon's page is on the queues.
1456 error = uvmfault_anonget(ufi, amap, anon);
1485 uobj = anon->an_page->uobject; /* locked by anonget if !NULL */
1487 /* locked: maps(read), amap, anon, uobj(if one) */
1489 KASSERT(anon->an_lock == amap->am_lock);
1497 if (anon->an_page->loan_count) {
1498 error = uvm_fault_upper_loan(ufi, flt, anon, &uobj);
1505 * anon to transfer the data into. note that we have a lock
1506 * on anon, so no one can busy or release the page until we are done.
1513 * if we are out of anon VM we kill the process (XXX: could wait?).
1516 if (flt->cow_now && anon->an_ref > 1) {
1518 error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
1520 error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
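
The two-way dispatch at the end is the entire case-1 split in executable form (taken from the fragments above, with comments added):

    if (flt->cow_now && anon->an_ref > 1) {
        /* case 1B: write fault on a shared anon: copy it first */
        error = uvm_fault_upper_promote(ufi, flt, uobj, anon);
    } else {
        /* case 1A: read, or write to an exclusively held anon */
        error = uvm_fault_upper_direct(ufi, flt, uobj, anon);
    }
    return error;
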
1535 struct vm_anon *anon, struct uvm_object **ruobj)
1555 * anon then we need to look at the anon's ref count.
1557 * a normal copy-on-write fault into a new anon (this
1565 if (anon->an_ref == 1) {
1573 error = uvm_loanbreak_anon(anon, *ruobj);
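
Only the an_ref == 1 case needs explicit work here: a shared anon gets copied by the normal COW promotion anyway, but an exclusively held loaned page must have its loan broken before it can be written. A condensed sketch modeled on the function above:

    if (flt->cow_now && anon->an_ref == 1) {
        error = uvm_loanbreak_anon(anon, *ruobj);
        if (error != 0) {
            /* no memory to break the loan: wait, then refault */
            uvmfault_unlockall(ufi, ufi->entry->aref.ar_amap, *ruobj);
            uvm_wait("flt_noram2");
            return ERESTART;
        }
        *ruobj = NULL;      /* the anon now owns a private page */
    }
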
1599 struct uvm_object *uobj, struct vm_anon *anon)
1602 struct vm_anon * const oanon = anon;
1618 error = uvmfault_promote(ufi, oanon, PGO_DONTCARE, &anon,
1628 pg = anon->an_page;
1630 KASSERT(anon->an_lock == oanon->an_lock);
1638 * note: oanon is still locked, as is the new anon. we
1640 * oanon != anon, we'll have to unlock anon, too.
1643 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
1653 struct uvm_object *uobj, struct vm_anon *anon)
1655 struct vm_anon * const oanon = anon;
1660 pg = anon->an_page;
1661 if (anon->an_ref > 1) /* disallow writes to ref > 1 anons */
1664 return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
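
The an_ref check above pairs with a protection downgrade: if the anon turns out to be shared, the mapping is entered read-only so a later write refaults and takes the promote path instead. A sketch of the statement the listing elides (modeled on the function above):

    pg = anon->an_page;
    if (anon->an_ref > 1)       /* shared anon: never map it writable */
        flt->enter_prot = flt->enter_prot & ~VM_PROT_WRITE;

    return uvm_fault_upper_enter(ufi, flt, uobj, anon, pg, oanon);
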
1674 struct uvm_object *uobj, struct vm_anon *anon, struct vm_page *pg,
1682 /* locked: maps(read), amap, oanon, anon(if different from oanon) */
1684 KASSERT(anon->an_lock == amap->am_lock);
1695 " MAPPING: anon: pm=%#jx, va=%#jx, pg=%#jx, promote=%jd",
1748 uvm_fault_upper_done(ufi, flt, anon, pg);
1766 struct vm_anon *anon, struct vm_page *pg)
1784 * and since an anon with no swap cannot be clean,
1789 uvm_anon_dropswap(anon);
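
The swap release above belongs to the wired-page path: a wired page can never be paged out, so its swap slot is pure waste, and because an anon without swap backing can never be considered clean, the page is marked dirty in the same breath. A condensed sketch (flt->wire_paging is the field the surrounding source uses; page-lock details trimmed):

    if (flt->wire_paging) {
        uvm_pagewire(pg);   /* pin it: the pagedaemon must skip this page */
        /* with no swap slot the page can never be "clean" */
        uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
        uvm_anon_dropswap(anon);    /* free the now-useless slot */
    }
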
1885 * faulted on). if we have faulted on the upper (anon) layer
1886 * [i.e. case 1], then the anon we want is anons[centeridx] (we have
1906 * the data up to an anon during the fault.
1911 flt->promote = true; /* always need anon here */
2382 struct vm_anon *anon;
2399 * If we are going to promote the data to an anon we
2400 * allocate a blank anon here and plug it into our amap.
2402 error = uvmfault_promote(ufi, NULL, uobjpage, &anon, &flt->anon_spare);
2412 pg = anon->an_page;
2434 " promote uobjpage %#jx to anon/page %#jx/%#jx",
2435 (uintptr_t)uobjpage, (uintptr_t)anon, (uintptr_t)pg, 0);
2445 UVMHIST_LOG(maphist," zero fill anon/page %#jx/%#jx",
2446 (uintptr_t)anon, (uintptr_t)pg, 0, 0);
2449 return uvm_fault_lower_enter(ufi, flt, uobj, anon, pg);
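
Promotion from the lower layer has two flavors, distinguished by uobjpage: copy from a real object page, or zero fill when there is no backing object (uobjpage == PGO_DONTCARE). A condensed sketch:

    error = uvmfault_promote(ufi, NULL, uobjpage, &anon, &flt->anon_spare);
    if (error)
        return error;       /* ERESTART to refault, or a hard error */
    pg = anon->an_page;
    if (uobjpage != PGO_DONTCARE) {
        /* promote: uvmfault_promote copied the object page's data */
    } else {
        /* zero fill: the fresh anon page was allocated zeroed */
    }
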
2453 * uvm_fault_lower_enter: enter h/w mapping of lower page or anon page promoted
2461 struct vm_anon *anon, struct vm_page *pg)
2472 * anon(if !null), pg(if anon), unlock_uobj(if !null)
2474 * anon must be write locked (promotion). uobj can be either.
2476 * Note: pg is either the uobjpage or the new page in the new anon.
2483 KASSERT(anon == NULL || anon->an_lock == amap->am_lock);
2528 if (anon != NULL) {
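
All paths funnel into one pmap_enter of pg, which is either the object's page or the freshly promoted anon page. A condensed sketch of the mapping step, modeled on the function above (failure means the pmap ran out of resources, so the code unlocks, lets the pagedaemon make progress, and refaults):

    if (pmap_enter(ufi->orig_map->pmap, ufi->orig_rvaddr,
        VM_PAGE_TO_PHYS(pg), flt->enter_prot,
        flt->access_type | PMAP_CANFAIL |
        (flt->wire_mapping ? PMAP_WIRED : 0)) != 0) {
        /* out of pmap resources: unlock, wait, redo the fault */
        uvmfault_unlockall(ufi, amap, uobj);
        uvm_wait("flt_pmfail2");
        return ERESTART;
    }
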