
Lines Matching defs:amap

29  * uvm_amap.c: amap operations
54 * avoid an endless loop, the amap cache's allocator cannot allocate
55 * memory from an amap (it currently goes through the kernel uobj, so
75 * what is ppref? ppref is an _optional_ amap feature which is used
81 * map (either by unmapping part of the amap, or gaining a reference
82 * to only a part of an amap). if the allocation of the array fails
110 * => ppref's amap must be locked
128 * => ppref's amap must be locked
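
The ppref array packs a (reference count, length) pair into the ints themselves. Below is a minimal userland sketch of the run-length encoding managed by pp_setreflen()/pp_getreflen(); the helper names and the main() driver are illustrative, not kernel code, and assume the convention that a one-slot chunk stores ref+1 while a longer chunk stores -(ref+1) followed by its length.

/*
 * Illustrative userland model of the ppref run-length encoding: a
 * one-slot chunk stores ref+1 directly; a longer chunk stores -(ref+1)
 * in its first slot and the chunk length in the second slot.
 */
#include <assert.h>
#include <stdio.h>

void
pp_set(int *ppref, int offset, int ref, int len)
{
	if (len == 0)
		return;
	if (len == 1) {
		ppref[offset] = ref + 1;
	} else {
		ppref[offset] = -(ref + 1);
		ppref[offset + 1] = len;
	}
}

void
pp_get(const int *ppref, int offset, int *refp, int *lenp)
{
	if (ppref[offset] > 0) {		/* one-slot chunk */
		*refp = ppref[offset] - 1;
		*lenp = 1;
	} else {				/* multi-slot chunk */
		*refp = -ppref[offset] - 1;
		*lenp = ppref[offset + 1];
	}
}

int
main(void)
{
	int ppref[8] = { 0 }, ref, len;

	pp_set(ppref, 0, 0, 2);		/* slots 0-1: no extra references */
	pp_set(ppref, 2, 3, 6);		/* slots 2-7: reference count 3 */
	pp_get(ppref, 2, &ref, &len);
	assert(ref == 3 && len == 6);
	printf("chunk at slot 2: ref=%d len=%d\n", ref, len);
	return 0;
}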
145 * amap_alloc1: allocate an amap, but do not initialise the overlay.
154 struct vm_amap *amap;
158 amap = pool_cache_get(&uvm_amap_cache, nowait ? PR_NOWAIT : PR_WAITOK);
159 if (amap == NULL) {
162 KASSERT(amap->am_lock != NULL);
163 KASSERT(amap->am_nused == 0);
166 if (rw_obj_refcnt(amap->am_lock) > 1) {
169 oldlock = amap->am_lock;
171 amap->am_lock = newlock;
178 amap->am_ref = 1;
179 amap->am_flags = 0;
181 amap->am_ppref = NULL;
183 amap->am_maxslot = totalslots;
184 amap->am_nslot = slots;
190 amap->am_slots = kmem_alloc(totalslots * sizeof(int), kmflags);
191 if (amap->am_slots == NULL)
194 amap->am_bckptr = kmem_alloc(totalslots * sizeof(int), kmflags);
195 if (amap->am_bckptr == NULL)
198 amap->am_anon = kmem_alloc(totalslots * sizeof(struct vm_anon *),
200 if (amap->am_anon == NULL)
203 return amap;
206 kmem_free(amap->am_bckptr, totalslots * sizeof(int));
208 kmem_free(amap->am_slots, totalslots * sizeof(int));
210 pool_cache_put(&uvm_amap_cache, amap);
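
The fail path above releases the partially built amap in reverse order of allocation. A hedged userland sketch of that staged-allocation/unwind idiom follows; the toy_* structure and functions are simplified stand-ins, not the kernel's types.

/*
 * Userland sketch of the staged allocation with reverse unwind used by
 * amap_alloc1(): each allocation that fails jumps to a label that frees
 * only what has already succeeded.  Simplified stand-in types.
 */
#include <stdlib.h>

struct toy_amap {
	int	*slots;		/* stand-in for am_slots */
	int	*bckptr;	/* stand-in for am_bckptr */
	void	**anon;		/* stand-in for am_anon */
	int	maxslot;
};

struct toy_amap *
toy_amap_alloc1(int totalslots)
{
	struct toy_amap *amap;

	amap = malloc(sizeof(*amap));
	if (amap == NULL)
		return NULL;
	amap->maxslot = totalslots;

	amap->slots = malloc(totalslots * sizeof(int));
	if (amap->slots == NULL)
		goto fail1;
	amap->bckptr = malloc(totalslots * sizeof(int));
	if (amap->bckptr == NULL)
		goto fail2;
	amap->anon = malloc(totalslots * sizeof(void *));
	if (amap->anon == NULL)
		goto fail3;
	return amap;

fail3:
	free(amap->bckptr);
fail2:
	free(amap->slots);
fail1:
	free(amap);
	return NULL;
}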
226 * amap_alloc: allocate an amap to manage "sz" bytes of anonymous VM
229 * => reference count to new amap is set to one
230 * => new amap is returned unlocked
236 struct vm_amap *amap;
243 amap = amap_alloc1(slots, padslots, waitf);
244 if (amap) {
245 memset(amap->am_anon, 0,
246 amap->am_maxslot * sizeof(struct vm_anon *));
249 UVMHIST_LOG(maphist,"<- done, amap = %#jx, sz=%jd", (uintptr_t)amap,
251 return(amap);
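
Each amap slot covers one page, so the "sz" bytes passed to amap_alloc() reduce to a slot count by a page-shift; the sketch below models that conversion under the assumption that the size is page-aligned (as the kernel's AMAP_B2SLOT() checks). TOY_PAGE_SHIFT is an illustrative stand-in value.

/*
 * Sketch of the byte-to-slot conversion behind amap_alloc(): one slot
 * per page, so a page-aligned size in bytes maps to sz >> PAGE_SHIFT
 * slots.  TOY_PAGE_SHIFT is an illustrative stand-in value.
 */
#include <assert.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT	12			/* assume 4 KiB pages */
#define TOY_PAGE_SIZE	(1UL << TOY_PAGE_SHIFT)

unsigned long
toy_b2slot(unsigned long bytes)
{
	assert((bytes & (TOY_PAGE_SIZE - 1)) == 0);	/* page-aligned */
	return bytes >> TOY_PAGE_SHIFT;
}

int
main(void)
{
	/* 64 KiB of anonymous memory -> 16 slots, plus any padding slots. */
	printf("%lu slots\n", toy_b2slot(64 * 1024));
	return 0;
}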
262 struct vm_amap *amap = obj;
265 amap->am_lock = rw_obj_tryalloc();
266 if (amap->am_lock == NULL) {
270 amap->am_lock = rw_obj_alloc();
272 amap->am_nused = 0;
273 amap->am_flags = 0;
276 LIST_INSERT_HEAD(&amap_list, amap, am_list);
289 struct vm_amap *amap = obj;
291 KASSERT(amap->am_nused == 0);
294 LIST_REMOVE(amap, am_list);
296 rw_obj_free(amap->am_lock);
300 * uvm_amap_init: initialize the amap system.
314 * amap_free: free an amap
316 * => the amap must be unlocked
317 * => the amap should have a zero reference count and be empty
320 amap_free(struct vm_amap *amap)
326 KASSERT(amap->am_ref == 0);
327 KASSERT(amap->am_nused == 0);
328 KASSERT((amap->am_flags & AMAP_SWAPOFF) == 0);
329 slots = amap->am_maxslot;
330 kmem_free(amap->am_slots, slots * sizeof(*amap->am_slots));
331 kmem_free(amap->am_bckptr, slots * sizeof(*amap->am_bckptr));
332 kmem_free(amap->am_anon, slots * sizeof(*amap->am_anon));
334 if (amap->am_ppref && amap->am_ppref != PPREF_NONE)
335 kmem_free(amap->am_ppref, slots * sizeof(*amap->am_ppref));
337 pool_cache_put(&uvm_amap_cache, amap);
338 UVMHIST_LOG(maphist,"<- done, freed amap = %#jx", (uintptr_t)amap,
343 * amap_extend: extend the size of an amap (if needed)
345 * => called from uvm_map when we want to extend an amap to cover
347 * => amap should be unlocked (we will lock it)
348 * => to safely extend an amap it should have a reference count of
354 struct vm_amap *amap = entry->aref.ar_amap;
372 * first, determine how many slots we need in the amap. don't
374 * there are some unused slots before us in the amap.
377 amap_lock(amap, RW_WRITER);
378 KASSERT(amap_refs(amap) == 1); /* amap can't be shared */
388 slotarea = amap->am_maxslot - slotmapped;
392 * Because this amap only has 1 ref, we know that there is
398 * entries which used this amap were removed. But without ppref,
405 * this function assumes that there are no anons in the amap
410 if (amap->am_ppref == PPREF_NONE) {
411 amap_wiperange(amap, 0, slotoff);
412 amap_wiperange(amap, slotendoff, amap->am_nslot - slotendoff);
415 KASSERT(amap->am_anon[i] == NULL);
417 for (i = slotendoff; i < amap->am_nslot - slotendoff; i++) {
418 KASSERT(amap->am_anon[i] == NULL);
428 if (amap->am_nslot >= slotneed) {
430 if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
431 amap_pp_adjref(amap, slotoff + slotmapped,
435 amap_unlock(amap);
437 "<- done (case 1f), amap = %#jx, sltneed=%jd",
438 (uintptr_t)amap, slotneed, 0, 0);
446 if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
447 amap_pp_adjref(amap, slotoff, slotadd, 1);
450 amap_unlock(amap);
452 "<- done (case 1b), amap = %#jx, sltneed=%jd",
453 (uintptr_t)amap, slotneed, 0, 0);
463 if (amap->am_maxslot >= slotneed) {
466 if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
467 if ((slotoff + slotmapped) < amap->am_nslot)
468 amap_pp_adjref(amap,
470 (amap->am_nslot -
472 pp_setreflen(amap->am_ppref, amap->am_nslot, 1,
473 slotneed - amap->am_nslot);
476 amap->am_nslot = slotneed;
477 amap_unlock(amap);
484 UVMHIST_LOG(maphist,"<- done (case 2f), amap = %#jx, "
485 "slotneed=%jd", (uintptr_t)amap, slotneed, 0, 0);
489 if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
494 memmove(amap->am_ppref + slotarea,
495 amap->am_ppref + slotoff,
501 pp_setreflen(amap->am_ppref,
503 pp_setreflen(amap->am_ppref,
512 memmove(amap->am_anon + slotarea,
513 amap->am_anon + slotoff,
515 memset(amap->am_anon + slotoff, 0,
522 memmove(amap->am_bckptr + slotarea,
523 amap->am_bckptr + slotoff,
529 for (i = 0; i < amap->am_nused; i++)
530 amap->am_slots[i] += (slotarea - slotoff);
534 * front of the amap by activating a few new
537 amap->am_nslot = amap->am_maxslot;
539 amap_unlock(amap);
541 UVMHIST_LOG(maphist,"<- done (case 2b), amap = %#jx, "
542 "slotneed=%jd", (uintptr_t)amap, slotneed, 0, 0);
548 * Case 3: we need to allocate a new amap and copy all the amap
549 * data over from the old amap to the new one. Drop the lock before
556 amap_unlock(amap);
565 if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
590 amap_lock(amap, RW_WRITER);
591 KASSERT(amap->am_maxslot < slotneed);
597 slotadded = slotalloc - amap->am_nslot;
602 oldsl = amap->am_slots;
604 memcpy(newsl, oldsl, sizeof(int) * amap->am_nused);
606 for (i = 0; i < amap->am_nused; i++)
608 amap->am_slots = newsl;
611 oldover = amap->am_anon;
614 sizeof(struct vm_anon *) * amap->am_nslot);
615 memset(newover + amap->am_nslot, 0,
623 amap->am_anon = newover;
626 oldbck = amap->am_bckptr;
628 memcpy(newbck, oldbck, sizeof(int) * amap->am_nslot);
632 amap->am_bckptr = newbck;
636 oldppref = amap->am_ppref;
640 sizeof(int) * amap->am_nslot);
641 memset(newppref + amap->am_nslot, 0,
647 amap->am_ppref = newppref;
649 (slotoff + slotmapped) < amap->am_nslot)
650 amap_pp_adjref(amap, slotoff + slotmapped,
651 (amap->am_nslot - (slotoff + slotmapped)), 1);
653 pp_setreflen(newppref, amap->am_nslot, 1,
654 slotneed - amap->am_nslot);
662 if (amap->am_ppref)
663 amap->am_ppref = PPREF_NONE;
669 amap->am_nslot = slotneed;
672 amap->am_nslot = slotalloc;
674 oldnslots = amap->am_maxslot;
675 amap->am_maxslot = slotalloc;
676 amap_unlock(amap);
685 UVMHIST_LOG(maphist,"<- done (case 3), amap = %#jx, slotneed=%jd",
686 (uintptr_t)amap, slotneed, 0, 0);
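
The case labels in the history log above (1f/1b, 2f/2b, 3) come down to one triage: does the request fit within am_nslot, within am_maxslot, or only after reallocating the backing arrays? The sketch below captures just that decision; the real function also shifts slot data for backwards growth and keeps ppref consistent, and the toy_* names are stand-ins.

/*
 * Illustrative triage mirroring amap_extend()'s three cases:
 *   case 1 (1f/1b): the needed slots already fit within am_nslot;
 *   case 2 (2f/2b): they fit within am_maxslot, so am_nslot grows;
 *   case 3: the backing arrays must be reallocated.
 * Field names are simplified stand-ins for the kernel's.
 */
enum extend_case { EXTEND_FITS_NSLOT, EXTEND_FITS_MAXSLOT, EXTEND_REALLOC };

struct toy_amap {
	int	nslot;		/* slots currently covered (am_nslot) */
	int	maxslot;	/* slots actually allocated (am_maxslot) */
};

enum extend_case
toy_amap_extend_case(const struct toy_amap *amap, int slotneed)
{
	if (amap->nslot >= slotneed)
		return EXTEND_FITS_NSLOT;
	if (amap->maxslot >= slotneed)
		return EXTEND_FITS_MAXSLOT;
	return EXTEND_REALLOC;
}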
691 * amap_share_protect: change protection of anons in a shared amap
698 * all mappings of a shared amap.] we traverse am_anon or am_slots
699 * depending on the current state of the amap.
701 * => entry's map and amap must be locked by the caller
706 struct vm_amap *amap = entry->aref.ar_amap;
710 KASSERT(rw_write_held(amap->am_lock));
715 if (slots < amap->am_nused) {
720 anon = amap->am_anon[lcv];
734 for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
735 slot = amap->am_slots[lcv];
739 anon = amap->am_anon[slot];
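
amap_share_protect() (and later amap_wiperange()) picks its traversal by comparing the size of the affected slot range against am_nused: scan am_anon[] over the range when the range is small, otherwise walk the dense am_slots[] list. A userland sketch of that choice, with simplified stand-in names:

/*
 * Userland sketch of the traversal choice: if the slot range of interest
 * is smaller than the number of anons in use, scan am_anon[] over that
 * range and skip NULL entries; otherwise walk the dense am_slots[] list
 * and skip slots outside the range.  Simplified stand-in names.
 */
#include <stddef.h>

struct toy_amap {
	void	**anon;		/* am_anon: one entry per slot, may be NULL */
	int	*slots;		/* am_slots: dense list of in-use slots */
	int	nused;		/* am_nused */
};

void
toy_visit_range(struct toy_amap *amap, int slotoff, int slots,
    void (*visit)(void *))
{
	int lcv, curslot;

	if (slots < amap->nused) {
		/* Small range: scan the range directly. */
		for (lcv = slotoff; lcv < slotoff + slots; lcv++) {
			if (amap->anon[lcv] != NULL)
				visit(amap->anon[lcv]);
		}
	} else {
		/* Few anons in use: scan the dense list instead. */
		for (lcv = 0; lcv < amap->nused; lcv++) {
			curslot = amap->slots[lcv];
			if (curslot < slotoff || curslot >= slotoff + slots)
				continue;
			visit(amap->anon[curslot]);
		}
	}
}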
747 * amap_wipeout: wipe out all anons in an amap; then free the amap!
750 * => amap must be locked.
754 amap_wipeout(struct vm_amap *amap)
759 UVMHIST_CALLARGS(maphist,"(amap=%#jx)", (uintptr_t)amap, 0,0,0);
761 KASSERT(rw_write_held(amap->am_lock));
762 KASSERT(amap->am_ref == 0);
764 if (__predict_false(amap->am_flags & AMAP_SWAPOFF)) {
768 amap_unlock(amap);
772 for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
776 slot = amap->am_slots[lcv];
777 anon = amap->am_anon[slot];
781 KASSERT(anon->an_lock == amap->am_lock);
798 * Finally, destroy the amap.
801 amap->am_nused = 0;
802 amap_unlock(amap);
803 amap_free(amap);
809 * by copying the amap if necessary.
811 * => an entry with a null amap pointer will get a new (blank) one.
813 * => the amap currently attached to "entry" (if any) must be unlocked.
826 struct vm_amap *amap, *srcamap;
841 * Is there an amap to copy? If not, create one.
848 * Check to see if we have a large amap that we can
861 " chunk amap ==> clip %#jx->%#jx to %#jx->%#jx",
876 UVMHIST_LOG(maphist, "<- done [creating new amap %#jx->%#jx]",
880 * Allocate an initialised amap and install it.
894 * the amap we currently have. If so, then just take it over instead
897 * to the amap (via our locked map). If the value is greater than
898 * one, then allocate an amap and re-check the value.
908 UVMHIST_LOG(maphist," amap=%#jx, ref=%jd, must copy it",
912 * Allocate a new amap (note: not initialised, etc).
916 amap = amap_alloc1(slots, 0, waitf);
917 if (amap == NULL) {
923 * Make the new amap share the source amap's lock, and then lock
925 * amap_swap_off() can become interested in the amap.
928 oldlock = amap->am_lock;
930 amap->am_lock = srcamap->am_lock;
932 rw_obj_hold(amap->am_lock);
943 /* Just take over the existing amap. */
946 /* Destroy the new (unused) amap. */
947 amap->am_ref--;
948 amap_free(amap);
956 UVMHIST_LOG(maphist, " copying amap now",0, 0, 0, 0);
958 amap->am_anon[lcv] =
960 if (amap->am_anon[lcv] == NULL)
962 KASSERT(amap->am_anon[lcv]->an_lock == srcamap->am_lock);
963 KASSERT(amap->am_anon[lcv]->an_ref > 0);
964 KASSERT(amap->am_nused < amap->am_maxslot);
965 amap->am_anon[lcv]->an_ref++;
966 amap->am_bckptr[lcv] = amap->am_nused;
967 amap->am_slots[amap->am_nused] = lcv;
968 amap->am_nused++;
970 memset(&amap->am_anon[lcv], 0,
971 (amap->am_maxslot - lcv) * sizeof(struct vm_anon *));
974 * Drop our reference to the old amap (srcamap) and unlock.
995 * Install new amap.
999 entry->aref.ar_amap = amap;
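
The take-over path above relies on a check / allocate / lock / re-check pattern: am_ref is peeked without the lock, the possibly-sleeping allocation happens unlocked, and the decision is re-validated once the source amap is locked. A hedged pthread-based sketch of the same pattern follows; the toy_* types are stand-ins and the anon-copying step is elided.

/*
 * Sketch of the check / allocate / lock / re-check pattern used by
 * amap_copy(): the reference count is sampled without the lock, the
 * possibly-sleeping allocation is done unlocked, and the decision is
 * re-validated once the lock is held.
 */
#include <pthread.h>
#include <stdlib.h>

struct toy_amap {
	pthread_mutex_t	lock;
	int		ref;
};

/* Return the amap the caller should use: src taken over, or a new copy. */
struct toy_amap *
toy_amap_copy(struct toy_amap *src)
{
	struct toy_amap *newamap;

	if (src->ref == 1)		/* unlocked peek: sole reference? */
		return src;		/* just take it over */

	newamap = calloc(1, sizeof(*newamap));	/* allocate before locking */
	if (newamap == NULL)
		return NULL;
	pthread_mutex_init(&newamap->lock, NULL);
	newamap->ref = 1;

	pthread_mutex_lock(&src->lock);
	if (src->ref == 1) {
		/* The count dropped while we allocated: discard the copy. */
		pthread_mutex_unlock(&src->lock);
		pthread_mutex_destroy(&newamap->lock);
		free(newamap);
		return src;
	}
	/* ... copy the anon pointers here ... */
	src->ref--;			/* drop our reference to src */
	pthread_mutex_unlock(&src->lock);
	return newamap;
}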
1005 * amap_cow_now: resolve all copy-on-write faults in an amap now for fork(2)
1017 * => if we run out of memory we will unlock the amap and sleep _with_ the
1028 struct vm_amap *amap = entry->aref.ar_amap;
1034 * note that if we unlock the amap then we must ReStart the "lcv" for
1040 amap_lock(amap, RW_WRITER);
1041 for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
1042 slot = amap->am_slots[lcv];
1043 anon = amap->am_anon[slot];
1044 KASSERT(anon->an_lock == amap->am_lock);
1083 uvm_pagewait(pg, amap->am_lock, "cownow");
1094 nanon->an_lock = amap->am_lock;
1100 amap_unlock(amap);
1113 * Also, set up its lock (share it with the amap's lock).
1119 amap->am_anon[slot] = nanon;
1132 amap_unlock(amap);
1145 struct vm_amap *amap = origref->ar_amap;
1152 amap_lock(amap, RW_WRITER);
1153 KASSERT(amap->am_nslot - origref->ar_pageoff - leftslots > 0);
1156 /* Establish ppref before we add a duplicate reference to the amap. */
1157 if (amap->am_ppref == NULL) {
1158 amap_pp_establish(amap, origref->ar_pageoff);
1162 amap->am_ref++;
1164 amap_unlock(amap);
1170 * amap_pp_establish: add a ppref array to an amap, if possible.
1172 * => amap should be locked by caller.
1175 amap_pp_establish(struct vm_amap *amap, vaddr_t offset)
1177 const size_t sz = amap->am_maxslot * sizeof(*amap->am_ppref);
1179 KASSERT(rw_write_held(amap->am_lock));
1181 amap->am_ppref = kmem_zalloc(sz, KM_NOSLEEP);
1182 if (amap->am_ppref == NULL) {
1184 amap->am_ppref = PPREF_NONE;
1187 pp_setreflen(amap->am_ppref, 0, 0, offset);
1188 pp_setreflen(amap->am_ppref, offset, amap->am_ref,
1189 amap->am_nslot - offset);
1193 * amap_pp_adjref: adjust reference count to a part of an amap using the
1197 * => map and amap must be locked.
1200 amap_pp_adjref(struct vm_amap *amap, int curslot, vsize_t slotlen, int adjval)
1205 KASSERT(rw_write_held(amap->am_lock));
1208 ppref = amap->am_ppref;
1251 KASSERT(ref <= amap->am_ref);
1258 amap_wiperange(amap, lcv, len);
1264 * amap_wiperange: wipe out a range of an amap.
1265 * Note: different from amap_wipeout because the amap is kept intact.
1267 * => Both map and amap must be locked by caller.
1270 amap_wiperange(struct vm_amap *amap, int slotoff, int slots)
1275 KASSERT(rw_write_held(amap->am_lock));
1278 * We can either traverse the amap by am_anon or by am_slots.
1282 if (slots < amap->am_nused) {
1290 stop = amap->am_nused;
1300 if (amap->am_anon[curslot] == NULL)
1303 curslot = amap->am_slots[lcv];
1310 anon = amap->am_anon[curslot];
1311 KASSERT(anon->an_lock == amap->am_lock);
1314 * Remove anon from the amap.
1317 amap->am_anon[curslot] = NULL;
1318 ptr = amap->am_bckptr[curslot];
1319 last = amap->am_nused - 1;
1321 amap->am_slots[ptr] = amap->am_slots[last];
1322 amap->am_bckptr[amap->am_slots[ptr]] = ptr;
1324 amap->am_nused--;
1330 KASSERT(anon->an_lock == amap->am_lock);
1388 /* If am_nused == 0, the amap could be freed - careful. */
1440 * amap_lookup: look up a page in an amap.
1442 * => amap should be locked by caller.
1447 struct vm_amap *amap = aref->ar_amap;
1452 KASSERT(rw_lock_held(amap->am_lock));
1456 an = amap->am_anon[slot];
1459 "<- done (amap=%#jx, offset=%#jx, result=%#jx)",
1460 (uintptr_t)amap, offset, (uintptr_t)an, 0);
1462 KASSERT(slot < amap->am_nslot);
1464 KASSERT(an == NULL || an->an_lock == amap->am_lock);
1469 * amap_lookups: look up a range of pages in an amap.
1471 * => amap should be locked by caller.
1477 struct vm_amap *amap = aref->ar_amap;
1481 KASSERT(rw_lock_held(amap->am_lock));
1487 slot, npages, amap->am_nslot, 0);
1489 KASSERT((slot + (npages - 1)) < amap->am_nslot);
1490 memcpy(anons, &amap->am_anon[slot], npages * sizeof(struct vm_anon *));
1499 KASSERT(an->an_lock == amap->am_lock);
1506 * amap_add: add (or replace) a page to an amap.
1508 * => amap should be locked by caller.
1509 * => anon must have the lock associated with this amap.
1515 struct vm_amap *amap = aref->ar_amap;
1519 KASSERT(rw_write_held(amap->am_lock));
1520 KASSERT(anon->an_lock == amap->am_lock);
1524 KASSERT(slot < amap->am_nslot);
1527 struct vm_anon *oanon = amap->am_anon[slot];
1530 if (oanon->an_page && (amap->am_flags & AMAP_SHARED) != 0) {
1537 KASSERT(amap->am_anon[slot] == NULL);
1538 KASSERT(amap->am_nused < amap->am_maxslot);
1539 amap->am_bckptr[slot] = amap->am_nused;
1540 amap->am_slots[amap->am_nused] = slot;
1541 amap->am_nused++;
1543 amap->am_anon[slot] = anon;
1545 "<- done (amap=%#jx, offset=%#x, anon=%#jx, rep=%d)",
1546 (uintptr_t)amap, offset, (uintptr_t)anon, replace);
1550 * amap_unadd: remove a page from an amap.
1552 * => amap should be locked by caller.
1557 struct vm_amap *amap = aref->ar_amap;
1561 KASSERT(rw_write_held(amap->am_lock));
1565 KASSERT(slot < amap->am_nslot);
1566 KASSERT(amap->am_anon[slot] != NULL);
1567 KASSERT(amap->am_anon[slot]->an_lock == amap->am_lock);
1569 amap->am_anon[slot] = NULL;
1570 ptr = amap->am_bckptr[slot];
1572 last = amap->am_nused - 1;
1575 amap->am_slots[ptr] = amap->am_slots[last];
1576 amap->am_bckptr[amap->am_slots[ptr]] = ptr;
1578 amap->am_nused--;
1579 UVMHIST_LOG(maphist, "<- done (amap=%#jx, slot=%#jx)",
1580 (uintptr_t)amap, slot,0, 0);
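
amap_add() and amap_unadd() (and the removal in amap_wiperange()) maintain three parallel arrays: am_anon[] indexed by slot, am_slots[] as a dense list of occupied slots, and am_bckptr[slot] giving each slot's position in am_slots[], so removal can swap the last dense entry into the hole and both operations stay O(1). A userland model of that bookkeeping, with simplified names:

/*
 * Userland model of the am_anon / am_slots / am_bckptr bookkeeping:
 * anon[] is indexed by slot, slots[] densely lists the occupied slots,
 * and bckptr[slot] records that slot's position inside slots[], so
 * removal swaps the last dense entry into the hole.  Simplified names.
 */
#include <assert.h>
#include <stddef.h>

#define TOY_NSLOT	8

struct toy_amap {
	void	*anon[TOY_NSLOT];	/* am_anon */
	int	slots[TOY_NSLOT];	/* am_slots */
	int	bckptr[TOY_NSLOT];	/* am_bckptr */
	int	nused;			/* am_nused */
};

void
toy_add(struct toy_amap *amap, int slot, void *anon)
{
	assert(amap->anon[slot] == NULL && amap->nused < TOY_NSLOT);
	amap->bckptr[slot] = amap->nused;
	amap->slots[amap->nused] = slot;
	amap->nused++;
	amap->anon[slot] = anon;
}

void
toy_unadd(struct toy_amap *amap, int slot)
{
	int ptr, last;

	assert(amap->anon[slot] != NULL);
	amap->anon[slot] = NULL;
	ptr = amap->bckptr[slot];
	last = amap->nused - 1;
	if (ptr != last) {
		/* Move the last dense entry into the vacated position. */
		amap->slots[ptr] = amap->slots[last];
		amap->bckptr[amap->slots[ptr]] = ptr;
	}
	amap->nused--;
}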
1584 * amap_adjref_anons: adjust the reference count(s) on amap and its anons.
1587 amap_adjref_anons(struct vm_amap *amap, vaddr_t offset, vsize_t len,
1592 KASSERT(rw_write_held(amap->am_lock));
1596 * so that the ppref values match the current amap refcount.
1599 if (amap->am_ppref == NULL) {
1600 amap_pp_establish(amap, offset);
1604 amap->am_ref += refv;
1607 if (amap->am_ppref && amap->am_ppref != PPREF_NONE) {
1608 amap_pp_adjref(amap, offset, len, refv);
1611 amap_unlock(amap);
1615 * amap_ref: gain a reference to an amap.
1617 * => amap must not be locked (we will lock).
1622 amap_ref(struct vm_amap *amap, vaddr_t offset, vsize_t len, int flags)
1626 amap_lock(amap, RW_WRITER);
1628 amap->am_flags |= AMAP_SHARED;
1630 amap_adjref_anons(amap, offset, len, 1, (flags & AMAP_REFALL) != 0);
1632 UVMHIST_LOG(maphist,"<- done! amap=%#jx", (uintptr_t)amap, 0, 0, 0);
1636 * amap_unref: remove a reference to an amap.
1638 * => All pmap-level references to this amap must be already removed.
1640 * => We will lock amap, so it must be unlocked.
1643 amap_unref(struct vm_amap *amap, vaddr_t offset, vsize_t len, bool all)
1647 amap_lock(amap, RW_WRITER);
1649 UVMHIST_LOG(maphist," amap=%#jx refs=%d, nused=%d",
1650 (uintptr_t)amap, amap->am_ref, amap->am_nused, 0);
1651 KASSERT(amap->am_ref > 0);
1653 if (amap->am_ref == 1) {
1656 * If this is the last reference, wipe out and destroy the amap.
1658 amap->am_ref--;
1659 amap_wipeout(amap);
1668 if (amap->am_ref == 2 && (amap->am_flags & AMAP_SHARED) != 0) {
1669 amap->am_flags &= ~AMAP_SHARED;
1671 amap_adjref_anons(amap, offset, len, -1, all);
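
amap_unref() drops a reference in three steps: the final reference triggers a wipeout, a drop from two references clears AMAP_SHARED (a single remaining reference cannot be a shared mapping), and everything else goes through amap_adjref_anons(). A minimal sketch of that flow, with stand-in helpers in place of the real wipeout and ppref adjustment and no locking shown:

/*
 * Minimal sketch of the reference-drop flow in amap_unref(): the final
 * reference triggers a full wipeout, and a drop from two references
 * clears the shared flag.  Stand-in helpers; no locking shown.
 */
#define TOY_AMAP_SHARED	0x1

struct toy_amap {
	int	ref;
	int	flags;
};

/* Stand-ins for amap_wipeout() and amap_adjref_anons(). */
void toy_wipeout(struct toy_amap *amap) { (void)amap; /* frees anons and amap */ }
void toy_adjref(struct toy_amap *amap, int refv) { amap->ref += refv; }

void
toy_unref(struct toy_amap *amap)
{
	if (amap->ref == 1) {
		/* Last reference: wipe out the anons and free the amap. */
		amap->ref--;
		toy_wipeout(amap);
		return;
	}
	if (amap->ref == 2 && (amap->flags & TOY_AMAP_SHARED) != 0) {
		/* Dropping to one reference: it can no longer be shared. */
		amap->flags &= ~TOY_AMAP_SHARED;
	}
	toy_adjref(amap, -1);
}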