Lines Matching defs:amap

61 * amap |  V  |       |  ---------> new |          |        | |  ^  |
71 * no amap or uobj is present. this is an error.
99 * - if we have an amap, lock it and extract the anons
138 * implement (especially with structures like amap that can be referenced
259 * => Map, amap and thus anon should be locked by caller.
271 uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
280 KASSERT(anon->an_lock == amap->am_lock);
339 uvmfault_unlockall(ufi, amap, NULL);
366 uvmfault_unlockall(ufi, amap, NULL);
377 uvmfault_unlockall(ufi, amap, NULL);
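The three uvmfault_unlockall() calls above (lines 339, 366, 377) are uvmfault_anonget()'s bail-out paths: whenever it must sleep or fails, it drops every lock before returning, and the caller must refault. A minimal sketch of the resulting caller contract; the error dispatch below is assumed from these matches, not verbatim source:

        /* hypothetical caller of uvmfault_anonget() (lines 271-377) */
        static int
        anonget_caller_sketch(struct uvm_faultinfo *ufi, struct vm_amap *amap,
            struct vm_anon *anon)
        {
                int error;

                KASSERT(anon->an_lock == amap->am_lock);   /* line 280 */
                error = uvmfault_anonget(ufi, amap, anon);
                if (error == ERESTART)
                        return ERESTART;   /* locks dropped: refault */
                if (error != 0)
                        return error;      /* failure: locks also dropped */
                /* success: anon's page is resident, amap still locked */
                return 0;
        }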
462 * Released while we had unlocked amap.
509 * Verify that no one has touched the amap and moved
516 uvmfault_unlockall(ufi, amap, NULL);
536 * 3. put it into amap.
552 struct vm_amap *amap = ufi->entry->aref.ar_amap;
579 KASSERT(amap != NULL);
581 KASSERT(rw_write_held(amap->am_lock));
582 KASSERT(oanon == NULL || amap->am_lock == oanon->an_lock);
601 anon->an_lock = amap->am_lock;
622 uvmfault_unlockall(ufi, amap, uobj);
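Lines 536-622 cover promotion: a new anon is allocated and installed in the amap, and line 601 shows it adopting the amap's lock, which is what the an_lock == am_lock assertions elsewhere in the listing rely on. A sketch of that plumbing, assuming the standard uvm_analloc()/amap_add() calls and an ENOMEM path not visible in the matches:

        anon = uvm_analloc();              /* fresh anon, an_lock == NULL */
        if (anon == NULL) {
                uvmfault_unlockall(ufi, amap, uobj);
                return ENOMEM;             /* assumed failure handling */
        }
        anon->an_lock = amap->am_lock;     /* line 601: share the amap lock */
        amap_add(&ufi->entry->aref,
            ufi->orig_rvaddr - ufi->entry->start, anon, false);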
920 /* locked: maps(read), amap(if there), uobj */
966 * 4. handle needs-copy (lazy amap copy).
968 * 6. look up anons (if amap exists).
981 struct vm_amap *amap;
1052 * handle "needs_copy" case. if we need to copy the amap we will
1084 amap = ufi->entry->aref.ar_amap; /* upper layer */
1092 if (amap == NULL && uobj == NULL) {
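The guard at line 1092 implements the comment at line 71: with neither an upper layer (amap) nor a lower layer (uobj) there is nothing to fault in. Sketched below; the unlock step and the EFAULT return are assumptions based on the surrounding matches:

        amap = ufi->entry->aref.ar_amap;        /* upper layer, line 1084 */
        uobj = ufi->entry->object.uvm_obj;      /* lower layer */
        if (amap == NULL && uobj == NULL) {
                /* no amap or uobj is present: this is an error (line 71) */
                uvmfault_unlockmaps(ufi, false);   /* assumed helper */
                return EFAULT;
        }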
1103 if (uobj != NULL && amap != NULL &&
1151 UVMHIST_LOG(maphist, " entry=%#jx, amap=%#jx, obj=%#jx",
1152 (uintptr_t)ufi->entry, (uintptr_t)amap, (uintptr_t)uobj, 0);
1156 * if we've got an amap then lock it and extract current anons.
1159 if (amap) {
1160 if ((amap_flags(amap) & AMAP_SHARED) == 0) {
1162 * the amap isn't shared. get a writer lock to
1175 amap_lock(amap, flt->upper_lock_type);
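Lines 1159-1175 choose the upper lock type before taking the amap lock: per the truncated comment at line 1162, a private amap is likely to be written (COW, promotion), so it is cheaper to take a writer lock up front than to upgrade later, while a shared amap starts as a reader. Reconstructed from those matches, with only the reader default assumed:

        if (amap) {
                if ((amap_flags(amap) & AMAP_SHARED) == 0) {
                        /*
                         * the amap isn't shared: take a writer lock now
                         * rather than paying for an upgrade later.
                         */
                        flt->upper_lock_type = RW_WRITER;
                }
                amap_lock(amap, flt->upper_lock_type);
        }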
1188 /* locked: maps(read), amap(if there) */
1189 KASSERT(amap == NULL ||
1190 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1202 if (amap)
1221 if (amap)
1243 struct vm_amap *amap, struct uvm_object *uobj)
1247 KASSERT(amap != NULL);
1248 KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
1265 if (__predict_false(!rw_tryupgrade(amap->am_lock))) {
1266 uvmfault_unlockall(ufi, amap, uobj);
1272 KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
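uvm_fault_upper_upgrade() (lines 1243-1272) is the try-upgrade-or-restart idiom: attempt an in-place reader-to-writer upgrade, and if that fails because other readers hold the lock, drop everything and refault with ERESTART. A compressed sketch; the early return for an already-exclusive lock is assumed:

        static int
        upper_upgrade_sketch(struct uvm_faultinfo *ufi,
            struct uvm_faultctx *flt, struct vm_amap *amap,
            struct uvm_object *uobj)
        {
                KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
                if (flt->upper_lock_type == RW_WRITER)
                        return 0;          /* nothing to do (assumed) */
                if (__predict_false(!rw_tryupgrade(amap->am_lock))) {
                        /* lost the race: drop locks, refault as writer */
                        flt->upper_lock_type = RW_WRITER;
                        uvmfault_unlockall(ufi, amap, uobj);
                        return ERESTART;
                }
                flt->upper_lock_type = RW_WRITER;
                KASSERT(flt->upper_lock_type == rw_lock_op(amap->am_lock));
                return 0;
        }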
1277 * uvm_fault_upper_lookup: look up existing h/w mapping and amap.
1284 * => called with amap locked (if exists).
1292 struct vm_amap *amap = ufi->entry->aref.ar_amap;
1299 /* locked: maps(read), amap(if there) */
1300 KASSERT(amap == NULL ||
1301 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1304 * map in the backpages and frontpages we found in the amap in hopes
1316 if (amap == NULL || anons[lcv] == NULL) {
1334 KASSERT(anon->an_lock == amap->am_lock);
1352 /* locked: maps(read), amap(if there) */
1353 KASSERT(amap == NULL ||
1354 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
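uvm_fault_upper_lookup() (lines 1277-1354) also opportunistically maps the neighboring anons found in the amap. The loop shape implied by lines 1304-1334, with the bounds and the mapping step assumed:

        for (lcv = 0; lcv < flt->npages; lcv++) {
                if (amap == NULL || anons[lcv] == NULL)
                        continue;          /* no upper page at this slot */
                anon = anons[lcv];
                KASSERT(anon->an_lock == amap->am_lock);   /* line 1334 */
                /* ... enter a mapping for the resident page, if any ... */
        }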
1365 * => called with amap and anon locked.
1375 /* locked: amap, anon */
1425 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1431 /* locked: maps(read), amap, anon */
1432 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1433 KASSERT(anon->an_lock == amap->am_lock);
1436 * handle case 1: fault on an anon in our amap
1456 error = uvmfault_anonget(ufi, amap, anon);
1470 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1474 KASSERT(rw_write_held(amap->am_lock));
1487 /* locked: maps(read), amap, anon, uobj(if one) */
1488 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1489 KASSERT(anon->an_lock == amap->am_lock);
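Case 1 of the upper fault (lines 1436-1489): the faulting address is backed by an anon in our amap, so fetch its page with uvmfault_anonget() and, for a write that needs copy-on-write, upgrade the amap lock first. A compressed sketch; flt->cow_now is an assumed predicate standing in for the real write/COW test:

        error = uvmfault_anonget(ufi, amap, anon);   /* line 1456 */
        if (error != 0)
                return error;              /* ERESTART => refault */
        if (flt->cow_now) {                /* assumed COW predicate */
                error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
                if (error != 0)
                        return error;
                KASSERT(rw_write_held(amap->am_lock));   /* line 1474 */
        }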
1537 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1567 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1571 KASSERT(rw_write_held(amap->am_lock));
1575 uvmfault_unlockall(ufi, amap, *ruobj);
1601 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1610 error = uvm_fault_upper_upgrade(ufi, flt, amap, NULL);
1614 KASSERT(rw_write_held(amap->am_lock));
1679 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
1682 /* locked: maps(read), amap, oanon, anon(if different from oanon) */
1683 KASSERT(rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1684 KASSERT(anon->an_lock == amap->am_lock);
1685 KASSERT(oanon->an_lock == amap->am_lock);
1736 uvmfault_unlockall(ufi, amap, uobj);
1755 uvmfault_unlockall(ufi, amap, uobj);
1808 struct vm_amap *amap, struct uvm_object *uobj, struct vm_page *uobjpage)
1832 uvmfault_unlockall(ufi, amap, uobj);
1860 struct vm_amap *amap __diagused = ufi->entry->aref.ar_amap;
1867 * now, if the desired page is not shadowed by the amap and we have
1895 * maps(read), amap(if there), uobj(if !null), uobjpage(if !null)
1897 KASSERT(amap == NULL ||
1898 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1939 * maps(read), amap(if !null), uobj(if !null), uobjpage(if uobj)
1941 KASSERT(amap == NULL ||
1942 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
1991 * Locked: maps(read), amap(if there), uobj
2061 /* locked: maps(read), amap(if there), uobj */
2120 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2136 /* Locked: maps(read), amap(if there), uobj */
2140 error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, NULL);
2144 uvmfault_unlockall(ufi, amap, NULL);
2190 if (locked && amap)
2191 amap_lock(amap, flt->upper_lock_type);
2204 /* locked(locked): maps(read), amap(if !null), uobj, pg */
2209 * that amap slot is still free. if there is a problem,
2214 (locked && amap && amap_lookup(&ufi->entry->aref,
2217 uvmfault_unlockall(ufi, amap, NULL);
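Lines 2190-2217 show the revalidation after an unlocked object page-in: retake the amap lock, then confirm the faulting slot is still empty; if the relock failed, or an anon appeared while we slept, back out and refault. Sketch, with the page release and the final ERESTART assumed:

        if (locked && amap)
                amap_lock(amap, flt->upper_lock_type);   /* line 2191 */

        if (locked == false ||
            (amap && amap_lookup(&ufi->entry->aref,
             ufi->orig_rvaddr - ufi->entry->start) != NULL)) {
                if (locked)
                        uvmfault_unlockall(ufi, amap, NULL);
                /* ... release pg, then return ERESTART to refault ... */
        }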
2253 /* locked: maps(read), amap(if !null), uobj */
2318 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2334 error = uvm_fault_lower_upgrade(ufi, flt, amap, uobj, uobjpage);
2343 uvmfault_unlockall(ufi, amap, uobj);
2381 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2387 KASSERT(amap != NULL);
2390 error = uvm_fault_upper_upgrade(ufi, flt, amap, uobj);
2394 KASSERT(rw_write_held(amap->am_lock));
2400 * allocate a blank anon here and plug it into our amap.
2422 * promote to shared amap? make sure all sharing
2426 if ((amap_flags(amap) & AMAP_SHARED) != 0) {
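Lines 2400-2426 are the lower-layer promotion: plug a fresh anon into the amap and, when the amap is shared (the truncated comment at line 2422), make sure every sharing process notices. The conventional way to force that is to strip all existing mappings of the old object page so the other sharers refault and find the new anon; the pmap_page_protect() call below is that assumption, not verbatim source:

        if ((amap_flags(amap) & AMAP_SHARED) != 0) {
                /* force sharers to refault and see the promoted copy */
                pmap_page_protect(uobjpage, VM_PROT_NONE);
        }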
2463 struct vm_amap * const amap = ufi->entry->aref.ar_amap;
2471 * maps(read), amap(if !null), uobj(if !null),
2479 KASSERT(amap == NULL ||
2480 rw_lock_op(amap->am_lock) == flt->upper_lock_type);
2483 KASSERT(anon == NULL || anon->an_lock == amap->am_lock);
2535 uvmfault_unlockall(ufi, amap, uobj);
2550 uvmfault_unlockall(ufi, amap, uobj);