    Searched refs:amap (Results 1 - 10 of 10) sorted by relevancy

  /src/sys/uvm/
uvm_amap.c 29 * uvm_amap.c: amap operations
54 * avoid an endless loop, the amap cache's allocator cannot allocate
55 * memory from an amap (it currently goes through the kernel uobj, so
75 * what is ppref? ppref is an _optional_ amap feature which is used
81 * map (either by unmapping part of the amap, or gaining a reference
82 * to only a part of an amap). if the allocation of the array fails
110 * => ppref's amap must be locked
128 * => ppref's amap must be locked
145 * amap_alloc1: allocate an amap, but do not initialise the overlay.
154 struct vm_amap *amap; local
236 struct vm_amap *amap; local
262 struct vm_amap *amap = obj; local
289 struct vm_amap *amap = obj; local
354 struct vm_amap *amap = entry->aref.ar_amap; local
706 struct vm_amap *amap = entry->aref.ar_amap; local
826 struct vm_amap *amap, *srcamap; local
1028 struct vm_amap *amap = entry->aref.ar_amap; local
1145 struct vm_amap *amap = origref->ar_amap; local
1447 struct vm_amap *amap = aref->ar_amap; local
1477 struct vm_amap *amap = aref->ar_amap; local
1515 struct vm_amap *amap = aref->ar_amap; local
1557 struct vm_amap *amap = aref->ar_amap; local
    [all...]
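
Most of the hits above reach the amap through entry->aref.ar_amap: an aref pairs an amap pointer with a starting slot, and each page of the entry corresponds to one slot holding a pointer to its anon. A minimal, self-contained sketch of that slot arithmetic follows; struct vm_amap, struct vm_aref, PAGE_SHIFT and aref_lookup_anon() here are reduced stand-ins and a hypothetical helper, not the real uvm definitions.

    #include <stddef.h>

    #define PAGE_SHIFT 12                        /* assumption: 4 KiB pages */

    struct vm_anon;                              /* opaque for this sketch */

    struct vm_amap {                             /* reduced stand-in */
            int              am_nslot;           /* number of slots */
            struct vm_anon **am_anon;            /* per-slot anon pointers */
    };

    struct vm_aref {                             /* reduced stand-in */
            int              ar_pageoff;         /* first slot used by the entry */
            struct vm_amap  *ar_amap;            /* the amap itself, may be NULL */
    };

    /* Map a virtual address inside an entry to the anon backing it. */
    static struct vm_anon *
    aref_lookup_anon(const struct vm_aref *aref, unsigned long entry_start,
        unsigned long va)
    {
            size_t slot;

            if (aref->ar_amap == NULL)
                    return NULL;                 /* no anonymous layer */
            slot = aref->ar_pageoff + ((va - entry_start) >> PAGE_SHIFT);
            if (slot >= (size_t)aref->ar_amap->am_nslot)
                    return NULL;
            return aref->ar_amap->am_anon[slot];
    }
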
uvm_fault.c 61 * amap | V | | ---------> new | | | | ^ |
71 * no amap or uobj is present. this is an error.
88 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
99 * - if we have an amap lock it and extract the anons
138 * implement (especially with structures like amap that can be referenced
259 * => Map, amap and thus anon should be locked by caller.
271 uvmfault_anonget(struct uvm_faultinfo *ufi, struct vm_amap *amap,
280 KASSERT(anon->an_lock == amap->am_lock);
339 uvmfault_unlockall(ufi, amap, NULL);
366 uvmfault_unlockall(ufi, amap, NULL)
552 struct vm_amap *amap = ufi->entry->aref.ar_amap; local
981 struct vm_amap *amap; local
1292 struct vm_amap *amap = ufi->entry->aref.ar_amap; local
1425 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
1537 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
1601 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
1679 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
2120 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
2318 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
2381 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
2463 struct vm_amap * const amap = ufi->entry->aref.ar_amap; local
    [all...]
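
The comment at line 88 gives the fault path's lock ordering (MAPS => AMAP => UOBJ => ANON => PAGE QUEUES), and line 99 notes that the amap is locked before its anons are extracted. The sketch below illustrates only that acquire order; every type and stub_* helper in it is a stand-in, not the real UVM API.

    /* Simplified stand-ins; only the lock ordering is meaningful here. */
    struct vm_map;
    struct vm_amap;
    struct uvm_object;
    struct vm_anon;

    static void stub_map_lock(struct vm_map *m)       { (void)m; }
    static void stub_amap_lock(struct vm_amap *a)     { (void)a; }
    static void stub_uobj_lock(struct uvm_object *o)  { (void)o; }
    static void stub_anon_lock(struct vm_anon *an)    { (void)an; }

    static void
    fault_lock_in_order(struct vm_map *map, struct vm_amap *amap,
        struct uvm_object *uobj, struct vm_anon *anon)
    {
            stub_map_lock(map);               /* 1. maps first */
            if (amap != NULL)
                    stub_amap_lock(amap);     /* 2. amap, then extract anons */
            if (uobj != NULL)
                    stub_uobj_lock(uobj);     /* 3. backing uvm_object */
            if (anon != NULL)
                    stub_anon_lock(anon);     /* 4. anon, ahead of page queues */
            /* ... handle the fault, then unlock in the reverse order ... */
    }
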
uvm_fault_i.h 72 uvmfault_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
78 if (amap)
79 amap_unlock(amap);
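
The hits show only the amap half of uvmfault_unlockall(). A hedged reconstruction of its likely shape, releasing in the reverse of the MAPS => AMAP => UOBJ order, is below; amap_unlock() is the call from the hit at line 79, while uobj_unlock() and unlock_maps() are placeholder prototypes, not the real routines.

    struct uvm_faultinfo;
    struct vm_amap;
    struct uvm_object;

    void amap_unlock(struct vm_amap *);           /* as in the hit at line 79 */
    void uobj_unlock(struct uvm_object *);        /* placeholder */
    void unlock_maps(struct uvm_faultinfo *);     /* placeholder */

    static void
    sketch_unlockall(struct uvm_faultinfo *ufi, struct vm_amap *amap,
        struct uvm_object *uobj)
    {
            if (uobj != NULL)
                    uobj_unlock(uobj);            /* object layer first */
            if (amap != NULL)
                    amap_unlock(amap);            /* then the amap */
            unlock_maps(ufi);                     /* maps last */
    }
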
uvm_loan.c 170 /* locked: map, amap, uobj */
181 /* locked: if (rv > 0) => map, amap, uobj [o.w. unlocked] */
334 * => called with map, amap, uobj locked
589 * => called with map, amap, uobj locked
600 struct vm_amap *amap = ufi->entry->aref.ar_amap; local
614 /* locked: maps(read), amap(if there) */
616 /* locked: maps(read), amap(if there), uobj */
634 uvmfault_unlockall(ufi, amap, uobj);
643 uvmfault_unlockall(ufi, amap, NULL);
665 if (locked && amap)
825 struct vm_amap *amap = ufi->entry->aref.ar_amap; local
    [all...]
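
The loan code's lock-state comments and the test at line 665 ("if (locked && amap)") suggest the usual drop/relock shape: release everything before an operation that can sleep, then record whether the map lookup could be re-established and, only if so, re-take the amap lock. A sketch of that shape follows, assuming placeholder sketch_* helpers; only uvmfault_unlockall() and the amap_lock(..., RW_READER) call shape come from this listing.

    #define RW_READER 1                       /* stand-in; real value is in <sys/rwlock.h> */

    struct uvm_faultinfo;
    struct vm_amap;
    struct uvm_object;

    void uvmfault_unlockall(struct uvm_faultinfo *, struct vm_amap *,
        struct uvm_object *);                 /* as called in the hits above */
    void amap_lock(struct vm_amap *, int);    /* simplified prototype */
    int  sketch_relock_maps(struct uvm_faultinfo *);   /* placeholder */
    void sketch_wait_for_page(void);                   /* placeholder */

    static int
    sketch_drop_and_relock(struct uvm_faultinfo *ufi, struct vm_amap *amap,
        struct uvm_object *uobj)
    {
            int locked;

            uvmfault_unlockall(ufi, amap, uobj);   /* drop: map, amap, uobj */
            sketch_wait_for_page();                /* the part that may sleep */
            locked = sketch_relock_maps(ufi);      /* is the lookup still valid? */
            if (locked && amap != NULL)
                    amap_lock(amap, RW_READER);    /* re-take the amap lock */
            return locked;
    }
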
uvm_anon.c 100 * => anon must be removed from the amap (if anon was in an amap).
101 * => amap must be locked, if anon was owned by amap.
275 uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
281 KASSERT(anon->an_lock == amap->am_lock);
287 switch (uvmfault_anonget(NULL, amap, anon)) {
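
The KASSERT at line 281 captures the ownership rule from the comments above it: an anon held in an amap shares the amap's lock, so locking the amap also covers its anons. A tiny sketch of that invariant, with reduced stand-in structures and an assert() in place of KASSERT:

    #include <assert.h>

    struct lock_sketch { int unused; };                 /* stands in for krwlock_t */
    struct vm_amap { struct lock_sketch *am_lock; };    /* reduced stand-in */
    struct vm_anon { struct lock_sketch *an_lock; };    /* reduced stand-in */

    /* Caller holds the amap lock; an anon owned by the amap shares it. */
    static void
    check_anon_owned_by_amap(const struct vm_amap *amap,
        const struct vm_anon *anon)
    {
            assert(anon->an_lock == amap->am_lock);
            /* ... safe to page the anon in, as uvm_anon_pagein() does ... */
    }
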
uvm_mmap.c 130 struct vm_amap *amap; local
200 amap = entry->aref.ar_amap; /* upper layer */
203 if (amap != NULL)
204 amap_lock(amap, RW_READER);
210 if (amap != NULL) {
243 if (amap != NULL)
244 amap_unlock(amap);
929 /* XXX: defer amap create */
932 /* shared: create amap now */
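
The uvm_mmap.c hits (lines 200-244) show the read-only inspection pattern: take the entry's amap as the upper layer, lock it shared with RW_READER, examine the anons, then unlock. A self-contained sketch of that pattern is below; the RW_READER value and the prototypes are simplified stand-ins, and the per-slot inspection is left as a comment.

    #define RW_READER 1                              /* stand-in value */

    struct vm_amap;
    struct vm_aref { struct vm_amap *ar_amap; };     /* reduced stand-in */

    void amap_lock(struct vm_amap *, int);           /* call shape from line 204 */
    void amap_unlock(struct vm_amap *);              /* line 244 */

    static void
    sketch_inspect_upper_layer(struct vm_aref *aref)
    {
            struct vm_amap *amap = aref->ar_amap;    /* upper layer, may be NULL */

            if (amap != NULL)
                    amap_lock(amap, RW_READER);      /* shared lock: read only */

            /* ... walk the range and look at each slot's anon here ... */

            if (amap != NULL)
                    amap_unlock(amap);
    }
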
uvm_map.c 1065 * => XXXCDC: need way to map in external amap?
1361 * can't extend a shared amap. note: no need to lock amap to
1417 * can't extend a shared amap. note: no need to lock amap to
1422 * merged with the previous entry which has an amap,
1423 * and the next entry also has an amap, we give up.
1426 * amap, new, amap -> give up second merge (single fwd extend)
1427 * amap, new, none -> double forward extend (extend again here
1585 struct vm_amap *amap = amap_alloc(size, to_add, local
3428 struct vm_amap * const amap = entry->aref.ar_amap; local
3968 struct vm_amap *amap; local
    [all...]
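
The truncated comments at lines 1426-1427 enumerate the forward-merge cases: "amap, new, amap" gives up the second merge (only a single forward extend), while "amap, new, none" allows a double forward extend. One possible reading of that decision, reduced to plain booleans, is sketched below; the enum and function are illustrative only.

    #include <stdbool.h>

    enum fwd_merge_sketch {
            GIVE_UP_SECOND_MERGE,        /* amap, new, amap */
            DOUBLE_FORWARD_EXTEND,       /* amap, new, none */
            PLAIN_FORWARD_MERGE          /* nothing blocks the merge */
    };

    static enum fwd_merge_sketch
    sketch_forward_merge(bool prev_has_amap, bool next_has_amap)
    {
            if (prev_has_amap && next_has_amap)
                    return GIVE_UP_SECOND_MERGE;     /* only a single fwd extend */
            if (prev_has_amap)
                    return DOUBLE_FORWARD_EXTEND;    /* extend the amap again */
            return PLAIN_FORWARD_MERGE;
    }
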
  /src/usr.bin/pmap/
pmap.c 391 struct kbit akbit, *amap; local
393 amap = &akbit;
394 P(amap) = vme->aref.ar_amap;
395 S(amap) = sizeof(struct vm_amap);
396 KDEREF(kd, amap);
397 dump_amap(kd, amap);
506 printf("%*s - %p: %#"PRIxVADDR"->%#"PRIxVADDR": obj=%p/%#" PRIx64 ", amap=%p/%d\n",
604 dump_amap(kvm_t *kd, struct kbit *amap)
613 if (S(amap) == (size_t)-1) {
615 S(amap) = sizeof(struct vm_amap)
    [all...]
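
pmap(1) reads the amap out of the live kernel with its kbit helpers: P() holds the kernel address, S() the size, and KDEREF() copies the object in. The sketch below shows that pattern with a guessed, reduced kbit layout and kvm_read(3); the real struct kbit and macros in pmap are richer than this.

    #include <sys/types.h>
    #include <kvm.h>
    #include <err.h>
    #include <stdlib.h>

    struct kbit_sketch {
            u_long  k_addr;              /* kernel virtual address of the object */
            size_t  k_size;              /* how many bytes to copy */
            void   *k_data;              /* local copy filled in by the deref */
    };

    #define P(kb)   ((kb)->k_addr)       /* kernel pointer */
    #define S(kb)   ((kb)->k_size)       /* size */

    /* Copy the kernel object behind a kbit into local memory. */
    static void
    kderef_sketch(kvm_t *kd, struct kbit_sketch *kb)
    {
            kb->k_data = malloc(S(kb));
            if (kb->k_data == NULL)
                    err(1, "malloc");
            if (kvm_read(kd, P(kb), kb->k_data, S(kb)) != (ssize_t)S(kb))
                    errx(1, "kvm_read: %s", kvm_geterr(kd));
    }
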
pmap.h 137 struct vm_amap amap; member in union:kbit::__anon8308
  /src/lib/libkvm/
kvm_proc.c 185 struct vm_amap amap; local
223 if (KREAD(kd, addr, &amap))
229 if (slot > amap.am_nslot)
232 addr = (u_long)amap.am_anon + (offset / kd->nbpg) * sizeof(anonp);
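
The kvm_proc.c hits copy the vm_amap in with KREAD, bounds-check the slot against am_nslot, and then compute the kernel address of the slot's entry in am_anon. A self-contained sketch of that chase using kvm_read(3) directly; struct vm_amap_sketch is a reduced stand-in for the real structure.

    #include <sys/types.h>
    #include <kvm.h>
    #include <stddef.h>

    struct vm_amap_sketch {              /* reduced stand-in for struct vm_amap */
            int    am_nslot;             /* number of slots in the amap */
            void **am_anon;              /* kernel address of the anon array */
    };

    /* Follow amap.am_anon[slot] via libkvm, as the kvm_proc.c hits do. */
    static void *
    read_anon_ptr(kvm_t *kd, u_long amap_addr, size_t slot)
    {
            struct vm_amap_sketch amap;
            void *anonp;
            u_long addr;

            if (kvm_read(kd, amap_addr, &amap, sizeof(amap)) !=
                (ssize_t)sizeof(amap))
                    return NULL;                 /* could not copy the amap */
            if (slot >= (size_t)amap.am_nslot)
                    return NULL;                 /* slot out of range */
            addr = (u_long)amap.am_anon + slot * sizeof(anonp);
            if (kvm_read(kd, addr, &anonp, sizeof(anonp)) !=
                (ssize_t)sizeof(anonp))
                    return NULL;
            return anonp;
    }
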
