    Searched defs:anon (Results 1 - 14 of 14) sorted by relevancy

  /src/sys/uvm/
uvm_anon.c 29 * uvm_anon.c: uvm anon ops
63 struct vm_anon *anon = object; local in function:uvm_anon_ctor
65 anon->an_ref = 0;
66 anon->an_lock = NULL;
67 anon->an_page = NULL;
69 anon->an_swslot = 0;
75 * uvm_analloc: allocate a new anon.
77 * => anon will have no lock associated.
82 struct vm_anon *anon; local in function:uvm_analloc
84 anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
    [all...]
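
The uvm_anon.c hits above show the constructor zeroing an anon's reference count, lock, page and swap-slot fields, and uvm_analloc() handing back a fresh, unlocked anon from a pool cache. A minimal standalone sketch of that initialize-then-allocate pattern, using mock types and malloc() in place of the kernel's pool_cache API (everything here is a stand-in, not the kernel code):

#include <stdio.h>
#include <stdlib.h>

/* Mock forward declarations standing in for the kernel types. */
struct vm_page;
struct krwlock;

struct vm_anon {
    int             an_ref;     /* reference count */
    struct krwlock *an_lock;    /* lock covering this anon, NULL until set */
    struct vm_page *an_page;    /* resident page, if any */
    int             an_swslot;  /* swap slot, 0 == none */
};

/* Constructor: a fresh anon has no references, no lock, no page, no swap slot. */
static void
anon_ctor(struct vm_anon *anon)
{
    anon->an_ref = 0;
    anon->an_lock = NULL;
    anon->an_page = NULL;
    anon->an_swslot = 0;
}

/* Allocate a new anon; like uvm_analloc(), it comes back with no lock associated. */
static struct vm_anon *
anon_alloc(void)
{
    /* malloc() stands in for pool_cache_get(&uvm_anon_cache, PR_NOWAIT). */
    struct vm_anon *anon = malloc(sizeof(*anon));

    if (anon != NULL)
        anon_ctor(anon);
    return anon;
}

int
main(void)
{
    struct vm_anon *anon = anon_alloc();

    if (anon != NULL) {
        printf("ref=%d swslot=%d\n", anon->an_ref, anon->an_swslot);
        free(anon);
    }
    return 0;
}
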
uvm_coredump.c 162 struct vm_anon *anon; local in function:uvm_coredump_walkmap
163 anon = amap_lookup(&entry->aref,
170 if (anon != NULL &&
182 if (anon == NULL &&
uvm_loan.c 56 * A->K anon page to wired kernel page (e.g. mbuf data area)
57 * O->A uvm_object to anon loan (e.g. vnode page to an anon)
63 * of page is considered "owned" by the uvm_object (not the anon).
72 * object/anon which the page is owned by. this is a good side-effect,
78 * an anon may "adopt" an orphaned page.
143 struct vm_anon *anon; local in function:uvm_loanentry
161 * find the page we want. check the anon layer first.
165 anon = amap_lookup(aref, curaddr - ufi->entry->start);
167 anon = NULL;
929 struct vm_anon *anon, *to_free = NULL; local in function:uvm_unloananon
    [all...]
uvm_mmap.c 131 struct vm_anon *anon; local in function:sys_mincore
212 anon = amap_lookup(&entry->aref,
214 /* Don't need to lock anon here. */
215 if (anon != NULL && anon->an_page != NULL) {
218 * Anon has the page for this entry
921 * handle anon vs. non-anon mappings. for non-anon mappings attach
uvm_pdaemon.c 386 struct vm_anon *anon = pg->uanon; local in function:uvmpd_page_owner_lock
392 if (uobj == (void *)0xdeadbeef || anon == (void *)0xdeadbeef) {
399 } else if (anon != NULL) {
400 slock = anon->an_lock;
401 KASSERTMSG(slock != NULL, "pg %p anon %p, NULL lock", pg, anon);
465 /* anon now owns it */
612 struct vm_anon *anon = pg->uanon; local in function:uvmpd_dropswap
614 if ((pg->flags & PG_ANON) && anon->an_swslot) {
615 uvm_swap_free(anon->an_swslot, 1);
648 struct vm_anon *anon; local in function:uvmpd_scan_queue
    [all...]
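
The uvm_pdaemon.c hits above include uvmpd_dropswap(), which releases the swap slot of a page owned by an anon. A standalone sketch of that test, with hypothetical stand-in types and a fake swap_free() instead of uvm_swap_free(); clearing an_swslot after the free is an assumption, since the snippet ends at the free call:

#include <stdbool.h>
#include <stdio.h>

#define PG_ANON 0x01            /* hypothetical flag value; only the test matters */

/* Hypothetical stand-ins for the structures used by uvmpd_dropswap(). */
struct vm_anon {
    int an_swslot;              /* swap slot backing this anon, 0 == none */
};

struct vm_page {
    int             flags;
    struct vm_anon *uanon;      /* owning anon when the page is anon-owned */
};

static int freed_slots;

/* Fake replacement for uvm_swap_free(slot, npages). */
static void
swap_free(int slot, int npages)
{
    (void)slot;
    freed_slots += npages;
}

/*
 * The test from the snippet: a page owned by an anon (PG_ANON) whose anon
 * still holds a swap slot gets that slot freed.
 */
static bool
dropswap(struct vm_page *pg)
{
    struct vm_anon *anon = pg->uanon;

    if ((pg->flags & PG_ANON) && anon->an_swslot) {
        swap_free(anon->an_swslot, 1);
        anon->an_swslot = 0;    /* assumed cleanup after the free */
        return true;
    }
    return false;
}

int
main(void)
{
    struct vm_anon anon = { .an_swslot = 7 };
    struct vm_page pg = { .flags = PG_ANON, .uanon = &anon };

    printf("dropped=%d, slots freed=%d\n", dropswap(&pg), freed_slots);
    return 0;
}
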
uvm_pdpolicy_clock.c 253 struct vm_anon *anon; local in function:uvmpdpol_selectvictim
289 anon = pg->uanon;
304 if ((anon || UVM_OBJ_IS_AOBJ(uobj)) && ss->ss_anonreact) {
uvm_amap.c 402 * anon. But now that we know there is only one map entry
509 * Slide the anon pointers up and clear out
695 * shared anon (to change the protection). in order to protect data
708 struct vm_anon *anon; local in function:amap_share_protect
720 anon = amap->am_anon[lcv];
721 if (anon == NULL) {
724 if (anon->an_page) {
725 pmap_page_protect(anon->an_page, prot);
739 anon = amap->am_anon[slot];
740 if (anon->an_page)
773 struct vm_anon *anon; local in function:amap_wipeout
1029 struct vm_anon *anon, *nanon; local in function:amap_cow_now
1295 struct vm_anon *anon; local in function:amap_wiperange
1392 struct vm_anon *anon; local in function:amap_swap_off
    [all...]
uvm_fault.c 73 * case [1]: upper layer fault [anon active]
74 * 1A: [read] or [write with anon->an_ref == 1]
75 * I/O takes place in upper level anon and uobj is not touched.
76 * 1B: [write with anon->an_ref > 1]
77 * new anon is alloc'd and data is copied off ["COW"]
83 * data is "promoted" from uobj to a new anon.
88 * MAPS => AMAP => UOBJ => ANON => PAGE QUEUES (PQ)
101 * - at the same time check pmap for unmapped areas and anon for pages
106 * - ensure source anon is resident in RAM
107 - if case 1B alloc new anon and copy from source
554 struct vm_anon *anon; local in function:uvmfault_promote
1331 struct vm_anon *anon = anons[lcv]; local in function:uvm_fault_upper_lookup
1426 struct vm_anon * const anon = anons[flt->centeridx]; local in function:uvm_fault_upper
2382 struct vm_anon *anon; local in function:uvm_fault_lower_promote
    [all...]
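
The uvm_fault.c comment above splits upper-layer (anon) faults into case 1A (a read, or a write to an anon with an_ref == 1, serviced in the existing anon) and case 1B (a write to a shared anon, which allocates a new anon and copies the data: copy-on-write). A small sketch of just that classification, using a hypothetical needs_cow_copy() helper and a minimal anon type:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical minimal anon: only the reference count matters for this test. */
struct vm_anon {
    int an_ref;
};

/*
 * Case 1A: a read fault, or a write fault on an anon with a single reference,
 * is serviced in the existing anon.  Case 1B: a write fault on a shared anon
 * (an_ref > 1) needs a new anon and a copy of the data (copy-on-write).
 */
static bool
needs_cow_copy(const struct vm_anon *anon, bool write_fault)
{
    return write_fault && anon->an_ref > 1;
}

int
main(void)
{
    struct vm_anon private_anon = { .an_ref = 1 };
    struct vm_anon shared_anon  = { .an_ref = 3 };

    printf("write, an_ref == 1: case %s\n",
        needs_cow_copy(&private_anon, true) ? "1B (COW)" : "1A");
    printf("write, an_ref > 1:  case %s\n",
        needs_cow_copy(&shared_anon, true) ? "1B (COW)" : "1A");
    printf("read,  an_ref > 1:  case %s\n",
        needs_cow_copy(&shared_anon, false) ? "1B (COW)" : "1A");
    return 0;
}
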
uvm_map.c 3969 struct vm_anon *anon; local in function:uvm_map_clean
4035 anon = amap_lookup(&current->aref, offset);
4036 if (anon == NULL)
4039 KASSERT(anon->an_lock == amap->am_lock);
4040 pg = anon->an_page;
4068 KASSERT(pg->uanon == anon);
4089 refs = --anon->an_ref;
4091 uvm_anfree(anon);
4942 #define UVM_VOADDR_SET_ANON(voa, anon) \
4943 UVM_VOADDR_SET_OBJECT(voa, anon, UVM_VOADDR_TYPE_ANON)
4958 struct vm_anon *anon = NULL; local in function:uvm_voaddr_acquire
5151 struct vm_anon * const anon = UVM_VOADDR_GET_ANON(voaddr); local in function:uvm_voaddr_release
    [all...]
  /src/usr.bin/pmap/
pmap.h 138 struct vm_anon anon; member in union:kbit::__anon7865a5be020a
pmap.c 726 struct kbit kbit, *anon = &kbit; local in function:dump_vm_anon
728 A(anon) = (u_long)alist[i];
729 S(anon) = sizeof(struct vm_anon);
730 if (A(anon) == 0) {
735 KDEREF(kd, anon);
738 D(anon, anon)->an_ref, D(anon, anon)->an_page,
739 D(anon, anon)->an_swslot);
    [all...]
  /src/sys/external/bsd/drm2/dist/drm/i915/gem/
i915_gem_mman.c 1182 struct file *anon; local in function:i915_gem_mmap
1214 anon = mmap_singleton(to_i915(dev));
1215 if (IS_ERR(anon)) {
1217 return PTR_ERR(anon);
1232 vma->vm_file = anon;
  /src/lib/libkvm/
kvm_proc.c 186 struct vm_anon *anonp, anon; local in function:_kvm_ureadm
237 if (KREAD(kd, addr, &anon))
240 addr = (u_long)anon.an_page;
251 (off_t)(anon.an_swslot * kd->nbpg)) != kd->nbpg)
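
The kvm_proc.c hit shows _kvm_ureadm() reading a struct vm_anon out of the kernel and then taking one of two paths: use the resident page when an_page is set, otherwise fetch the data from swap at an_swslot * nbpg. A standalone sketch of that decision with fake reads (types and helper are stand-ins, not the libkvm API):

#include <stdio.h>

/* Hypothetical stand-in for the fields _kvm_ureadm() reads out of the kernel. */
struct vm_anon {
    unsigned long an_page;      /* kernel address of the resident page, 0 if none */
    int           an_swslot;    /* swap slot to read from when not resident */
};

/*
 * If the anon has a resident page, the data comes from that page; otherwise it
 * is fetched from the swap device at an_swslot * nbpg.  Both reads are faked.
 */
static void
read_anon_data(const struct vm_anon *anon, long nbpg)
{
    if (anon->an_page != 0)
        printf("read resident page at kernel address %#lx\n", anon->an_page);
    else
        printf("read %ld bytes from swap at offset %lld\n",
            nbpg, (long long)anon->an_swslot * nbpg);
}

int
main(void)
{
    struct vm_anon resident = { .an_page = 0x12345000UL, .an_swslot = 0 };
    struct vm_anon swapped  = { .an_page = 0, .an_swslot = 42 };

    read_anon_data(&resident, 4096);
    read_anon_data(&swapped, 4096);
    return 0;
}
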
  /src/usr.sbin/mountd/
mountd.c 1024 struct uucred anon; local in function:get_exportlist_one
1043 anon = def_anon;
1083 &has_host, &exflags, &anon))
1165 if (do_nfssvc(line, lineno, ep, grp, exflags, &anon,
