
Lines Matching defs:anon

29  * uvm_anon.c: uvm anon ops
63 struct vm_anon *anon = object;
65 anon->an_ref = 0;
66 anon->an_lock = NULL;
67 anon->an_page = NULL;
69 anon->an_swslot = 0;
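These matched lines fall inside the pool-cache constructor for anons (presumably uvm_anon_ctor() in this file). Pieced together they read roughly as follows; the signature, the return value, and the conditional around the swap slot are assumptions rather than matched lines:

static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
        struct vm_anon *anon = object;

        /* a fresh anon starts with no references, no lock and no page */
        anon->an_ref = 0;
        anon->an_lock = NULL;
        anon->an_page = NULL;
#if defined(VMSWAP)
        anon->an_swslot = 0;    /* and no swap slot when swap is configured */
#endif
        return 0;
}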
75 * uvm_analloc: allocate a new anon.
77 * => anon will have no lock associated.
82 struct vm_anon *anon;
84 anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
85 if (anon) {
86 KASSERT(anon->an_ref == 0);
87 KASSERT(anon->an_lock == NULL);
88 KASSERT(anon->an_page == NULL);
90 KASSERT(anon->an_swslot == 0);
92 anon->an_ref = 1;
94 return anon;
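A minimal caller-side sketch of the contract these lines imply (a hypothetical caller, not code from this file): the PR_NOWAIT allocation may fail, the anon comes back with an_ref == 1 and no lock, and the caller is expected to share the owning amap's lock with it, as the an_lock == am_lock assertion in uvm_anon_pagein() below suggests.

        struct vm_anon *anon;

        anon = uvm_analloc();
        if (anon == NULL) {
                /* pool_cache_get() with PR_NOWAIT can fail */
                return ENOMEM;
        }
        /* assumed convention: the anon shares its amap's lock */
        anon->an_lock = amap->am_lock;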
98 * uvm_anfree: free a single anon structure
100 * => anon must be removed from the amap (if anon was in an amap).
101 * => amap must be locked, if anon was owned by amap.
105 uvm_anfree(struct vm_anon *anon)
107 struct vm_page *pg = anon->an_page, *pg2 __diagused;
110 UVMHIST_CALLARGS(maphist,"(anon=%#jx)", (uintptr_t)anon, 0,0,0);
112 KASSERT(anon->an_lock == NULL || rw_write_held(anon->an_lock));
113 KASSERT(anon->an_ref == 0);
120 KASSERT(anon->an_lock != NULL);
123 * If there is a resident page and it is loaned, then anon
129 pg2 = uvm_anon_lockloanpg(anon);
149 * If page has no UVM object, then anon is the owner,
163 rw_obj_hold(anon->an_lock);
167 UVMHIST_LOG(maphist, "anon %#jx, page %#jx: "
168 "freed now!", (uintptr_t)anon, (uintptr_t)pg,
173 if (anon->an_swslot > 0) {
180 anon->an_lock = NULL;
186 uvm_anon_dropswap(anon);
187 uvmpdpol_anfree(anon);
189 pool_cache_put(&uvm_anon_cache, anon);
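Read together with the contract above (the anon already unlinked from its amap, the amap lock write-held, and the reference count at zero), a hypothetical drop-last-reference path would look roughly like this:

        rw_enter(amap->am_lock, RW_WRITER);
        /* ... take the anon out of its amap slot ... */
        if (--anon->an_ref == 0) {
                /*
                 * Last reference: page and swap are released and the
                 * anon goes back into uvm_anon_cache.
                 */
                uvm_anfree(anon);
        }
        rw_exit(amap->am_lock);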
193 * uvm_anon_lockloanpg: given a locked anon, lock its resident page owner.
195 * => anon is locked by caller
196 * => on return: anon is locked
202 * => note that the only time an anon has an ownerless resident page
206 * on an anon's resident page and that page has a non-zero loan
210 uvm_anon_lockloanpg(struct vm_anon *anon)
215 KASSERT(rw_lock_held(anon->an_lock));
226 while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {
231 * toggle our anon lock and try again
242 op = rw_lock_op(anon->an_lock);
243 rw_exit(anon->an_lock);
245 rw_enter(anon->an_lock, op);
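The loop body elided by the listing is a lock-ordering dance: the loaned page's owner lock ranks ahead of the anon lock, so when the owner's lock cannot be taken without sleeping, the anon lock is dropped and retaken at the same read/write level and the page is re-examined. A sketch under those assumptions (the uobject/vmobjlock handling is inferred, not part of the matched lines):

        struct vm_page *pg;
        krw_t op;

        while ((pg = anon->an_page) != NULL && pg->loan_count != 0) {
                if (pg->uobject == NULL) {
                        /* ownerless loaned page: the anon is the owner */
                        break;
                }
                if (rw_tryenter(pg->uobject->vmobjlock, RW_WRITER)) {
                        /* got the page owner's lock without sleeping */
                        break;
                }
                /*
                 * Toggle our anon lock and try again; dropping it lets
                 * the page owner make progress before we re-check.
                 */
                op = rw_lock_op(anon->an_lock);
                rw_exit(anon->an_lock);
                rw_enter(anon->an_lock, op);
        }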
268 * uvm_anon_pagein: fetch an anon's page.
270 * => anon must be locked, and is unlocked upon return.
275 uvm_anon_pagein(struct vm_amap *amap, struct vm_anon *anon)
280 KASSERT(rw_write_held(anon->an_lock));
281 KASSERT(anon->an_lock == amap->am_lock);
284 * Get the page of the anon.
287 switch (uvmfault_anonget(NULL, amap, anon)) {
290 KASSERT(rw_write_held(anon->an_lock));
296 * anon was freed.
309 pg = anon->an_page;
311 if (anon->an_swslot > 0) {
312 uvm_swap_free(anon->an_swslot, 1);
314 anon->an_swslot = 0;
324 rw_exit(anon->an_lock);
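The assertions above pin down the entry conditions: the amap's lock is write-held and is the same lock object as anon->an_lock, and the routine releases it before returning. A hypothetical caller, with the return convention treated opaquely since it is not visible in the matched lines:

        rw_enter(amap->am_lock, RW_WRITER);
        KASSERT(anon->an_lock == amap->am_lock);
        (void)uvm_anon_pagein(amap, anon);
        /* the anon/amap lock has already been dropped; do not unlock here */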
332 * uvm_anon_dropswap: release any swap resources from this anon.
334 * => anon must be locked or have a reference count of 0.
337 uvm_anon_dropswap(struct vm_anon *anon)
341 if (anon->an_swslot == 0)
344 UVMHIST_LOG(maphist,"freeing swap for anon %#jx, paged to swslot %#jx",
345 (uintptr_t)anon, anon->an_swslot, 0, 0);
346 uvm_swap_free(anon->an_swslot, 1);
347 anon->an_swslot = 0;
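Pieced together from the matched lines, the whole routine is short; this sketch adds only the early return, braces, and a standard UVMHIST prologue as assumed glue:

void
uvm_anon_dropswap(struct vm_anon *anon)
{
        UVMHIST_FUNC(__func__); UVMHIST_CALLED(maphist);        /* assumed prologue */

        if (anon->an_swslot == 0)
                return;

        UVMHIST_LOG(maphist, "freeing swap for anon %#jx, paged to swslot %#jx",
            (uintptr_t)anon, anon->an_swslot, 0, 0);
        uvm_swap_free(anon->an_swslot, 1);
        anon->an_swslot = 0;
}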
353 * uvm_anon_release: release an anon and its page.
355 * => anon should not have any references.
356 * => anon must be locked.
360 uvm_anon_release(struct vm_anon *anon)
362 struct vm_page *pg = anon->an_page;
365 KASSERT(rw_write_held(anon->an_lock));
370 KASSERT(pg->uanon == anon);
372 KASSERT(anon->an_ref == 0);
380 KASSERT(anon->an_page == NULL);
381 lock = anon->an_lock;
382 uvm_anfree(anon);
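Finally, a hypothetical call site for the release path implied by the assertions: the anon lock is write-held, the reference count has already reached zero, and the resident page is owned by the anon. Because the lock pointer is saved (line 381) before uvm_anfree() destroys the anon, the sketch assumes the routine also drops that lock on the caller's behalf, so the caller must not unlock afterwards.

        KASSERT(rw_write_held(anon->an_lock));
        KASSERT(anon->an_ref == 0);
        uvm_anon_release(anon);
        /* the anon, and by assumption its lock hold, are gone at this point */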