/*	$NetBSD: uvm_anon.c,v 1.49 2007/12/20 23:50:00 ad Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_anon.c,v 1.49 2007/12/20 23:50:00 ad Exp $");

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>
#include <uvm/uvm_pdpolicy.h>

static struct pool_cache uvm_anon_cache;

static int uvm_anon_ctor(void *, void *, int);
static void uvm_anon_dtor(void *, void *);

/*
 * uvm_anon_init: initialize the anon pool cache.
 */
void
uvm_anon_init(void)
{

	pool_cache_bootstrap(&uvm_anon_cache, sizeof(struct vm_anon), 0, 0,
	    PR_LARGECACHE, "anonpl", NULL, IPL_NONE, uvm_anon_ctor,
	    uvm_anon_dtor, NULL);
}

/*
 * uvm_anon_ctor: pool cache constructor; initialize a pristine anon.
 */
static int
uvm_anon_ctor(void *arg, void *object, int flags)
{
	struct vm_anon *anon = object;

	anon->an_ref = 0;
	simple_lock_init(&anon->an_lock);
	anon->an_page = NULL;
#if defined(VMSWAP)
	anon->an_swslot = 0;
#endif /* defined(VMSWAP) */

	return 0;
}

/*
 * uvm_anon_dtor: pool cache destructor; currently a no-op.
 */
static void
uvm_anon_dtor(void *arg, void *object)
{

	/* nothing yet */
}

/*
 * uvm_analloc: allocate a new anon.
 *
 * => new anon is returned locked, with a reference count of one.
 * => returns NULL if the (PR_NOWAIT) pool allocation fails.
 */
struct vm_anon *
uvm_analloc(void)
{
	struct vm_anon *anon;

	anon = pool_cache_get(&uvm_anon_cache, PR_NOWAIT);
	if (anon) {
		KASSERT(anon->an_ref == 0);
		LOCK_ASSERT(simple_lock_held(&anon->an_lock) == 0);
		KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
		KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */
		anon->an_ref = 1;
		simple_lock(&anon->an_lock);
	}
	return anon;
}
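
/*
 * Illustrative sketch, kept under "#if 0" so it is never compiled: a
 * typical uvm_analloc() caller relies on the anon coming back locked
 * with a reference count of one, performs its setup (attaching a page
 * or entering the anon into an amap -- elided here), and then drops
 * the anon lock.  "example_anon_setup" is a hypothetical name, not a
 * UVM interface.
 */
#if 0
static bool
example_anon_setup(void)
{
	struct vm_anon *anon;

	anon = uvm_analloc();
	if (anon == NULL)
		return false;	/* PR_NOWAIT pool allocation failed */

	/* ... attach a page or enter the anon into an amap while locked ... */

	simple_unlock(&anon->an_lock);
	return true;
}
#endif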

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */

void
uvm_anfree(struct vm_anon *anon)
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	KASSERT(anon->an_ref == 0);

	/*
	 * get page
	 */

	pg = anon->an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.   call out to uvm_anon_lockloanpg() to ensure the real owner
	 * of the page has been identified and locked.
	 */

	if (pg && pg->loan_count) {
		simple_lock(&anon->an_lock);
		pg = uvm_anon_lockloanpg(anon);
		simple_unlock(&anon->an_lock);
	}

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);
		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 */

			KASSERT((pg->flags & PG_RELEASED) == 0);
			simple_lock(&anon->an_lock);
			pmap_page_protect(pg, VM_PROT_NONE);

			/*
			 * if the page is busy, mark it as PG_RELEASED
			 * so that uvm_anon_release will release it later.
			 */

			if (pg->flags & PG_BUSY) {
				pg->flags |= PG_RELEASED;
				simple_unlock(&anon->an_lock);
				return;
			}
			uvm_lock_pageq();
			uvm_pagefree(pg);
			uvm_unlock_pageq();
			simple_unlock(&anon->an_lock);
			UVMHIST_LOG(maphist, "anon 0x%x, page 0x%x: "
				    "freed now!", anon, pg, 0, 0);
		}
	}
#if defined(VMSWAP)
	if (pg == NULL && anon->an_swslot > 0) {
		/* this page is no longer only in swap. */
		mutex_enter(&uvm_swap_data_lock);
		KASSERT(uvmexp.swpgonly > 0);
		uvmexp.swpgonly--;
		mutex_exit(&uvm_swap_data_lock);
	}
#endif /* defined(VMSWAP) */

	/*
	 * free any swap resources.
	 */

	uvm_anon_dropswap(anon);

	/*
	 * give a page replacement hint.
	 */

	uvmpdpol_anfree(anon);

	/*
	 * now that we've stripped the data areas from the anon,
	 * free the anon itself.
	 */

	KASSERT(anon->an_page == NULL);
#if defined(VMSWAP)
	KASSERT(anon->an_swslot == 0);
#endif /* defined(VMSWAP) */

	pool_cache_put(&uvm_anon_cache, anon);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}
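
/*
 * Illustrative sketch, kept under "#if 0" so it is never compiled:
 * dropping the last reference to an anon.  The reference count is
 * manipulated with the anon locked, but uvm_anfree() itself expects
 * the anon to be unlocked with a zero reference count.
 * "example_anon_unref" is a hypothetical name, not a UVM interface.
 */
#if 0
static void
example_anon_unref(struct vm_anon *anon)
{
	bool lastref;

	simple_lock(&anon->an_lock);
	lastref = (--anon->an_ref == 0);
	simple_unlock(&anon->an_lock);
	if (lastref)
		uvm_anfree(anon);
}
#endif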

#if defined(VMSWAP)

/*
 * uvm_anon_dropswap:  release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(struct vm_anon *anon)
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);

	if (anon->an_swslot == 0)
		return;

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
		    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
}
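
/*
 * Illustrative sketch, kept under "#if 0" so it is never compiled: a
 * caller that has just made an anon's page resident can discard the
 * now-stale swap slot while holding the anon lock.
 * "example_anon_flush_swap" is a hypothetical name, not a UVM interface.
 */
#if 0
static void
example_anon_flush_swap(struct vm_anon *anon)
{

	simple_lock(&anon->an_lock);
	uvm_anon_dropswap(anon);	/* frees an_swslot, if one is assigned */
	simple_unlock(&anon->an_lock);
}
#endif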

#endif /* defined(VMSWAP) */

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(struct vm_anon *anon)
{
	struct vm_page *pg;
	bool locked = false;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.   note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.   this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 */

		if (pg->uobject) {
			uvm_lock_pageq();
			if (pg->uobject) {
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = true;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);

				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */

				simple_lock(&anon->an_lock);
				continue;
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;
			pg->loan_count--;
			uvm_unlock_pageq();
		}
		break;
	}
	return(pg);
}
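
/*
 * Illustrative sketch, kept under "#if 0" so it is never compiled:
 * before operating on a loaned anon page, a caller resolves and locks
 * the page's real owner, mirroring the pattern used by uvm_anfree()
 * above.  If the page turns out to be owned by a uobject, that object's
 * lock is held on return and must be dropped by the caller.
 * "example_operate_on_anon_page" is a hypothetical name.
 */
#if 0
static void
example_operate_on_anon_page(struct vm_anon *anon)
{
	struct vm_page *pg;

	simple_lock(&anon->an_lock);
	pg = anon->an_page;
	if (pg != NULL && pg->loan_count != 0)
		pg = uvm_anon_lockloanpg(anon);

	/* ... operate on pg with its owner locked ... */

	if (pg != NULL && pg->uobject != NULL)
		simple_unlock(&pg->uobject->vmobjlock);
	simple_unlock(&anon->an_lock);
}
#endif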

#if defined(VMSWAP)

/*
 * uvm_anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns true if pagein was aborted due to lack of memory.
 */

bool
uvm_anon_pagein(struct vm_anon *anon)
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;

	/* locked: anon */
	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	rv = uvmfault_anonget(NULL, NULL, anon);

	/*
	 * if rv == 0, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case 0:
		break;

	case EIO:
	case ERESTART:

		/*
		 * nothing more to do on errors.
		 * ERESTART can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return false;

	default:
		return true;
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->an_page;
	uobj = pg->uobject;
	if (anon->an_swslot > 0)
		uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	uvm_lock_pageq();
	if (pg->wire_count == 0)
		uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	if (pg->flags & PG_WANTED) {
		wakeup(pg);
		pg->flags &= ~(PG_WANTED);
	}

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return false;
}
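
/*
 * Illustrative sketch, kept under "#if 0" so it is never compiled: a
 * swap-off style caller that wants an anon's data back in RAM locks the
 * anon and calls uvm_anon_pagein(), which consumes the lock.  A true
 * return value means the pagein was aborted for lack of memory and the
 * caller should back off.  "example_force_pagein" is a hypothetical
 * name, not a UVM interface.
 */
#if 0
static bool
example_force_pagein(struct vm_anon *anon)
{

	simple_lock(&anon->an_lock);
	return uvm_anon_pagein(anon);	/* unlocks the anon for us */
}
#endif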

#endif /* defined(VMSWAP) */

/*
 * uvm_anon_release: release an anon and its page.
 *
 * => caller must lock the anon.
 */

void
uvm_anon_release(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	LOCK_ASSERT(simple_lock_held(&anon->an_lock));

	KASSERT(pg != NULL);
	KASSERT((pg->flags & PG_RELEASED) != 0);
	KASSERT((pg->flags & PG_BUSY) != 0);
	KASSERT(pg->uobject == NULL);
	KASSERT(pg->uanon == anon);
	KASSERT(pg->loan_count == 0);
	KASSERT(anon->an_ref == 0);

	uvm_lock_pageq();
	uvm_pagefree(pg);
	uvm_unlock_pageq();
	simple_unlock(&anon->an_lock);

	KASSERT(anon->an_page == NULL);

	uvm_anfree(anon);
}
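
/*
 * Illustrative sketch, kept under "#if 0" so it is never compiled: the
 * thread that held an anon page busy finds PG_RELEASED set once it is
 * done with the page (uvm_anfree() sets the flag when the last
 * reference goes away while the page is busy) and finishes the teardown
 * on the anon's behalf.  "example_finish_released_page" is a
 * hypothetical name, not a UVM interface.
 */
#if 0
static void
example_finish_released_page(struct vm_anon *anon)
{
	struct vm_page *pg = anon->an_page;

	/* caller holds the anon lock and was the owner of the busy page */
	if ((pg->flags & PG_RELEASED) != 0 && anon->an_ref == 0) {
		uvm_anon_release(anon);	/* frees the page and the anon */
		return;
	}

	/* ... otherwise just un-busy the page as usual ... */
}
#endif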