/*	$NetBSD: uvm_anon.c,v 1.5 2000/01/11 06:57:49 chs Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
	LIST_ENTRY(uvm_anonblock) list;
	int count;
	struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;


static boolean_t anon_pagein __P((struct vm_anon *));


/*
 * allocate anons
 */
void
uvm_anon_init()
{
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
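	/*
	 * Illustrative note (not in the original source): the heuristic
	 * above sizes the initial pool at roughly 15/16 of the pages that
	 * are free at boot.  e.g. uvmexp.free == 16384 free pages gives
	 * nanon = 16384 - (16384 / 16) = 15360 anons.
	 */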

	simple_lock_init(&uvm.afreelock);
	LIST_INIT(&anonblock_list);

	/*
	 * Allocate the initial anons.
	 */
	uvm_anon_add(nanon);
}

/*
 * add some more anons to the free pool.  called when we add
 * more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
void
uvm_anon_add(count)
	int	count;
{
	struct uvm_anonblock *anonblock;
	struct vm_anon *anon;
	int lcv, needed;

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded += count;
	needed = uvmexp.nanonneeded - uvmexp.nanon;
	simple_unlock(&uvm.afreelock);

	if (needed <= 0) {
		return;
	}

	MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
	anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);

	/* XXX Should wait for VM to free up. */
	if (anonblock == NULL || anon == NULL) {
		printf("uvm_anon_add: can not allocate %d anons\n", needed);
		panic("uvm_anon_add");
	}

	anonblock->count = needed;
	anonblock->anons = anon;
	LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
	memset(anon, 0, sizeof(*anon) * needed);

	simple_lock(&uvm.afreelock);
	uvmexp.nanon += needed;
	uvmexp.nfreeanon += needed;
	for (lcv = 0; lcv < needed; lcv++) {
		/* each anon's lock is initialized as it goes on the free list */
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
		simple_lock_init(&uvm.afree->an_lock);
	}
	simple_unlock(&uvm.afreelock);
}

/*
 * remove anons from the free pool.
 */
void
uvm_anon_remove(count)
	int count;
{
	/*
	 * we never actually free any anons, to avoid allocation overhead.
	 * XXX someday we might want to try to free anons.
	 */

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded -= count;
	simple_unlock(&uvm.afreelock);
}
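
/*
 * Illustrative sketch (not part of the original file): how the
 * swapctl(2) path might bracket a swap device's lifetime with
 * uvm_anon_add() and uvm_anon_remove().  The function names and the
 * "npages" argument are hypothetical; the real callers live in
 * uvm_swap.c.
 */
#if 0
static void
example_swap_device_added(int npages)
{
	/*
	 * more swap space means more pages can be paged out, so grow
	 * the anon pool by the same amount.  swap_syscall_lock is
	 * assumed to be held, as uvm_anon_add() requires.
	 */
	uvm_anon_add(npages);
}

static void
example_swap_device_removed(int npages)
{
	/*
	 * shrink only the "needed" count; uvm_anon_remove() never
	 * frees anon memory, it just lets a later uvm_anon_add()
	 * avoid allocating more.
	 */
	uvm_anon_remove(npages);
}
#endif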

/*
 * allocate an anon
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;		/* so we can free quickly */
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}
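
/*
 * Illustrative sketch (not part of the original file): the usual
 * allocate-then-attach pattern followed by anon consumers such as the
 * fault and amap code.  "example_anon_getpage" is a hypothetical
 * helper, not a real UVM function.
 */
#if 0
static struct vm_anon *
example_anon_getpage()
{
	struct vm_anon *anon;
	struct vm_page *pg;

	anon = uvm_analloc();		/* NULL if the free pool is empty */
	if (anon == NULL)
		return NULL;

	/* attach a fresh page owned by the anon (no uobject) */
	pg = uvm_pagealloc(NULL, 0, anon, 0);
	if (pg == NULL) {
		/*
		 * uvm_anfree() expects a zero reference count, so drop
		 * the reference uvm_analloc() gave us before freeing.
		 */
		anon->an_ref--;
		uvm_anfree(anon);
		return NULL;
	}
	return anon;
}
#endif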

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.   call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {

			/* kill loan */
			uvm_lock_pageq();
#ifdef DIAGNOSTIC
			if (pg->loan_count < 1)
				panic("uvm_anfree: obj owned page "
				      "with no loan count");
#endif
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);

		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 *
			 * if page is busy then we just mark it as released
			 * (whoever has it busy must check for this when they
			 * wake up).  if the page is not busy then we can
			 * free it now.
			 */

			if ((pg->flags & PG_BUSY) != 0) {
				/* tell them to dump it when done */
				pg->flags |= PG_RELEASED;
				UVMHIST_LOG(maphist,
				    "  anon 0x%x, page 0x%x: BUSY (released!)",
				    anon, pg, 0, 0);
				return;
			}

			pmap_page_protect(pg, VM_PROT_NONE);
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */

			UVMHIST_LOG(maphist,"  anon 0x%x, page 0x%x: freed now!",
			    anon, pg, 0, 0);
		}
	}

	/*
	 * free any swap resources.
	 */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
 * uvm_anon_dropswap:  release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
	if (anon->an_swslot == 0) {
		return;
	}

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
		    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;

	if (anon->u.an_page == NULL) {
		/* this page is no longer only in swap. */
		simple_lock(&uvm.swap_data_lock);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
}
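
/*
 * Illustrative sketch (not part of the original file): a caller that is
 * about to modify an anon's resident page can drop the now-stale swap
 * copy first, which is what uvm_anon_dropswap() is for.
 * "example_anon_will_modify" is a hypothetical helper.
 */
#if 0
static void
example_anon_will_modify(struct vm_anon *anon)
{
	/*
	 * caller holds anon->an_lock.  once the page is dirtied the
	 * copy on swap no longer matches, so release the slot (and let
	 * uvm_anon_dropswap() fix up the swpgonly accounting if needed).
	 */
	if (anon->an_swslot != 0)
		uvm_anon_dropswap(anon);
}
#endif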

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.   note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.   this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 *
		 * XXX: quick check -- worth it?   need volatile?
		 */

		if (pg->uobject) {

			uvm_lock_pageq();
			if (pg->uobject) {	/* the "real" check */
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */
				simple_lock(&anon->an_lock);
				continue;		/* start over */
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;		/* take ownership... */
			pg->loan_count--;	/* ... and drop our loan */
			uvm_unlock_pageq();
		}

		/*
		 * we did it!   break the loop
		 */
		break;
	}

	/*
	 * done!
	 */

	return(pg);
}
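
/*
 * Illustrative sketch (not part of the original file): the locking
 * pattern a caller follows when it must operate on an anon's resident
 * page that may be loaned, mirroring the use in uvm_anfree() above.
 * "example_anon_page_op" is a hypothetical helper.
 */
#if 0
static void
example_anon_page_op(struct vm_anon *anon)
{
	struct vm_page *pg;

	simple_lock(&anon->an_lock);
	pg = anon->u.an_page;
	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);	/* may also lock pg->uobject */

	/* ... operate on pg; its real owner is now identified and locked ... */

	if (pg && pg->uobject)
		simple_unlock(&pg->uobject->vmobjlock);
	simple_unlock(&anon->an_lock);
}
#endif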



/*
 * page in every anon that is paged out to a range of swslots.
 *
 * swap_syscall_lock should be held (protects anonblock_list).
 */

boolean_t
anon_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_anonblock *anonblock;

	for (anonblock = LIST_FIRST(&anonblock_list);
	     anonblock != NULL;
	     anonblock = LIST_NEXT(anonblock, list)) {
		int i;

		/*
		 * loop thru all the anons in the anonblock,
		 * paging in where needed.
		 */

		for (i = 0; i < anonblock->count; i++) {
			struct vm_anon *anon = &anonblock->anons[i];
			int slot;

			/*
			 * lock anon to work on it.
			 */

			simple_lock(&anon->an_lock);

			/*
			 * is this anon's swap slot in range?
			 */

			slot = anon->an_swslot;
			if (slot >= startslot && slot < endslot) {
				boolean_t rv;

				/*
				 * yup, page it in.
				 */

				/* locked: anon */
				rv = anon_pagein(anon);
				/* unlocked: anon */

				if (rv) {
					return rv;
				}
			} else {

				/*
				 * nope, unlock and proceed.
				 */

				simple_unlock(&anon->an_lock);
			}
		}
	}
	return FALSE;
}
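
/*
 * Illustrative sketch (not part of the original file): how a swap
 * removal path might use anon_swap_off() to drain a slot range.  The
 * real caller is the swap-off code in uvm_swap.c;
 * "example_swap_range_off" is a hypothetical wrapper.
 */
#if 0
static boolean_t
example_swap_range_off(int startslot, int endslot)
{
	/*
	 * swap_syscall_lock must be held.  anon_swap_off() returns TRUE
	 * if some pagein failed (e.g. out of memory), in which case the
	 * range could not be emptied and the removal must be aborted.
	 */
	if (anon_swap_off(startslot, endslot))
		return FALSE;		/* abort the swap removal */
	return TRUE;			/* all anon data paged back in */
}
#endif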


/*
 * fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

static boolean_t
anon_pagein(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;
	UVMHIST_FUNC("anon_pagein"); UVMHIST_CALLED(pdhist);

	/* locked: anon */
	rv = uvmfault_anonget(NULL, NULL, anon);
	/* unlocked: anon */

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:

		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

#ifdef DIAGNOSTIC
	default:
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#endif
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->u.an_page;
	uobj = pg->uobject;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	pmap_page_protect(pg, VM_PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}