/*	$NetBSD: uvm_anon.c,v 1.8 2000/08/05 23:40:55 thorpej Exp $	*/

/*
 *
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed by Charles D. Cranor and
 *      Washington University.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * uvm_anon.c: uvm anon ops
 */

#include "opt_uvmhist.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/malloc.h>
#include <sys/pool.h>
#include <sys/kernel.h>

#include <uvm/uvm.h>
#include <uvm/uvm_swap.h>

/*
 * anonblock_list: global list of anon blocks,
 * locked by swap_syscall_lock (since we never remove
 * anything from this list and we only add to it via swapctl(2)).
 */

struct uvm_anonblock {
	LIST_ENTRY(uvm_anonblock) list;
	int count;
	struct vm_anon *anons;
};
static LIST_HEAD(anonlist, uvm_anonblock) anonblock_list;


static boolean_t anon_pagein __P((struct vm_anon *));

/*
 * uvm_anon_init: allocate the initial anons.
 */
void
uvm_anon_init()
{
	/* enough anons to cover roughly 15/16 of the pages currently free */
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */

	simple_lock_init(&uvm.afreelock);
	LIST_INIT(&anonblock_list);

	/*
	 * Allocate the initial anons.
	 */
	uvm_anon_add(nanon);
}

/*
 * uvm_anon_add: add some more anons to the free pool.  called when we
 * add more swap space.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 */
void
uvm_anon_add(count)
	int	count;
{
	struct uvm_anonblock *anonblock;
	struct vm_anon *anon;
	int lcv, needed;

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded += count;
	needed = uvmexp.nanonneeded - uvmexp.nanon;
	simple_unlock(&uvm.afreelock);

	if (needed <= 0) {
		return;
	}

	MALLOC(anonblock, void *, sizeof(*anonblock), M_UVMAMAP, M_WAITOK);
	anon = (void *)uvm_km_alloc(kernel_map, sizeof(*anon) * needed);

	/* XXX Should wait for VM to free up. */
	if (anonblock == NULL || anon == NULL) {
		printf("uvm_anon_add: cannot allocate %d anons\n", needed);
		panic("uvm_anon_add");
	}

	anonblock->count = needed;
	anonblock->anons = anon;
	LIST_INSERT_HEAD(&anonblock_list, anonblock, list);
	memset(anon, 0, sizeof(*anon) * needed);

	simple_lock(&uvm.afreelock);
	uvmexp.nanon += needed;
	uvmexp.nfreeanon += needed;

	/* thread each new anon onto the front of the free list */
	for (lcv = 0; lcv < needed; lcv++) {
		simple_lock_init(&anon[lcv].an_lock);
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
	}
	simple_unlock(&uvm.afreelock);
}
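
/*
 * usage sketch (illustrative; the real call sites live in the swap code):
 * when swapctl(2) adds swap space, the swap code grows the anon pool to
 * match, roughly
 *
 *	uvm_anon_add(npages);		(one anon per page of new swap)
 *
 * and calls uvm_anon_remove(npages) to drop the reservation again if the
 * swap device is later removed or could not be enabled.
 */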

/*
 * uvm_anon_remove: remove anons from the free pool (by lowering the
 * target count; see below).
 */
void
uvm_anon_remove(count)
	int count;
{
	/*
	 * we never actually free any anons, to avoid allocation overhead.
	 * XXX someday we might want to try to free anons.
	 */

	simple_lock(&uvm.afreelock);
	uvmexp.nanonneeded -= count;
	simple_unlock(&uvm.afreelock);
}

/*
 * uvm_analloc: allocate an anon
 */
struct vm_anon *
uvm_analloc()
{
	struct vm_anon *a;

	simple_lock(&uvm.afreelock);
	a = uvm.afree;
	if (a) {
		uvm.afree = a->u.an_nxt;
		uvmexp.nfreeanon--;
		a->an_ref = 1;
		a->an_swslot = 0;
		a->u.an_page = NULL;		/* so we can free quickly */
	}
	simple_unlock(&uvm.afreelock);
	return(a);
}
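
/*
 * note: an anon returned by uvm_analloc() comes back unlocked, with a
 * reference count of one and with no resident page or swap slot.  a
 * NULL return means the free pool is empty; callers are expected to be
 * able to back out when that happens.
 */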

/*
 * uvm_anfree: free a single anon structure
 *
 * => caller must remove anon from its amap before calling (if it was in
 *	an amap).
 * => anon must be unlocked and have a zero reference count.
 * => we may lock the pageq's.
 */
void
uvm_anfree(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	UVMHIST_FUNC("uvm_anfree"); UVMHIST_CALLED(maphist);
	UVMHIST_LOG(maphist,"(anon=0x%x)", anon, 0,0,0);

	/*
	 * get page
	 */

	pg = anon->u.an_page;

	/*
	 * if there is a resident page and it is loaned, then anon may not
	 * own it.   call out to uvm_anon_lockloanpg() to ensure the real
	 * owner of the page has been identified and locked.
	 */

	if (pg && pg->loan_count)
		pg = uvm_anon_lockloanpg(anon);

	/*
	 * if we have a resident page, we must dispose of it before freeing
	 * the anon.
	 */

	if (pg) {

		/*
		 * if the page is owned by a uobject (now locked), then we must
		 * kill the loan on the page rather than free it.
		 */

		if (pg->uobject) {

			/* kill loan */
			uvm_lock_pageq();
#ifdef DIAGNOSTIC
			if (pg->loan_count < 1)
				panic("uvm_anfree: obj owned page "
				      "with no loan count");
#endif
			pg->loan_count--;
			pg->uanon = NULL;
			uvm_unlock_pageq();
			simple_unlock(&pg->uobject->vmobjlock);

		} else {

			/*
			 * page has no uobject, so we must be the owner of it.
			 *
			 * if page is busy then we just mark it as released
			 * (whoever has it busy must check for this when they
			 * wake up).    if the page is not busy then we can
			 * free it now.
			 */

			if ((pg->flags & PG_BUSY) != 0) {
				/* tell them to dump it when done */
				pg->flags |= PG_RELEASED;
				UVMHIST_LOG(maphist,
				    "  anon 0x%x, page 0x%x: BUSY (released!)",
				    anon, pg, 0, 0);
				return;
			}

			pmap_page_protect(pg, VM_PROT_NONE);
			uvm_lock_pageq();	/* lock out pagedaemon */
			uvm_pagefree(pg);	/* bye bye */
			uvm_unlock_pageq();	/* free the daemon */

			UVMHIST_LOG(maphist,"  anon 0x%x, page 0x%x: freed now!",
			    anon, pg, 0, 0);
		}
	}

	/*
	 * free any swap resources.
	 */
	uvm_anon_dropswap(anon);

	/*
	 * now that we've stripped the data areas from the anon, free the anon
	 * itself!
	 */
	simple_lock(&uvm.afreelock);
	anon->u.an_nxt = uvm.afree;
	uvm.afree = anon;
	uvmexp.nfreeanon++;
	simple_unlock(&uvm.afreelock);
	UVMHIST_LOG(maphist,"<- done!",0,0,0,0);
}

/*
 * uvm_anon_dropswap:  release any swap resources from this anon.
 *
 * => anon must be locked or have a reference count of 0.
 */
void
uvm_anon_dropswap(anon)
	struct vm_anon *anon;
{
	UVMHIST_FUNC("uvm_anon_dropswap"); UVMHIST_CALLED(maphist);
	if (anon->an_swslot == 0) {
		return;
	}

	UVMHIST_LOG(maphist,"freeing swap for anon %p, paged to swslot 0x%x",
		    anon, anon->an_swslot, 0, 0);
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;

	if (anon->u.an_page == NULL) {
		/* this page is no longer only in swap. */
		simple_lock(&uvm.swap_data_lock);
		uvmexp.swpgonly--;
		simple_unlock(&uvm.swap_data_lock);
	}
}
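
/*
 * note: uvmexp.swpgonly counts pages whose only copy lives in swap.
 * it is adjusted above only when the anon has no resident page, since
 * an anon that still has a page in core was never counted as
 * "swap only" in the first place.
 */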

/*
 * uvm_anon_lockloanpg: given a locked anon, lock its resident page
 *
 * => anon is locked by caller
 * => on return: anon is locked
 *		 if there is a resident page:
 *			if it has a uobject, it is locked by us
 *			if it is ownerless, we take over as owner
 *		 we return the resident page (it can change during
 *		 this function)
 * => note that the only time an anon has an ownerless resident page
 *	is if the page was loaned from a uvm_object and the uvm_object
 *	disowned it
 * => this only needs to be called when you want to do an operation
 *	on an anon's resident page and that page has a non-zero loan
 *	count.
 */
struct vm_page *
uvm_anon_lockloanpg(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	boolean_t locked = FALSE;

	/*
	 * loop while we have a resident page that has a non-zero loan count.
	 * if we successfully get our lock, we will "break" the loop.
	 * note that the test for pg->loan_count is not protected -- this
	 * may produce false positive results.   note that a false positive
	 * result may cause us to do more work than we need to, but it will
	 * not produce an incorrect result.
	 */

	while (((pg = anon->u.an_page) != NULL) && pg->loan_count != 0) {

		/*
		 * quickly check to see if the page has an object before
		 * bothering to lock the page queues.   this may also produce
		 * a false positive result, but that's ok because we do a real
		 * check after that.
		 *
		 * XXX: quick check -- worth it?   need volatile?
		 */

		if (pg->uobject) {

			uvm_lock_pageq();
			if (pg->uobject) {	/* the "real" check */
				locked =
				    simple_lock_try(&pg->uobject->vmobjlock);
			} else {
				/* object disowned before we got PQ lock */
				locked = TRUE;
			}
			uvm_unlock_pageq();

			/*
			 * if we didn't get a lock (try lock failed), then we
			 * toggle our anon lock and try again
			 */

			if (!locked) {
				simple_unlock(&anon->an_lock);
				/*
				 * someone locking the object has a chance to
				 * lock us right now
				 */
				simple_lock(&anon->an_lock);
				continue;		/* start over */
			}
		}

		/*
		 * if page is un-owned [i.e. the object dropped its ownership],
		 * then we can take over as owner!
		 */

		if (pg->uobject == NULL && (pg->pqflags & PQ_ANON) == 0) {
			uvm_lock_pageq();
			pg->pqflags |= PQ_ANON;		/* take ownership... */
			pg->loan_count--;	/* ... and drop our loan */
			uvm_unlock_pageq();
		}

		/*
		 * we did it!   break the loop
		 */
		break;
	}

	/*
	 * done!
	 */

	return(pg);
}


/*
 * anon_swap_off: page in every anon that is paged out to a range of swslots.
 *
 * => swap_syscall_lock should be held (protects anonblock_list).
 * => returns TRUE if a pagein failed (so the caller can give up),
 *	FALSE once the whole range has been paged in.
 */

boolean_t
anon_swap_off(startslot, endslot)
	int startslot, endslot;
{
	struct uvm_anonblock *anonblock;

	for (anonblock = LIST_FIRST(&anonblock_list);
	     anonblock != NULL;
	     anonblock = LIST_NEXT(anonblock, list)) {
		int i;

		/*
		 * loop thru all the anons in the anonblock,
		 * paging in where needed.
		 */

		for (i = 0; i < anonblock->count; i++) {
			struct vm_anon *anon = &anonblock->anons[i];
			int slot;

			/*
			 * lock anon to work on it.
			 */

			simple_lock(&anon->an_lock);

			/*
			 * is this anon's swap slot in range?
			 */

			slot = anon->an_swslot;
			if (slot >= startslot && slot < endslot) {
				boolean_t rv;

				/*
				 * yup, page it in.
				 */

				/* locked: anon */
				rv = anon_pagein(anon);
				/* unlocked: anon */

				if (rv) {
					return rv;
				}
			} else {

				/*
				 * nope, unlock and proceed.
				 */

				simple_unlock(&anon->an_lock);
			}
		}
	}
	return FALSE;
}
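
/*
 * usage sketch (illustrative; see the swap code for the real call site):
 * the swapoff path uses this to drain the slot range belonging to the
 * device being removed, along the lines of
 *
 *	if (anon_swap_off(startslot, endslot))
 *		return ENOMEM;		(could not page everything in)
 *
 * where [startslot, endslot) covers the device's region of the swap
 * "drum"; the exact bounds come from the swap code's per-device state.
 */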

/*
 * anon_pagein: fetch an anon's page.
 *
 * => anon must be locked, and is unlocked upon return.
 * => returns TRUE if pagein was aborted due to lack of memory.
 */

static boolean_t
anon_pagein(anon)
	struct vm_anon *anon;
{
	struct vm_page *pg;
	struct uvm_object *uobj;
	int rv;
	UVMHIST_FUNC("anon_pagein"); UVMHIST_CALLED(pdhist);

	/* locked: anon */
	rv = uvmfault_anonget(NULL, NULL, anon);
	/*
	 * if rv == VM_PAGER_OK, anon is still locked, else anon
	 * is unlocked
	 */

	switch (rv) {
	case VM_PAGER_OK:
		break;

	case VM_PAGER_ERROR:
	case VM_PAGER_REFAULT:

		/*
		 * nothing more to do on errors.
		 * VM_PAGER_REFAULT can only mean that the anon was freed,
		 * so again there's nothing to do.
		 */

		return FALSE;

#ifdef DIAGNOSTIC
	default:
		panic("anon_pagein: uvmfault_anonget -> %d", rv);
#endif
	}

	/*
	 * ok, we've got the page now.
	 * mark it as dirty, clear its swslot and un-busy it.
	 */

	pg = anon->u.an_page;
	uobj = pg->uobject;
	uvm_swap_free(anon->an_swslot, 1);
	anon->an_swslot = 0;
	pg->flags &= ~(PG_CLEAN);

	/*
	 * deactivate the page (to put it on a page queue)
	 */

	pmap_clear_reference(pg);
	pmap_page_protect(pg, VM_PROT_NONE);
	uvm_lock_pageq();
	uvm_pagedeactivate(pg);
	uvm_unlock_pageq();

	/*
	 * unlock the anon and we're done.
	 */

	simple_unlock(&anon->an_lock);
	if (uobj) {
		simple_unlock(&uobj->vmobjlock);
	}
	return FALSE;
}