/*	$NetBSD: uvm_pdaemon.c,v 1.83 2007/02/21 23:00:14 thorpej Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.83 2007/02/21 23:00:14 thorpej Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

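	/*
	 * wake the pagedaemon and sleep on uvmexp.free until it frees
	 * pages (or, in the deadlock case above, until the timeout
	 * expires).
	 */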
	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}
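
/*
 * a typical caller pattern (an illustrative sketch, not a quote of any
 * particular caller; the wait message is arbitrary): retry an allocation
 * after waiting for the daemon to free pages.
 *
 *	while ((pg = uvm_pagealloc(obj, off, NULL, 0)) == NULL) {
 *		uvm_wait("pgalloc");
 *	}
 */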

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 */

void
uvm_kick_pdaemon(void)
{

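	/*
	 * wake the daemon if free memory (counting pageouts already in
	 * flight) is below the hard minimum, or if it is below the free
	 * target and the page-replacement policy wants a scan anyway.
	 */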
	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p())) {
		wakeup(&uvm.pagedaemon);
	}
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

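/*
 * a worked example (illustrative only, assuming 4kB pages, i.e.
 * PAGE_SHIFT == 12): with 32768 managed pages (128MB), npages / 20 is
 * 1638, which the clamp below cuts to (256*1024) >> 12 = 64 pages;
 * freetarg then becomes (64 * 4) / 3 = 85 pages.
 */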
static void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	uvmexp.freetarg += uvm_extrapages;
	uvm_extrapages = 0;

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */

	for (;;) {
		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		uvmexp.pdwoke++;
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/*
		 * now lock page queues and recompute inactive count
		 */

		uvm_lock_pageq();
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			uvmpd_tune();
		}

		uvmpdpol_tune();

		/*
		 * estimate a hint for buf_drain().  note that buffer
		 * memory is returned to the system only when an entire
		 * pool page is empty.
		 */
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, 0,0);

		/*
		 * scan if needed
		 */

		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmpdpol_needsscan_p()) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
		}

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */

		uvm_unlock_pageq();

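		/*
		 * convert the page-count hint computed above into bytes
		 * for buf_drain().
		 */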
		buf_drain(bufcnt << PAGE_SHIFT);

		/*
		 * drain pool resources now that we're not holding any locks
		 */

		pool_drain(0);

		/*
		 * free any cached u-areas we don't need
		 */
		uvm_uarea_drain(TRUE);

	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	int s, free;
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.  sample the free count before the
	 * completion callback runs so we can tell whether the system was
	 * short of pages when the i/o finished, and wake the appropriate
	 * sleeper below.
	 */

	free = uvmexp.free;
	(*bp->b_iodone)(bp);
	if (free <= uvmexp.reserve_kernel) {
		s = uvm_lock_fpageq();
		wakeup(&uvm.pagedaemon);
		uvm_unlock_fpageq(s);
	} else {
		simple_lock(&uvm.pagedaemon_lock);
		wakeup(&uvmexp.free);
		simple_unlock(&uvm.pagedaemon_lock);
	}
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked simplelock on success.  otherwise, return NULL.
 */

struct simplelock *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	struct simplelock *slock;

	UVM_LOCK_ASSERT_PAGEQ();
	if (uobj != NULL) {
		slock = &uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = &anon->an_lock;
	}

	if (!simple_lock_try(slock)) {
		return NULL;
	}

	if (uobj == NULL) {

		/*
		 * set PQ_ANON if it isn't set already.
		 */

		if ((pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
			/* anon now owns it */
		}
	}

	return slock;
}

#if defined(VMSWAP)
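/*
 * a swapcluster batches up to MAXPHYS worth of dirty swap-backed pages
 * so they can be written to a contiguous run of swap slots in one i/o.
 */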
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/*
	 * even with a strange MAXPHYS, the shift implicitly rounds down
	 * to a whole page (e.g. with a common 64kB MAXPHYS and 4kB pages
	 * this asks for 16 slots).
	 */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, TRUE);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

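	/*
	 * record the chosen swap slot in the page's owner (anon or
	 * aobj), so a later fault can find the data once the page
	 * itself has been freed.
	 */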
	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		LOCK_ASSERT(simple_lock_held(&pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		LOCK_ASSERT(simple_lock_held(&uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	uvmexp.pdpageouts++;
	error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
	KASSERT(error == 0);

	/*
	 * zero swc_slot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
}

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return TRUE if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = FALSE;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = TRUE;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = TRUE;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return TRUE if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	struct simplelock *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return FALSE;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return FALSE;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		simple_unlock(slock);
		return FALSE;
	}

	result = uvmpd_dropswap(pg);

	simple_unlock(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replacement-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	struct simplelock *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swc.swc_slot is non-zero while we are building a swap cluster.
	 * we want to stay in the loop while we have a page to scan or we
	 * have a swap-cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.  note that we
		 * deliberately overshoot (four times freetarg, counting
		 * pageouts already in flight) so we don't immediately
		 * fall back below the target.
		 */

		if (uvmexp.free + uvmexp.paging >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
			break;
		}

		p = uvmpdpol_selectvictim();
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			continue;
		}
		if (p->flags & PG_BUSY) {
			simple_unlock(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}


		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->pqflags & PQ_READAHEAD) != 0) {
			p->pqflags &= ~PQ_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			uvm_unlock_pageq();
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			uvm_lock_pageq();
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
			p->flags &= ~(PG_CLEAN);
		}
		if (p->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			uvmexp.pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			simple_unlock(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				simple_lock(&uvm.swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				simple_unlock(&uvm.swap_data_lock);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			simple_unlock(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * if the swap space is full (i.e. every page in swap is
		 * swap-only), we can't page out any more swap-backed
		 * pages.  reactivate this page so that we eventually
		 * cycle all pages through the inactive queue.
		 */

		if (uvm_swapisfull()) {
			dirtyreacts++;
			uvm_pageactivate(p);
			simple_unlock(slock);
			continue;
		}

		/*
		 * start new swap pageout cluster (if necessary).
		 */

		if (swapcluster_allocslots(&swc)) {
			simple_unlock(slock);
			dirtyreacts++; /* XXX */
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");

		p->flags |= PG_PAGEOUT;
		uvmexp.paging++;
		uvm_pagedequeue(p);

		uvmexp.pgswapout++;
		uvm_unlock_pageq();

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			uvm_lock_pageq();
			uvmexp.paging--;
			dirtyreacts++;
			uvm_pageactivate(p);
			simple_unlock(slock);
			continue;
		}
		simple_unlock(slock);

		swapcluster_flush(&swc, FALSE);
		uvm_lock_pageq();

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;

#else /* defined(VMSWAP) */
		uvm_pageactivate(p);
		simple_unlock(slock);
#endif /* defined(VMSWAP) */
	}

#if defined(VMSWAP)
	uvm_unlock_pageq();
	swapcluster_flush(&swc, TRUE);
	uvm_lock_pageq();
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

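	/*
	 * hand the shortage to the page-replacement policy; it can then
	 * deactivate active pages and reclaim the swap space they hold.
	 */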
	uvmpdpol_balancequeue(swap_shortage);
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return TRUE if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return TRUE;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if file pages exceed 1/16 of pageable memory or 5MB (whichever
	 * is smaller), try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return TRUE;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return FALSE;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}