      1 /*	$NetBSD: uvm_pdaemon.c,v 1.12.2.2 1999/02/25 04:33:55 chs Exp $	*/
      2 
      3 /*
      4  * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
      5  *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
      6  */
      7 /*
      8  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      9  * Copyright (c) 1991, 1993, The Regents of the University of California.
     10  *
     11  * All rights reserved.
     12  *
     13  * This code is derived from software contributed to Berkeley by
     14  * The Mach Operating System project at Carnegie-Mellon University.
     15  *
     16  * Redistribution and use in source and binary forms, with or without
     17  * modification, are permitted provided that the following conditions
     18  * are met:
     19  * 1. Redistributions of source code must retain the above copyright
     20  *    notice, this list of conditions and the following disclaimer.
     21  * 2. Redistributions in binary form must reproduce the above copyright
     22  *    notice, this list of conditions and the following disclaimer in the
     23  *    documentation and/or other materials provided with the distribution.
     24  * 3. All advertising materials mentioning features or use of this software
     25  *    must display the following acknowledgement:
     26  *	This product includes software developed by Charles D. Cranor,
     27  *      Washington University, the University of California, Berkeley and
     28  *      its contributors.
     29  * 4. Neither the name of the University nor the names of its contributors
     30  *    may be used to endorse or promote products derived from this software
     31  *    without specific prior written permission.
     32  *
     33  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     34  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     35  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     36  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     37  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     38  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     39  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     40  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     41  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     42  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     43  * SUCH DAMAGE.
     44  *
     45  *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
     46  * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
     47  *
     48  *
     49  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     50  * All rights reserved.
     51  *
     52  * Permission to use, copy, modify and distribute this software and
     53  * its documentation is hereby granted, provided that both the copyright
     54  * notice and this permission notice appear in all copies of the
     55  * software, derivative works or modified versions, and any portions
     56  * thereof, and that both notices appear in supporting documentation.
     57  *
     58  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     59  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     60  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     61  *
     62  * Carnegie Mellon requests users of this software to return to
     63  *
      64  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     65  *  School of Computer Science
     66  *  Carnegie Mellon University
     67  *  Pittsburgh PA 15213-3890
     68  *
     69  * any improvements or extensions that they make and grant Carnegie the
     70  * rights to redistribute these changes.
     71  */
     72 
     73 #include "opt_uvmhist.h"
     74 
     75 /*
     76  * uvm_pdaemon.c: the page daemon
     77  */
     78 
     79 #include <sys/param.h>
     80 #include <sys/proc.h>
     81 #include <sys/systm.h>
     82 #include <sys/kernel.h>
     83 #include <sys/pool.h>
     84 
     85 #include <vm/vm.h>
     86 #include <vm/vm_page.h>
     87 #include <vm/vm_kern.h>
     88 
     89 #include <uvm/uvm.h>
     90 
     91 /*
     92  * local prototypes
     93  */
     94 
     95 static void		uvmpd_scan __P((void));
     96 static boolean_t	uvmpd_scan_inactive __P((struct pglist *));
     97 static void		uvmpd_tune __P((void));
     98 
     99 
    100 /*
    101  * uvm_wait: wait (sleep) for the page daemon to free some pages
    102  *
    103  * => should be called with all locks released
    104  * => should _not_ be called by the page daemon (to avoid deadlock)
    105  */
    106 
    107 void
    108 uvm_wait(wmsg)
    109 	char *wmsg;
    110 {
    111 	int timo = 0;
    112 	int s = splbio();
    113 
    114 	/*
    115 	 * check for page daemon going to sleep (waiting for itself)
    116 	 */
    117 
    118 	if (curproc == uvm.pagedaemon_proc) {
    119 		/*
    120 		 * now we have a problem: the pagedaemon wants to go to
    121 		 * sleep until it frees more memory.   but how can it
    122 		 * free more memory if it is asleep?  that is a deadlock.
    123 		 * we have two options:
    124 		 *  [1] panic now
    125 		 *  [2] put a timeout on the sleep, thus causing the
    126 		 *      pagedaemon to only pause (rather than sleep forever)
    127 		 *
    128 		 * note that option [2] will only help us if we get lucky
    129 		 * and some other process on the system breaks the deadlock
    130 		 * by exiting or freeing memory (thus allowing the pagedaemon
    131 		 * to continue).  for now we panic if DEBUG is defined,
    132 		 * otherwise we hope for the best with option [2] (better
    133 		 * yet, this should never happen in the first place!).
    134 		 */
    135 
    136 		printf("pagedaemon: deadlock detected!\n");
     137 		timo = hz >> 3;		/* set timeout: hz/8 ticks (~1/8 second) */
    138 #if defined(DEBUG)
    139 		/* DEBUG: panic so we can debug it */
    140 		panic("pagedaemon deadlock");
    141 #endif
    142 	}
    143 
    144 	simple_lock(&uvm.pagedaemon_lock);
    145 	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
    146 	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
    147 	    timo);
    148 
    149 	splx(s);
    150 }
    151 
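         /*
          * typical use (a sketch, not code from this file): a caller that is
          * not the pagedaemon and fails to allocate a page simply waits for
          * the daemon and retries, e.g.:
          *
          *	while ((pg = uvm_pagealloc(...)) == NULL)
          *		uvm_wait("somewait");
          *
          * the wmsg argument ("somewait" above is just a placeholder) becomes
          * the wait message shown by ps(1) while the caller sleeps.
          */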
    152 
    153 /*
    154  * uvmpd_tune: tune paging parameters
    155  *
     156  * => called whenever memory is added to (or removed from?) the system
    157  * => caller must call with page queues locked
    158  */
    159 
    160 static void
    161 uvmpd_tune()
    162 {
    163 	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
    164 
    165 	uvmexp.freemin = uvmexp.npages / 20;
    166 
    167 	/* between 16k and 256k */
    168 	/* XXX:  what are these values good for? */
    169 	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
    170 	uvmexp.freemin = min(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);
    171 
    172 	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
    173 	if (uvmexp.freetarg <= uvmexp.freemin)
    174 		uvmexp.freetarg = uvmexp.freemin + 1;
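         	/*
         	 * worked example of the arithmetic above (assuming 4 KB pages,
         	 * i.e. PAGE_SHIFT == 12, and 64 MB of managed memory): npages
         	 * is 16384, npages/20 is 819, and the clamp cuts that to 64
         	 * pages (256 KB); freetarg then becomes (64 * 4) / 3 == 85.
         	 */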
    175 
    176 	/* uvmexp.inactarg: computed in main daemon loop */
    177 
    178 	uvmexp.wiredmax = uvmexp.npages / 3;
    179 	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
    180 	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
    181 }
    182 
    183 /*
    184  * uvm_pageout: the main loop for the pagedaemon
    185  */
    186 
    187 void
    188 uvm_pageout()
    189 {
    190 	int npages = 0;
    191 	int s;
    192 	struct uvm_aiodesc *aio, *nextaio;
    193 	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
    194 
    195 	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
    196 
    197 	/*
    198 	 * ensure correct priority and set paging parameters...
    199 	 */
    200 
    201 	uvm.pagedaemon_proc = curproc;
    202 	(void) spl0();
    203 	uvm_lock_pageq();
    204 	npages = uvmexp.npages;
    205 	uvmpd_tune();
    206 	uvm_unlock_pageq();
    207 
    208 	/*
    209 	 * main loop
    210 	 */
    211 	while (TRUE) {
    212 
    213 		/*
    214 		 * carefully attempt to go to sleep (without losing "wakeups"!).
    215 		 * we need splbio because we want to make sure the aio_done list
    216 		 * is totally empty before we go to sleep.
    217 		 */
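         		/*
         		 * note that pagedaemon_lock is the interlock here: uvm_wait(),
         		 * for example, holds it across its wakeup, and UVM_UNLOCK_AND_WAIT
         		 * releases it only as we go to sleep, so a wakeup cannot be lost
         		 * between the aio_done check below and the sleep.
         		 */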
    218 
    219 		s = splbio();
    220 		simple_lock(&uvm.pagedaemon_lock);
    221 
    222 		/*
    223 		 * if we've got done aio's, then bypass the sleep
    224 		 */
    225 
    226 		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
     227 			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
    228 			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
    229 			    &uvm.pagedaemon_lock, FALSE, "daemon_slp", 0);
    230 			uvmexp.pdwoke++;
    231 			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
    232 
    233 			/* relock pagedaemon_lock, still at splbio */
    234 			simple_lock(&uvm.pagedaemon_lock);
    235 		}
    236 
    237 		/*
    238 		 * check for done aio structures
    239 		 */
    240 
    241 		aio = TAILQ_FIRST(&uvm.aio_done);/* save current list (if any)*/
    242 		if (aio) {
    243 			TAILQ_INIT(&uvm.aio_done);	/* zero global list */
    244 		}
    245 
    246 		simple_unlock(&uvm.pagedaemon_lock);	/* unlock */
    247 		splx(s);				/* drop splbio */
    248 
    249 		/*
    250 		 * first clear out any pending aios (to free space in case we
    251 		 * want to pageout more stuff).
    252 		 */
    253 
    254 		for (/*null*/; aio != NULL ; aio = nextaio) {
    255 			/* XXX uvmexp.paging needs spinlock */
    256 			uvmexp.paging -= aio->npages;
    257 			nextaio = TAILQ_NEXT(aio, aioq);
    258 			aio->aiodone(aio);
    259 		}
    260 
    261 		/* Next, drain pool resources */
    262 		pool_drain(0);
    263 
    264 		/*
    265 		 * now lock page queues and recompute inactive count
    266 		 */
    267 		uvm_lock_pageq();
    268 
    269 		if (npages != uvmexp.npages) {	/* check for new pages? */
    270 			npages = uvmexp.npages;
    271 			uvmpd_tune();
    272 		}
    273 
    274 		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
    275 		if (uvmexp.inactarg <= uvmexp.freetarg)
    276 			uvmexp.inactarg = uvmexp.freetarg + 1;
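         		/*
         		 * i.e. aim to keep roughly a third of the active+inactive
         		 * pages on the inactive queue (for example, with 9000 active
         		 * and 3000 inactive pages the target is 4000, so the scan
         		 * below will deactivate some active pages), but never less
         		 * than the free target.
         		 */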
    277 
    278 		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
    279 		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
    280 		    uvmexp.inactarg);
    281 
    282 		/*
    283 		 * scan if needed
    284 		 */
    285 		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
    286 		    uvmexp.inactive < uvmexp.inactarg) {
    287 			uvmpd_scan();
    288 		}
    289 
    290 		/*
    291 		 * if there's any free memory to be had,
    292 		 * wake up any waiters.
    293 		 */
    294 		wakeup(&uvmexp.free);
    295 
    296 		/*
    297 		 * done scan.  unlock page queues (the only lock we are holding)
    298 		 */
    299 		uvm_unlock_pageq();
    300 	}
    301 	/*NOTREACHED*/
    302 }
    303 
    304 /*
    305  * uvmpd_scan_inactive: the first loop of uvmpd_scan broken out into
    306  * 	its own function for ease of reading.
    307  *
    308  * => called with page queues locked
    309  * => we work on meeting our free target by converting inactive pages
    310  *    into free pages.
    311  * => we handle the building of swap-backed clusters
    312  * => we return TRUE if we are exiting because we met our target
    313  */
    314 
    315 static boolean_t
    316 uvmpd_scan_inactive(pglst)
    317 	struct pglist *pglst;
    318 {
    319 	boolean_t retval = FALSE;	/* assume we haven't hit target */
    320 	int s, free, result;
    321 	struct vm_page *p, *nextpg;
    322 	struct uvm_object *uobj;
    323 	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
    324 	int npages;
    325 	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
    326 	int swnpages, swcpages;				/* XXX: see below */
    327 	int swslot, oldslot;
    328 	struct vm_anon *anon;
    329 	boolean_t swap_backed;
    330 	vaddr_t start;
    331 	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);
    332 
    333 	/*
     334 	 * note: we currently keep swap-backed pages on a separate inactive
    335 	 * list from object-backed pages.   however, merging the two lists
    336 	 * back together again hasn't been ruled out.   thus, we keep our
    337 	 * swap cluster in "swpps" rather than in pps (allows us to mix
    338 	 * clustering types in the event of a mixed inactive queue).
    339 	 */
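         	/*
         	 * in this file "pglst" is always one of uvm.page_inactive_swp or
         	 * uvm.page_inactive_obj (see the calls in uvmpd_scan below).
         	 */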
    340 
    341 	/*
    342 	 * swslot is non-zero if we are building a swap cluster.  we want
    343 	 * to stay in the loop while we have a page to scan or we have
    344 	 * a swap-cluster to build.
    345 	 */
    346 	swslot = 0;
    347 	swnpages = swcpages = 0;
    348 	free = 0;
    349 
    350 	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {
    351 
    352 		/*
    353 		 * note that p can be NULL iff we have traversed the whole
    354 		 * list and need to do one final swap-backed clustered pageout.
    355 		 */
    356 		if (p) {
    357 			/*
    358 			 * update our copy of "free" and see if we've met
    359 			 * our target
    360 			 */
    361 			s = splimp();
    362 			uvm_lock_fpageq();
    363 			free = uvmexp.free;
    364 			uvm_unlock_fpageq();
    365 			splx(s);
    366 
    367 			if (free >= uvmexp.freetarg) {
    368 				UVMHIST_LOG(pdhist,"  met free target: "
    369 				    "exit loop", 0, 0, 0, 0);
    370 				retval = TRUE;		/* hit the target! */
    371 
    372 				if (swslot == 0)
    373 					/* exit now if no swap-i/o pending */
    374 					break;
    375 
    376 				/* set p to null to signal final swap i/o */
    377 				p = NULL;
    378 			}
    379 		}
    380 
    381 		uobj = NULL;	/* be safe and shut gcc up */
    382 		anon = NULL;	/* be safe and shut gcc up */
    383 
    384 		if (p) {	/* if (we have a new page to consider) */
    385 			/*
    386 			 * we are below target and have a new page to consider.
    387 			 */
    388 			uvmexp.pdscans++;
    389 			nextpg = TAILQ_NEXT(p, pageq);
    390 
    391 #if 1
    392 			/*
    393 			 * XXX
    394 			 * commented this out because the only way a page
     395 			 * can be referenced while still on the inactive queue
     396 			 * is if it is "referenced" by the process of
    397 			 * paging it out.  we shouldn't count these
    398 			 * as real references, so this code must be a noop.
    399 			 *
    400 			 * XXX shouldn't this bit be in the *ACTIVE* queue
    401 			 * scanning code?
    402 			 */
    403 
    404 			/*
    405 			 * move referenced pages back to active queue and
    406 			 * skip to next page (unlikely to happen since
    407 			 * inactive pages shouldn't have any valid mappings
    408 			 * and we cleared reference before deactivating).
    409 			 */
    410 			if (pmap_is_referenced(PMAP_PGARG(p))) {
    411 				uvm_pageactivate(p);
    412 				uvmexp.pdreact++;
    413 				continue;
    414 			}
    415 #endif
    416 
    417 			/*
    418 			 * first we attempt to lock the object that this page
    419 			 * belongs to.  if our attempt fails we skip on to
    420 			 * the next page (no harm done).  it is important to
    421 			 * "try" locking the object as we are locking in the
    422 			 * wrong order (pageq -> object) and we don't want to
    423 			 * get deadlocked.
    424 			 *
     425 			 * the only time we expect to see an ownerless page
    426 			 * (i.e. a page with no uobject and !PQ_ANON) is if an
    427 			 * anon has loaned a page from a uvm_object and the
    428 			 * uvm_object has dropped the ownership.  in that
    429 			 * case, the anon can "take over" the loaned page
    430 			 * and make it its own.
    431 			 */
    432 
    433 			/* is page part of an anon or ownerless ? */
    434 			if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
    435 
    436 				anon = p->uanon;
    437 
    438 #ifdef DIAGNOSTIC
    439 				/* to be on inactive q, page must be part
    440 				 * of _something_ */
    441 				if (anon == NULL)
    442 					panic("pagedaemon: page with no anon "
    443 					    "or object detected - loop 1");
    444 #endif
    445 
    446 				if (!simple_lock_try(&anon->an_lock))
    447 					/* lock failed, skip this page */
    448 					continue;
    449 
    450 				/*
    451 				 * if the page is ownerless, claim it in the
    452 				 * name of "anon"!
    453 				 */
    454 				if ((p->pqflags & PQ_ANON) == 0) {
    455 #ifdef DIAGNOSTIC
    456 					if (p->loan_count < 1)
    457 						panic("pagedaemon: non-loaned "
    458 						    "ownerless page detected -"
    459 						    " loop 1");
    460 #endif
    461 					p->loan_count--;
    462 					p->pqflags |= PQ_ANON;      /* anon now owns it */
    463 				}
    464 
    465 				if (p->flags & PG_BUSY) {
    466 					simple_unlock(&anon->an_lock);
    467 					uvmexp.pdbusy++;
    468 					/* someone else owns page, skip it */
    469 					continue;
    470 				}
    471 
    472 				uvmexp.pdanscan++;
    473 
    474 			} else {
    475 
    476 				uobj = p->uobject;
    477 
    478 				if (!simple_lock_try(&uobj->vmobjlock))
    479 					/* lock failed, skip this page */
    480 					continue;
    481 
    482 				if (p->flags & PG_BUSY) {
    483 					simple_unlock(&uobj->vmobjlock);
    484 					uvmexp.pdbusy++;
    485 					/* someone else owns page, skip it */
    486 					continue;
    487 				}
    488 
    489 				uvmexp.pdobscan++;
    490 			}
    491 
    492 			/*
    493 			 * we now have the object and the page queues locked.
    494 			 * the page is not busy.   if the page is clean we
    495 			 * can free it now and continue.
    496 			 */
    497 
    498 			if (p->flags & PG_CLEAN) {
    499 
    500 				if (p->pqflags & PQ_SWAPBACKED) {
    501 					/* this page now lives only in swap */
    502 					uvmexp.swpguniq++;
    503 				}
    504 
    505 				/* zap all mappings with pmap_page_protect... */
    506 				pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
    507 				uvm_pagefree(p);
    508 				uvmexp.pdfreed++;
    509 
    510 				if (anon) {
    511 #ifdef DIAGNOSTIC
    512 					/*
    513 					 * an anonymous page can only be clean
    514 					 * if it has valid backing store.
    515 					 */
    516 					if (anon->an_swslot == 0)
    517 						panic("pagedaemon: clean anon "
    518 						 "page without backing store?");
    519 #endif
    520 					/* remove from object */
    521 					anon->u.an_page = NULL;
    522 					simple_unlock(&anon->an_lock);
    523 				} else {
    524 					/* pagefree has already removed the
    525 					 * page from the object */
    526 					simple_unlock(&uobj->vmobjlock);
    527 				}
    528 				continue;
    529 			}
    530 
    531 			/*
     532 			 * this page is dirty.  skip it if free pages plus the
     533 			 * pageouts already in flight exceed 4x the free target.
    534 			 */
    535 
    536 			if (free + uvmexp.paging > uvmexp.freetarg << 2) {
    537 				if (anon) {
    538 					simple_unlock(&anon->an_lock);
    539 				} else {
    540 					simple_unlock(&uobj->vmobjlock);
    541 				}
    542 				continue;
    543 			}
    544 
    545 			/*
    546 			 * the page we are looking at is dirty.   we must
    547 			 * clean it before it can be freed.  to do this we
    548 			 * first mark the page busy so that no one else will
    549 			 * touch the page.   we write protect all the mappings
    550 			 * of the page so that no one touches it while it is
    551 			 * in I/O.
    552 			 */
    553 
    554 			swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
    555 			p->flags |= PG_BUSY;		/* now we own it */
    556 			UVM_PAGE_OWN(p, "scan_inactive");
    557 			pmap_page_protect(PMAP_PGARG(p), VM_PROT_READ);
    558 			uvmexp.pgswapout++;
    559 
    560 			/*
    561 			 * for swap-backed pages we need to (re)allocate
    562 			 * swap space.
    563 			 */
    564 			if (swap_backed) {
    565 
    566 				/*
    567 				 * free old swap slot (if any)
    568 				 */
    569 				if (anon) {
    570 					if (anon->an_swslot) {
    571 						uvm_swap_free(anon->an_swslot,
    572 						    1);
    573 						anon->an_swslot = 0;
    574 					}
    575 				} else {
    576 					oldslot = uao_set_swslot(uobj,
    577 					    p->offset >> PAGE_SHIFT, 0);
    578 
    579 					if (oldslot)
    580 						uvm_swap_free(oldslot, 1);
    581 				}
    582 
    583 				/*
    584 				 * start new cluster (if necessary)
    585 				 */
    586 				if (swslot == 0) {
    587 					/* want this much */
    588 					swnpages = MAXBSIZE >> PAGE_SHIFT;
    589 
    590 					swslot = uvm_swap_alloc(&swnpages,
    591 					    TRUE);
    592 
    593 					if (swslot == 0) {
    594 						/* no swap?  give up! */
    595 						p->flags &= ~PG_BUSY;
    596 						UVM_PAGE_OWN(p, NULL);
    597 						if (anon)
    598 							simple_unlock(
    599 							    &anon->an_lock);
    600 						else
    601 							simple_unlock(
    602 							    &uobj->vmobjlock);
    603 						continue;
    604 					}
    605 					swcpages = 0;	/* cluster is empty */
    606 				}
    607 
    608 				/*
    609 				 * add block to cluster
    610 				 */
    611 				swpps[swcpages] = p;
    612 				uvmexp.pgswapout++;
    613 				if (anon)
    614 					anon->an_swslot = swslot + swcpages;
    615 				else
    616 					uao_set_swslot(uobj,
    617 					    p->offset >> PAGE_SHIFT,
    618 					    swslot + swcpages);
    619 				swcpages++;
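         				/*
         				 * "p" has just been given swap slot swslot + (swcpages - 1),
         				 * so the pages of a full cluster land in swnpages contiguous
         				 * slots and can be pushed out in a single i/o.  swnpages is
         				 * MAXBSIZE worth of pages (e.g. 16, assuming a 64 KB MAXBSIZE
         				 * and 4 KB pages).
         				 */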
    620 
    621 				/* done (swap-backed) */
    622 			}
    623 
    624 			/* end: if (p) ["if we have new page to consider"] */
    625 		} else {
    626 
    627 			/* if p == NULL we must be doing a last swap i/o */
    628 			swap_backed = TRUE;
    629 		}
    630 
    631 		/*
    632 		 * now consider doing the pageout.
    633 		 *
    634 		 * for swap-backed pages, we do the pageout if we have either
     635 		 * filled the cluster (in which case swnpages == swcpages) or
    636 		 * run out of pages (p == NULL).
    637 		 *
    638 		 * for object pages, we always do the pageout.
    639 		 */
    640 		if (swap_backed) {
    641 
    642 			if (p) {	/* if we just added a page to cluster */
    643 				if (anon)
    644 					simple_unlock(&anon->an_lock);
    645 				else
    646 					simple_unlock(&uobj->vmobjlock);
    647 
    648 				/* cluster not full yet? */
    649 				if (swcpages < swnpages)
    650 					continue;
    651 			}
    652 
    653 			/* starting I/O now... set up for it */
    654 			npages = swcpages;
    655 			ppsp = swpps;
    656 			/* for swap-backed pages only */
    657 			start = (vaddr_t) swslot;
    658 
    659 			/* if this is final pageout we could have a few
    660 			 * extra swap blocks */
    661 			if (swcpages < swnpages) {
    662 				uvm_swap_free(swslot + swcpages,
    663 				    (swnpages - swcpages));
    664 			}
    665 
    666 		} else {
    667 
    668 			/* normal object pageout */
    669 			ppsp = pps;
    670 			npages = sizeof(pps) / sizeof(struct vm_page *);
    671 			/* not looked at because PGO_ALLPAGES is set */
    672 			start = 0;
    673 
    674 		}
    675 
    676 		/*
    677 		 * now do the pageout.
    678 		 *
    679 		 * for swap_backed pages we have already built the cluster.
    680 		 * for !swap_backed pages, uvm_pager_put will call the object's
    681 		 * "make put cluster" function to build a cluster on our behalf.
    682 		 *
    683 		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
    684 		 * it to free the cluster pages for us on a successful I/O (it
     685 		 * always does this for unsuccessful I/O requests).  this
    686 		 * allows us to do clustered pageout without having to deal
    687 		 * with cluster pages at this level.
    688 		 *
    689 		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
    690 		 *  IN: locked: uobj (if !swap_backed), page queues
    691 		 * OUT: locked: uobj (if !swap_backed && result !=VM_PAGER_PEND)
    692 		 *     !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
    693 		 *
    694 		 * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
    695 		 */
    696 
    697 		/* locked: uobj (if !swap_backed), page queues */
    698 		uvmexp.pdpageouts++;
    699 		result = uvm_pager_put((swap_backed) ? NULL : uobj, p,
    700 		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
    701 		/* locked: uobj (if !swap_backed && result != PEND) */
    702 		/* unlocked: pageqs, object (if swap_backed ||result == PEND) */
    703 
    704 		/*
    705 		 * if we did i/o to swap, zero swslot to indicate that we are
    706 		 * no longer building a swap-backed cluster.
    707 		 */
    708 
    709 		if (swap_backed)
    710 			swslot = 0;		/* done with this cluster */
    711 
    712 		/*
    713 		 * first, we check for VM_PAGER_PEND which means that the
    714 		 * async I/O is in progress and the async I/O done routine
    715 		 * will clean up after us.   in this case we move on to the
    716 		 * next page.
    717 		 *
    718 		 * there is a very remote chance that the pending async i/o can
    719 		 * finish _before_ we get here.   if that happens, our page "p"
    720 		 * may no longer be on the inactive queue.   so we verify this
    721 		 * when determining the next page (starting over at the head if
    722 		 * we've lost our inactive page).
    723 		 */
    724 
    725 		if (result == VM_PAGER_PEND) {
    726 			uvm_lock_pageq();		/* relock page queues */
    727 			uvmexp.pdpending++;
    728 			if (p) {
    729 				if (p->pqflags & PQ_INACTIVE)
    730 					/* reload! */
    731 					nextpg = TAILQ_NEXT(p, pageq);
    732 				else
    733 					/* reload! */
    734 					nextpg = TAILQ_FIRST(pglst);
    735 			} else {
    736 				nextpg = NULL;		/* done list */
    737 			}
    738 			continue;
    739 		}
    740 
    741 		/*
    742 		 * clean up "p" if we have one
    743 		 */
    744 
    745 		if (p) {
    746 			/*
    747 			 * the I/O request to "p" is done and uvm_pager_put
    748 			 * has freed any cluster pages it may have allocated
    749 			 * during I/O.  all that is left for us to do is
    750 			 * clean up page "p" (which is still PG_BUSY).
    751 			 *
    752 			 * our result could be one of the following:
    753 			 *   VM_PAGER_OK: successful pageout
    754 			 *
    755 			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
    756 			 *     to next page
    757 			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
    758 			 *     "reactivate" page to get it out of the way (it
    759 			 *     will eventually drift back into the inactive
    760 			 *     queue for a retry).
    761 			 *   VM_PAGER_UNLOCK: should never see this as it is
    762 			 *     only valid for "get" operations
    763 			 */
    764 
     765 			/* relock p's object: page queues not locked yet, so
    766 			 * no need for "try" */
    767 
    768 			/* !swap_backed case: already locked... */
    769 			if (swap_backed) {
    770 				if (anon)
    771 					simple_lock(&anon->an_lock);
    772 				else
    773 					simple_lock(&uobj->vmobjlock);
    774 			}
    775 
    776 #ifdef DIAGNOSTIC
    777 			if (result == VM_PAGER_UNLOCK)
    778 				panic("pagedaemon: pageout returned "
    779 				    "invalid 'unlock' code");
    780 #endif
    781 
    782 			/* handle PG_WANTED now */
    783 			if (p->flags & PG_WANTED)
    784 				/* still holding object lock */
    785 				wakeup(p);
    786 
    787 			p->flags &= ~(PG_BUSY|PG_WANTED);
    788 			UVM_PAGE_OWN(p, NULL);
    789 
    790 			/* released during I/O? */
    791 			if (p->flags & PG_RELEASED) {
    792 				if (anon) {
    793 					/* remove page so we can get nextpg */
    794 					anon->u.an_page = NULL;
    795 
    796 					simple_unlock(&anon->an_lock);
    797 					uvm_anfree(anon);	/* kills anon */
    798 					pmap_page_protect(PMAP_PGARG(p),
    799 					    VM_PROT_NONE);
    800 					anon = NULL;
    801 					uvm_lock_pageq();
    802 					nextpg = TAILQ_NEXT(p, pageq);
    803 					/* free released page */
    804 					uvm_pagefree(p);
    805 
    806 				} else {
    807 
    808 #ifdef DIAGNOSTIC
    809 					if (uobj->pgops->pgo_releasepg == NULL)
    810 						panic("pagedaemon: no "
    811 						   "pgo_releasepg function");
    812 #endif
    813 
    814 					/*
    815 					 * pgo_releasepg nukes the page and
    816 					 * gets "nextpg" for us.  it returns
    817 					 * with the page queues locked (when
    818 					 * given nextpg ptr).
    819 					 */
    820 					if (!uobj->pgops->pgo_releasepg(p,
    821 					    &nextpg))
    822 						/* uobj died after release */
    823 						uobj = NULL;
    824 
    825 					/*
    826 					 * lock page queues here so that they're
    827 					 * always locked at the end of the loop.
    828 					 */
    829 					uvm_lock_pageq();
    830 				}
    831 
    832 			} else {	/* page was not released during I/O */
    833 
    834 				uvm_lock_pageq();
    835 				nextpg = TAILQ_NEXT(p, pageq);
    836 
    837 				if (result != VM_PAGER_OK) {
    838 
    839 					/* pageout was a failure... */
    840 					if (result != VM_PAGER_AGAIN)
    841 						uvm_pageactivate(p);
    842 					pmap_clear_reference(PMAP_PGARG(p));
    843 					/* XXXCDC: if (swap_backed) FREE p's
    844 					 * swap block? */
    845 
    846 				} else {
    847 
    848 					/* pageout was a success... */
    849 					pmap_clear_reference(PMAP_PGARG(p));
    850 					pmap_clear_modify(PMAP_PGARG(p));
    851 					p->flags |= PG_CLEAN;
    852 					/* XXX: could free page here, but old
    853 					 * pagedaemon does not */
    854 
    855 				}
    856 			}
    857 
    858 			/*
    859 			 * drop object lock (if there is an object left).   do
    860 			 * a safety check of nextpg to make sure it is on the
    861 			 * inactive queue (it should be since PG_BUSY pages on
    862 			 * the inactive queue can't be re-queued [note: not
    863 			 * true for active queue]).
    864 			 */
    865 
    866 			if (anon)
    867 				simple_unlock(&anon->an_lock);
    868 			else if (uobj)
    869 				simple_unlock(&uobj->vmobjlock);
    870 
    871 		} /* if (p) */ else {
    872 
    873 			/* if p is null in this loop, make sure it stays null
    874 			 * in next loop */
    875 			nextpg = NULL;
    876 
    877 			/*
    878 			 * lock page queues here just so they're always locked
    879 			 * at the end of the loop.
    880 			 */
    881 			uvm_lock_pageq();
    882 		}
    883 
    884 		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
    885 			printf("pagedaemon: invalid nextpg!   reverting to "
    886 			    "queue head\n");
    887 			nextpg = TAILQ_FIRST(pglst);	/* reload! */
    888 		}
    889 
    890 	}	/* end of "inactive" 'for' loop */
    891 	return (retval);
    892 }
    893 
    894 /*
    895  * uvmpd_scan: scan the page queues and attempt to meet our targets.
    896  *
    897  * => called with pageq's locked
    898  */
    899 
    900 void
    901 uvmpd_scan()
    902 {
    903 	int s, free, inactive_shortage, swap_shortage;
    904 	struct vm_page *p, *nextpg;
    905 	struct uvm_object *uobj;
    906 	boolean_t got_it;
    907 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
    908 
    909 	uvmexp.pdrevs++;		/* counter */
    910 
    911 #ifdef __GNUC__
    912 	uobj = NULL;	/* XXX gcc */
    913 #endif
    914 	/*
    915 	 * get current "free" page count
    916 	 */
    917 	s = splimp();
    918 	uvm_lock_fpageq();
    919 	free = uvmexp.free;
    920 	uvm_unlock_fpageq();
    921 	splx(s);
    922 
    923 #ifndef __SWAP_BROKEN
    924 	/*
    925 	 * swap out some processes if we are below our free target.
    926 	 * we need to unlock the page queues for this.
    927 	 */
    928 	if (free < uvmexp.freetarg) {
    929 
    930 		uvmexp.pdswout++;
    931 		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout", free,
    932 		    uvmexp.freetarg, 0, 0);
    933 		uvm_unlock_pageq();
    934 		uvm_swapout_threads();
    935 		pmap_update();		/* update so we can scan inactive q */
    936 		uvm_lock_pageq();
    937 
    938 	}
    939 #endif
    940 
    941 	/*
    942 	 * now we want to work on meeting our targets.   first we work on our
    943 	 * free target by converting inactive pages into free pages.  then
    944 	 * we work on meeting our inactive target by converting active pages
    945 	 * to inactive ones.
    946 	 */
    947 
    948 	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
    949 
    950 	/*
    951 	 * do loop #1!   alternate starting queue between swap and object based
    952 	 * on the low bit of uvmexp.pdrevs (which we bump by one each call).
    953 	 */
    954 
    955 	got_it = FALSE;
    956 	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
    957 		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
    958 	if (!got_it)
    959 		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
    960 	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
    961 		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);
    962 
    963 	/*
    964 	 * we have done the scan to get free pages.   now we work on meeting
    965 	 * our inactive target.  if we are still below the free target
    966 	 * and we didn't start any pageouts in the inactive scan above
    967 	 * (perhaps because we're out of swap space) and we've met
    968 	 * the inactive target, then go ahead and deactivate some more
    969 	 * pages anyway.  meeting the free target is important enough
    970 	 * that it's worth temporarily reducing the number of active pages.
    971 	 */
    972 
    973 	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;
    974 	if (free < uvmexp.freetarg && inactive_shortage <= 0 &&
    975 	    uvmexp.paging == 0) {
    976 		inactive_shortage = 16;
    977 	}
    978 
    979 	/*
    980 	 * detect if we're not going to be able to page anything out
    981 	 * until we free some swap resources from active pages.
    982 	 */
    983 	swap_shortage = 0;
    984 	if (uvmexp.free < uvmexp.freetarg &&
    985 	    uvmexp.swpginuse == uvmexp.swpages &&
    986 	    uvmexp.swpguniq < uvmexp.swpages &&
    987 	    uvmexp.paging == 0) {
    988 		swap_shortage = uvmexp.freetarg - uvmexp.free;
    989 	}
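         	/*
         	 * for example, if freetarg is 85 and only 60 pages are free while
         	 * every swap slot is allocated but some slots still shadow pages
         	 * that are also resident, swap_shortage becomes 25: the loop below
         	 * may strip the swap slots from up to 25 resident pages (marking
         	 * them dirty again) so the slots can be reused for new pageouts.
         	 */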
    990 
    991 	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
    992 		    inactive_shortage, swap_shortage,0,0);
    993 	for (p = TAILQ_FIRST(&uvm.page_active);
    994 	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
    995 	     p = nextpg) {
    996 
    997 		nextpg = TAILQ_NEXT(p, pageq);
    998 		if (p->flags & PG_BUSY)
    999 			continue;	/* quick check before trying to lock */
   1000 
   1001 		/*
   1002 		 * lock the page's owner.
   1003 		 */
   1004 		/* is page anon owned or ownerless? */
   1005 		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
   1006 #ifdef DIAGNOSTIC
   1007 			if (p->uanon == NULL)
   1008 				panic("pagedaemon: page with no anon or "
   1009 				    "object detected - loop 2");
   1010 #endif
   1011 			if (!simple_lock_try(&p->uanon->an_lock))
   1012 				continue;
   1013 
   1014 			/* take over the page? */
   1015 			if ((p->pqflags & PQ_ANON) == 0) {
   1016 #ifdef DIAGNOSTIC
   1017 				if (p->loan_count < 1)
   1018 					panic("pagedaemon: non-loaned "
   1019 					    "ownerless page detected - loop 2");
   1020 #endif
   1021 				p->loan_count--;
   1022 				p->pqflags |= PQ_ANON;
   1023 			}
   1024 		} else {
   1025 			if (!simple_lock_try(&p->uobject->vmobjlock))
   1026 				continue;
   1027 		}
   1028 
   1029 		/*
   1030 		 * skip this page if it's busy.
   1031 		 */
   1032 		if ((p->flags & PG_BUSY) != 0) {
   1033 			if (p->pqflags & PQ_ANON)
   1034 				simple_unlock(&p->uanon->an_lock);
   1035 			else
   1036 				simple_unlock(&p->uobject->vmobjlock);
   1037 			continue;
   1038 		}
   1039 
   1040 		/*
   1041 		 * free any swap allocated to this page
   1042 		 * if there's a shortage of swap.
   1043 		 */
   1044 		if (swap_shortage > 0) {
   1045 			if (p->pqflags & PQ_ANON && p->uanon->an_swslot) {
   1046 				uvm_swap_free(p->uanon->an_swslot, 1);
   1047 				p->uanon->an_swslot = 0;
   1048 				p->flags &= ~PG_CLEAN;
   1049 				swap_shortage--;
   1050 			}
   1051 			if (p->pqflags & PQ_AOBJ) {
   1052 				int slot = uao_set_swslot(p->uobject,
   1053 					p->offset >> PAGE_SHIFT, 0);
   1054 				if (slot) {
   1055 					uvm_swap_free(slot, 1);
   1056 					p->flags &= ~PG_CLEAN;
   1057 					swap_shortage--;
   1058 				}
   1059 			}
   1060 		}
   1061 
   1062 		/*
   1063 		 * deactivate this page if there's a shortage of
   1064 		 * inactive pages.
   1065 		 */
   1066 		if (inactive_shortage > 0) {
   1067 			pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
   1068 			/* no need to check wire_count as pg is "active" */
   1069 			uvm_pagedeactivate(p);
   1070 			uvmexp.pddeact++;
   1071 			inactive_shortage--;
   1072 		}
   1073 
   1074 		if (p->pqflags & PQ_ANON)
   1075 			simple_unlock(&p->uanon->an_lock);
   1076 		else
   1077 			simple_unlock(&p->uobject->vmobjlock);
   1078 	}
   1079 }
   1080