/*	$NetBSD: uvm_pdaemon.c,v 1.12.2.4 1999/04/29 05:35:13 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static void		uvmpd_scan __P((void));
static boolean_t	uvmpd_scan_inactive __P((struct pglist *));
static void		uvmpd_tune __P((void));


/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(wmsg)
	char *wmsg;
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}
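
/*
 * usage sketch (illustrative, not code from this file; the allocation
 * call and its argument list are assumptions, not taken from this
 * revision): a caller that fails to allocate a page typically retries
 * around uvm_wait, e.g.:
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL)
 *		uvm_wait("uvm_pgwait");
 *
 * the wmsg argument is only the wait-channel name shown by ps(1).
 */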


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune()
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = min(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
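
/*
 * worked example (assuming 4 KB pages, i.e. PAGE_SHIFT == 12, and
 * 32 MB of managed memory, i.e. npages == 8192):
 *
 *	freemin  = 8192 / 20 = 409, clamped to the [16k>>12, 256k>>12]
 *	           = [4, 64] page range, so freemin = 64
 *	freetarg = (64 * 4) / 3 = 85
 *	wiredmax = 8192 / 3 = 2730
 */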

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout()
{
	int npages = 0;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */
	while (TRUE) {

		simple_lock(&uvm.pagedaemon_lock);

		UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
		UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
		    &uvm.pagedaemon_lock, FALSE, "pgdaemon", 0);
		UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

		/* drain pool resources */
		pool_drain(0);

		/*
		 * now lock page queues and recompute inactive count
		 */
		uvm_lock_pageq();

		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg)
			uvmexp.inactarg = uvmexp.freetarg + 1;

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 */
		if (uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		wakeup(&uvmexp.free);

		/*
		 * done scan.  unlock page queues (the only lock we are holding)
		 */
		uvm_unlock_pageq();
	}
	/*NOTREACHED*/
}
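
/*
 * note on the sleep/wakeup handshake: the wakeup(&uvmexp.free) above
 * pairs with the UVM_UNLOCK_AND_WAIT(&uvmexp.free, ...) in uvm_wait(),
 * and uvm_wait()'s wakeup(&uvm.pagedaemon) pairs with the
 * UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon, ...) at the top of the main
 * loop above.
 */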


/*
 * uvm_aiodone_daemon:  main loop for the aiodone daemon.
 */

void
uvm_aiodone_daemon()
{
	int s;
	struct uvm_aiodesc *aio, *nextaio;
	UVMHIST_FUNC("uvm_aiodoned"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm aiodone daemon>", 0, 0, 0, 0);

	for (;;) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.aiodoned_lock);

		/*
		 * if we've got done aio's, then bypass the sleep
		 */

		if (TAILQ_FIRST(&uvm.aio_done) == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.aiodoned,
			    &uvm.aiodoned_lock, FALSE, "aiodoned", 0);
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock aiodoned_lock, still at splbio */
			simple_lock(&uvm.aiodoned_lock);
		}

		/*
		 * check for done aio structures
		 */

		aio = TAILQ_FIRST(&uvm.aio_done);
		if (aio) {
			TAILQ_INIT(&uvm.aio_done);
		}

		simple_unlock(&uvm.aiodoned_lock);
		splx(s);

		/*
		 * process each i/o that's done.
		 */

		for (/*null*/; aio != NULL ; aio = nextaio) {
			uvmexp.paging -= aio->npages;
			nextaio = TAILQ_NEXT(aio, aioq);
			aio->aiodone(aio);
		}
	}
}
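
/*
 * note on the list handoff above: the daemon detaches the entire
 * aio_done list in one step (TAILQ_FIRST + TAILQ_INIT) while holding
 * aiodoned_lock at splbio, then drops the lock and spl before walking
 * its private copy.  this keeps the aiodone callbacks from running
 * with the lock held and bounds the time spent at splbio.
 */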



/*
 * uvmpd_scan_inactive: the first loop of uvmpd_scan broken out into
 * 	its own function for ease of reading.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

static boolean_t
uvmpd_scan_inactive(pglst)
	struct pglist *pglst;
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int s, free, result;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
	int swnpages, swcpages;				/* XXX: see below */
	int swslot, oldslot;
	struct vm_anon *anon;
	boolean_t swap_backed;
	vaddr_t start;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * note: we currently keep swap-backed pages on a separate inactive
	 * list from object-backed pages.   however, merging the two lists
	 * back together again hasn't been ruled out.   thus, we keep our
	 * swap cluster in "swpps" rather than in pps (allows us to mix
	 * clustering types in the event of a mixed inactive queue).
	 */

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */
	swslot = 0;
	swnpages = swcpages = 0;
	free = 0;

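	/*
	 * loop invariant for the swap-backed case (informal): when
	 * swslot != 0, slots swslot .. swslot+swnpages-1 are allocated
	 * and the first swcpages of them have been assigned to the
	 * pages in swpps[].  the cluster is flushed once it fills
	 * (swcpages == swnpages) or the traversal ends (p == NULL).
	 */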
	for (p = TAILQ_FIRST(pglst); p != NULL || swslot != 0; p = nextpg) {

		/*
		 * note that p can be NULL iff we have traversed the whole
		 * list and need to do one final swap-backed clustered pageout.
		 */
		if (p) {
			/*
			 * update our copy of "free" and see if we've met
			 * our target
			 */
			s = splimp();
			uvm_lock_fpageq();
			free = uvmexp.free;
			uvm_unlock_fpageq();
			splx(s);

			if (free >= uvmexp.freetarg) {
				UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
				retval = TRUE;		/* hit the target! */

				if (swslot == 0)
					/* exit now if no swap-i/o pending */
					break;

				/* set p to null to signal final swap i/o */
				p = NULL;
			}
		}

		uobj = NULL;	/* be safe and shut gcc up */
		anon = NULL;	/* be safe and shut gcc up */

		if (p) {	/* if (we have a new page to consider) */
			/*
			 * we are below target and have a new page to consider.
			 */
			uvmexp.pdscans++;
			nextpg = TAILQ_NEXT(p, pageq);

#if 1
			/*
			 * XXX
			 * this could be commented out because the only way a
			 * page can be referenced and still be on the inactive
			 * queue is if it is "referenced" by the process of
			 * paging it out.  we shouldn't count these as real
			 * references, so this code should be a noop.
			 *
			 * XXX shouldn't this bit be in the *ACTIVE* queue
			 * scanning code?
			 */

			/*
			 * move referenced pages back to active queue and
			 * skip to next page (unlikely to happen since
			 * inactive pages shouldn't have any valid mappings
			 * and we cleared reference before deactivating).
			 */
			if (pmap_is_referenced(PMAP_PGARG(p))) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}
#endif

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * get deadlocked.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* is page part of an anon or ownerless ? */
			if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {

				anon = p->uanon;

#ifdef DIAGNOSTIC
				/* to be on inactive q, page must be part
				 * of _something_ */
				if (anon == NULL)
					panic("pagedaemon: page with no anon "
					    "or object detected - loop 1");
#endif

				if (!simple_lock_try(&anon->an_lock))
					/* lock failed, skip this page */
					continue;

				/*
				 * if the page is ownerless, claim it in the
				 * name of "anon"!
				 */
				if ((p->pqflags & PQ_ANON) == 0) {
#ifdef DIAGNOSTIC
					if (p->loan_count < 1)
						panic("pagedaemon: non-loaned "
						    "ownerless page detected -"
						    " loop 1");
#endif
					p->loan_count--;
					p->pqflags |= PQ_ANON;      /* anon now owns it */
				}

				if (p->flags & PG_BUSY) {
					simple_unlock(&anon->an_lock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}

				uvmexp.pdanscan++;

			} else {

				uobj = p->uobject;

				if (!simple_lock_try(&uobj->vmobjlock))
					/* lock failed, skip this page */
					continue;

				if (p->flags & PG_BUSY) {
					simple_unlock(&uobj->vmobjlock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}

				uvmexp.pdobscan++;
			}

			/*
			 * we now have the object and the page queues locked.
			 * the page is not busy.   if the page is clean we
			 * can free it now and continue.
			 */

			if (p->flags & PG_CLEAN) {

				if (p->pqflags & PQ_SWAPBACKED) {
					/* this page now lives only in swap */
					uvmexp.swpguniq++;
				}

				/* zap all mappings with pmap_page_protect... */
				pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				if (anon) {
#ifdef DIAGNOSTIC
					/*
					 * an anonymous page can only be clean
					 * if it has valid backing store.
					 */
					if (anon->an_swslot == 0)
						panic("pagedaemon: clean anon "
						 "page without backing store?");
#endif
					/* remove from object */
					anon->u.an_page = NULL;
					simple_unlock(&anon->an_lock);
				} else {
					/* pagefree has already removed the
					 * page from the object */
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */

			if (free + uvmexp.paging > uvmexp.freetarg << 2) {
				if (anon) {
					simple_unlock(&anon->an_lock);
				} else {
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * the page we are looking at is dirty.   we must
			 * clean it before it can be freed.  to do this we
			 * first mark the page busy so that no one else will
			 * touch the page.   we write protect all the mappings
			 * of the page so that no one touches it while it is
			 * in I/O.
			 */

			swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
			p->flags |= PG_BUSY;		/* now we own it */
			UVM_PAGE_OWN(p, "scan_inactive");
			pmap_page_protect(PMAP_PGARG(p), VM_PROT_READ);
			uvmexp.pgswapout++;

			/*
			 * for swap-backed pages we need to (re)allocate
			 * swap space.
			 */
			if (swap_backed) {

				/*
				 * free old swap slot (if any)
				 */
				if (anon) {
					if (anon->an_swslot) {
						uvm_swap_free(anon->an_swslot,
						    1);
						anon->an_swslot = 0;
					}
				} else {
					oldslot = uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT, 0);

					if (oldslot)
						uvm_swap_free(oldslot, 1);
				}

				/*
				 * start new cluster (if necessary)
				 */
				if (swslot == 0) {
					/* want this much */
					swnpages = MAXBSIZE >> PAGE_SHIFT;

					swslot = uvm_swap_alloc(&swnpages,
					    TRUE);

					if (swslot == 0) {
						/* no swap?  give up! */
						p->flags &= ~PG_BUSY;
						UVM_PAGE_OWN(p, NULL);
						if (anon)
							simple_unlock(
							    &anon->an_lock);
						else
							simple_unlock(
							    &uobj->vmobjlock);
						continue;
					}
					swcpages = 0;	/* cluster is empty */
				}

				/*
				 * add block to cluster
				 */
				swpps[swcpages] = p;
				uvmexp.pgswapout++;
				if (anon)
					anon->an_swslot = swslot + swcpages;
				else
					uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT,
					    swslot + swcpages);
				swcpages++;

				/* done (swap-backed) */
			}

			/* end: if (p) ["if we have new page to consider"] */
		} else {

			/* if p == NULL we must be doing a last swap i/o */
			swap_backed = TRUE;
		}

		/*
		 * now consider doing the pageout.
		 *
		 * for swap-backed pages, we do the pageout if we have either
		 * filled the cluster (in which case swnpages == swcpages) or
		 * run out of pages (p == NULL).
		 *
		 * for object pages, we always do the pageout.
		 */
		if (swap_backed) {

			if (p) {	/* if we just added a page to cluster */
				if (anon)
					simple_unlock(&anon->an_lock);
				else
					simple_unlock(&uobj->vmobjlock);

				/* cluster not full yet? */
				if (swcpages < swnpages)
					continue;
			}

			/* starting I/O now... set up for it */
			npages = swcpages;
			ppsp = swpps;
			/* for swap-backed pages only */
			start = (vaddr_t) swslot;

			/* if this is final pageout we could have a few
			 * extra swap blocks */
			if (swcpages < swnpages) {
				uvm_swap_free(swslot + swcpages,
				    (swnpages - swcpages));
			}

		} else {

			/* normal object pageout */
			ppsp = pps;
			npages = sizeof(pps) / sizeof(struct vm_page *);
			/* not looked at because PGO_ALLPAGES is set */
			start = 0;

		}

		/*
		 * now do the pageout.
		 *
		 * for swap_backed pages we have already built the cluster.
		 * for !swap_backed pages, uvm_pager_put will call the object's
		 * "make put cluster" function to build a cluster on our behalf.
		 *
		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
		 * it to free the cluster pages for us on a successful I/O (it
		 * always does this for un-successful I/O requests).  this
		 * allows us to do clustered pageout without having to deal
		 * with cluster pages at this level.
		 *
		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
		 *  IN: locked: uobj (if !swap_backed), page queues
		 * OUT: locked: uobj (if !swap_backed && result !=VM_PAGER_PEND)
		 *     !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
		 *
		 * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
		 */

		/* locked: uobj (if !swap_backed), page queues */
		uvmexp.pdpageouts++;
		result = uvm_pager_put((swap_backed) ? NULL : uobj, p,
		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
		/* locked: uobj (if !swap_backed && result != PEND) */
		/* unlocked: pageqs, object (if swap_backed ||result == PEND) */

		/*
		 * if we did i/o to swap, zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		if (swap_backed)
			swslot = 0;		/* done with this cluster */

		/*
		 * first, we check for VM_PAGER_PEND which means that the
		 * async I/O is in progress and the async I/O done routine
		 * will clean up after us.   in this case we move on to the
		 * next page.
		 *
		 * there is a very remote chance that the pending async i/o can
		 * finish _before_ we get here.   if that happens, our page "p"
		 * may no longer be on the inactive queue.   so we verify this
		 * when determining the next page (starting over at the head if
		 * we've lost our inactive page).
		 */

		if (result == VM_PAGER_PEND) {
			uvmexp.paging += npages;
			uvm_lock_pageq();		/* relock page queues */
			uvmexp.pdpending++;
			if (p) {
				if (p->pqflags & PQ_INACTIVE)
					/* reload! */
					nextpg = TAILQ_NEXT(p, pageq);
				else
					/* reload! */
					nextpg = TAILQ_FIRST(pglst);
			} else {
				nextpg = NULL;		/* done list */
			}
			continue;
		}

		/*
		 * clean up "p" if we have one
		 */

		if (p) {
			/*
			 * the I/O request to "p" is done and uvm_pager_put
			 * has freed any cluster pages it may have allocated
			 * during I/O.  all that is left for us to do is
			 * clean up page "p" (which is still PG_BUSY).
			 *
			 * our result could be one of the following:
			 *   VM_PAGER_OK: successful pageout
			 *
			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
			 *     to next page
			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
			 *     "reactivate" page to get it out of the way (it
			 *     will eventually drift back into the inactive
			 *     queue for a retry).
			 *   VM_PAGER_UNLOCK: should never see this as it is
			 *     only valid for "get" operations
			 */

			/* relock p's object: page queues not locked yet, so
			 * no need for "try" */

			/* !swap_backed case: already locked... */
			if (swap_backed) {
				if (anon)
					simple_lock(&anon->an_lock);
				else
					simple_lock(&uobj->vmobjlock);
			}

#ifdef DIAGNOSTIC
			if (result == VM_PAGER_UNLOCK)
				panic("pagedaemon: pageout returned "
				    "invalid 'unlock' code");
#endif

			/* handle PG_WANTED now */
			if (p->flags & PG_WANTED)
				/* still holding object lock */
				wakeup(p);

			p->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(p, NULL);

			/* released during I/O? */
			if (p->flags & PG_RELEASED) {
				if (anon) {
					/* remove page so we can get nextpg */
					anon->u.an_page = NULL;

					simple_unlock(&anon->an_lock);
					uvm_anfree(anon);	/* kills anon */
					pmap_page_protect(PMAP_PGARG(p),
					    VM_PROT_NONE);
					anon = NULL;
					uvm_lock_pageq();
					nextpg = TAILQ_NEXT(p, pageq);
					/* free released page */
					uvm_pagefree(p);

				} else {

#ifdef DIAGNOSTIC
					if (uobj->pgops->pgo_releasepg == NULL)
						panic("pagedaemon: no "
						   "pgo_releasepg function");
#endif

					/*
					 * pgo_releasepg nukes the page and
					 * gets "nextpg" for us.  it returns
					 * with the page queues locked (when
					 * given nextpg ptr).
					 */
					if (!uobj->pgops->pgo_releasepg(p,
					    &nextpg))
						/* uobj died after release */
						uobj = NULL;

					/*
					 * lock page queues here so that they're
					 * always locked at the end of the loop.
					 */
					uvm_lock_pageq();
				}

			} else {	/* page was not released during I/O */

				uvm_lock_pageq();
				nextpg = TAILQ_NEXT(p, pageq);

				if (result != VM_PAGER_OK) {

					/* pageout was a failure... */
					if (result != VM_PAGER_AGAIN)
						uvm_pageactivate(p);
					pmap_clear_reference(PMAP_PGARG(p));
					/* XXXCDC: if (swap_backed) FREE p's
					 * swap block? */

				} else {

					/* pageout was a success... */
					pmap_clear_reference(PMAP_PGARG(p));
					pmap_clear_modify(PMAP_PGARG(p));
					p->flags |= PG_CLEAN;
					/* XXX: could free page here, but old
					 * pagedaemon does not */

				}
			}

			/*
			 * drop object lock (if there is an object left).   do
			 * a safety check of nextpg to make sure it is on the
			 * inactive queue (it should be since PG_BUSY pages on
			 * the inactive queue can't be re-queued [note: not
			 * true for active queue]).
			 */

			if (anon)
				simple_unlock(&anon->an_lock);
			else if (uobj)
				simple_unlock(&uobj->vmobjlock);

		} /* if (p) */ else {

			/* if p is null in this loop, make sure it stays null
			 * in next loop */
			nextpg = NULL;

			/*
			 * lock page queues here just so they're always locked
			 * at the end of the loop.
			 */
			uvm_lock_pageq();
		}

		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			printf("pagedaemon: invalid nextpg!   reverting to "
			    "queue head\n");
			nextpg = TAILQ_FIRST(pglst);	/* reload! */
		}

	}	/* end of "inactive" 'for' loop */
	return (retval);
}
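
/*
 * note: the boolean return value above feeds the "got_it" logic in
 * uvmpd_scan() below: once one inactive-list scan reports that the
 * free target was met, the other list need not be scanned this pass.
 */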

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan()
{
	int s, free, inactive_shortage, swap_shortage;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	boolean_t got_it;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;		/* counter */

#ifdef __GNUC__
	uobj = NULL;	/* XXX gcc */
#endif
	/*
	 * get current "free" page count
	 */
	s = splimp();
	uvm_lock_fpageq();
	free = uvmexp.free;
	uvm_unlock_fpageq();
	splx(s);

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (free < uvmexp.freetarg) {

		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout", free,
		    uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		pmap_update();		/* update so we can scan inactive q */
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	/*
	 * do loop #1!   alternate starting queue between swap and object based
	 * on the low bit of uvmexp.pdrevs (which we bump by one each call).
	 */

	got_it = FALSE;
	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
	if (!got_it)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.  if we are still below the free target
	 * and we didn't start any pageouts in the inactive scan above
	 * (perhaps because we're out of swap space) and we've met
	 * the inactive target, then go ahead and deactivate some more
	 * pages anyway.  meeting the free target is important enough
	 * that it's worth temporarily reducing the number of active pages.
	 */

	inactive_shortage = uvmexp.inactarg - uvmexp.inactive;
	if (free < uvmexp.freetarg && inactive_shortage <= 0 &&
	    uvmexp.paging == 0) {
		inactive_shortage = 16;
	}

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */
	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse == uvmexp.swpages &&
	    uvmexp.swpguniq < uvmexp.swpages &&
	    uvmexp.paging == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	UVMHIST_LOG(pdhist, "  loop 2: inactive_shortage=%d swap_shortage=%d",
		    inactive_shortage, swap_shortage,0,0);
	for (p = TAILQ_FIRST(&uvm.page_active);
	     p != NULL && (inactive_shortage > 0 || swap_shortage > 0);
	     p = nextpg) {

		nextpg = TAILQ_NEXT(p, pageq);
		if (p->flags & PG_BUSY)
			continue;	/* quick check before trying to lock */

		/*
		 * lock the page's owner.
		 */
		/* is page anon owned or ownerless? */
		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {
#ifdef DIAGNOSTIC
			if (p->uanon == NULL)
				panic("pagedaemon: page with no anon or "
				    "object detected - loop 2");
#endif
			if (!simple_lock_try(&p->uanon->an_lock))
				continue;

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {
#ifdef DIAGNOSTIC
				if (p->loan_count < 1)
					panic("pagedaemon: non-loaned "
					    "ownerless page detected - loop 2");
#endif
				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}
		} else {
			if (!simple_lock_try(&p->uobject->vmobjlock))
				continue;
		}

		/*
		 * skip this page if it's busy.
		 */
		if ((p->flags & PG_BUSY) != 0) {
			if (p->pqflags & PQ_ANON)
				simple_unlock(&p->uanon->an_lock);
			else
				simple_unlock(&p->uobject->vmobjlock);
			continue;
		}

		/*
		 * free any swap allocated to this page
		 * if there's a shortage of swap.
		 */
		if (swap_shortage > 0) {
			if (p->pqflags & PQ_ANON && p->uanon->an_swslot) {
				uvm_swap_free(p->uanon->an_swslot, 1);
				p->uanon->an_swslot = 0;
				p->flags &= ~PG_CLEAN;
				swap_shortage--;
			}
			if (p->pqflags & PQ_AOBJ) {
				int slot = uao_set_swslot(p->uobject,
					p->offset >> PAGE_SHIFT, 0);
				if (slot) {
					uvm_swap_free(slot, 1);
					p->flags &= ~PG_CLEAN;
					swap_shortage--;
				}
			}
		}

		/*
		 * deactivate this page if there's a shortage of
		 * inactive pages.
		 */
		if (inactive_shortage > 0) {
			pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			inactive_shortage--;
		}

		if (p->pqflags & PQ_ANON)
			simple_unlock(&p->uanon->an_lock);
		else
			simple_unlock(&p->uobject->vmobjlock);
	}
}