/*	$NetBSD: uvm_pdaemon.c,v 1.11 1998/10/18 23:50:00 chs Exp $	*/

/*
 * XXXCDC: "ROUGH DRAFT" QUALITY UVM PRE-RELEASE FILE!
 *         >>>USE AT YOUR OWN RISK, WORK IS NOT FINISHED<<<
 */
/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

#include "opt_uvmhist.h"

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>

#include <vm/vm.h>
#include <vm/vm_page.h>
#include <vm/vm_kern.h>

#include <uvm/uvm.h>

/*
 * local prototypes
 */

static void		uvmpd_scan __P((void));
static boolean_t	uvmpd_scan_inactive __P((struct pglist *));
static void		uvmpd_tune __P((void));


/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */
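
/*
 * usage sketch (a hypothetical caller, allocator arguments elided):
 * code that cannot get a free page might loop like
 *
 *	while ((pg = uvm_pagealloc(...)) == NULL)
 *		uvm_wait("mywait");
 */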

void uvm_wait(wmsg)
	char *wmsg;
{
	int timo = 0;
	int s = splbio();

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curproc == uvm.pagedaemon_proc) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout (hz/8, i.e. ~1/8 sec) */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	simple_lock(&uvm.pagedaemon_lock);
	thread_wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm.pagedaemon_lock, FALSE, wmsg,
	    timo);

	splx(s);
}


/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune()
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = max(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = min(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;
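
	/*
	 * worked example (assuming 4k pages, i.e. PAGE_SHIFT == 12):
	 * on a 32MB machine npages is 8192, so npages/20 is 409 pages;
	 * the clamp above cuts that to (256*1024) >> 12 == 64 pages,
	 * and freetarg then becomes (64 * 4) / 3 == 85 pages.
	 */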

	/* uvmexp.inactarg: computed in main daemon loop */

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout()
{
	int npages = 0;
	int s;
	struct uvm_aiodesc *aio, *nextaio;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_proc = curproc;
	(void) spl0();
	uvm_lock_pageq();
	npages = uvmexp.npages;
	uvmpd_tune();
	uvm_unlock_pageq();

	/*
	 * main loop
	 */
	while (TRUE) {

		/*
		 * carefully attempt to go to sleep (without losing "wakeups"!).
		 * we need splbio because we want to make sure the aio_done list
		 * is totally empty before we go to sleep.
		 */

		s = splbio();
		simple_lock(&uvm.pagedaemon_lock);

		/*
		 * if we've got done aio's, then bypass the sleep
		 */

		if (uvm.aio_done.tqh_first == NULL) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm.pagedaemon_lock, FALSE, "daemon_slp", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);

			/* relock pagedaemon_lock, still at splbio */
			simple_lock(&uvm.pagedaemon_lock);
		}

		/*
		 * check for done aio structures
		 */

		aio = uvm.aio_done.tqh_first;	/* save current list (if any)*/
		if (aio) {
			TAILQ_INIT(&uvm.aio_done);	/* zero global list */
		}

		simple_unlock(&uvm.pagedaemon_lock);	/* unlock */
		splx(s);				/* drop splbio */

		/*
		 * first clear out any pending aios (to free space in case we
		 * want to pageout more stuff).
		 */
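
		/*
		 * note: the aio list was detached from uvm.aio_done while
		 * we still held pagedaemon_lock, so we can walk it below
		 * with no locks held.
		 */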

		for (/*null*/; aio != NULL ; aio = nextaio) {

			uvmexp.paging -= aio->npages;
			nextaio = aio->aioq.tqe_next;
			aio->aiodone(aio);

		}

		/* Next, drain pool resources */
		pool_drain(0);

		/*
		 * now lock page queues and recompute inactive count
		 */
		uvm_lock_pageq();

		if (npages != uvmexp.npages) {	/* check for new pages? */
			npages = uvmexp.npages;
			uvmpd_tune();
		}

		uvmexp.inactarg = (uvmexp.active + uvmexp.inactive) / 3;
		if (uvmexp.inactarg <= uvmexp.freetarg)
			uvmexp.inactarg = uvmexp.freetarg + 1;
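
		/*
		 * e.g. (assumed numbers, for illustration): with 900
		 * active and 300 inactive pages, inactarg becomes
		 * (900 + 300) / 3 == 400, so uvmpd_scan below keeps
		 * deactivating until about 400 pages are inactive.
		 */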

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d, inact/itarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, uvmexp.inactive,
		    uvmexp.inactarg);

		/*
		 * scan if needed
		 * [XXX: note we are reading uvmexp.free without locking]
		 */
		if (uvmexp.free < uvmexp.freetarg ||
		    uvmexp.inactive < uvmexp.inactarg)
			uvmpd_scan();

		/*
		 * done scan.  unlock page queues (the only lock we are holding)
		 */
		uvm_unlock_pageq();

		/*
		 * done!    restart loop.
		 */
		thread_wakeup(&uvmexp.free);
	}
	/*NOTREACHED*/
}

/*
 * uvmpd_scan_inactive: the first loop of uvmpd_scan broken out into
 * 	its own function for ease of reading.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 * => we return TRUE if we are exiting because we met our target
 */

static boolean_t
uvmpd_scan_inactive(pglst)
	struct pglist *pglst;
{
	boolean_t retval = FALSE;	/* assume we haven't hit target */
	int s, free, result;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	struct vm_page *pps[MAXBSIZE >> PAGE_SHIFT], **ppsp;
	int npages;
	struct vm_page *swpps[MAXBSIZE >> PAGE_SHIFT]; 	/* XXX: see below */
	int swnpages, swcpages;				/* XXX: see below */
	int swslot, oldslot;
	struct vm_anon *anon;
	boolean_t swap_backed;
	vaddr_t start;
	UVMHIST_FUNC("uvmpd_scan_inactive"); UVMHIST_CALLED(pdhist);

	/*
	 * note: we currently keep swap-backed pages on a separate inactive
	 * list from object-backed pages.   however, merging the two lists
	 * back together again hasn't been ruled out.   thus, we keep our
	 * swap cluster in "swpps" rather than in pps (allows us to mix
	 * clustering types in the event of a mixed inactive queue).
	 */

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */
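	/*
	 * note: a cluster holds at most MAXBSIZE >> PAGE_SHIFT pages
	 * (e.g. 64k/4k == 16 pages, assuming 4k pages and the usual
	 * 64k MAXBSIZE).
	 */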
	swslot = 0;
	swnpages = swcpages = 0;
	free = 0;

	for (p = pglst->tqh_first ; p != NULL || swslot != 0 ; p = nextpg) {

		/*
		 * note that p can be NULL iff we have traversed the whole
		 * list and need to do one final swap-backed clustered pageout.
		 */
		if (p) {
			/*
			 * update our copy of "free" and see if we've met
			 * our target
			 */
			s = splimp();
			uvm_lock_fpageq();
			free = uvmexp.free;
			uvm_unlock_fpageq();
			splx(s);

			if (free >= uvmexp.freetarg) {
				UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
				retval = TRUE;		/* hit the target! */

				if (swslot == 0)
					/* exit now if no swap-i/o pending */
					break;

				/* set p to null to signal final swap i/o */
				p = NULL;
			}
		}

		uobj = NULL;	/* be safe and shut gcc up */
		anon = NULL;	/* be safe and shut gcc up */

		if (p) {	/* if (we have a new page to consider) */
			/*
			 * we are below target and have a new page to consider.
			 */
			uvmexp.pdscans++;
			nextpg = p->pageq.tqe_next;

			/*
			 * move referenced pages back to active queue and
			 * skip to next page (unlikely to happen since
			 * inactive pages shouldn't have any valid mappings
			 * and we cleared reference before deactivating).
			 */
			if (pmap_is_referenced(PMAP_PGARG(p))) {
				uvm_pageactivate(p);
				uvmexp.pdreact++;
				continue;
			}

			/*
			 * first we attempt to lock the object that this page
			 * belongs to.  if our attempt fails we skip on to
			 * the next page (no harm done).  it is important to
			 * "try" locking the object as we are locking in the
			 * wrong order (pageq -> object) and we don't want to
			 * get deadlocked.
			 *
			 * the only time we expect to see an ownerless page
			 * (i.e. a page with no uobject and !PQ_ANON) is if an
			 * anon has loaned a page from a uvm_object and the
			 * uvm_object has dropped the ownership.  in that
			 * case, the anon can "take over" the loaned page
			 * and make it its own.
			 */

			/* is page part of an anon or ownerless ? */
			if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {

				anon = p->uanon;

#ifdef DIAGNOSTIC
				/* to be on inactive q, page must be part
				 * of _something_ */
				if (anon == NULL)
					panic("pagedaemon: page with no anon "
					    "or object detected - loop 1");
#endif

				if (!simple_lock_try(&anon->an_lock))
					/* lock failed, skip this page */
					continue;

				/*
				 * if the page is ownerless, claim it in the
				 * name of "anon"!
				 */
				if ((p->pqflags & PQ_ANON) == 0) {
#ifdef DIAGNOSTIC
					if (p->loan_count < 1)
						panic("pagedaemon: non-loaned "
						    "ownerless page detected -"
						    " loop 1");
#endif
					p->loan_count--;
					p->pqflags |= PQ_ANON;      /* anon now owns it */
				}

				if (p->flags & PG_BUSY) {
					simple_unlock(&anon->an_lock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}

				uvmexp.pdanscan++;

			} else {

				uobj = p->uobject;

				if (!simple_lock_try(&uobj->vmobjlock))
					/* lock failed, skip this page */
					continue;

				if (p->flags & PG_BUSY) {
					simple_unlock(&uobj->vmobjlock);
					uvmexp.pdbusy++;
					/* someone else owns page, skip it */
					continue;
				}

				uvmexp.pdobscan++;
			}

			/*
			 * we now have the object and the page queues locked.
			 * the page is not busy.   if the page is clean we
			 * can free it now and continue.
			 */

			if (p->flags & PG_CLEAN) {
				/* zap all mappings with pmap_page_protect... */
				pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
				uvm_pagefree(p);
				uvmexp.pdfreed++;

				if (anon) {
#ifdef DIAGNOSTIC
					/*
					 * an anonymous page can only be clean
					 * if it has valid backing store.
					 */
					if (anon->an_swslot == 0)
						panic("pagedaemon: clean anon "
						 "page without backing store?");
#endif
					/* remove from object */
					anon->u.an_page = NULL;
					simple_unlock(&anon->an_lock);
				} else {
					/* pagefree has already removed the
					 * page from the object */
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * this page is dirty, skip it if we'll have met our
			 * free target when all the current pageouts complete.
			 */
			if (free + uvmexp.paging > uvmexp.freetarg) {
				if (anon) {
					simple_unlock(&anon->an_lock);
				} else {
					simple_unlock(&uobj->vmobjlock);
				}
				continue;
			}

			/*
			 * the page we are looking at is dirty.   we must
			 * clean it before it can be freed.  to do this we
			 * first mark the page busy so that no one else will
			 * touch the page.   we write protect all the mappings
			 * of the page so that no one touches it while it is
			 * in I/O.
			 */

			swap_backed = ((p->pqflags & PQ_SWAPBACKED) != 0);
			p->flags |= PG_BUSY;		/* now we own it */
			UVM_PAGE_OWN(p, "scan_inactive");
			pmap_page_protect(PMAP_PGARG(p), VM_PROT_READ);
			uvmexp.pgswapout++;

			/*
			 * for swap-backed pages we need to (re)allocate
			 * swap space.
			 */
			if (swap_backed) {

				/*
				 * free old swap slot (if any)
				 */
				if (anon) {
					if (anon->an_swslot) {
						uvm_swap_free(anon->an_swslot,
						    1);
						anon->an_swslot = 0;
					}
				} else {
					oldslot = uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT, 0);

					if (oldslot)
						uvm_swap_free(oldslot, 1);
				}

				/*
				 * start new cluster (if necessary)
				 */
				if (swslot == 0) {
					/* want this much */
					swnpages = MAXBSIZE >> PAGE_SHIFT;

					swslot = uvm_swap_alloc(&swnpages,
					    TRUE);

					if (swslot == 0) {
						/* no swap?  give up! */
						p->flags &= ~PG_BUSY;
						UVM_PAGE_OWN(p, NULL);
						if (anon)
							simple_unlock(
							    &anon->an_lock);
						else
							simple_unlock(
							    &uobj->vmobjlock);
						continue;
					}
					swcpages = 0;	/* cluster is empty */
				}

				/*
				 * add block to cluster
				 */
				swpps[swcpages] = p;
				uvmexp.pgswapout++;
				if (anon)
					anon->an_swslot = swslot + swcpages;
				else
					uao_set_swslot(uobj,
					    p->offset >> PAGE_SHIFT,
					    swslot + swcpages);
				swcpages++;

				/* done (swap-backed) */
			}

			/* end: if (p) ["if we have new page to consider"] */
		} else {

			/* if p == NULL we must be doing a last swap i/o */
			swap_backed = TRUE;
		}

		/*
		 * now consider doing the pageout.
		 *
		 * for swap-backed pages, we do the pageout if we have either
		 * filled the cluster (in which case (swnpages == swcpages)) or
		 * run out of pages (p == NULL).
		 *
		 * for object pages, we always do the pageout.
		 */
		if (swap_backed) {

			if (p) {	/* if we just added a page to cluster */
				if (anon)
					simple_unlock(&anon->an_lock);
				else
					simple_unlock(&uobj->vmobjlock);

				/* cluster not full yet? */
				if (swcpages < swnpages)
					continue;
			}

			/* starting I/O now... set up for it */
			npages = swcpages;
			ppsp = swpps;
			/* for swap-backed pages only */
			start = (vaddr_t) swslot;

			/* if this is final pageout we could have a few
			 * extra swap blocks */
			if (swcpages < swnpages) {
				uvm_swap_free(swslot + swcpages,
				    (swnpages - swcpages));
			}

		} else {

			/* normal object pageout */
			ppsp = pps;
			npages = sizeof(pps) / sizeof(struct vm_page *);
			/* not looked at because PGO_ALLPAGES is set */
			start = 0;

		}

		/*
		 * now do the pageout.
		 *
		 * for swap_backed pages we have already built the cluster.
		 * for !swap_backed pages, uvm_pager_put will call the object's
		 * "make put cluster" function to build a cluster on our behalf.
		 *
		 * we pass the PGO_PDFREECLUST flag to uvm_pager_put to instruct
		 * it to free the cluster pages for us on a successful I/O (it
		 * always does this for un-successful I/O requests).  this
		 * allows us to do clustered pageout without having to deal
		 * with cluster pages at this level.
		 *
		 * note locking semantics of uvm_pager_put with PGO_PDFREECLUST:
		 *  IN: locked: uobj (if !swap_backed), page queues
		 * OUT: locked: uobj (if !swap_backed && result !=VM_PAGER_PEND)
		 *     !locked: pageqs, uobj (if swap_backed || VM_PAGER_PEND)
		 *
		 * [the bit about VM_PAGER_PEND saves us one lock-unlock pair]
		 */

		/* locked: uobj (if !swap_backed), page queues */
		uvmexp.pdpageouts++;
		result = uvm_pager_put((swap_backed) ? NULL : uobj, p,
		    &ppsp, &npages, PGO_ALLPAGES|PGO_PDFREECLUST, start, 0);
		/* locked: uobj (if !swap_backed && result != PEND) */
		/* unlocked: pageqs, object (if swap_backed ||result == PEND) */

		/*
		 * if we did i/o to swap, zero swslot to indicate that we are
		 * no longer building a swap-backed cluster.
		 */

		if (swap_backed)
			swslot = 0;		/* done with this cluster */

		/*
		 * first, we check for VM_PAGER_PEND which means that the
		 * async I/O is in progress and the async I/O done routine
		 * will clean up after us.   in this case we move on to the
		 * next page.
		 *
		 * there is a very remote chance that the pending async i/o can
		 * finish _before_ we get here.   if that happens, our page "p"
		 * may no longer be on the inactive queue.   so we verify this
		 * when determining the next page (starting over at the head if
		 * we've lost our inactive page).
		 */

		if (result == VM_PAGER_PEND) {
			uvmexp.paging += npages;
			uvm_lock_pageq();		/* relock page queues */
			uvmexp.pdpending++;
			if (p) {
				if (p->pqflags & PQ_INACTIVE)
					/* reload! */
					nextpg = p->pageq.tqe_next;
				else
					/* reload! */
					nextpg = pglst->tqh_first;
			} else {
				nextpg = NULL;		/* done list */
			}
			continue;
		}

		/*
		 * clean up "p" if we have one
		 */

		if (p) {
			/*
			 * the I/O request to "p" is done and uvm_pager_put
			 * has freed any cluster pages it may have allocated
			 * during I/O.  all that is left for us to do is
			 * clean up page "p" (which is still PG_BUSY).
			 *
			 * our result could be one of the following:
			 *   VM_PAGER_OK: successful pageout
			 *
			 *   VM_PAGER_AGAIN: tmp resource shortage, we skip
			 *     to next page
			 *   VM_PAGER_{FAIL,ERROR,BAD}: an error.   we
			 *     "reactivate" page to get it out of the way (it
			 *     will eventually drift back into the inactive
			 *     queue for a retry).
			 *   VM_PAGER_UNLOCK: should never see this as it is
			 *     only valid for "get" operations
			 */

			/* relock p's object: page queues not locked yet, so
			 * no need for "try" */

			/* !swap_backed case: already locked... */
			if (swap_backed) {
				if (anon)
					simple_lock(&anon->an_lock);
				else
					simple_lock(&uobj->vmobjlock);
			}

#ifdef DIAGNOSTIC
			if (result == VM_PAGER_UNLOCK)
				panic("pagedaemon: pageout returned "
				    "invalid 'unlock' code");
#endif

			/* handle PG_WANTED now */
			if (p->flags & PG_WANTED)
				/* still holding object lock */
				thread_wakeup(p);

			p->flags &= ~(PG_BUSY|PG_WANTED);
			UVM_PAGE_OWN(p, NULL);

			/* released during I/O? */
			if (p->flags & PG_RELEASED) {
				if (anon) {
					/* remove page so we can get nextpg */
					anon->u.an_page = NULL;

					/* XXX needed? */
					simple_unlock(&anon->an_lock);
					uvm_anfree(anon);	/* kills anon */
					pmap_page_protect(PMAP_PGARG(p),
					    VM_PROT_NONE);
					anon = NULL;
					uvm_lock_pageq();
					nextpg = p->pageq.tqe_next;
					/* free released page */
					uvm_pagefree(p);

				} else {

#ifdef DIAGNOSTIC
					if (uobj->pgops->pgo_releasepg == NULL)
						panic("pagedaemon: no "
						   "pgo_releasepg function");
#endif

					/*
					 * pgo_releasepg nukes the page and
					 * gets "nextpg" for us.  it returns
					 * with the page queues locked (when
					 * given nextpg ptr).
					 */
					if (!uobj->pgops->pgo_releasepg(p,
					    &nextpg))
						/* uobj died after release */
						uobj = NULL;

					/*
					 * lock page queues here so that they're
					 * always locked at the end of the loop.
					 */
					uvm_lock_pageq();
				}

			} else {	/* page was not released during I/O */

				uvm_lock_pageq();
				nextpg = p->pageq.tqe_next;

				if (result != VM_PAGER_OK) {

					/* pageout was a failure... */
					if (result != VM_PAGER_AGAIN)
						uvm_pageactivate(p);
					pmap_clear_reference(PMAP_PGARG(p));
					/* XXXCDC: if (swap_backed) FREE p's
					 * swap block? */

				} else {

					/* pageout was a success... */
					pmap_clear_reference(PMAP_PGARG(p));
					pmap_clear_modify(PMAP_PGARG(p));
					p->flags |= PG_CLEAN;
					/* XXX: could free page here, but old
					 * pagedaemon does not */

				}
			}

			/*
			 * drop object lock (if there is an object left).   do
			 * a safety check of nextpg to make sure it is on the
			 * inactive queue (it should be since PG_BUSY pages on
			 * the inactive queue can't be re-queued [note: not
			 * true for active queue]).
			 */

			if (anon)
				simple_unlock(&anon->an_lock);
			else if (uobj)
				simple_unlock(&uobj->vmobjlock);

		} /* if (p) */ else {

			/* if p is null in this loop, make sure it stays null
			 * in next loop */
			nextpg = NULL;

			/*
			 * lock page queues here just so they're always locked
			 * at the end of the loop.
			 */
			uvm_lock_pageq();
		}

		if (nextpg && (nextpg->pqflags & PQ_INACTIVE) == 0) {
			printf("pagedaemon: invalid nextpg!   reverting to "
			    "queue head\n");
			nextpg = pglst->tqh_first;	/* reload! */
		}

	}	/* end of "inactive" 'for' loop */
	return (retval);
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

void
uvmpd_scan()
{
	int s, free, pages_freed, page_shortage;
	struct vm_page *p, *nextpg;
	struct uvm_object *uobj;
	boolean_t got_it;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;		/* counter */

#ifdef __GNUC__
	uobj = NULL;	/* XXX gcc */
#endif
	/*
	 * get current "free" page count
	 */
	s = splimp();
	uvm_lock_fpageq();
	free = uvmexp.free;
	uvm_unlock_fpageq();
	splx(s);

#ifndef __SWAP_BROKEN
	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */
	if (free < uvmexp.freetarg) {

		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout", free,
		    uvmexp.freetarg, 0, 0);
		uvm_unlock_pageq();
		uvm_swapout_threads();
		pmap_update();		/* update so we can scan inactive q */
		uvm_lock_pageq();

	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
	pages_freed = uvmexp.pdfreed;	/* so far... */

	/*
	 * do loop #1!   alternate starting queue between swap and object based
	 * on the low bit of uvmexp.pdrevs (which we bump by one each call).
	 */
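	/*
	 * i.e. on odd-numbered passes (with swap configured) the
	 * swap-backed queue is scanned first, then the object-backed
	 * queue; on even-numbered passes the order is reversed.  the
	 * later scans are skipped once one of them reports that the
	 * free target has been met.
	 */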

	got_it = FALSE;
	if ((uvmexp.pdrevs & 1) != 0 && uvmexp.nswapdev != 0)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_swp);
	if (!got_it)
		got_it = uvmpd_scan_inactive(&uvm.page_inactive_obj);
	if (!got_it && (uvmexp.pdrevs & 1) == 0 && uvmexp.nswapdev != 0)
		(void) uvmpd_scan_inactive(&uvm.page_inactive_swp);

	/*
	 * we have done the scan to get free pages.   now we work on meeting
	 * our inactive target.
	 */

	page_shortage = uvmexp.inactarg - uvmexp.inactive;
	pages_freed = uvmexp.pdfreed - pages_freed; /* # pages freed in loop */
	if (page_shortage <= 0 && pages_freed == 0)
		page_shortage = 1;
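
	/*
	 * note: if loop #1 freed nothing and the inactive target is
	 * already met, we force page_shortage to one so that the loop
	 * below still deactivates at least one page and the daemon
	 * makes some progress on this pass.
	 */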

	UVMHIST_LOG(pdhist, "  second loop: page_shortage=%d", page_shortage,
	    0, 0, 0);
	for (p = uvm.page_active.tqh_first ;
	    p != NULL && page_shortage > 0 ; p = nextpg) {
		nextpg = p->pageq.tqe_next;
		if (p->flags & PG_BUSY)
			continue;	/* quick check before trying to lock */

		/*
		 * lock owner
		 */
		/* is page anon owned or ownerless? */
		if ((p->pqflags & PQ_ANON) || p->uobject == NULL) {

#ifdef DIAGNOSTIC
			if (p->uanon == NULL)
				panic("pagedaemon: page with no anon or "
				    "object detected - loop 2");
#endif

			if (!simple_lock_try(&p->uanon->an_lock))
				continue;

			/* take over the page? */
			if ((p->pqflags & PQ_ANON) == 0) {

#ifdef DIAGNOSTIC
				if (p->loan_count < 1)
					panic("pagedaemon: non-loaned "
					    "ownerless page detected - loop 2");
#endif

				p->loan_count--;
				p->pqflags |= PQ_ANON;
			}

		} else {

			if (!simple_lock_try(&p->uobject->vmobjlock))
				continue;

		}

		if ((p->flags & PG_BUSY) == 0) {
			pmap_page_protect(PMAP_PGARG(p), VM_PROT_NONE);
			/* no need to check wire_count as pg is "active" */
			uvm_pagedeactivate(p);
			uvmexp.pddeact++;
			page_shortage--;
		}

		if (p->pqflags & PQ_ANON)
			simple_unlock(&p->uanon->an_lock);
		else
			simple_unlock(&p->uobject->vmobjlock);
	}

	/*
	 * done scan
	 */
}