/*	$NetBSD: uvm_pdaemon.c,v 1.84.4.9 2007/10/26 17:03:11 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Charles D. Cranor,
 *      Washington University, the University of California, Berkeley and
 *      its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.84.4.9 2007/10/26 17:03:11 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass through the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages through the
 * inactive queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS 16


/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);

unsigned int uvm_pagedaemon_waiters;

/*
 * XXX hack to avoid hangs when large processes fork.
 */
int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pagedaemon_waiters++;
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
}

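/*
 * Illustrative sketch (not part of this file): an allocation path
 * would typically combine uvm_wait() with uvm_reclaimable() below,
 * blocking until the pagedaemon makes progress and giving up when
 * nothing is reclaimable.  The loop is a hypothetical caller, with
 * caller-specific error policy:
 *
 *	while ((pg = uvm_pagealloc(NULL, 0, NULL, 0)) == NULL) {
 *		if (!uvm_reclaimable())
 *			return ENOMEM;		(caller-specific policy)
 *		uvm_wait("pgalloc");
 *	}
 */
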
/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{

	KASSERT(mutex_owned(&uvm_fpageqlock));

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p())) {
		wakeup(&uvm.pagedaemon);
	}
}

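/*
 * Illustrative call pattern (hypothetical caller): a path that has
 * just consumed free pages can nudge the daemon while it still holds
 * the free-page queue lock:
 *
 *	mutex_spin_enter(&uvm_fpageqlock);
 *	uvm_kick_pdaemon();
 *	mutex_spin_exit(&uvm_fpageqlock);
 *
 * With freemin == 64 and freetarg == 85 (see the uvmpd_tune() example
 * below), a free+paging total under 64 always wakes the daemon; a
 * total between 64 and 85 wakes it only if the pdpolicy module
 * reports that a scan is needed.
 */
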
/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	uvmexp.freemin = uvmexp.npages / 20;

	/* between 16k and 256k */
	/* XXX:  what are these values good for? */
	uvmexp.freemin = MAX(uvmexp.freemin, (16*1024) >> PAGE_SHIFT);
	uvmexp.freemin = MIN(uvmexp.freemin, (256*1024) >> PAGE_SHIFT);

	/* Make sure there's always a user page free. */
	if (uvmexp.freemin < uvmexp.reserve_kernel + 1)
		uvmexp.freemin = uvmexp.reserve_kernel + 1;

	uvmexp.freetarg = (uvmexp.freemin * 4) / 3;
	if (uvmexp.freetarg <= uvmexp.freemin)
		uvmexp.freetarg = uvmexp.freemin + 1;

	uvmexp.freetarg += uvm_extrapages;
	uvm_extrapages = 0;

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

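/*
 * Worked example (assuming 4 KB pages, PAGE_SHIFT == 12): with
 * npages == 32768 (128 MB of managed memory), npages / 20 == 1638,
 * but the clamps (16*1024)>>12 == 4 and (256*1024)>>12 == 64 cap
 * freemin at 64 pages (256 KB).  freetarg then becomes
 * (64 * 4) / 3 == 85 pages, plus any uvm_extrapages.
 */
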
/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int bufcnt, npages = 0;
	int extrapages = 0;
	struct pool *pp;
	uint64_t where;
	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	mutex_enter(&uvm_pageqlock);
	npages = uvmexp.npages;
	uvmpd_tune();
	mutex_exit(&uvm_pageqlock);

	/*
	 * main loop
	 */

	for (;;) {
		bool needsscan;

		mutex_spin_enter(&uvm_fpageqlock);
		if (uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm_fpageqlock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else {
			mutex_spin_exit(&uvm_fpageqlock);
		}

		/*
		 * now lock page queues and recompute inactive count
		 */

		mutex_enter(&uvm_pageqlock);
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			mutex_spin_enter(&uvm_fpageqlock);
			uvmpd_tune();
			mutex_spin_exit(&uvm_fpageqlock);
		}

		uvmpdpol_tune();

		/*
		 * Estimate a hint.  Note that buffer memory is returned
		 * to the system only when an entire pool page is empty.
		 */
		mutex_spin_enter(&uvm_fpageqlock);
		bufcnt = uvmexp.freetarg - uvmexp.free;
		if (bufcnt < 0)
			bufcnt = 0;

		UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d",
		    uvmexp.free, uvmexp.freetarg, 0,0);

		needsscan = uvmexp.free + uvmexp.paging < uvmexp.freetarg ||
		    uvmpdpol_needsscan_p();
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan if needed
		 */
		if (needsscan)
			uvmpd_scan();

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */

		mutex_spin_enter(&uvm_fpageqlock);
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
			uvm_pagedaemon_waiters = 0;
		}
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */
		mutex_exit(&uvm_pageqlock);

		/*
		 * start draining pool resources now that we're not
		 * holding any locks.
		 */
		pool_drain_start(&pp, &where);

		/*
		 * kill unused metadata buffers.
		 */
		mutex_enter(&bufcache_lock);
		buf_drain(bufcnt << PAGE_SHIFT);
		mutex_exit(&bufcache_lock);

		/*
		 * free any cached u-areas we don't need
		 */
		uvm_uarea_drain(true);

		/*
		 * complete draining the pools.
		 */
		pool_drain_end(pp, where);
	}
	/*NOTREACHED*/
}

/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

void
uvm_pageout_start(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	uvmexp.paging += npages;
	mutex_spin_exit(&uvm_fpageqlock);
}

void
uvm_pageout_done(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	KASSERT(uvmexp.paging >= npages);
	uvmexp.paging -= npages;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
		uvm_pagedaemon_waiters = 0;
	}
	mutex_spin_exit(&uvm_fpageqlock);
}

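/*
 * Illustrative sketch of the start/done pairing (condensed from
 * swapcluster_flush() below plus a hypothetical i/o completion side):
 * uvm_pageout_start() is called before an async pageout is issued and
 * uvm_pageout_done() once it completes, so uvmexp.paging counts the
 * pages still in flight:
 *
 *	uvm_pageout_start(nused);
 *	error = uvm_swap_put(slot, pages, nused, 0);
 *	...
 *	(later, from the aiodone path, once the write has finished)
 *	uvm_pageout_done(nused);
 */
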
/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	kmutex_t *slock;

	KASSERT(mutex_owned(&uvm_pageqlock));

	if (uobj != NULL) {
		slock = &uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = &anon->an_lock;
	}

	if (!mutex_tryenter(slock)) {
		return NULL;
	}

	if (uobj == NULL) {

		/*
		 * set PQ_ANON if it isn't set already.
		 */

		if ((pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
			/* anon now owns it */
		}
	}

	return slock;
}

#if defined(VMSWAP)
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/* Even with strange MAXPHYS, the shift
	   implicitly rounds down to a page. */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

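/*
 * Worked example (assuming MAXPHYS == 64 KB and 4 KB pages):
 * MAXPHYS >> PAGE_SHIFT == 16, so each cluster reserves a contiguous
 * run of 16 swap slots starting at swc_slot, and swc_pages can hold
 * at most 16 pages before the cluster must be flushed.
 */
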
static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(&pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(&uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	uvmexp.pdpageouts++;
	uvm_pageout_start(nused);
	error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
	KASSERT(error == 0);

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

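/*
 * Sketch of the swapcluster life cycle as driven by uvmpd_scan_queue()
 * below (condensed, locking and error handling omitted):
 *
 *	struct swapcluster swc;
 *
 *	swapcluster_init(&swc);
 *	while (scanning) {
 *		if (swapcluster_allocslots(&swc))
 *			continue;		(no swap space available)
 *		if (swapcluster_add(&swc, pg))
 *			continue;		(slot assignment failed)
 *		swapcluster_flush(&swc, false);	(writes only a full cluster)
 *	}
 *	swapcluster_flush(&swc, true);		(force out the partial tail)
 */
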
static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	kmutex_t *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return false;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	mutex_exit(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan the list of replacement candidates for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swc_slot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (uvmexp.free + uvmexp.paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
			break;
		}

		p = uvmpdpol_selectvictim();
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			continue;
		}
		if (p->flags & PG_BUSY) {
			mutex_exit(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}

		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->pqflags & PQ_READAHEAD) != 0) {
			p->pqflags &= ~PQ_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			mutex_exit(&uvm_pageqlock);
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			mutex_enter(&uvm_pageqlock);
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
			p->flags &= ~(PG_CLEAN);
		}
		if (p->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			uvmexp.pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			mutex_exit(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				mutex_enter(&uvm_swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				mutex_exit(&uvm_swap_data_lock);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			mutex_exit(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * if the swap space is full (i.e. every swap page in
		 * use holds the only copy of its data), we can't page
		 * out any more swap-backed pages.  reactivate this
		 * page so that we eventually cycle all pages through
		 * the inactive queue.
		 */

		if (uvm_swapisfull()) {
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}

		/*
		 * start new swap pageout cluster (if necessary).
		 */

		if (swapcluster_allocslots(&swc)) {
			mutex_exit(slock);
			dirtyreacts++; /* XXX */
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");

		p->flags |= PG_PAGEOUT;
		uvm_pagedequeue(p);

		uvmexp.pgswapout++;
		mutex_exit(&uvm_pageqlock);

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			mutex_enter(&uvm_pageqlock);
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}
		mutex_exit(slock);

		swapcluster_flush(&swc, false);
		mutex_enter(&uvm_pageqlock);

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;

#else /* defined(VMSWAP) */
		uvm_pageactivate(p);
		mutex_exit(slock);
#endif /* defined(VMSWAP) */
	}

#if defined(VMSWAP)
	mutex_exit(&uvm_pageqlock);
	swapcluster_flush(&swc, true);
	mutex_enter(&uvm_pageqlock);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

#ifndef __SWAP_BROKEN

	/*
	 * swap out some processes if we are below our free target.
	 * we need to unlock the page queues for this.
	 */

	if (uvmexp.free < uvmexp.freetarg && uvmexp.nswapdev != 0 &&
	    uvm.swapout_enabled) {
		uvmexp.pdswout++;
		UVMHIST_LOG(pdhist,"  free %d < target %d: swapout",
		    uvmexp.free, uvmexp.freetarg, 0, 0);
		mutex_exit(&uvm_pageqlock);
		uvm_swapout_threads();
		mutex_enter(&uvm_pageqlock);
	}
#endif

	/*
	 * now we want to work on meeting our targets.   first we work on our
	 * free target by converting inactive pages into free pages.  then
	 * we work on meeting our inactive target by converting active pages
	 * to inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	uvmpdpol_balancequeue(swap_shortage);
}

/*
 * uvm_reclaimable: decide whether to wait for the pagedaemon.
 *
 * => return true if it seems to be worth doing uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return true;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return true;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return false;
}
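
/*
 * Worked example of the threshold above (assuming 4 KB pages): with
 * active + inactive == 65536 pages (256 MB pageable), the 1/16 term
 * is 65536 >> 4 == 4096 pages, while 5 MB is 1280 pages; MIN picks
 * 1280, so 1280 or more file-backed pages count as reclaimable.
 */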

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}