/*	$NetBSD: uvm_pdaemon.c,v 1.110 2019/04/21 15:32:18 chs Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.110 2019/04/21 15:32:18 chs Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

#ifdef UVMHIST
UVMHIST_DEFINE(pdhist);
#endif

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16

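/*
 * UVMPD_NUMTRYLOCKOWNER is how many page-owner trylock failures the
 * pagedaemon will tolerate during a queue scan before briefly pausing
 * to let the lock holder run (see uvmpd_scan_queue).
 */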
#define	UVMPD_NUMTRYLOCKOWNER	16

/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);
static void	uvmpd_pool_drain_thread(void *);
static void	uvmpd_pool_drain_wakeup(void);

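/* number of LWPs sleeping in uvm_wait(); protected by uvm_fpageqlock */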
static unsigned int uvm_pagedaemon_waiters;

/* State for the pool drainer thread */
static kmutex_t uvmpd_pool_drain_lock;
static kcondvar_t uvmpd_pool_drain_cv;
static bool uvmpd_pool_drain_run = false;

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	mutex_spin_enter(&uvm_fpageqlock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pagedaemon_waiters++;
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
}

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{

	KASSERT(mutex_owned(&uvm_fpageqlock));

	if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
	    (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p()) ||
	     uvm_km_va_starved_p()) {
		wakeup(&uvm.pagedaemon);
	}
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added (or removed?) to the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
	int val;

	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	/*
	 * try to keep 0.5% of available RAM free, but limit to between
	 * 128k and 1024k per-CPU.  XXX: what are these values good for?
	 */
	val = uvmexp.npages / 200;
	val = MAX(val, (128*1024) >> PAGE_SHIFT);
	val = MIN(val, (1024*1024) >> PAGE_SHIFT);
	val *= ncpu;

	/* Make sure there's always a user page free. */
	if (val < uvmexp.reserve_kernel + 1)
		val = uvmexp.reserve_kernel + 1;
	uvmexp.freemin = val;

	/* Calculate free target. */
	val = (uvmexp.freemin * 4) / 3;
	if (val <= uvmexp.freemin)
		val = uvmexp.freemin + 1;
	uvmexp.freetarg = val + atomic_swap_uint(&uvm_extrapages, 0);

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%jd, freetarg=%jd, wiredmax=%jd",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int npages = 0;
	int extrapages = 0;

	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	mutex_init(&uvmpd_pool_drain_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&uvmpd_pool_drain_cv, "pooldrain");

	/* Create the pool drainer kernel thread. */
	if (kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL,
	    uvmpd_pool_drain_thread, NULL, NULL, "pooldrain"))
		panic("fork pooldrain");

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	mutex_enter(&uvm_pageqlock);
	npages = uvmexp.npages;
	uvmpd_tune();
	mutex_exit(&uvm_pageqlock);

	/*
	 * main loop
	 */

	for (;;) {
		bool needsscan, needsfree, kmem_va_starved;

		kmem_va_starved = uvm_km_va_starved_p();

		mutex_spin_enter(&uvm_fpageqlock);
		if ((uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) &&
		    !kmem_va_starved) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvm_fpageqlock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else {
			mutex_spin_exit(&uvm_fpageqlock);
		}

		/*
		 * now lock page queues and recompute inactive count
		 */

		mutex_enter(&uvm_pageqlock);
		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			mutex_spin_enter(&uvm_fpageqlock);
			uvmpd_tune();
			mutex_spin_exit(&uvm_fpageqlock);
		}

		uvmpdpol_tune();

		/*
		 * Estimate a hint.  Note that bufmem is returned to
		 * the system only when an entire pool page is empty.
		 */
		mutex_spin_enter(&uvm_fpageqlock);

		UVMHIST_LOG(pdhist,"  free/ftarg=%jd/%jd",
		    uvmexp.free, uvmexp.freetarg, 0,0);

		needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
		needsscan = needsfree || uvmpdpol_needsscan_p();

		/*
		 * scan if needed
		 */
		if (needsscan) {
			mutex_spin_exit(&uvm_fpageqlock);
			uvmpd_scan();
			mutex_spin_enter(&uvm_fpageqlock);
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		if (uvmexp.free > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			wakeup(&uvmexp.free);
			uvm_pagedaemon_waiters = 0;
		}
		mutex_spin_exit(&uvm_fpageqlock);

		/*
		 * scan done.  unlock page queues (the only lock we are holding)
		 */
		mutex_exit(&uvm_pageqlock);

		/*
		 * if we don't need free memory, we're done.
		 */

		if (!needsfree && !kmem_va_starved)
			continue;

		/*
		 * kick the pool drainer thread.
		 */

		uvmpd_pool_drain_wakeup();
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

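/*
 * uvm_pageout_start: record that a pageout of npages pages has started.
 */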
void
uvm_pageout_start(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	uvmexp.paging += npages;
	mutex_spin_exit(&uvm_fpageqlock);
}

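/*
 * uvm_pageout_done: record that npages pageouts have completed and wake
 * up whoever is waiting for the memory.
 */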
void
uvm_pageout_done(int npages)
{

	mutex_spin_enter(&uvm_fpageqlock);
	KASSERT(uvmexp.paging >= npages);
	uvmexp.paging -= npages;

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	if (uvmexp.free <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else {
		wakeup(&uvmexp.free);
		uvm_pagedaemon_waiters = 0;
	}
	mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	kmutex_t *slock;

	KASSERT(mutex_owned(&uvm_pageqlock));

	if (uobj != NULL) {
		slock = uobj->vmobjlock;
	} else {
		struct vm_anon *anon = pg->uanon;

		KASSERT(anon != NULL);
		slock = anon->an_lock;
	}

	if (!mutex_tryenter(slock)) {
		return NULL;
	}

	if (uobj == NULL) {

		/*
		 * set PQ_ANON if it isn't set already.
		 */

		if ((pg->pqflags & PQ_ANON) == 0) {
			KASSERT(pg->loan_count > 0);
			pg->loan_count--;
			pg->pqflags |= PQ_ANON;
			/* anon now owns it */
		}
	}

	return slock;
}

#if defined(VMSWAP)
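/*
 * a swapcluster collects up to MAXPHYS worth of swap-backed pages that
 * have been assigned contiguous swap slots, so they can be paged out
 * in a single I/O.
 */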
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

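/*
 * swapcluster_allocslots: reserve a contiguous run of swap slots for the
 * cluster.  returns 0 on success, ENOMEM if no swap space is available.
 */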
static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/* Even with strange MAXPHYS, the shift
	   implicitly rounds down to a page. */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

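/*
 * swapcluster_add: assign the next free slot in the cluster to a page,
 * recording the slot in the page's anon or uvm_object.
 */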
static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

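/*
 * swapcluster_flush: start the pageout of the pages gathered so far.
 * if "now" is false and the cluster still has unused slots, keep
 * gathering instead.
 */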
static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error __diagused;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	if (nused > 0) {
		uvmexp.pdpageouts++;
		uvm_pageout_start(nused);
		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
		KASSERT(error == 0 || error == ENOMEM);
	}

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

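/*
 * swapcluster_nused: return the number of pages currently in the cluster.
 */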
static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		pg->flags &= ~PG_CLEAN;
		result = true;
	} else if (pg->pqflags & PQ_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			pg->flags &= ~PG_CLEAN;
			result = true;
		}
	}

	return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
	kmutex_t *slock;
	bool result;

	if ((pg->flags & PG_BUSY) != 0) {
		return false;
	}

	/*
	 * lock the page's owner.
	 */

	slock = uvmpd_trylockowner(pg);
	if (slock == NULL) {
		return false;
	}

	/*
	 * skip this page if it's busy.
	 */

	if ((pg->flags & PG_BUSY) != 0) {
		mutex_exit(slock);
		return false;
	}

	result = uvmpd_dropswap(pg);

	mutex_exit(slock);

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	int lockownerfail;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	lockownerfail = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (uvmexp.free + uvmexp.paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
			break;
		}

		p = uvmpdpol_selectvictim();
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		/*
		 * first we attempt to lock the object that this page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PQ_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		slock = uvmpd_trylockowner(p);
		if (slock == NULL) {
			/*
			 * yield the cpu to give the LWP holding the lock
			 * a chance to run.  otherwise we can busy-loop
			 * too long if the page queue is filled with a
			 * lot of pages from few objects.
			 */
			lockownerfail++;
			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
				mutex_exit(&uvm_pageqlock);
				/* XXX Better than yielding but inadequate. */
				kpause("livelock", false, 1, NULL);
				mutex_enter(&uvm_pageqlock);
				lockownerfail = 0;
			}
			continue;
		}
		if (p->flags & PG_BUSY) {
			mutex_exit(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}


		/*
		 * we now have the object and the page queues locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->pqflags & PQ_READAHEAD) != 0) {
			p->pqflags &= ~PQ_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->pqflags & PQ_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			mutex_exit(&uvm_pageqlock);
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			mutex_enter(&uvm_pageqlock);
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
			p->flags &= ~(PG_CLEAN);
		}
		if (p->flags & PG_CLEAN) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			uvmexp.pdfreed++;

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			mutex_exit(slock);

			if (slot > 0) {
				/* this page is now only in swap. */
				mutex_enter(&uvm_swap_data_lock);
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				uvmexp.swpgonly++;
				mutex_exit(&uvm_swap_data_lock);
			}
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
			mutex_exit(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * start new swap pageout cluster (if necessary).
		 *
		 * if swap is full reactivate this page so that
		 * we eventually cycle all pages through the
		 * inactive queue.
		 */

		if (swapcluster_allocslots(&swc)) {
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");

		p->flags |= PG_PAGEOUT;
		uvm_pagedequeue(p);

		uvmexp.pgswapout++;
		mutex_exit(&uvm_pageqlock);

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			mutex_enter(&uvm_pageqlock);
			dirtyreacts++;
			uvm_pageactivate(p);
			mutex_exit(slock);
			continue;
		}
		mutex_exit(slock);

		swapcluster_flush(&swc, false);
		mutex_enter(&uvm_pageqlock);

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		uvmexp.pdpending++;

#else /* defined(VMSWAP) */
		uvm_pageactivate(p);
		mutex_exit(slock);
#endif /* defined(VMSWAP) */
	}

#if defined(VMSWAP)
	mutex_exit(&uvm_pageqlock);
	swapcluster_flush(&swc, true);
	mutex_enter(&uvm_pageqlock);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

	/*
	 * work on meeting our targets.   first we work on our free target
	 * by converting inactive pages into free pages.  then we work on
	 * meeting our inactive target by converting active pages to
	 * inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	if (uvmexp.free < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - uvmexp.free;
	}

	uvmpdpol_balancequeue(swap_shortage);

	/*
	 * if still below the minimum target, try unloading kernel
	 * modules.
	 */

	if (uvmexp.free < uvmexp.freemin) {
		module_thread_kick();
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return true if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return true;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */

	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return true;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return false;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}

/*
 * Use a separate thread for draining pools.
 * This work can't be done from the main pagedaemon thread because
 * some pool allocators need to take vm_map locks.
 */

static void
uvmpd_pool_drain_thread(void *arg)
{
	int bufcnt;

	for (;;) {
		mutex_enter(&uvmpd_pool_drain_lock);
		if (!uvmpd_pool_drain_run) {
			cv_wait(&uvmpd_pool_drain_cv, &uvmpd_pool_drain_lock);
		}
		uvmpd_pool_drain_run = false;
		mutex_exit(&uvmpd_pool_drain_lock);

		/*
		 * kill unused metadata buffers.
		 */
		mutex_spin_enter(&uvm_fpageqlock);
		bufcnt = uvmexp.freetarg - uvmexp.free;
		mutex_spin_exit(&uvm_fpageqlock);
		if (bufcnt < 0)
			bufcnt = 0;

		mutex_enter(&bufcache_lock);
		buf_drain(bufcnt << PAGE_SHIFT);
		mutex_exit(&bufcache_lock);

		/*
		 * drain a pool.
		 */
		pool_drain(NULL);
	}
	/*NOTREACHED*/
}

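/*
 * uvmpd_pool_drain_wakeup: nudge the pool drainer thread to do a pass.
 */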
static void
uvmpd_pool_drain_wakeup(void)
{

	mutex_enter(&uvmpd_pool_drain_lock);
	uvmpd_pool_drain_run = true;
	cv_signal(&uvmpd_pool_drain_cv);
	mutex_exit(&uvmpd_pool_drain_lock);
}