/*	$NetBSD: uvm_pdaemon.c,v 1.100.4.4 2011/03/05 20:56:37 rmind Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.100.4.4 2011/03/05 20:56:37 rmind Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/module.h>
#include <sys/atomic.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass through the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages through the
 * inactive queue too quickly for them to be referenced and avoid being freed.
 */

#define UVMPD_NUMDIRTYREACTS    16

#define UVMPD_NUMTRYLOCKOWNER   16

/*
 * local prototypes
 */

static void     uvmpd_scan(void);
static void     uvmpd_scan_queue(void);
static void     uvmpd_tune(void);

static unsigned int uvm_pagedaemon_waiters;

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

static kmutex_t uvm_reclaim_lock;

SLIST_HEAD(uvm_reclaim_hooks, uvm_reclaim_hook) uvm_reclaim_list;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
        int timo = 0;

        mutex_spin_enter(&uvm_fpageqlock);

        /*
         * check for page daemon going to sleep (waiting for itself)
         */

        if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
                /*
                 * now we have a problem: the pagedaemon wants to go to
                 * sleep until it frees more memory.   but how can it
                 * free more memory if it is asleep?  that is a deadlock.
                 * we have two options:
                 *  [1] panic now
                 *  [2] put a timeout on the sleep, thus causing the
                 *      pagedaemon to only pause (rather than sleep forever)
                 *
                 * note that option [2] will only help us if we get lucky
                 * and some other process on the system breaks the deadlock
                 * by exiting or freeing memory (thus allowing the pagedaemon
                 * to continue).  for now we panic if DEBUG is defined,
                 * otherwise we hope for the best with option [2] (better
                 * yet, this should never happen in the first place!).
                 */

                printf("pagedaemon: deadlock detected!\n");
                timo = hz >> 3;         /* set timeout */
#if defined(DEBUG)
                /* DEBUG: panic so we can debug it */
                panic("pagedaemon deadlock");
#endif
        }

        uvm_pagedaemon_waiters++;
        wakeup(&uvm.pagedaemon);                /* wake the daemon! */
        UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
}
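
/*
 * Illustrative caller pattern (a sketch, not copied from any single
 * caller; "uobj", "off" and the error handling are hypothetical):
 * allocators typically loop, checking uvm_reclaimable() before each
 * sleep so they can fail the allocation instead of sleeping forever
 * when nothing can be reclaimed:
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		if (!uvm_reclaimable())
 *			return ENOMEM;
 *		uvm_wait("pgalloc");
 *	}
 */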

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 *
 * => called with uvm_fpageqlock held.
 */

void
uvm_kick_pdaemon(void)
{

        KASSERT(mutex_owned(&uvm_fpageqlock));

        if (uvmexp.free + uvmexp.paging < uvmexp.freemin ||
            (uvmexp.free + uvmexp.paging < uvmexp.freetarg &&
             uvmpdpol_needsscan_p())) {
                wakeup(&uvm.pagedaemon);
        }
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 * => caller must call with page queues locked
 */

static void
uvmpd_tune(void)
{
        int val;

        UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

        /*
         * try to keep 0.5% of available RAM free, but limit to between
         * 128k and 1024k per-CPU.  XXX: what are these values good for?
         */
        val = uvmexp.npages / 200;
        val = MAX(val, (128*1024) >> PAGE_SHIFT);
        val = MIN(val, (1024*1024) >> PAGE_SHIFT);
        val *= ncpu;

        /* Make sure there's always a user page free. */
        if (val < uvmexp.reserve_kernel + 1)
                val = uvmexp.reserve_kernel + 1;
        uvmexp.freemin = val;

        /* Calculate free target. */
        val = (uvmexp.freemin * 4) / 3;
        if (val <= uvmexp.freemin)
                val = uvmexp.freemin + 1;
        uvmexp.freetarg = val + atomic_swap_uint(&uvm_extrapages, 0);

        uvmexp.wiredmax = uvmexp.npages / 3;
        UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
              uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}
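
/*
 * Worked example (illustrative figures, not from any particular
 * machine): with 4 KiB pages, 512 MiB of RAM (131072 pages) and
 * ncpu == 2:
 *
 *	npages / 200                = 655
 *	clamped to [32, 256] pages  = 256   (128 KiB .. 1 MiB per CPU)
 *	* ncpu                      = 512   -> freemin = 512 pages (2 MiB)
 *	freetarg = 512 * 4 / 3      = 682 pages, plus any uvm_extrapages
 */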

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
        int bufcnt, npages = 0;
        int extrapages = 0;
        struct pool *pp;
        uint64_t where;
        struct uvm_reclaim_hook *hook;

        UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

        UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

        /*
         * ensure correct priority and set paging parameters...
         */

        uvm.pagedaemon_lwp = curlwp;
        mutex_enter(&uvm_pageqlock);
        npages = uvmexp.npages;
        uvmpd_tune();
        mutex_exit(&uvm_pageqlock);

        /*
         * main loop
         */

        for (;;) {
                bool needsscan, needsfree;

                mutex_spin_enter(&uvm_fpageqlock);
                if (uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) {
                        UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
                        UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
                            &uvm_fpageqlock, false, "pgdaemon", 0);
                        uvmexp.pdwoke++;
                        UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
                } else {
                        mutex_spin_exit(&uvm_fpageqlock);
                }

                /*
                 * now lock page queues and recompute inactive count
                 */

                mutex_enter(&uvm_pageqlock);
                if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
                        npages = uvmexp.npages;
                        extrapages = uvm_extrapages;
                        mutex_spin_enter(&uvm_fpageqlock);
                        uvmpd_tune();
                        mutex_spin_exit(&uvm_fpageqlock);
                }

                uvmpdpol_tune();

                /*
                 * Estimate a hint.  Note that bufmem is returned to
                 * the system only when an entire pool page is empty.
                 */
                mutex_spin_enter(&uvm_fpageqlock);
                bufcnt = uvmexp.freetarg - uvmexp.free;
                if (bufcnt < 0)
                        bufcnt = 0;

                UVMHIST_LOG(pdhist,"  free/ftarg=%d/%d",
                    uvmexp.free, uvmexp.freetarg, 0,0);

                needsfree = uvmexp.free + uvmexp.paging < uvmexp.freetarg;
                needsscan = needsfree || uvmpdpol_needsscan_p();

                /*
                 * scan if needed
                 */
                if (needsscan) {
                        mutex_spin_exit(&uvm_fpageqlock);
                        uvmpd_scan();
                        mutex_spin_enter(&uvm_fpageqlock);
                }

                /*
                 * if there's any free memory to be had,
                 * wake up any waiters.
                 */
                if (uvmexp.free > uvmexp.reserve_kernel ||
                    uvmexp.paging == 0) {
                        wakeup(&uvmexp.free);
                        uvm_pagedaemon_waiters = 0;
                }
                mutex_spin_exit(&uvm_fpageqlock);

                /*
                 * scan done.  unlock page queues (the only lock we are holding)
                 */
                mutex_exit(&uvm_pageqlock);

                /*
                 * if we don't need free memory, we're done.
                 */

                if (!needsfree)
                        continue;

                /*
                 * start draining pool resources now that we're not
                 * holding any locks.
                 */
                pool_drain_start(&pp, &where);

                /*
                 * kill unused metadata buffers.
                 */
                mutex_enter(&bufcache_lock);
                buf_drain(bufcnt << PAGE_SHIFT);
                mutex_exit(&bufcache_lock);

                mutex_enter(&uvm_reclaim_lock);
                SLIST_FOREACH(hook, &uvm_reclaim_list, uvm_reclaim_next) {
                        (*hook->uvm_reclaim_hook)();
                }
                mutex_exit(&uvm_reclaim_lock);

                /*
                 * complete draining the pools.
                 */
                pool_drain_end(pp, where);
        }
        /*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
        struct buf *bp = (void *)wk;

        KASSERT(&bp->b_work == wk);

        /*
         * process an i/o that's done.
         */

        (*bp->b_iodone)(bp);
}

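/*
 * uvm_pageout_start: record that a pageout of npages is now in flight,
 * so that the "free + paging" target checks account for pages that are
 * about to be freed.
 */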
void
uvm_pageout_start(int npages)
{

        mutex_spin_enter(&uvm_fpageqlock);
        uvmexp.paging += npages;
        mutex_spin_exit(&uvm_fpageqlock);
}

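/*
 * uvm_pageout_done: the pageout of npages has completed; update the
 * in-flight count and wake either the pagedaemon or the LWPs waiting
 * for free memory, as appropriate.
 */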
void
uvm_pageout_done(int npages)
{

        mutex_spin_enter(&uvm_fpageqlock);
        KASSERT(uvmexp.paging >= npages);
        uvmexp.paging -= npages;

        /*
         * wake up either the pagedaemon or the LWPs waiting for it.
         */

        if (uvmexp.free <= uvmexp.reserve_kernel) {
                wakeup(&uvm.pagedaemon);
        } else {
                wakeup(&uvmexp.free);
                uvm_pagedaemon_waiters = 0;
        }
        mutex_spin_exit(&uvm_fpageqlock);
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with pageq locked.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
        struct uvm_object *uobj = pg->uobject;
        kmutex_t *slock;

        KASSERT(mutex_owned(&uvm_pageqlock));

        if (uobj != NULL) {
                slock = uobj->vmobjlock;
        } else {
                struct vm_anon *anon = pg->uanon;

                KASSERT(anon != NULL);
                slock = anon->an_lock;
        }

        if (!mutex_tryenter(slock)) {
                return NULL;
        }

        if (uobj == NULL) {

                /*
                 * set PQ_ANON if it isn't set already.
                 */

                if ((pg->pqflags & PQ_ANON) == 0) {
                        KASSERT(pg->loan_count > 0);
                        pg->loan_count--;
                        pg->pqflags |= PQ_ANON;
                        /* anon now owns it */
                }
        }

        return slock;
}

#if defined(VMSWAP)
struct swapcluster {
        int swc_slot;
        int swc_nallocated;
        int swc_nused;
        struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

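/*
 * a swapcluster gathers dirty swap-backed pages into a contiguous
 * range of swap slots so that they can be paged out in a single I/O.
 * the life cycle, as used by uvmpd_scan_queue() below, is:
 *
 *	swapcluster_init(&swc);
 *	for each victim page:
 *		swapcluster_allocslots(&swc);    (no-op if slots remain)
 *		swapcluster_add(&swc, pg);
 *		swapcluster_flush(&swc, false);  (starts I/O once full)
 *	swapcluster_flush(&swc, true);           (final flush)
 */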
static void
swapcluster_init(struct swapcluster *swc)
{

        swc->swc_slot = 0;
        swc->swc_nused = 0;
}

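/*
 * swapcluster_allocslots: reserve a contiguous range of swap slots for
 * the cluster.  a no-op if a range is already allocated.
 */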
static int
swapcluster_allocslots(struct swapcluster *swc)
{
        int slot;
        int npages;

        if (swc->swc_slot != 0) {
                return 0;
        }

        /* Even with strange MAXPHYS, the shift
           implicitly rounds down to a page. */
        npages = MAXPHYS >> PAGE_SHIFT;
        slot = uvm_swap_alloc(&npages, true);
        if (slot == 0) {
                return ENOMEM;
        }
        swc->swc_slot = slot;
        swc->swc_nallocated = npages;
        swc->swc_nused = 0;

        return 0;
}

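/*
 * swapcluster_add: assign the next free slot in the cluster to a page,
 * recording the slot in the page's anon or uvm_object.
 */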
static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
        int slot;
        struct uvm_object *uobj;

        KASSERT(swc->swc_slot != 0);
        KASSERT(swc->swc_nused < swc->swc_nallocated);
        KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);

        slot = swc->swc_slot + swc->swc_nused;
        uobj = pg->uobject;
        if (uobj == NULL) {
                KASSERT(mutex_owned(pg->uanon->an_lock));
                pg->uanon->an_swslot = slot;
        } else {
                int result;

                KASSERT(mutex_owned(uobj->vmobjlock));
                result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
                if (result == -1) {
                        return ENOMEM;
                }
        }
        swc->swc_pages[swc->swc_nused] = pg;
        swc->swc_nused++;

        return 0;
}

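/*
 * swapcluster_flush: start the pageout of the clustered pages.  if
 * "now" is false, wait until the cluster is full; if true, return any
 * unused slots to the swap allocator and write out whatever we have.
 */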
static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
        int slot;
        int nused;
        int nallocated;
        int error;

        if (swc->swc_slot == 0) {
                return;
        }
        KASSERT(swc->swc_nused <= swc->swc_nallocated);

        slot = swc->swc_slot;
        nused = swc->swc_nused;
        nallocated = swc->swc_nallocated;

        /*
         * if this is the final pageout we could have a few
         * unused swap blocks.  if so, free them now.
         */

        if (nused < nallocated) {
                if (!now) {
                        return;
                }
                uvm_swap_free(slot + nused, nallocated - nused);
        }

        /*
         * now start the pageout.
         */

        if (nused > 0) {
                uvmexp.pdpageouts++;
                uvm_pageout_start(nused);
                error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
                KASSERT(error == 0 || error == ENOMEM);
        }

        /*
         * zero swslot to indicate that we are
         * no longer building a swap-backed cluster.
         */

        swc->swc_slot = 0;
        swc->swc_nused = 0;
}

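/*
 * swapcluster_nused: the number of pages gathered into the current
 * cluster whose pageout has not yet been started.
 */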
static int
swapcluster_nused(struct swapcluster *swc)
{

        return swc->swc_nused;
}

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

static bool
uvmpd_dropswap(struct vm_page *pg)
{
        bool result = false;
        struct vm_anon *anon = pg->uanon;

        if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
                uvm_swap_free(anon->an_swslot, 1);
                anon->an_swslot = 0;
                pg->flags &= ~PG_CLEAN;
                result = true;
        } else if (pg->pqflags & PQ_AOBJ) {
                int slot = uao_set_swslot(pg->uobject,
                    pg->offset >> PAGE_SHIFT, 0);
                if (slot) {
                        uvm_swap_free(slot, 1);
                        pg->flags &= ~PG_CLEAN;
                        result = true;
                }
        }

        return result;
}

/*
 * uvmpd_trydropswap: try to free any swap allocated to this page.
 *
 * => return true if a slot is successfully freed.
 */

bool
uvmpd_trydropswap(struct vm_page *pg)
{
        kmutex_t *slock;
        bool result;

        if ((pg->flags & PG_BUSY) != 0) {
                return false;
        }

        /*
         * lock the page's owner.
         */

        slock = uvmpd_trylockowner(pg);
        if (slock == NULL) {
                return false;
        }

        /*
         * skip this page if it's busy.
         */

        if ((pg->flags & PG_BUSY) != 0) {
                mutex_exit(slock);
                return false;
        }

        result = uvmpd_dropswap(pg);

        mutex_exit(slock);

        return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace-candidate list for pages
 * to clean or free.
 *
 * => called with page queues locked
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
        struct vm_page *p;
        struct uvm_object *uobj;
        struct vm_anon *anon;
#if defined(VMSWAP)
        struct swapcluster swc;
#endif /* defined(VMSWAP) */
        int dirtyreacts;
        int lockownerfail;
        kmutex_t *slock;
        UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

        /*
         * swc_slot is non-zero if we are building a swap cluster.  we want
         * to stay in the loop while we have a page to scan or we have
         * a swap-cluster to build.
         */

#if defined(VMSWAP)
        swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

        dirtyreacts = 0;
        lockownerfail = 0;
        uvmpdpol_scaninit();

        while (/* CONSTCOND */ 1) {

                /*
                 * see if we've met the free target.
                 */

                if (uvmexp.free + uvmexp.paging
#if defined(VMSWAP)
                    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
                    >= uvmexp.freetarg << 2 ||
                    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
                        UVMHIST_LOG(pdhist,"  met free target: "
                                    "exit loop", 0, 0, 0, 0);
                        break;
                }

                p = uvmpdpol_selectvictim();
                if (p == NULL) {
                        break;
                }
                KASSERT(uvmpdpol_pageisqueued_p(p));
                KASSERT(p->wire_count == 0);

                /*
                 * we are below target and have a new page to consider.
                 */

                anon = p->uanon;
                uobj = p->uobject;

                /*
                 * first we attempt to lock the object that this page
                 * belongs to.  if our attempt fails we skip on to
                 * the next page (no harm done).  it is important to
                 * "try" locking the object as we are locking in the
                 * wrong order (pageq -> object) and we don't want to
                 * deadlock.
                 *
                 * the only time we expect to see an ownerless page
                 * (i.e. a page with no uobject and !PQ_ANON) is if an
                 * anon has loaned a page from a uvm_object and the
                 * uvm_object has dropped the ownership.  in that
                 * case, the anon can "take over" the loaned page
                 * and make it its own.
                 */

                slock = uvmpd_trylockowner(p);
                if (slock == NULL) {
                        /*
                         * yield the cpu to give an LWP holding the lock a
                         * chance to run.  otherwise we can busy-loop for too
                         * long if the page queue is filled with many pages
                         * from few objects.
                         */
                        lockownerfail++;
                        if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
                                mutex_exit(&uvm_pageqlock);
                                /* XXX Better than yielding but inadequate. */
                                kpause("livelock", false, 1, NULL);
                                mutex_enter(&uvm_pageqlock);
                                lockownerfail = 0;
                        }
                        continue;
                }
                if (p->flags & PG_BUSY) {
                        mutex_exit(slock);
                        uvmexp.pdbusy++;
                        continue;
                }

                /* does the page belong to an object? */
                if (uobj != NULL) {
                        uvmexp.pdobscan++;
                } else {
#if defined(VMSWAP)
                        KASSERT(anon != NULL);
                        uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
                        panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
                }

                /*
                 * we now have the object and the page queues locked.
                 * if the page is not swap-backed, call the object's
                 * pager to flush and free the page.
                 */

#if defined(READAHEAD_STATS)
                if ((p->pqflags & PQ_READAHEAD) != 0) {
                        p->pqflags &= ~PQ_READAHEAD;
                        uvm_ra_miss.ev_count++;
                }
#endif /* defined(READAHEAD_STATS) */

                if ((p->pqflags & PQ_SWAPBACKED) == 0) {
                        KASSERT(uobj != NULL);
                        mutex_exit(&uvm_pageqlock);
                        (void) (uobj->pgops->pgo_put)(uobj, p->offset,
                            p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
                        mutex_enter(&uvm_pageqlock);
                        continue;
                }

                /*
                 * the page is swap-backed.  remove all the permissions
                 * from the page so we can sync the modified info
                 * without any race conditions.  if the page is clean
                 * we can free it now and continue.
                 */

                pmap_page_protect(p, VM_PROT_NONE);
                if ((p->flags & PG_CLEAN) && pmap_clear_modify(p)) {
                        p->flags &= ~(PG_CLEAN);
                }
                if (p->flags & PG_CLEAN) {
                        int slot;
                        int pageidx;

                        pageidx = p->offset >> PAGE_SHIFT;
                        uvm_pagefree(p);
                        uvmexp.pdfreed++;

                        /*
                         * for anons, we need to remove the page
                         * from the anon ourselves.  for aobjs,
                         * pagefree did that for us.
                         */

                        if (anon) {
                                KASSERT(anon->an_swslot != 0);
                                anon->an_page = NULL;
                                slot = anon->an_swslot;
                        } else {
                                slot = uao_find_swslot(uobj, pageidx);
                        }
                        mutex_exit(slock);

                        if (slot > 0) {
                                /* this page is now only in swap. */
                                mutex_enter(&uvm_swap_data_lock);
                                KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
                                uvmexp.swpgonly++;
                                mutex_exit(&uvm_swap_data_lock);
                        }
                        continue;
                }

#if defined(VMSWAP)
                /*
                 * this page is dirty, skip it if we'll have met our
                 * free target when all the current pageouts complete.
                 */

                if (uvmexp.free + uvmexp.paging > uvmexp.freetarg << 2) {
                        mutex_exit(slock);
                        continue;
                }

                /*
                 * free any swap space allocated to the page since
                 * we'll have to write it again with its new data.
                 */

                uvmpd_dropswap(p);

                /*
                 * start new swap pageout cluster (if necessary).
                 *
                 * if swap is full reactivate this page so that
                 * we eventually cycle all pages through the
                 * inactive queue.
                 */

                if (swapcluster_allocslots(&swc)) {
                        dirtyreacts++;
                        uvm_pageactivate(p);
                        mutex_exit(slock);
                        continue;
                }

                /*
                 * at this point, we're definitely going to reuse this
                 * page.  mark the page busy and delayed-free.
                 * we should remove the page from the page queues
                 * so we don't ever look at it again.
                 * adjust counters and such.
                 */

                p->flags |= PG_BUSY;
                UVM_PAGE_OWN(p, "scan_queue");

                p->flags |= PG_PAGEOUT;
                uvm_pagedequeue(p);

                uvmexp.pgswapout++;
                mutex_exit(&uvm_pageqlock);

                /*
                 * add the new page to the cluster.
                 */

                if (swapcluster_add(&swc, p)) {
                        p->flags &= ~(PG_BUSY|PG_PAGEOUT);
                        UVM_PAGE_OWN(p, NULL);
                        mutex_enter(&uvm_pageqlock);
                        dirtyreacts++;
                        uvm_pageactivate(p);
                        mutex_exit(slock);
                        continue;
                }
                mutex_exit(slock);

                swapcluster_flush(&swc, false);
                mutex_enter(&uvm_pageqlock);

                /*
                 * the pageout is in progress.  bump counters and set up
                 * for the next loop.
                 */

                uvmexp.pdpending++;

#else /* defined(VMSWAP) */
                uvm_pageactivate(p);
                mutex_exit(slock);
#endif /* defined(VMSWAP) */
        }

#if defined(VMSWAP)
        mutex_exit(&uvm_pageqlock);
        swapcluster_flush(&swc, true);
        mutex_enter(&uvm_pageqlock);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 *
 * => called with pageq's locked
 */

static void
uvmpd_scan(void)
{
        int swap_shortage, pages_freed;
        UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

        uvmexp.pdrevs++;

        /*
         * work on meeting our targets.   first we work on our free target
         * by converting inactive pages into free pages.  then we work on
         * meeting our inactive target by converting active pages to
         * inactive ones.
         */

        UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

        pages_freed = uvmexp.pdfreed;
        uvmpd_scan_queue();
        pages_freed = uvmexp.pdfreed - pages_freed;

        /*
         * detect if we're not going to be able to page anything out
         * until we free some swap resources from active pages.
         */

        swap_shortage = 0;
        if (uvmexp.free < uvmexp.freetarg &&
            uvmexp.swpginuse >= uvmexp.swpgavail &&
            !uvm_swapisfull() &&
            pages_freed == 0) {
                swap_shortage = uvmexp.freetarg - uvmexp.free;
        }

        uvmpdpol_balancequeue(swap_shortage);

        /*
         * if still below the minimum target, try unloading kernel
         * modules.
         */

        if (uvmexp.free < uvmexp.freemin) {
                module_thread_kick();
        }
}

/*
 * uvm_reclaimable: decide whether to wait for the pagedaemon.
 *
 * => return true if it seems worth doing uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
        int filepages;
        int active, inactive;

        /*
         * if swap is not full, no problem.
         */

        if (!uvm_swapisfull()) {
                return true;
        }

        /*
         * file-backed pages can be reclaimed even when swap is full.
         * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
         *
         * XXX assume the worst case, ie. all wired pages are file-backed.
         *
         * XXX should consider other reclaimable memory,
         * XXX ie. pools, traditional buffer cache.
         */

        filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
        uvm_estimatepageable(&active, &inactive);
        if (filepages >= MIN((active + inactive) >> 4,
            5 * 1024 * 1024 >> PAGE_SHIFT)) {
                return true;
        }

        /*
         * kill the process, fail allocation, etc..
         */

        return false;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

        uvmpdpol_estimatepageable(active, inactive);
}

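/*
 * uvm_reclaim_*: a minimal hook interface allowing other subsystems
 * (for example, external caches) to register callbacks that the
 * pagedaemon runs when it is trying to free memory; see the
 * SLIST_FOREACH loop in uvm_pageout() above.
 */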
void
uvm_reclaim_init(void)
{

        /* Initialize UVM reclaim hooks. */
        mutex_init(&uvm_reclaim_lock, MUTEX_DEFAULT, IPL_NONE);
        SLIST_INIT(&uvm_reclaim_list);
}

void
uvm_reclaim_hook_add(struct uvm_reclaim_hook *hook)
{

        KASSERT(hook != NULL);

        mutex_enter(&uvm_reclaim_lock);
        SLIST_INSERT_HEAD(&uvm_reclaim_list, hook, uvm_reclaim_next);
        mutex_exit(&uvm_reclaim_lock);
}
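
/*
 * Illustrative registration of a reclaim hook (the names below are
 * hypothetical, for documentation only):
 *
 *	static void my_cache_reclaim(void);
 *	static struct uvm_reclaim_hook my_hook = {
 *		.uvm_reclaim_hook = my_cache_reclaim,
 *	};
 *
 *	uvm_reclaim_hook_add(&my_hook);
 *	...
 *	uvm_reclaim_hook_del(&my_hook);
 */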

void
uvm_reclaim_hook_del(struct uvm_reclaim_hook *hook_entry)
{
        struct uvm_reclaim_hook *hook;

        KASSERT(hook_entry != NULL);

        mutex_enter(&uvm_reclaim_lock);
        SLIST_FOREACH(hook, &uvm_reclaim_list, uvm_reclaim_next) {
                if (hook != hook_entry) {
                        continue;
                }

                SLIST_REMOVE(&uvm_reclaim_list, hook, uvm_reclaim_hook,
                    uvm_reclaim_next);
                break;
        }

        mutex_exit(&uvm_reclaim_lock);
}