/*	$NetBSD: uvm_pdaemon.c,v 1.122.2.1 2020/01/17 21:47:38 ad Exp $	*/

/*
 * Copyright (c) 1997 Charles D. Cranor and Washington University.
 * Copyright (c) 1991, 1993, The Regents of the University of California.
 *
 * All rights reserved.
 *
 * This code is derived from software contributed to Berkeley by
 * The Mach Operating System project at Carnegie-Mellon University.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
 * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
 *
 *
 * Copyright (c) 1987, 1990 Carnegie-Mellon University.
 * All rights reserved.
 *
 * Permission to use, copy, modify and distribute this software and
 * its documentation is hereby granted, provided that both the copyright
 * notice and this permission notice appear in all copies of the
 * software, derivative works or modified versions, and any portions
 * thereof, and that both notices appear in supporting documentation.
 *
 * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
 * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
 * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
 *
 * Carnegie Mellon requests users of this software to return to
 *
 *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
 *  School of Computer Science
 *  Carnegie Mellon University
 *  Pittsburgh PA 15213-3890
 *
 * any improvements or extensions that they make and grant Carnegie the
 * rights to redistribute these changes.
 */

/*
 * uvm_pdaemon.c: the page daemon
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.122.2.1 2020/01/17 21:47:38 ad Exp $");

#include "opt_uvmhist.h"
#include "opt_readahead.h"

#include <sys/param.h>
#include <sys/proc.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/pool.h>
#include <sys/buf.h>
#include <sys/module.h>
#include <sys/atomic.h>
#include <sys/kthread.h>

#include <uvm/uvm.h>
#include <uvm/uvm_pdpolicy.h>
#include <uvm/uvm_pgflcache.h>

#ifdef UVMHIST
UVMHIST_DEFINE(pdhist);
#endif

/*
 * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
 * in a pass thru the inactive list when swap is full.  the value should be
 * "small"... if it's too large we'll cycle the active pages thru the inactive
 * queue too quickly for them to be referenced and avoid being freed.
 */

#define	UVMPD_NUMDIRTYREACTS	16

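/*
 * UVMPD_NUMTRYLOCKOWNER is how many times the pagedaemon will retry
 * a trylock of a page owner's lock in uvmpd_trylockowner() before
 * giving up on the page; callers other than the pagedaemon try once.
 */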
#define	UVMPD_NUMTRYLOCKOWNER	128

/*
 * local prototypes
 */

static void	uvmpd_scan(void);
static void	uvmpd_scan_queue(void);
static void	uvmpd_tune(void);
static void	uvmpd_pool_drain_thread(void *);
static void	uvmpd_pool_drain_wakeup(void);

static unsigned int uvm_pagedaemon_waiters;

/* State for the pool drainer thread */
static kmutex_t uvmpd_lock __cacheline_aligned;
static kcondvar_t uvmpd_pool_drain_cv;
static bool uvmpd_pool_drain_run = false;

/*
 * XXX hack to avoid hangs when large processes fork.
 */
u_int uvm_extrapages;

/*
 * uvm_wait: wait (sleep) for the page daemon to free some pages
 *
 * => should be called with all locks released
 * => should _not_ be called by the page daemon (to avoid deadlock)
 */

void
uvm_wait(const char *wmsg)
{
	int timo = 0;

	if (uvm.pagedaemon_lwp == NULL)
		panic("out of memory before the pagedaemon thread exists");

	mutex_spin_enter(&uvmpd_lock);

	/*
	 * check for page daemon going to sleep (waiting for itself)
	 */

	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
		/*
		 * now we have a problem: the pagedaemon wants to go to
		 * sleep until it frees more memory.   but how can it
		 * free more memory if it is asleep?  that is a deadlock.
		 * we have two options:
		 *  [1] panic now
		 *  [2] put a timeout on the sleep, thus causing the
		 *      pagedaemon to only pause (rather than sleep forever)
		 *
		 * note that option [2] will only help us if we get lucky
		 * and some other process on the system breaks the deadlock
		 * by exiting or freeing memory (thus allowing the pagedaemon
		 * to continue).  for now we panic if DEBUG is defined,
		 * otherwise we hope for the best with option [2] (better
		 * yet, this should never happen in the first place!).
		 */

		printf("pagedaemon: deadlock detected!\n");
		timo = hz >> 3;		/* set timeout */
#if defined(DEBUG)
		/* DEBUG: panic so we can debug it */
		panic("pagedaemon deadlock");
#endif
	}

	uvm_pagedaemon_waiters++;
	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvmpd_lock, false, wmsg, timo);
}
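
/*
 * Illustrative use (a sketch, not code from this file): a caller that
 * fails to allocate a page typically blocks in uvm_wait() and retries.
 * The object, offset and wait message below are hypothetical.
 *
 *	struct vm_page *pg;
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		uvm_wait("pgalloc");
 *	}
 */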

/*
 * uvm_kick_pdaemon: perform checks to determine if we need to
 * give the pagedaemon a nudge, and do so if necessary.
 */

void
uvm_kick_pdaemon(void)
{
	int fpages = uvm_availmem();

	if (fpages + uvmexp.paging < uvmexp.freemin ||
	    (fpages + uvmexp.paging < uvmexp.freetarg &&
	     uvmpdpol_needsscan_p()) ||
	     uvm_km_va_starved_p()) {
		mutex_spin_enter(&uvmpd_lock);
		wakeup(&uvm.pagedaemon);
		mutex_spin_exit(&uvmpd_lock);
	}
}

/*
 * uvmpd_tune: tune paging parameters
 *
 * => called whenever memory is added to (or removed from?) the system
 */

static void
uvmpd_tune(void)
{
	int val;

	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);

	/*
	 * try to keep 0.5% of available RAM free, but limit to between
	 * 128k and 1024k per-CPU.  XXX: what are these values good for?
	 */
	val = uvmexp.npages / 200;
	val = MAX(val, (128*1024) >> PAGE_SHIFT);
	val = MIN(val, (1024*1024) >> PAGE_SHIFT);
	val *= ncpu;
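	/*
	 * Worked example (illustrative, assuming PAGE_SIZE = 4KB and
	 * 4 CPUs): with 4GB of RAM, npages = 1048576, so npages / 200
	 * = 5242 pages (~20MB).  That exceeds the 1024k per-CPU cap of
	 * 256 pages, so val is clamped to 256 and then scaled by ncpu,
	 * giving freemin = 1024 pages (4MB).
	 */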

	/* Make sure there's always a user page free. */
	if (val < uvmexp.reserve_kernel + 1)
		val = uvmexp.reserve_kernel + 1;
	uvmexp.freemin = val;

	/* Calculate free target. */
	val = (uvmexp.freemin * 4) / 3;
	if (val <= uvmexp.freemin)
		val = uvmexp.freemin + 1;
	uvmexp.freetarg = val + atomic_swap_uint(&uvm_extrapages, 0);

	uvmexp.wiredmax = uvmexp.npages / 3;
	UVMHIST_LOG(pdhist, "<- done, freemin=%jd, freetarg=%jd, wiredmax=%jd",
	      uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
}

/*
 * uvm_pageout: the main loop for the pagedaemon
 */

void
uvm_pageout(void *arg)
{
	int npages = 0;
	int extrapages = 0;
	int fpages;

	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);

	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);

	mutex_init(&uvmpd_lock, MUTEX_DEFAULT, IPL_VM);
	cv_init(&uvmpd_pool_drain_cv, "pooldrain");

	/* Create the pool drainer kernel thread. */
	if (kthread_create(PRI_VM, KTHREAD_MPSAFE, NULL,
	    uvmpd_pool_drain_thread, NULL, NULL, "pooldrain"))
		panic("fork pooldrain");

	/*
	 * ensure correct priority and set paging parameters...
	 */

	uvm.pagedaemon_lwp = curlwp;
	npages = uvmexp.npages;
	uvmpd_tune();

	/*
	 * main loop
	 */

	for (;;) {
		bool needsscan, needsfree, kmem_va_starved;

		kmem_va_starved = uvm_km_va_starved_p();

		mutex_spin_enter(&uvmpd_lock);
		if ((uvm_pagedaemon_waiters == 0 || uvmexp.paging > 0) &&
		    !kmem_va_starved) {
			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
			    &uvmpd_lock, false, "pgdaemon", 0);
			uvmexp.pdwoke++;
			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
		} else {
			mutex_spin_exit(&uvmpd_lock);
		}

		/*
		 * now recompute inactive count
		 */

		if (npages != uvmexp.npages || extrapages != uvm_extrapages) {
			npages = uvmexp.npages;
			extrapages = uvm_extrapages;
			uvmpd_tune();
		}

		uvmpdpol_tune();

		/*
		 * Estimate a hint.  Note that bufmem is returned to
		 * the system only when an entire pool page is empty.
		 */
		fpages = uvm_availmem();
		UVMHIST_LOG(pdhist,"  free/ftarg=%jd/%jd",
		    fpages, uvmexp.freetarg, 0,0);

		needsfree = fpages + uvmexp.paging < uvmexp.freetarg;
		needsscan = needsfree || uvmpdpol_needsscan_p();

		/*
		 * scan if needed
		 */
		if (needsscan) {
			uvmpd_scan();
		}

		/*
		 * if there's any free memory to be had,
		 * wake up any waiters.
		 */
		if (uvm_availmem() > uvmexp.reserve_kernel ||
		    uvmexp.paging == 0) {
			mutex_spin_enter(&uvmpd_lock);
			wakeup(&uvmexp.free);
			uvm_pagedaemon_waiters = 0;
			mutex_spin_exit(&uvmpd_lock);
		}

		/*
		 * scan done.  if we don't need free memory, we're done.
		 */

		if (!needsfree && !kmem_va_starved)
			continue;

		/*
		 * kick the pool drainer thread.
		 */

		uvmpd_pool_drain_wakeup();
	}
	/*NOTREACHED*/
}


/*
 * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
 */

void
uvm_aiodone_worker(struct work *wk, void *dummy)
{
	struct buf *bp = (void *)wk;

	KASSERT(&bp->b_work == wk);

	/*
	 * process an i/o that's done.
	 */

	(*bp->b_iodone)(bp);
}

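/*
 * uvm_pageout_start: the caller has started a pageout of npages pages;
 * account for them in uvmexp.paging.
 */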
void
uvm_pageout_start(int npages)
{

	atomic_add_int(&uvmexp.paging, npages);
}

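/*
 * uvm_pageout_done: a pageout of npages pages has completed; drop the
 * in-flight count and wake the pagedaemon, or any waiters, as needed.
 */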
void
uvm_pageout_done(int npages)
{

	KASSERT(uvmexp.paging >= npages);
	atomic_add_int(&uvmexp.paging, -npages);

	/*
	 * wake up either the pagedaemon or the LWPs waiting for it.
	 */

	mutex_spin_enter(&uvmpd_lock);
	if (uvm_availmem() <= uvmexp.reserve_kernel) {
		wakeup(&uvm.pagedaemon);
	} else if (uvm_pagedaemon_waiters != 0) {
		wakeup(&uvmexp.free);
		uvm_pagedaemon_waiters = 0;
	}
	mutex_spin_exit(&uvmpd_lock);
}

/*
 * uvmpd_trylockowner: trylock the page's owner.
 *
 * => called with page interlock held.
 * => resolve orphaned O->A loaned page.
 * => return the locked mutex on success.  otherwise, return NULL.
 */

kmutex_t *
uvmpd_trylockowner(struct vm_page *pg)
{
	struct uvm_object *uobj = pg->uobject;
	struct vm_anon *anon = pg->uanon;
	int tries, count;
	bool running;
	kmutex_t *slock;

	KASSERT(mutex_owned(&pg->interlock));

	if (uobj != NULL) {
		slock = uobj->vmobjlock;
		KASSERTMSG(slock != NULL, "pg %p uobj %p, NULL lock", pg, uobj);
	} else if (anon != NULL) {
		slock = anon->an_lock;
		KASSERTMSG(slock != NULL, "pg %p anon %p, NULL lock", pg, anon);
	} else {
		/* Page may be in state of flux - ignore. */
		mutex_exit(&pg->interlock);
		return NULL;
	}

	/*
	 * Now try to lock the object.  We'll try hard, but don't really
	 * plan on spending more than a millisecond or so here.
	 */
	tries = (curlwp == uvm.pagedaemon_lwp ? UVMPD_NUMTRYLOCKOWNER : 1);
	for (;;) {
		if (mutex_tryenter(slock)) {
			if (uobj == NULL) {
				/*
				 * set PG_ANON if it isn't set already.
				 */
				if ((pg->flags & PG_ANON) == 0) {
					KASSERT(pg->loan_count > 0);
					pg->loan_count--;
					pg->flags |= PG_ANON;
					/* anon now owns it */
				}
			}
			mutex_exit(&pg->interlock);
			return slock;
		}
		running = mutex_owner_running(slock);
		if (!running || --tries <= 0) {
			break;
		}
		count = SPINLOCK_BACKOFF_MAX;
		SPINLOCK_BACKOFF(count);
	}

	/*
	 * We didn't get the lock; chances are the very next page on the
	 * queue also has the same lock, so if the lock owner is not running
	 * take a breather and allow them to make progress.  There could be
	 * only 1 CPU in the system, or the pagedaemon could have preempted
	 * the owner in kernel, or any number of other things could be going
	 * on.
	 */
	mutex_exit(&pg->interlock);
	if (curlwp == uvm.pagedaemon_lwp) {
		if (!running) {
			(void)kpause("pdpglock", false, 1, NULL);
		}
		uvmexp.pdbusy++;
	}
	return NULL;
}
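
/*
 * Illustrative caller pattern (a sketch, not code from this file; "pg"
 * is a hypothetical page the caller is scanning):
 *
 *	kmutex_t *lock;
 *
 *	mutex_enter(&pg->interlock);
 *	lock = uvmpd_trylockowner(pg);
 *	if (lock != NULL) {
 *		... the owner is locked and the interlock released;
 *		    examine the page, then mutex_exit(lock) ...
 *	}
 *	... on failure the interlock has also been released ...
 */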

#if defined(VMSWAP)
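/*
 * A swapcluster batches dirty swap-backed pages into one contiguous
 * range of swap slots so they can be written with a single I/O:
 * swapcluster_allocslots() reserves up to MAXPHYS worth of slots,
 * swapcluster_add() assigns the next slot to a page and records the
 * page, and swapcluster_flush() frees any unused slots and starts the
 * pageout via uvm_swap_put().
 */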
struct swapcluster {
	int swc_slot;
	int swc_nallocated;
	int swc_nused;
	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
};

static void
swapcluster_init(struct swapcluster *swc)
{

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_allocslots(struct swapcluster *swc)
{
	int slot;
	int npages;

	if (swc->swc_slot != 0) {
		return 0;
	}

	/* Even with strange MAXPHYS, the shift
	   implicitly rounds down to a page. */
	npages = MAXPHYS >> PAGE_SHIFT;
	slot = uvm_swap_alloc(&npages, true);
	if (slot == 0) {
		return ENOMEM;
	}
	swc->swc_slot = slot;
	swc->swc_nallocated = npages;
	swc->swc_nused = 0;

	return 0;
}

static int
swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
{
	int slot;
	struct uvm_object *uobj;

	KASSERT(swc->swc_slot != 0);
	KASSERT(swc->swc_nused < swc->swc_nallocated);
	KASSERT((pg->flags & PG_SWAPBACKED) != 0);

	slot = swc->swc_slot + swc->swc_nused;
	uobj = pg->uobject;
	if (uobj == NULL) {
		KASSERT(mutex_owned(pg->uanon->an_lock));
		pg->uanon->an_swslot = slot;
	} else {
		int result;

		KASSERT(mutex_owned(uobj->vmobjlock));
		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
		if (result == -1) {
			return ENOMEM;
		}
	}
	swc->swc_pages[swc->swc_nused] = pg;
	swc->swc_nused++;

	return 0;
}

static void
swapcluster_flush(struct swapcluster *swc, bool now)
{
	int slot;
	int nused;
	int nallocated;
	int error __diagused;

	if (swc->swc_slot == 0) {
		return;
	}
	KASSERT(swc->swc_nused <= swc->swc_nallocated);

	slot = swc->swc_slot;
	nused = swc->swc_nused;
	nallocated = swc->swc_nallocated;

	/*
	 * if this is the final pageout we could have a few
	 * unused swap blocks.  if so, free them now.
	 */

	if (nused < nallocated) {
		if (!now) {
			return;
		}
		uvm_swap_free(slot + nused, nallocated - nused);
	}

	/*
	 * now start the pageout.
	 */

	if (nused > 0) {
		uvmexp.pdpageouts++;
		uvm_pageout_start(nused);
		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
		KASSERT(error == 0 || error == ENOMEM);
	}

	/*
	 * zero swslot to indicate that we are
	 * no longer building a swap-backed cluster.
	 */

	swc->swc_slot = 0;
	swc->swc_nused = 0;
}

static int
swapcluster_nused(struct swapcluster *swc)
{

	return swc->swc_nused;
}

/*
 * uvmpd_dropswap: free any swap allocated to this page.
 *
 * => called with owner locked.
 * => return true if a page had an associated slot.
 */

bool
uvmpd_dropswap(struct vm_page *pg)
{
	bool result = false;
	struct vm_anon *anon = pg->uanon;

	if ((pg->flags & PG_ANON) && anon->an_swslot) {
		uvm_swap_free(anon->an_swslot, 1);
		anon->an_swslot = 0;
		uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
		result = true;
	} else if (pg->flags & PG_AOBJ) {
		int slot = uao_set_swslot(pg->uobject,
		    pg->offset >> PAGE_SHIFT, 0);
		if (slot) {
			uvm_swap_free(slot, 1);
			uvm_pagemarkdirty(pg, UVM_PAGE_STATUS_DIRTY);
			result = true;
		}
	}

	return result;
}

#endif /* defined(VMSWAP) */

/*
 * uvmpd_scan_queue: scan a replace-candidate list for pages
 * to clean or free.
 *
 * => we work on meeting our free target by converting inactive pages
 *    into free pages.
 * => we handle the building of swap-backed clusters
 */

static void
uvmpd_scan_queue(void)
{
	struct vm_page *p;
	struct uvm_object *uobj;
	struct vm_anon *anon;
#if defined(VMSWAP)
	struct swapcluster swc;
#endif /* defined(VMSWAP) */
	int dirtyreacts;
	kmutex_t *slock;
	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);

	/*
	 * swslot is non-zero if we are building a swap cluster.  we want
	 * to stay in the loop while we have a page to scan or we have
	 * a swap-cluster to build.
	 */

#if defined(VMSWAP)
	swapcluster_init(&swc);
#endif /* defined(VMSWAP) */

	dirtyreacts = 0;
	uvmpdpol_scaninit();

	while (/* CONSTCOND */ 1) {

		/*
		 * see if we've met the free target.
		 */

		if (uvm_availmem() + uvmexp.paging
#if defined(VMSWAP)
		    + swapcluster_nused(&swc)
#endif /* defined(VMSWAP) */
		    >= uvmexp.freetarg << 2 ||
		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
			UVMHIST_LOG(pdhist,"  met free target: "
				    "exit loop", 0, 0, 0, 0);
			break;
		}

		/*
		 * first we have the pdpolicy select a victim page
		 * and attempt to lock the object that the page
		 * belongs to.  if our attempt fails we skip on to
		 * the next page (no harm done).  it is important to
		 * "try" locking the object as we are locking in the
		 * wrong order (pageq -> object) and we don't want to
		 * deadlock.
		 *
		 * the only time we expect to see an ownerless page
		 * (i.e. a page with no uobject and !PG_ANON) is if an
		 * anon has loaned a page from a uvm_object and the
		 * uvm_object has dropped the ownership.  in that
		 * case, the anon can "take over" the loaned page
		 * and make it its own.
		 */

		p = uvmpdpol_selectvictim(&slock);
		if (p == NULL) {
			break;
		}
		KASSERT(uvmpdpol_pageisqueued_p(p));
		KASSERT(uvm_page_owner_locked_p(p));
		KASSERT(p->wire_count == 0);

		/*
		 * we are below target and have a new page to consider.
		 */

		anon = p->uanon;
		uobj = p->uobject;

		if (p->flags & PG_BUSY) {
			mutex_exit(slock);
			uvmexp.pdbusy++;
			continue;
		}

		/* does the page belong to an object? */
		if (uobj != NULL) {
			uvmexp.pdobscan++;
		} else {
#if defined(VMSWAP)
			KASSERT(anon != NULL);
			uvmexp.pdanscan++;
#else /* defined(VMSWAP) */
			panic("%s: anon", __func__);
#endif /* defined(VMSWAP) */
		}


		/*
		 * we now have the object locked.
		 * if the page is not swap-backed, call the object's
		 * pager to flush and free the page.
		 */

#if defined(READAHEAD_STATS)
		if ((p->flags & PG_READAHEAD) != 0) {
			p->flags &= ~PG_READAHEAD;
			uvm_ra_miss.ev_count++;
		}
#endif /* defined(READAHEAD_STATS) */

		if ((p->flags & PG_SWAPBACKED) == 0) {
			KASSERT(uobj != NULL);
			(void) (uobj->pgops->pgo_put)(uobj, p->offset,
			    p->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
			continue;
		}

		/*
		 * the page is swap-backed.  remove all the permissions
		 * from the page so we can sync the modified info
		 * without any race conditions.  if the page is clean
		 * we can free it now and continue.
		 */

		pmap_page_protect(p, VM_PROT_NONE);
		if (uvm_pagegetdirty(p) == UVM_PAGE_STATUS_UNKNOWN) {
			if (pmap_clear_modify(p)) {
				uvm_pagemarkdirty(p, UVM_PAGE_STATUS_DIRTY);
			} else {
				uvm_pagemarkdirty(p, UVM_PAGE_STATUS_CLEAN);
			}
		}
		if (uvm_pagegetdirty(p) != UVM_PAGE_STATUS_DIRTY) {
			int slot;
			int pageidx;

			pageidx = p->offset >> PAGE_SHIFT;
			uvm_pagefree(p);
			atomic_inc_uint(&uvmexp.pdfreed);

			/*
			 * for anons, we need to remove the page
			 * from the anon ourselves.  for aobjs,
			 * pagefree did that for us.
			 */

			if (anon) {
				KASSERT(anon->an_swslot != 0);
				anon->an_page = NULL;
				slot = anon->an_swslot;
			} else {
				slot = uao_find_swslot(uobj, pageidx);
			}
			if (slot > 0) {
				/* this page is now only in swap. */
				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
				atomic_inc_uint(&uvmexp.swpgonly);
			}
			mutex_exit(slock);
			continue;
		}

#if defined(VMSWAP)
		/*
		 * this page is dirty, skip it if we'll have met our
		 * free target when all the current pageouts complete.
		 */

		if (uvm_availmem() + uvmexp.paging > uvmexp.freetarg << 2) {
			mutex_exit(slock);
			continue;
		}

		/*
		 * free any swap space allocated to the page since
		 * we'll have to write it again with its new data.
		 */

		uvmpd_dropswap(p);

		/*
		 * start new swap pageout cluster (if necessary).
		 *
		 * if swap is full reactivate this page so that
		 * we eventually cycle all pages through the
		 * inactive queue.
		 */

		if (swapcluster_allocslots(&swc)) {
			dirtyreacts++;
			uvm_pagelock(p);
			uvm_pageactivate(p);
			uvm_pageunlock(p);
			mutex_exit(slock);
			continue;
		}

		/*
		 * at this point, we're definitely going to reuse this
		 * page.  mark the page busy and delayed-free.
		 * we should remove the page from the page queues
		 * so we don't ever look at it again.
		 * adjust counters and such.
		 */

		p->flags |= PG_BUSY;
		UVM_PAGE_OWN(p, "scan_queue");
		p->flags |= PG_PAGEOUT;
		uvmexp.pgswapout++;

		uvm_pagelock(p);
		uvm_pagedequeue(p);
		uvm_pageunlock(p);

		/*
		 * add the new page to the cluster.
		 */

		if (swapcluster_add(&swc, p)) {
			p->flags &= ~(PG_BUSY|PG_PAGEOUT);
			UVM_PAGE_OWN(p, NULL);
			dirtyreacts++;
			uvm_pagelock(p);
			uvm_pageactivate(p);
			uvm_pageunlock(p);
			mutex_exit(slock);
			continue;
		}
		mutex_exit(slock);

		swapcluster_flush(&swc, false);

		/*
		 * the pageout is in progress.  bump counters and set up
		 * for the next loop.
		 */

		atomic_inc_uint(&uvmexp.pdpending);

#else /* defined(VMSWAP) */
		uvm_pagelock(p);
		uvm_pageactivate(p);
		uvm_pageunlock(p);
		mutex_exit(slock);
#endif /* defined(VMSWAP) */
	}

	uvmpdpol_scanfini();

#if defined(VMSWAP)
	swapcluster_flush(&swc, true);
#endif /* defined(VMSWAP) */
}

/*
 * uvmpd_scan: scan the page queues and attempt to meet our targets.
 */

static void
uvmpd_scan(void)
{
	int swap_shortage, pages_freed, fpages;
	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);

	uvmexp.pdrevs++;

	/*
	 * work on meeting our targets.   first we work on our free target
	 * by converting inactive pages into free pages.  then we work on
	 * meeting our inactive target by converting active pages to
	 * inactive ones.
	 */

	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);

	pages_freed = uvmexp.pdfreed;
	uvmpd_scan_queue();
	pages_freed = uvmexp.pdfreed - pages_freed;

	/*
	 * detect if we're not going to be able to page anything out
	 * until we free some swap resources from active pages.
	 */

	swap_shortage = 0;
	fpages = uvm_availmem();
	if (fpages < uvmexp.freetarg &&
	    uvmexp.swpginuse >= uvmexp.swpgavail &&
	    !uvm_swapisfull() &&
	    pages_freed == 0) {
		swap_shortage = uvmexp.freetarg - fpages;
	}

	uvmpdpol_balancequeue(swap_shortage);

	/*
	 * if still below the minimum target, try unloading kernel
	 * modules.
	 */

	if (uvm_availmem() < uvmexp.freemin) {
		module_thread_kick();
	}
}

/*
 * uvm_reclaimable: decide whether to wait for pagedaemon.
 *
 * => return true if it seems worthwhile to do uvm_wait.
 *
 * XXX should be tunable.
 * XXX should consider pools, etc?
 */

bool
uvm_reclaimable(void)
{
	int filepages;
	int active, inactive;

	/*
	 * if swap is not full, no problem.
	 */

	if (!uvm_swapisfull()) {
		return true;
	}

	/*
	 * file-backed pages can be reclaimed even when swap is full.
	 * if we have more than 1/16 of pageable memory or 5MB, try to reclaim.
	 *
	 * XXX assume the worst case, ie. all wired pages are file-backed.
	 *
	 * XXX should consider other reclaimable memory.
	 * XXX ie. pools, traditional buffer cache.
	 */
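	/*
	 * Worked example (illustrative, assuming PAGE_SIZE = 4KB): with
	 * 1GB of pageable memory, active + inactive = 262144 pages, and
	 * (active + inactive) >> 4 = 16384 pages (64MB).  The 5MB cap
	 * (1280 pages) is smaller, so the system is considered
	 * reclaimable while at least 1280 file-backed pages remain.
	 */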

	cpu_count_sync_all();
	filepages = (int)cpu_count_get(CPU_COUNT_FILEPAGES) +
	    (int)cpu_count_get(CPU_COUNT_EXECPAGES) - uvmexp.wired;
	uvm_estimatepageable(&active, &inactive);
	if (filepages >= MIN((active + inactive) >> 4,
	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
		return true;
	}

	/*
	 * kill the process, fail allocation, etc..
	 */

	return false;
}

void
uvm_estimatepageable(int *active, int *inactive)
{

	uvmpdpol_estimatepageable(active, inactive);
}


/*
 * Use a separate thread for draining pools.
 * This work can't be done from the main pagedaemon thread because
 * some pool allocators need to take vm_map locks.
 */

static void
uvmpd_pool_drain_thread(void *arg)
{
	struct pool *firstpool, *curpool;
	int bufcnt, lastslept;
	bool cycled;

	firstpool = NULL;
	cycled = true;
	for (;;) {
		/*
		 * sleep until awoken by the pagedaemon.
		 */
		mutex_enter(&uvmpd_lock);
		if (!uvmpd_pool_drain_run) {
			lastslept = hardclock_ticks;
			cv_wait(&uvmpd_pool_drain_cv, &uvmpd_lock);
			if (hardclock_ticks != lastslept) {
				cycled = false;
				firstpool = NULL;
			}
		}
		uvmpd_pool_drain_run = false;
		mutex_exit(&uvmpd_lock);

		/*
		 * rate limit draining, otherwise in desperate circumstances
		 * this can totally saturate the system with xcall activity.
		 */
		if (cycled) {
			kpause("uvmpdlmt", false, 1, NULL);
			cycled = false;
			firstpool = NULL;
		}

		/*
		 * drain and temporarily disable the freelist cache.
		 */
		uvm_pgflcache_pause();

		/*
		 * kill unused metadata buffers.
		 */
		bufcnt = uvmexp.freetarg - uvm_availmem();
		if (bufcnt < 0)
			bufcnt = 0;

		mutex_enter(&bufcache_lock);
		buf_drain(bufcnt << PAGE_SHIFT);
		mutex_exit(&bufcache_lock);

		/*
		 * drain a pool, and then re-enable the freelist cache.
		 */
		(void)pool_drain(&curpool);
		KASSERT(curpool != NULL);
		if (firstpool == NULL) {
			firstpool = curpool;
		} else if (firstpool == curpool) {
			cycled = true;
		}
		uvm_pgflcache_resume();
	}
	/*NOTREACHED*/
}

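/*
 * uvmpd_pool_drain_wakeup: request a pass of the pool drain thread.
 */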
static void
uvmpd_pool_drain_wakeup(void)
{

	mutex_enter(&uvmpd_lock);
	uvmpd_pool_drain_run = true;
	cv_signal(&uvmpd_pool_drain_cv);
	mutex_exit(&uvmpd_lock);
}