uvm_pdaemon.c revision 1.93.4.2.4.4
      1  1.93.4.2.4.4      matt /*	$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.4 2012/02/13 23:07:31 matt Exp $	*/
      2           1.1       mrg 
      3          1.34       chs /*
      4           1.1       mrg  * Copyright (c) 1997 Charles D. Cranor and Washington University.
      5          1.34       chs  * Copyright (c) 1991, 1993, The Regents of the University of California.
      6           1.1       mrg  *
      7           1.1       mrg  * All rights reserved.
      8           1.1       mrg  *
      9           1.1       mrg  * This code is derived from software contributed to Berkeley by
     10           1.1       mrg  * The Mach Operating System project at Carnegie-Mellon University.
     11           1.1       mrg  *
     12           1.1       mrg  * Redistribution and use in source and binary forms, with or without
     13           1.1       mrg  * modification, are permitted provided that the following conditions
     14           1.1       mrg  * are met:
     15           1.1       mrg  * 1. Redistributions of source code must retain the above copyright
     16           1.1       mrg  *    notice, this list of conditions and the following disclaimer.
     17           1.1       mrg  * 2. Redistributions in binary form must reproduce the above copyright
     18           1.1       mrg  *    notice, this list of conditions and the following disclaimer in the
     19           1.1       mrg  *    documentation and/or other materials provided with the distribution.
     20           1.1       mrg  * 3. All advertising materials mentioning features or use of this software
     21           1.1       mrg  *    must display the following acknowledgement:
     22           1.1       mrg  *	This product includes software developed by Charles D. Cranor,
     23          1.34       chs  *      Washington University, the University of California, Berkeley and
     24           1.1       mrg  *      its contributors.
     25           1.1       mrg  * 4. Neither the name of the University nor the names of its contributors
     26           1.1       mrg  *    may be used to endorse or promote products derived from this software
     27           1.1       mrg  *    without specific prior written permission.
     28           1.1       mrg  *
     29           1.1       mrg  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
     30           1.1       mrg  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
     31           1.1       mrg  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
     32           1.1       mrg  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
     33           1.1       mrg  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
     34           1.1       mrg  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
     35           1.1       mrg  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
     36           1.1       mrg  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
     37           1.1       mrg  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
     38           1.1       mrg  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
     39           1.1       mrg  * SUCH DAMAGE.
     40           1.1       mrg  *
     41           1.1       mrg  *	@(#)vm_pageout.c        8.5 (Berkeley) 2/14/94
     42           1.4       mrg  * from: Id: uvm_pdaemon.c,v 1.1.2.32 1998/02/06 05:26:30 chs Exp
     43           1.1       mrg  *
     44           1.1       mrg  *
     45           1.1       mrg  * Copyright (c) 1987, 1990 Carnegie-Mellon University.
     46           1.1       mrg  * All rights reserved.
     47          1.34       chs  *
     48           1.1       mrg  * Permission to use, copy, modify and distribute this software and
     49           1.1       mrg  * its documentation is hereby granted, provided that both the copyright
     50           1.1       mrg  * notice and this permission notice appear in all copies of the
     51           1.1       mrg  * software, derivative works or modified versions, and any portions
     52           1.1       mrg  * thereof, and that both notices appear in supporting documentation.
     53          1.34       chs  *
     54          1.34       chs  * CARNEGIE MELLON ALLOWS FREE USE OF THIS SOFTWARE IN ITS "AS IS"
     55          1.34       chs  * CONDITION.  CARNEGIE MELLON DISCLAIMS ANY LIABILITY OF ANY KIND
     56           1.1       mrg  * FOR ANY DAMAGES WHATSOEVER RESULTING FROM THE USE OF THIS SOFTWARE.
     57          1.34       chs  *
     58           1.1       mrg  * Carnegie Mellon requests users of this software to return to
     59           1.1       mrg  *
      60           1.1       mrg  *  Software Distribution Coordinator  or  Software.Distribution@CS.CMU.EDU
     61           1.1       mrg  *  School of Computer Science
     62           1.1       mrg  *  Carnegie Mellon University
     63           1.1       mrg  *  Pittsburgh PA 15213-3890
     64           1.1       mrg  *
     65           1.1       mrg  * any improvements or extensions that they make and grant Carnegie the
     66           1.1       mrg  * rights to redistribute these changes.
     67           1.1       mrg  */
     68           1.1       mrg 
     69           1.1       mrg /*
     70           1.1       mrg  * uvm_pdaemon.c: the page daemon
     71           1.1       mrg  */
     72          1.42     lukem 
     73          1.42     lukem #include <sys/cdefs.h>
     74  1.93.4.2.4.4      matt __KERNEL_RCSID(0, "$NetBSD: uvm_pdaemon.c,v 1.93.4.2.4.4 2012/02/13 23:07:31 matt Exp $");
     75          1.42     lukem 
     76          1.42     lukem #include "opt_uvmhist.h"
     77          1.69      yamt #include "opt_readahead.h"
     78           1.1       mrg 
     79           1.1       mrg #include <sys/param.h>
     80           1.1       mrg #include <sys/proc.h>
     81           1.1       mrg #include <sys/systm.h>
     82           1.1       mrg #include <sys/kernel.h>
     83           1.9        pk #include <sys/pool.h>
     84          1.24       chs #include <sys/buf.h>
     85      1.93.4.2       snj #include <sys/atomic.h>
     86           1.1       mrg 
     87           1.1       mrg #include <uvm/uvm.h>
     88          1.77      yamt #include <uvm/uvm_pdpolicy.h>
     89           1.1       mrg 
     90           1.1       mrg /*
     91          1.45       wiz  * UVMPD_NUMDIRTYREACTS is how many dirty pages the pagedaemon will reactivate
     92          1.14       chs  * in a pass thru the inactive list when swap is full.  the value should be
     93          1.14       chs  * "small"... if it's too large we'll cycle the active pages thru the inactive
      94          1.14       chs  * queue too quickly for them to be referenced and avoid being freed.
     95          1.14       chs  */
     96          1.14       chs 
     97          1.89        ad #define	UVMPD_NUMDIRTYREACTS	16
     98          1.14       chs 
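/*
 * UVMPD_NUMTRYLOCKOWNER is how many consecutive failures to trylock a
 * page's owner the pagedaemon tolerates before pausing briefly to give
 * the lock holder a chance to run (see uvmpd_scan_queue).
 */
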
     99          1.89        ad #define	UVMPD_NUMTRYLOCKOWNER	16
    100          1.14       chs 
    101          1.14       chs /*
    102           1.1       mrg  * local prototypes
    103           1.1       mrg  */
    104           1.1       mrg 
    105  1.93.4.2.4.3      matt static void	uvmpd_scan(struct uvm_pggroup *);
    106  1.93.4.2.4.3      matt static void	uvmpd_scan_queue(struct uvm_pggroup *);
    107          1.65   thorpej static void	uvmpd_tune(void);
    108           1.1       mrg 
    109  1.93.4.2.4.4      matt static void	uvmpd_checkgroup(const struct uvm_pggroup *);
    110  1.93.4.2.4.4      matt 
    111  1.93.4.2.4.3      matt static struct uvm_pdinfo {
    112  1.93.4.2.4.3      matt 	unsigned int pd_waiters;
    113  1.93.4.2.4.3      matt 	unsigned int pd_scans_neededs;
    114  1.93.4.2.4.3      matt 	struct uvm_pggrouplist pd_pagingq;
    115  1.93.4.2.4.3      matt 	struct uvm_pggrouplist pd_pendingq;
    116  1.93.4.2.4.3      matt } uvm_pdinfo =  {
    117  1.93.4.2.4.3      matt 	.pd_pagingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pagingq),
    118  1.93.4.2.4.3      matt 	.pd_pendingq = TAILQ_HEAD_INITIALIZER(uvm_pdinfo.pd_pendingq),
    119  1.93.4.2.4.3      matt };
    120          1.89        ad 
    121           1.1       mrg /*
    122          1.61       chs  * XXX hack to avoid hangs when large processes fork.
    123          1.61       chs  */
    124      1.93.4.2       snj u_int uvm_extrapages;
    125          1.61       chs 
    126          1.61       chs /*
    127           1.1       mrg  * uvm_wait: wait (sleep) for the page daemon to free some pages
    128           1.1       mrg  *
    129           1.1       mrg  * => should be called with all locks released
    130           1.1       mrg  * => should _not_ be called by the page daemon (to avoid deadlock)
    131           1.1       mrg  */
    132           1.1       mrg 
    133          1.19   thorpej void
    134          1.65   thorpej uvm_wait(const char *wmsg)
    135           1.8       mrg {
    136           1.8       mrg 	int timo = 0;
    137          1.89        ad 
    138          1.89        ad 	mutex_spin_enter(&uvm_fpageqlock);
    139           1.1       mrg 
    140           1.8       mrg 	/*
    141           1.8       mrg 	 * check for page daemon going to sleep (waiting for itself)
    142           1.8       mrg 	 */
    143           1.1       mrg 
    144          1.86        ad 	if (curlwp == uvm.pagedaemon_lwp && uvmexp.paging == 0) {
    145           1.8       mrg 		/*
    146           1.8       mrg 		 * now we have a problem: the pagedaemon wants to go to
    147           1.8       mrg 		 * sleep until it frees more memory.   but how can it
    148           1.8       mrg 		 * free more memory if it is asleep?  that is a deadlock.
    149           1.8       mrg 		 * we have two options:
    150           1.8       mrg 		 *  [1] panic now
    151           1.8       mrg 		 *  [2] put a timeout on the sleep, thus causing the
    152           1.8       mrg 		 *      pagedaemon to only pause (rather than sleep forever)
    153           1.8       mrg 		 *
    154           1.8       mrg 		 * note that option [2] will only help us if we get lucky
    155           1.8       mrg 		 * and some other process on the system breaks the deadlock
    156           1.8       mrg 		 * by exiting or freeing memory (thus allowing the pagedaemon
    157           1.8       mrg 		 * to continue).  for now we panic if DEBUG is defined,
    158           1.8       mrg 		 * otherwise we hope for the best with option [2] (better
    159           1.8       mrg 		 * yet, this should never happen in the first place!).
    160           1.8       mrg 		 */
    161           1.1       mrg 
    162           1.8       mrg 		printf("pagedaemon: deadlock detected!\n");
    163           1.8       mrg 		timo = hz >> 3;		/* set timeout */
    164           1.1       mrg #if defined(DEBUG)
    165           1.8       mrg 		/* DEBUG: panic so we can debug it */
    166           1.8       mrg 		panic("pagedaemon deadlock");
    167           1.1       mrg #endif
    168           1.8       mrg 	}
    169           1.1       mrg 
    170  1.93.4.2.4.3      matt 	uvm_pdinfo.pd_waiters++;
    171          1.17   thorpej 	wakeup(&uvm.pagedaemon);		/* wake the daemon! */
    172          1.89        ad 	UVM_UNLOCK_AND_WAIT(&uvmexp.free, &uvm_fpageqlock, false, wmsg, timo);
    173           1.1       mrg }
    174           1.1       mrg 
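/*
 * An illustrative sketch (not part of the original source) of the
 * expected call pattern: a caller that cannot allocate releases all
 * of its locks, waits for the daemon, and then retries:
 *
 *	while ((pg = uvm_pagealloc(uobj, off, NULL, 0)) == NULL) {
 *		mutex_exit(&uobj->vmobjlock);
 *		uvm_wait("somewait");
 *		mutex_enter(&uobj->vmobjlock);
 *	}
 *
 * "somewait" and the single vmobjlock are placeholders; real callers
 * release whatever locks they happen to hold.
 */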
    175  1.93.4.2.4.4      matt 
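/*
 * uvmpd_checkgroup: DEBUG-only consistency check of a group's queue
 * membership: a group with pageouts in flight must be on the paging
 * queue (and not on the pending queue); otherwise it must be on the
 * pending queue exactly when it is marked as needing a scan.
 */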
    176  1.93.4.2.4.4      matt static void
    177  1.93.4.2.4.4      matt uvmpd_checkgroup(const struct uvm_pggroup *grp)
    178  1.93.4.2.4.4      matt {
    179  1.93.4.2.4.4      matt #ifdef DEBUG
    180  1.93.4.2.4.4      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    181  1.93.4.2.4.4      matt 	bool in_pendingq = false;
    182  1.93.4.2.4.4      matt 	bool in_pagingq = false;
    183  1.93.4.2.4.4      matt 	const struct uvm_pggroup *tstgrp;
    184  1.93.4.2.4.4      matt 
    185  1.93.4.2.4.4      matt 	TAILQ_FOREACH(tstgrp, &pdinfo->pd_pendingq, pgrp_pending_link) {
    186  1.93.4.2.4.4      matt 		if (tstgrp == grp) {
    187  1.93.4.2.4.4      matt 			in_pendingq = true;
    188  1.93.4.2.4.4      matt 			break;
    189  1.93.4.2.4.4      matt 		}
    190  1.93.4.2.4.4      matt 	}
    191  1.93.4.2.4.4      matt 
    192  1.93.4.2.4.4      matt 	TAILQ_FOREACH(tstgrp, &pdinfo->pd_pagingq, pgrp_paging_link) {
    193  1.93.4.2.4.4      matt 		if (tstgrp == grp) {
    194  1.93.4.2.4.4      matt 			in_pagingq = true;
    195  1.93.4.2.4.4      matt 			break;
    196  1.93.4.2.4.4      matt 		}
    197  1.93.4.2.4.4      matt 	}
    198  1.93.4.2.4.4      matt 
    199  1.93.4.2.4.4      matt 	if (grp->pgrp_paging > 0) {
    200  1.93.4.2.4.4      matt 		KASSERT(in_pagingq);
    201  1.93.4.2.4.4      matt 		KASSERT(!in_pendingq);
    202  1.93.4.2.4.4      matt 	} else {
    203  1.93.4.2.4.4      matt 		KASSERT(!in_pagingq);
    204  1.93.4.2.4.4      matt 		KASSERT(in_pendingq == grp->pgrp_scan_needed);
    205  1.93.4.2.4.4      matt 	}
    206  1.93.4.2.4.4      matt #endif
    207  1.93.4.2.4.4      matt }
    208  1.93.4.2.4.4      matt 
    209          1.77      yamt /*
    210          1.77      yamt  * uvm_kick_pdaemon: perform checks to determine if we need to
    211          1.77      yamt  * give the pagedaemon a nudge, and do so if necessary.
    212          1.89        ad  *
    213          1.89        ad  * => called with uvm_fpageqlock held.
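 * => recomputes each group's pgrp_scan_needed and keeps the pending
 *    queue (pd_pendingq) in sync with it.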
    214          1.77      yamt  */
    215          1.77      yamt 
    216          1.77      yamt void
    217          1.77      yamt uvm_kick_pdaemon(void)
    218          1.77      yamt {
    219  1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    220  1.93.4.2.4.3      matt 	bool need_wakeup = false;
    221  1.93.4.2.4.3      matt 	UVMHIST_FUNC(__func__); UVMHIST_CALLED(pdhist);
    222          1.77      yamt 
    223          1.89        ad 	KASSERT(mutex_owned(&uvm_fpageqlock));
    224          1.89        ad 
    225  1.93.4.2.4.3      matt 	struct uvm_pggroup *grp;
    226  1.93.4.2.4.3      matt 	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
    227  1.93.4.2.4.3      matt 		const bool prev_scan_needed = grp->pgrp_scan_needed;
    228  1.93.4.2.4.3      matt 
    229  1.93.4.2.4.3      matt 		KASSERT(grp->pgrp_npages > 0);
    230  1.93.4.2.4.4      matt 		uvmpd_checkgroup(grp);
    231  1.93.4.2.4.3      matt 
    232  1.93.4.2.4.3      matt 		grp->pgrp_scan_needed =
    233  1.93.4.2.4.3      matt 		    grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freemin
    234  1.93.4.2.4.3      matt 		    || (grp->pgrp_free + grp->pgrp_paging < grp->pgrp_freetarg
    235  1.93.4.2.4.3      matt 			&& uvmpdpol_needsscan_p(grp));
    236  1.93.4.2.4.3      matt 
    237  1.93.4.2.4.3      matt 		if (prev_scan_needed != grp->pgrp_scan_needed) {
    238  1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist, " [%zd] %d->%d (scan=%d)",
    239  1.93.4.2.4.3      matt 			    grp - uvm.pggroups, prev_scan_needed,
    240  1.93.4.2.4.3      matt 			    grp->pgrp_scan_needed, uvmpdpol_needsscan_p(grp));
    241  1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist, " [%zd] %d < min(%d,%d)",
    242  1.93.4.2.4.3      matt 			    grp - uvm.pggroups,
    243  1.93.4.2.4.3      matt 			    grp->pgrp_free + grp->pgrp_paging,
    244  1.93.4.2.4.3      matt 			    grp->pgrp_freemin, grp->pgrp_freetarg);
    245  1.93.4.2.4.3      matt 		}
    246  1.93.4.2.4.3      matt 
    247  1.93.4.2.4.4      matt 		if (prev_scan_needed != grp->pgrp_scan_needed) {
    248  1.93.4.2.4.3      matt 			if (grp->pgrp_scan_needed) {
    249  1.93.4.2.4.3      matt 				TAILQ_INSERT_TAIL(&pdinfo->pd_pendingq,
    250  1.93.4.2.4.4      matt 				    grp, pgrp_pending_link);
    251  1.93.4.2.4.3      matt 				need_wakeup = true;
    252  1.93.4.2.4.3      matt 			} else {
    253  1.93.4.2.4.3      matt 				TAILQ_REMOVE(&pdinfo->pd_pendingq,
    254  1.93.4.2.4.4      matt 				    grp, pgrp_pending_link);
    255  1.93.4.2.4.3      matt 			}
    256  1.93.4.2.4.4      matt 			uvmpd_checkgroup(grp);
    257  1.93.4.2.4.3      matt 		}
    258          1.77      yamt 	}
    259  1.93.4.2.4.3      matt 
    260  1.93.4.2.4.3      matt 	if (need_wakeup)
    261  1.93.4.2.4.3      matt 		wakeup(&uvm.pagedaemon);
    262  1.93.4.2.4.3      matt 
    263  1.93.4.2.4.3      matt 	UVMHIST_LOG(pdhist, " <- done: wakeup=%d!",
    264  1.93.4.2.4.4      matt 	    need_wakeup, 0, 0, 0);
    265          1.77      yamt }
    266           1.1       mrg 
    267           1.1       mrg /*
    268           1.1       mrg  * uvmpd_tune: tune paging parameters
    269           1.1       mrg  *
      270           1.1       mrg  * => called whenever memory is added to (or removed from?) the system
    271           1.1       mrg  * => caller must call with page queues locked
    272           1.1       mrg  */
    273           1.1       mrg 
    274          1.65   thorpej static void
    275          1.37       chs uvmpd_tune(void)
    276           1.8       mrg {
    277  1.93.4.2.4.3      matt 	u_int extrapages = atomic_swap_uint(&uvm_extrapages, 0) / uvmexp.ncolors;
    278  1.93.4.2.4.3      matt 	u_int freemin = 0;
    279  1.93.4.2.4.3      matt 	u_int freetarg = 0;
    280  1.93.4.2.4.3      matt 	u_int wiredmax = 0;
    281      1.93.4.2       snj 
    282           1.8       mrg 	UVMHIST_FUNC("uvmpd_tune"); UVMHIST_CALLED(pdhist);
    283           1.1       mrg 
    284  1.93.4.2.4.3      matt 	extrapages = roundup(extrapages, uvmexp.npggroups);
    285  1.93.4.2.4.3      matt 
    286  1.93.4.2.4.3      matt 	struct uvm_pggroup *grp;
    287  1.93.4.2.4.3      matt 	STAILQ_FOREACH(grp, &uvm.page_groups, pgrp_uvm_link) {
    288  1.93.4.2.4.3      matt 		KASSERT(grp->pgrp_npages > 0);
    289  1.93.4.2.4.3      matt 
    290  1.93.4.2.4.3      matt 		/*
    291  1.93.4.2.4.3      matt 		 * try to keep 0.5% of available RAM free, but limit
    292  1.93.4.2.4.3      matt 		 * to between 128k and 1024k per-CPU.
    293  1.93.4.2.4.3      matt 		 * XXX: what are these values good for?
    294  1.93.4.2.4.3      matt 		 */
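		/*
		 * Worked example (illustrative, assuming 4 KiB pages): a
		 * 1 GiB group has 262144 pages, so npages / 200 = 1310,
		 * which the 1024k cap clamps to 256 pages; with ncpu = 4
		 * that gives freemin = 1024 pages (4 MiB).
		 */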
    295  1.93.4.2.4.3      matt 		u_int val = grp->pgrp_npages / 200;
    296  1.93.4.2.4.3      matt 		val = MAX(val, (128*1024) >> PAGE_SHIFT);
    297  1.93.4.2.4.3      matt 		val = MIN(val, (1024*1024) >> PAGE_SHIFT);
    298  1.93.4.2.4.3      matt 		val *= ncpu;
    299  1.93.4.2.4.3      matt 
    300  1.93.4.2.4.3      matt 		/* Make sure there's always a user page free. */
    301  1.93.4.2.4.3      matt 		if (val * uvmexp.npggroups <= uvmexp.reserve_kernel)
    302  1.93.4.2.4.3      matt 			val = uvmexp.reserve_kernel / uvmexp.npggroups + 1;
    303  1.93.4.2.4.3      matt 
    304  1.93.4.2.4.3      matt 		grp->pgrp_freemin = val;
    305  1.93.4.2.4.3      matt 
    306  1.93.4.2.4.3      matt 		/* Calculate freetarg. */
    307  1.93.4.2.4.3      matt 		val = (grp->pgrp_freemin * 4) / 3;
    308  1.93.4.2.4.3      matt 		if (val <= grp->pgrp_freemin)
    309  1.93.4.2.4.3      matt 			val = grp->pgrp_freemin + 1;
    310  1.93.4.2.4.3      matt 		grp->pgrp_freetarg = val + extrapages / uvmexp.npggroups;
    311  1.93.4.2.4.3      matt 		if (grp->pgrp_freetarg > grp->pgrp_npages / 2)
    312  1.93.4.2.4.3      matt 			grp->pgrp_freetarg = grp->pgrp_npages / 2;
    313  1.93.4.2.4.3      matt 
    314  1.93.4.2.4.3      matt 		grp->pgrp_wiredmax = grp->pgrp_npages / 3;
    315  1.93.4.2.4.3      matt 		UVMHIST_LOG(pdhist,
    316  1.93.4.2.4.3      matt 		    "[%zd]: freemin=%d, freetarg=%d, wiredmax=%d",
    317  1.93.4.2.4.3      matt 		    grp - uvm.pggroups, grp->pgrp_freemin, grp->pgrp_freetarg,
    318  1.93.4.2.4.3      matt 		    grp->pgrp_wiredmax);
    319  1.93.4.2.4.3      matt 
    320  1.93.4.2.4.3      matt 		freemin += grp->pgrp_freemin;
    321  1.93.4.2.4.3      matt 		freetarg += grp->pgrp_freetarg;
    322  1.93.4.2.4.3      matt 		wiredmax += grp->pgrp_wiredmax;
    323  1.93.4.2.4.3      matt 	}
    324  1.93.4.2.4.3      matt 
    325  1.93.4.2.4.3      matt 	uvmexp.freemin = freemin;
    326  1.93.4.2.4.3      matt 	uvmexp.freetarg = freetarg;
    327  1.93.4.2.4.3      matt 	uvmexp.wiredmax = wiredmax;
    328          1.61       chs 
    329           1.8       mrg 	UVMHIST_LOG(pdhist, "<- done, freemin=%d, freetarg=%d, wiredmax=%d",
    330  1.93.4.2.4.3      matt 	    uvmexp.freemin, uvmexp.freetarg, uvmexp.wiredmax, 0);
    331           1.1       mrg }
    332           1.1       mrg 
    333           1.1       mrg /*
    334           1.1       mrg  * uvm_pageout: the main loop for the pagedaemon
    335           1.1       mrg  */
    336           1.1       mrg 
    337           1.8       mrg void
    338          1.80      yamt uvm_pageout(void *arg)
    339           1.8       mrg {
    340  1.93.4.2.4.3      matt 	u_int npages = 0;
    341  1.93.4.2.4.3      matt 	u_int extrapages = 0;
    342  1.93.4.2.4.3      matt 	u_int npggroups = 0;
    343          1.88        ad 	struct pool *pp;
    344          1.88        ad 	uint64_t where;
    345  1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    346           1.8       mrg 	UVMHIST_FUNC("uvm_pageout"); UVMHIST_CALLED(pdhist);
    347          1.24       chs 
    348           1.8       mrg 	UVMHIST_LOG(pdhist,"<starting uvm pagedaemon>", 0, 0, 0, 0);
    349           1.8       mrg 
    350           1.8       mrg 	/*
    351           1.8       mrg 	 * ensure correct priority and set paging parameters...
    352           1.8       mrg 	 */
    353           1.8       mrg 
    354          1.86        ad 	uvm.pagedaemon_lwp = curlwp;
    355          1.89        ad 	mutex_enter(&uvm_pageqlock);
    356           1.8       mrg 	npages = uvmexp.npages;
    357           1.8       mrg 	uvmpd_tune();
    358          1.89        ad 	mutex_exit(&uvm_pageqlock);
    359           1.8       mrg 
    360           1.8       mrg 	/*
    361           1.8       mrg 	 * main loop
    362           1.8       mrg 	 */
    363          1.24       chs 
    364          1.24       chs 	for (;;) {
    365  1.93.4.2.4.3      matt 		struct uvm_pggroup *grp;
    366  1.93.4.2.4.3      matt 		bool need_free = false;
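		/*
		 * Each pass: sleep until kicked, retune targets if memory
		 * was added or removed, scan every group on the pending
		 * queue, wake any waiters, and, if we still need free
		 * memory, drain the pools and the buffer cache.
		 */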
    367  1.93.4.2.4.3      matt 		u_int bufcnt = 0;
    368          1.24       chs 
    369          1.89        ad 		mutex_spin_enter(&uvm_fpageqlock);
    370  1.93.4.2.4.3      matt 		/*
      371  1.93.4.2.4.3      matt 		 * If no one is waiting for free pages, or no group is
      372  1.93.4.2.4.3      matt 		 * pending a scan, then sleep.
    373  1.93.4.2.4.3      matt 		 */
    374  1.93.4.2.4.3      matt 		if (pdinfo->pd_waiters == 0
    375  1.93.4.2.4.3      matt 		    || TAILQ_FIRST(&pdinfo->pd_pendingq) == NULL) {
    376          1.89        ad 			UVMHIST_LOG(pdhist,"  <<SLEEPING>>",0,0,0,0);
    377          1.89        ad 			UVM_UNLOCK_AND_WAIT(&uvm.pagedaemon,
    378          1.89        ad 			    &uvm_fpageqlock, false, "pgdaemon", 0);
    379          1.89        ad 			uvmexp.pdwoke++;
    380          1.89        ad 			UVMHIST_LOG(pdhist,"  <<WOKE UP>>",0,0,0,0);
    381          1.89        ad 		} else {
    382          1.89        ad 			mutex_spin_exit(&uvm_fpageqlock);
    383          1.89        ad 		}
    384          1.24       chs 
    385           1.8       mrg 		/*
    386          1.24       chs 		 * now lock page queues and recompute inactive count
    387           1.8       mrg 		 */
    388           1.8       mrg 
    389          1.89        ad 		mutex_enter(&uvm_pageqlock);
    390  1.93.4.2.4.3      matt 		mutex_spin_enter(&uvm_fpageqlock);
    391  1.93.4.2.4.3      matt 
    392  1.93.4.2.4.3      matt 		if (npages != uvmexp.npages
    393  1.93.4.2.4.3      matt 		    || extrapages != uvm_extrapages
    394  1.93.4.2.4.3      matt 		    || npggroups != uvmexp.npggroups) {
    395          1.24       chs 			npages = uvmexp.npages;
    396          1.61       chs 			extrapages = uvm_extrapages;
    397  1.93.4.2.4.3      matt 			npggroups = uvmexp.npggroups;
    398          1.24       chs 			uvmpd_tune();
    399          1.24       chs 		}
    400          1.24       chs 
    401          1.60     enami 		/*
      402          1.60     enami 		 * Estimate a drain hint.  Note that buffer memory is returned
      403          1.60     enami 		 * to the system only when an entire pool page is empty.
    404          1.60     enami 		 */
    405  1.93.4.2.4.3      matt 		bool need_wakeup = false;
    406  1.93.4.2.4.3      matt 		while ((grp = TAILQ_FIRST(&pdinfo->pd_pendingq)) != NULL) {
    407  1.93.4.2.4.3      matt 			KASSERT(grp->pgrp_npages > 0);
    408          1.60     enami 
    409  1.93.4.2.4.3      matt 			uvmpdpol_tune(grp);
    410           1.8       mrg 
    411  1.93.4.2.4.4      matt 			/*
    412  1.93.4.2.4.4      matt 			 * While we are locked, remove this from the pendingq.
    413  1.93.4.2.4.4      matt 			 */
    414  1.93.4.2.4.4      matt 			uvmpd_checkgroup(grp);
    415  1.93.4.2.4.4      matt 			KASSERT(grp->pgrp_scan_needed);
    416  1.93.4.2.4.4      matt 			TAILQ_REMOVE(&pdinfo->pd_pendingq, grp,
    417  1.93.4.2.4.4      matt 			    pgrp_pending_link);
    418  1.93.4.2.4.4      matt 			grp->pgrp_scan_needed = false;
    419  1.93.4.2.4.4      matt 			uvmpd_checkgroup(grp);
    420  1.93.4.2.4.4      matt 
    421  1.93.4.2.4.3      matt 			int diff = grp->pgrp_freetarg - grp->pgrp_free;
    422  1.93.4.2.4.3      matt 			if (diff < 0)
    423  1.93.4.2.4.3      matt 				diff = 0;
    424          1.89        ad 
    425  1.93.4.2.4.3      matt 			bufcnt += diff;
    426           1.8       mrg 
    427  1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist," [%zu]: "
    428  1.93.4.2.4.3      matt 			    "free/ftarg/fmin=%u/%u/%u",
    429  1.93.4.2.4.3      matt 			    grp - uvm.pggroups, grp->pgrp_free,
    430  1.93.4.2.4.3      matt 			    grp->pgrp_freetarg, grp->pgrp_freemin);
      431  1.93.4.2.4.3      matt 
    433  1.93.4.2.4.3      matt 			if (grp->pgrp_paging < diff)
    434  1.93.4.2.4.3      matt 				need_free = true;
    435  1.93.4.2.4.3      matt 
    436  1.93.4.2.4.3      matt 			/*
    437  1.93.4.2.4.3      matt 			 * scan if needed
    438  1.93.4.2.4.3      matt 			 */
    439  1.93.4.2.4.3      matt 			if (grp->pgrp_paging < diff
    440  1.93.4.2.4.3      matt 			    || uvmpdpol_needsscan_p(grp)) {
    441  1.93.4.2.4.3      matt 				mutex_spin_exit(&uvm_fpageqlock);
    442  1.93.4.2.4.3      matt 				uvmpd_scan(grp);
    443  1.93.4.2.4.3      matt 				mutex_spin_enter(&uvm_fpageqlock);
    444  1.93.4.2.4.3      matt 			} else {
    445  1.93.4.2.4.3      matt 				UVMHIST_LOG(pdhist,
    446  1.93.4.2.4.3      matt 				    " [%zu]: diff/paging=%u/%u: "
    447  1.93.4.2.4.3      matt 				    "scan skipped",
    448  1.93.4.2.4.3      matt 				    grp - uvm.pggroups, diff,
    449  1.93.4.2.4.3      matt 				    grp->pgrp_paging, 0);
    450  1.93.4.2.4.3      matt 			}
    451  1.93.4.2.4.3      matt 
    452  1.93.4.2.4.3      matt 			/*
    453  1.93.4.2.4.3      matt 			 * if there's any free memory to be had,
    454  1.93.4.2.4.3      matt 			 * wake up any waiters.
    455  1.93.4.2.4.3      matt 			 */
    456  1.93.4.2.4.3      matt 			if (grp->pgrp_free * uvmexp.npggroups > uvmexp.reserve_kernel
    457  1.93.4.2.4.3      matt 			    || grp->pgrp_paging == 0) {
    458  1.93.4.2.4.3      matt 				need_wakeup = true;
    459  1.93.4.2.4.3      matt 			}
    460  1.93.4.2.4.3      matt 
    461  1.93.4.2.4.3      matt 		}
    462  1.93.4.2.4.3      matt 		if (need_wakeup) {
    463  1.93.4.2.4.3      matt 			pdinfo->pd_waiters = 0;
    464          1.24       chs 			wakeup(&uvmexp.free);
    465           1.8       mrg 		}
      466  1.93.4.2.4.3      matt 		KASSERT(!need_free || need_wakeup);
    467          1.89        ad 		mutex_spin_exit(&uvm_fpageqlock);
    468           1.1       mrg 
    469           1.8       mrg 		/*
    470  1.93.4.2.4.3      matt 		 * scan done.  unlock page queues (the only lock
    471  1.93.4.2.4.3      matt 		 * we are holding)
    472           1.8       mrg 		 */
    473          1.89        ad 		mutex_exit(&uvm_pageqlock);
    474          1.38       chs 
    475          1.88        ad 		/*
    476          1.93        ad 		 * if we don't need free memory, we're done.
    477          1.93        ad 		 */
    478          1.93        ad 
    479  1.93.4.2.4.3      matt 		if (!need_free)
    480          1.93        ad 			continue;
    481          1.93        ad 
    482          1.93        ad 		/*
    483          1.88        ad 		 * start draining pool resources now that we're not
    484          1.88        ad 		 * holding any locks.
    485          1.88        ad 		 */
    486          1.88        ad 		pool_drain_start(&pp, &where);
    487          1.60     enami 
    488          1.38       chs 		/*
    489          1.88        ad 		 * kill unused metadata buffers.
    490          1.38       chs 		 */
    491  1.93.4.2.4.3      matt 		if (bufcnt > 0) {
    492  1.93.4.2.4.3      matt 			mutex_enter(&bufcache_lock);
    493  1.93.4.2.4.3      matt 			buf_drain(bufcnt << PAGE_SHIFT);
    494  1.93.4.2.4.3      matt 			mutex_exit(&bufcache_lock);
    495  1.93.4.2.4.3      matt 		}
    496          1.57  jdolecek 
    497          1.57  jdolecek 		/*
    498          1.88        ad 		 * complete draining the pools.
    499          1.88        ad 		 */
    500          1.88        ad 		pool_drain_end(pp, where);
    501          1.24       chs 	}
    502          1.24       chs 	/*NOTREACHED*/
    503          1.24       chs }
    504          1.24       chs 
    505           1.8       mrg 
    506          1.24       chs /*
    507          1.81      yamt  * uvm_aiodone_worker: a workqueue callback for the aiodone daemon.
    508          1.24       chs  */
    509           1.8       mrg 
    510          1.24       chs void
    511          1.81      yamt uvm_aiodone_worker(struct work *wk, void *dummy)
    512          1.24       chs {
    513          1.81      yamt 	struct buf *bp = (void *)wk;
    514           1.9        pk 
    515          1.81      yamt 	KASSERT(&bp->b_work == wk);
    516           1.8       mrg 
    517          1.81      yamt 	/*
    518          1.81      yamt 	 * process an i/o that's done.
    519          1.81      yamt 	 */
    520           1.8       mrg 
    521          1.81      yamt 	(*bp->b_iodone)(bp);
    522          1.89        ad }
    523          1.89        ad 
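/*
 * uvm_pageout_start: note that npages pageouts are starting on grp.
 *
 * => puts the group on the paging queue when its first pageout begins.
 * => takes uvm_fpageqlock itself; call with it unheld.
 */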
    524          1.89        ad void
    525  1.93.4.2.4.3      matt uvm_pageout_start(struct uvm_pggroup *grp, u_int npages)
    526          1.89        ad {
    527  1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    528          1.89        ad 
    529          1.89        ad 	mutex_spin_enter(&uvm_fpageqlock);
    530  1.93.4.2.4.3      matt 
    531          1.89        ad 	uvmexp.paging += npages;
    532  1.93.4.2.4.4      matt 	uvmpd_checkgroup(grp);
    533  1.93.4.2.4.3      matt 	if (grp->pgrp_paging == 0) {
    534  1.93.4.2.4.4      matt 		TAILQ_INSERT_TAIL(&pdinfo->pd_pagingq, grp, pgrp_paging_link);
    535  1.93.4.2.4.4      matt 		uvmpd_checkgroup(grp);
    536  1.93.4.2.4.3      matt 	}
    537  1.93.4.2.4.3      matt 	grp->pgrp_paging += npages;
    538          1.89        ad 	mutex_spin_exit(&uvm_fpageqlock);
    539          1.89        ad }
    540          1.89        ad 
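/*
 * uvm_pageout_done: a pageout of pg has completed; "freed" says whether
 * the page was freed.
 *
 * => drops the owning group's paging count, removing the group from the
 *    paging queue when the count reaches zero.
 * => wakes the pagedaemon or the LWPs waiting for free pages.
 */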
    541          1.89        ad void
    542  1.93.4.2.4.3      matt uvm_pageout_done(struct vm_page *pg, bool freed)
    543          1.89        ad {
    544  1.93.4.2.4.3      matt 	struct uvm_pdinfo * const pdinfo = &uvm_pdinfo;
    545  1.93.4.2.4.3      matt 
    546  1.93.4.2.4.3      matt 	KASSERT(pg->flags & PG_PAGEOUT);
    547          1.89        ad 
    548          1.89        ad 	mutex_spin_enter(&uvm_fpageqlock);
    549  1.93.4.2.4.3      matt 	struct uvm_pggroup * const grp = uvm_page_to_pggroup(pg);
    550  1.93.4.2.4.3      matt 
    551  1.93.4.2.4.3      matt 	KASSERT(grp->pgrp_paging > 0);
    552  1.93.4.2.4.4      matt 	uvmpd_checkgroup(grp);
    553  1.93.4.2.4.3      matt 	if (--grp->pgrp_paging == 0) {
    554  1.93.4.2.4.4      matt 		TAILQ_REMOVE(&pdinfo->pd_pagingq, grp, pgrp_paging_link);
    555  1.93.4.2.4.4      matt 		uvmpd_checkgroup(grp);
    556  1.93.4.2.4.3      matt 	}
    557  1.93.4.2.4.3      matt 	KASSERT(uvmexp.paging > 0);
    558  1.93.4.2.4.3      matt 	uvmexp.paging--;
    559  1.93.4.2.4.3      matt 	grp->pgrp_pdfreed += freed;
    560          1.89        ad 
    561          1.89        ad 	/*
    562          1.89        ad 	 * wake up either of pagedaemon or LWPs waiting for it.
    563          1.89        ad 	 */
    564  1.93.4.2.4.3      matt 	if (grp->pgrp_free * uvmexp.npggroups <= uvmexp.reserve_kernel) {
    565          1.81      yamt 		wakeup(&uvm.pagedaemon);
    566          1.81      yamt 	} else {
    567  1.93.4.2.4.3      matt 		pdinfo->pd_waiters = 0;
    568          1.81      yamt 		wakeup(&uvmexp.free);
    569           1.8       mrg 	}
    570  1.93.4.2.4.3      matt 
    571          1.89        ad 	mutex_spin_exit(&uvm_fpageqlock);
    572           1.1       mrg }
    573           1.1       mrg 
    574          1.76      yamt /*
    575          1.76      yamt  * uvmpd_trylockowner: trylock the page's owner.
    576          1.76      yamt  *
    577          1.76      yamt  * => called with pageq locked.
    578          1.76      yamt  * => resolve orphaned O->A loaned page.
    579          1.89        ad  * => return the locked mutex on success.  otherwise, return NULL.
    580          1.76      yamt  */
    581          1.76      yamt 
    582          1.89        ad kmutex_t *
    583          1.76      yamt uvmpd_trylockowner(struct vm_page *pg)
    584          1.76      yamt {
    585          1.76      yamt 	struct uvm_object *uobj = pg->uobject;
    586          1.89        ad 	kmutex_t *slock;
    587          1.89        ad 
    588          1.89        ad 	KASSERT(mutex_owned(&uvm_pageqlock));
    589          1.76      yamt 
    590          1.76      yamt 	if (uobj != NULL) {
    591          1.76      yamt 		slock = &uobj->vmobjlock;
    592          1.76      yamt 	} else {
    593          1.76      yamt 		struct vm_anon *anon = pg->uanon;
    594          1.76      yamt 
    595          1.76      yamt 		KASSERT(anon != NULL);
    596          1.76      yamt 		slock = &anon->an_lock;
    597          1.76      yamt 	}
    598          1.76      yamt 
    599          1.89        ad 	if (!mutex_tryenter(slock)) {
    600          1.76      yamt 		return NULL;
    601          1.76      yamt 	}
    602          1.76      yamt 
    603          1.76      yamt 	if (uobj == NULL) {
    604          1.76      yamt 
    605          1.76      yamt 		/*
    606          1.76      yamt 		 * set PQ_ANON if it isn't set already.
    607          1.76      yamt 		 */
    608          1.76      yamt 
    609          1.76      yamt 		if ((pg->pqflags & PQ_ANON) == 0) {
    610          1.76      yamt 			KASSERT(pg->loan_count > 0);
    611          1.76      yamt 			pg->loan_count--;
    612          1.76      yamt 			pg->pqflags |= PQ_ANON;
    613          1.76      yamt 			/* anon now owns it */
    614          1.76      yamt 		}
    615          1.76      yamt 	}
    616          1.76      yamt 
    617          1.76      yamt 	return slock;
    618          1.76      yamt }
    619          1.76      yamt 
    620          1.73      yamt #if defined(VMSWAP)
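/*
 * A swapcluster batches up to MAXPHYS worth of swap-backed pages so
 * that they can be written out with a single uvm_swap_put().  swc_slot
 * is the base swap slot of the cluster (0 when none is being built),
 * swc_nallocated is the number of slots reserved, and swc_nused is the
 * number filled so far.
 */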
    621          1.73      yamt struct swapcluster {
    622          1.73      yamt 	int swc_slot;
    623          1.73      yamt 	int swc_nallocated;
    624          1.73      yamt 	int swc_nused;
    625          1.75      yamt 	struct vm_page *swc_pages[howmany(MAXPHYS, MIN_PAGE_SIZE)];
    626          1.73      yamt };
    627          1.73      yamt 
    628          1.73      yamt static void
    629          1.73      yamt swapcluster_init(struct swapcluster *swc)
    630          1.73      yamt {
    631          1.73      yamt 
    632          1.73      yamt 	swc->swc_slot = 0;
    633          1.89        ad 	swc->swc_nused = 0;
    634          1.73      yamt }
    635          1.73      yamt 
    636          1.73      yamt static int
    637          1.73      yamt swapcluster_allocslots(struct swapcluster *swc)
    638          1.73      yamt {
    639          1.73      yamt 	int slot;
    640          1.73      yamt 	int npages;
    641          1.73      yamt 
    642          1.73      yamt 	if (swc->swc_slot != 0) {
    643          1.73      yamt 		return 0;
    644          1.73      yamt 	}
    645          1.73      yamt 
    646          1.73      yamt 	/* Even with strange MAXPHYS, the shift
    647          1.73      yamt 	   implicitly rounds down to a page. */
    648          1.73      yamt 	npages = MAXPHYS >> PAGE_SHIFT;
    649          1.84   thorpej 	slot = uvm_swap_alloc(&npages, true);
    650          1.73      yamt 	if (slot == 0) {
    651          1.73      yamt 		return ENOMEM;
    652          1.73      yamt 	}
    653          1.73      yamt 	swc->swc_slot = slot;
    654          1.73      yamt 	swc->swc_nallocated = npages;
    655          1.73      yamt 	swc->swc_nused = 0;
    656          1.73      yamt 
    657          1.73      yamt 	return 0;
    658          1.73      yamt }
    659          1.73      yamt 
    660          1.73      yamt static int
    661          1.73      yamt swapcluster_add(struct swapcluster *swc, struct vm_page *pg)
    662          1.73      yamt {
    663          1.73      yamt 	int slot;
    664          1.73      yamt 	struct uvm_object *uobj;
    665          1.73      yamt 
    666          1.73      yamt 	KASSERT(swc->swc_slot != 0);
    667          1.73      yamt 	KASSERT(swc->swc_nused < swc->swc_nallocated);
    668          1.73      yamt 	KASSERT((pg->pqflags & PQ_SWAPBACKED) != 0);
    669          1.73      yamt 
    670          1.73      yamt 	slot = swc->swc_slot + swc->swc_nused;
    671          1.73      yamt 	uobj = pg->uobject;
    672          1.73      yamt 	if (uobj == NULL) {
    673          1.89        ad 		KASSERT(mutex_owned(&pg->uanon->an_lock));
    674          1.73      yamt 		pg->uanon->an_swslot = slot;
    675          1.73      yamt 	} else {
    676          1.73      yamt 		int result;
    677          1.73      yamt 
    678          1.89        ad 		KASSERT(mutex_owned(&uobj->vmobjlock));
    679          1.73      yamt 		result = uao_set_swslot(uobj, pg->offset >> PAGE_SHIFT, slot);
    680          1.73      yamt 		if (result == -1) {
    681          1.73      yamt 			return ENOMEM;
    682          1.73      yamt 		}
    683          1.73      yamt 	}
    684          1.73      yamt 	swc->swc_pages[swc->swc_nused] = pg;
    685          1.73      yamt 	swc->swc_nused++;
    686          1.73      yamt 
    687          1.73      yamt 	return 0;
    688          1.73      yamt }
    689          1.73      yamt 
    690          1.73      yamt static void
    691  1.93.4.2.4.3      matt swapcluster_flush(struct uvm_pggroup *grp, struct swapcluster *swc, bool now)
    692          1.73      yamt {
    693          1.73      yamt 	int slot;
    694  1.93.4.2.4.3      matt 	u_int nused;
    695          1.73      yamt 	int nallocated;
    696          1.73      yamt 	int error;
    697          1.73      yamt 
    698          1.73      yamt 	if (swc->swc_slot == 0) {
    699          1.73      yamt 		return;
    700          1.73      yamt 	}
    701          1.73      yamt 	KASSERT(swc->swc_nused <= swc->swc_nallocated);
    702          1.73      yamt 
    703          1.73      yamt 	slot = swc->swc_slot;
    704          1.73      yamt 	nused = swc->swc_nused;
    705          1.73      yamt 	nallocated = swc->swc_nallocated;
    706          1.73      yamt 
    707          1.73      yamt 	/*
    708          1.73      yamt 	 * if this is the final pageout we could have a few
    709          1.73      yamt 	 * unused swap blocks.  if so, free them now.
    710          1.73      yamt 	 */
    711          1.73      yamt 
    712          1.73      yamt 	if (nused < nallocated) {
    713          1.73      yamt 		if (!now) {
    714          1.73      yamt 			return;
    715          1.73      yamt 		}
    716          1.73      yamt 		uvm_swap_free(slot + nused, nallocated - nused);
    717          1.73      yamt 	}
    718          1.73      yamt 
    719          1.73      yamt 	/*
    720          1.73      yamt 	 * now start the pageout.
    721          1.73      yamt 	 */
    722          1.73      yamt 
    723          1.91      yamt 	if (nused > 0) {
    724  1.93.4.2.4.3      matt 		grp->pgrp_pdpageouts++;
    725  1.93.4.2.4.3      matt 		uvmexp.pdpageouts++;	/* procfs */
    726  1.93.4.2.4.3      matt 		uvm_pageout_start(grp, nused);
    727          1.91      yamt 		error = uvm_swap_put(slot, swc->swc_pages, nused, 0);
    728          1.92      yamt 		KASSERT(error == 0 || error == ENOMEM);
    729          1.91      yamt 	}
    730          1.73      yamt 
    731          1.73      yamt 	/*
    732          1.73      yamt 	 * zero swslot to indicate that we are
    733          1.73      yamt 	 * no longer building a swap-backed cluster.
    734          1.73      yamt 	 */
    735          1.73      yamt 
    736          1.73      yamt 	swc->swc_slot = 0;
    737          1.89        ad 	swc->swc_nused = 0;
    738          1.89        ad }
    739          1.89        ad 
    740          1.89        ad static int
    741          1.89        ad swapcluster_nused(struct swapcluster *swc)
    742          1.89        ad {
    743          1.89        ad 
    744          1.89        ad 	return swc->swc_nused;
    745          1.73      yamt }
    746          1.77      yamt 
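/*
 * Sketch of the intended call sequence (illustrative, pieced together
 * from uvmpd_scan_queue below; failed steps reactivate the page):
 *
 *	swapcluster_init(&swc);
 *	for each victim page {
 *		if (swapcluster_allocslots(&swc))
 *			continue;			reactivate, no swap
 *		if (swapcluster_add(&swc, pg))
 *			continue;			reactivate, no slot
 *		swapcluster_flush(grp, &swc, false);	writes when full
 *	}
 *	swapcluster_flush(grp, &swc, true);		final partial flush
 */
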
    747          1.77      yamt /*
    748          1.77      yamt  * uvmpd_dropswap: free any swap allocated to this page.
    749          1.77      yamt  *
    750          1.77      yamt  * => called with owner locked.
    751          1.84   thorpej  * => return true if a page had an associated slot.
    752          1.77      yamt  */
    753          1.77      yamt 
    754          1.83   thorpej static bool
    755          1.77      yamt uvmpd_dropswap(struct vm_page *pg)
    756          1.77      yamt {
    757          1.84   thorpej 	bool result = false;
    758          1.77      yamt 	struct vm_anon *anon = pg->uanon;
    759          1.77      yamt 
    760          1.77      yamt 	if ((pg->pqflags & PQ_ANON) && anon->an_swslot) {
    761          1.77      yamt 		uvm_swap_free(anon->an_swslot, 1);
    762          1.77      yamt 		anon->an_swslot = 0;
    763          1.77      yamt 		pg->flags &= ~PG_CLEAN;
    764          1.84   thorpej 		result = true;
    765          1.77      yamt 	} else if (pg->pqflags & PQ_AOBJ) {
    766          1.77      yamt 		int slot = uao_set_swslot(pg->uobject,
    767          1.77      yamt 		    pg->offset >> PAGE_SHIFT, 0);
    768          1.77      yamt 		if (slot) {
    769          1.77      yamt 			uvm_swap_free(slot, 1);
    770          1.77      yamt 			pg->flags &= ~PG_CLEAN;
    771          1.84   thorpej 			result = true;
    772          1.77      yamt 		}
    773          1.77      yamt 	}
    774          1.77      yamt 
    775          1.77      yamt 	return result;
    776          1.77      yamt }
    777          1.77      yamt 
    778          1.77      yamt /*
    779          1.77      yamt  * uvmpd_trydropswap: try to free any swap allocated to this page.
    780          1.77      yamt  *
    781          1.84   thorpej  * => return true if a slot is successfully freed.
    782          1.77      yamt  */
    783          1.77      yamt 
    784          1.83   thorpej bool
    785          1.77      yamt uvmpd_trydropswap(struct vm_page *pg)
    786          1.77      yamt {
    787          1.89        ad 	kmutex_t *slock;
    788          1.83   thorpej 	bool result;
    789          1.77      yamt 
    790          1.77      yamt 	if ((pg->flags & PG_BUSY) != 0) {
    791          1.84   thorpej 		return false;
    792          1.77      yamt 	}
    793          1.77      yamt 
    794          1.77      yamt 	/*
    795          1.77      yamt 	 * lock the page's owner.
    796          1.77      yamt 	 */
    797          1.77      yamt 
    798          1.77      yamt 	slock = uvmpd_trylockowner(pg);
    799          1.77      yamt 	if (slock == NULL) {
    800          1.84   thorpej 		return false;
    801          1.77      yamt 	}
    802          1.77      yamt 
    803          1.77      yamt 	/*
    804          1.77      yamt 	 * skip this page if it's busy.
    805          1.77      yamt 	 */
    806          1.77      yamt 
    807          1.77      yamt 	if ((pg->flags & PG_BUSY) != 0) {
    808          1.89        ad 		mutex_exit(slock);
    809          1.84   thorpej 		return false;
    810          1.77      yamt 	}
    811          1.77      yamt 
    812          1.77      yamt 	result = uvmpd_dropswap(pg);
    813          1.77      yamt 
    814          1.89        ad 	mutex_exit(slock);
    815          1.77      yamt 
    816          1.77      yamt 	return result;
    817          1.77      yamt }
    818          1.77      yamt 
    819          1.73      yamt #endif /* defined(VMSWAP) */
    820          1.73      yamt 
    821           1.1       mrg /*
      822          1.77      yamt  * uvmpd_scan_queue: scan a replacement-candidate list for pages
    823          1.77      yamt  * to clean or free.
    824           1.1       mrg  *
    825           1.1       mrg  * => called with page queues locked
    826           1.1       mrg  * => we work on meeting our free target by converting inactive pages
    827           1.1       mrg  *    into free pages.
    828           1.1       mrg  * => we handle the building of swap-backed clusters
    829           1.1       mrg  */
    830           1.1       mrg 
    831          1.65   thorpej static void
    832  1.93.4.2.4.3      matt uvmpd_scan_queue(struct uvm_pggroup *grp)
    833           1.8       mrg {
    834  1.93.4.2.4.3      matt 	struct vm_page *pg;
    835           1.8       mrg 	struct uvm_object *uobj;
    836          1.37       chs 	struct vm_anon *anon;
    837          1.68      yamt #if defined(VMSWAP)
    838          1.73      yamt 	struct swapcluster swc;
    839          1.68      yamt #endif /* defined(VMSWAP) */
    840          1.77      yamt 	int dirtyreacts;
    841          1.89        ad 	int lockownerfail;
    842          1.89        ad 	kmutex_t *slock;
    843          1.77      yamt 	UVMHIST_FUNC("uvmpd_scan_queue"); UVMHIST_CALLED(pdhist);
    844           1.1       mrg 
    845           1.8       mrg 	/*
      846          1.73      yamt 	 * swc.swc_slot is non-zero while we are building a swap cluster.
      847          1.73      yamt 	 * we want to stay in the loop while we have a page to scan or we
      848          1.73      yamt 	 * have a swap cluster to build.
    849           1.8       mrg 	 */
    850          1.24       chs 
    851          1.73      yamt #if defined(VMSWAP)
    852          1.73      yamt 	swapcluster_init(&swc);
    853          1.73      yamt #endif /* defined(VMSWAP) */
    854          1.77      yamt 
    855          1.14       chs 	dirtyreacts = 0;
    856          1.89        ad 	lockownerfail = 0;
    857  1.93.4.2.4.3      matt 	uvmpdpol_scaninit(grp);
    858          1.43       chs 
    859          1.77      yamt 	while (/* CONSTCOND */ 1) {
    860          1.24       chs 
    861          1.73      yamt 		/*
    862          1.73      yamt 		 * see if we've met the free target.
    863          1.73      yamt 		 */
    864          1.73      yamt 
    865  1.93.4.2.4.3      matt 		if (grp->pgrp_free + grp->pgrp_paging
    866          1.89        ad #if defined(VMSWAP)
    867          1.89        ad 		    + swapcluster_nused(&swc)
    868          1.89        ad #endif /* defined(VMSWAP) */
    869  1.93.4.2.4.3      matt 		    >= grp->pgrp_freetarg << 2 ||
    870          1.73      yamt 		    dirtyreacts == UVMPD_NUMDIRTYREACTS) {
    871  1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist,"  [%zd]: met free target (%u + %u >= %u): "
    872  1.93.4.2.4.3      matt 			    "exit loop", grp - uvm.pggroups,
    873  1.93.4.2.4.3      matt 			    grp->pgrp_free, grp->pgrp_paging,
    874  1.93.4.2.4.3      matt 			    grp->pgrp_freetarg << 2);
    875          1.73      yamt 			break;
    876          1.73      yamt 		}
    877          1.24       chs 
    878  1.93.4.2.4.3      matt 		pg = uvmpdpol_selectvictim(grp);
    879  1.93.4.2.4.3      matt 		if (pg == NULL) {
    880  1.93.4.2.4.3      matt 			UVMHIST_LOG(pdhist,"  [%zd]: selectvictim didn't: "
    881  1.93.4.2.4.3      matt 			    "exit loop", grp - uvm.pggroups, 0, 0, 0);
    882          1.77      yamt 			break;
    883          1.77      yamt 		}
    884  1.93.4.2.4.3      matt 		KASSERT(uvmpdpol_pageisqueued_p(pg));
    885  1.93.4.2.4.3      matt 		KASSERT(pg->wire_count == 0);
    886          1.77      yamt 
    887          1.73      yamt 		/*
    888          1.73      yamt 		 * we are below target and have a new page to consider.
    889          1.73      yamt 		 */
    890          1.30       chs 
    891  1.93.4.2.4.3      matt 		anon = pg->uanon;
    892  1.93.4.2.4.3      matt 		uobj = pg->uobject;
    893           1.8       mrg 
    894          1.73      yamt 		/*
    895          1.73      yamt 		 * first we attempt to lock the object that this page
    896          1.73      yamt 		 * belongs to.  if our attempt fails we skip on to
    897          1.73      yamt 		 * the next page (no harm done).  it is important to
    898          1.73      yamt 		 * "try" locking the object as we are locking in the
    899          1.73      yamt 		 * wrong order (pageq -> object) and we don't want to
    900          1.73      yamt 		 * deadlock.
    901          1.73      yamt 		 *
    902          1.73      yamt 		 * the only time we expect to see an ownerless page
    903          1.73      yamt 		 * (i.e. a page with no uobject and !PQ_ANON) is if an
    904          1.73      yamt 		 * anon has loaned a page from a uvm_object and the
    905          1.73      yamt 		 * uvm_object has dropped the ownership.  in that
    906          1.73      yamt 		 * case, the anon can "take over" the loaned page
    907          1.73      yamt 		 * and make it its own.
    908          1.73      yamt 		 */
    909          1.30       chs 
    910  1.93.4.2.4.3      matt 		slock = uvmpd_trylockowner(pg);
    911          1.76      yamt 		if (slock == NULL) {
    912          1.89        ad 			/*
    913          1.89        ad 			 * yield cpu to make a chance for an LWP holding
    914          1.89        ad 			 * the lock run.  otherwise we can busy-loop too long
    915          1.89        ad 			 * if the page queue is filled with a lot of pages
    916          1.89        ad 			 * from few objects.
    917          1.89        ad 			 */
    918          1.89        ad 			lockownerfail++;
    919          1.89        ad 			if (lockownerfail > UVMPD_NUMTRYLOCKOWNER) {
    920          1.89        ad 				mutex_exit(&uvm_pageqlock);
    921          1.89        ad 				/* XXX Better than yielding but inadequate. */
    922          1.89        ad 				kpause("livelock", false, 1, NULL);
    923          1.89        ad 				mutex_enter(&uvm_pageqlock);
    924          1.89        ad 				lockownerfail = 0;
    925          1.89        ad 			}
    926          1.76      yamt 			continue;
    927          1.76      yamt 		}
    928  1.93.4.2.4.3      matt 		if (pg->flags & PG_BUSY) {
    929          1.89        ad 			mutex_exit(slock);
    930  1.93.4.2.4.3      matt 			grp->pgrp_pdbusy++;
    931          1.76      yamt 			continue;
    932          1.76      yamt 		}
    933          1.76      yamt 
    934          1.73      yamt 		/* does the page belong to an object? */
    935          1.73      yamt 		if (uobj != NULL) {
    936  1.93.4.2.4.3      matt 			grp->pgrp_pdobscan++;
    937          1.73      yamt 		} else {
    938          1.73      yamt #if defined(VMSWAP)
    939          1.73      yamt 			KASSERT(anon != NULL);
    940  1.93.4.2.4.3      matt 			grp->pgrp_pdanscan++;
    941          1.68      yamt #else /* defined(VMSWAP) */
    942          1.73      yamt 			panic("%s: anon", __func__);
    943          1.68      yamt #endif /* defined(VMSWAP) */
    944          1.73      yamt 		}
    945           1.8       mrg 
    946          1.37       chs 
    947          1.73      yamt 		/*
    948          1.73      yamt 		 * we now have the object and the page queues locked.
    949          1.73      yamt 		 * if the page is not swap-backed, call the object's
    950          1.73      yamt 		 * pager to flush and free the page.
    951          1.73      yamt 		 */
    952          1.37       chs 
    953          1.69      yamt #if defined(READAHEAD_STATS)
    954  1.93.4.2.4.3      matt 		if ((pg->pqflags & PQ_READAHEAD) != 0) {
    955  1.93.4.2.4.3      matt 			pg->pqflags &= ~PQ_READAHEAD;
    956          1.73      yamt 			uvm_ra_miss.ev_count++;
    957          1.73      yamt 		}
    958          1.69      yamt #endif /* defined(READAHEAD_STATS) */
    959          1.69      yamt 
    960  1.93.4.2.4.3      matt 		if ((pg->pqflags & PQ_SWAPBACKED) == 0) {
    961          1.82       alc 			KASSERT(uobj != NULL);
    962          1.89        ad 			mutex_exit(&uvm_pageqlock);
    963  1.93.4.2.4.3      matt 			(void) (uobj->pgops->pgo_put)(uobj, pg->offset,
    964  1.93.4.2.4.3      matt 			    pg->offset + PAGE_SIZE, PGO_CLEANIT|PGO_FREE);
    965          1.89        ad 			mutex_enter(&uvm_pageqlock);
    966          1.73      yamt 			continue;
    967          1.73      yamt 		}
    968          1.37       chs 
    969          1.73      yamt 		/*
    970          1.73      yamt 		 * the page is swap-backed.  remove all the permissions
    971          1.73      yamt 		 * from the page so we can sync the modified info
    972          1.73      yamt 		 * without any race conditions.  if the page is clean
    973          1.73      yamt 		 * we can free it now and continue.
    974          1.73      yamt 		 */
    975           1.8       mrg 
    976  1.93.4.2.4.3      matt 		pmap_page_protect(pg, VM_PROT_NONE);
    977  1.93.4.2.4.3      matt 		if ((pg->flags & PG_CLEAN) && pmap_clear_modify(pg)) {
    978  1.93.4.2.4.3      matt 			pg->flags &= ~(PG_CLEAN);
    979          1.73      yamt 		}
    980  1.93.4.2.4.3      matt 		if (pg->flags & PG_CLEAN) {
    981          1.73      yamt 			int slot;
    982          1.73      yamt 			int pageidx;
    983          1.73      yamt 
    984  1.93.4.2.4.3      matt 			pageidx = pg->offset >> PAGE_SHIFT;
    985  1.93.4.2.4.3      matt 			KASSERT(!uvmpdpol_pageisqueued_p(pg));
    986  1.93.4.2.4.3      matt 			uvm_pagefree(pg);
    987  1.93.4.2.4.3      matt 			grp->pgrp_pdfreed++;
    988           1.8       mrg 
    989           1.8       mrg 			/*
    990          1.73      yamt 			 * for anons, we need to remove the page
    991          1.73      yamt 			 * from the anon ourselves.  for aobjs,
    992          1.73      yamt 			 * pagefree did that for us.
    993           1.8       mrg 			 */
    994          1.24       chs 
    995          1.73      yamt 			if (anon) {
    996          1.73      yamt 				KASSERT(anon->an_swslot != 0);
    997          1.73      yamt 				anon->an_page = NULL;
    998          1.73      yamt 				slot = anon->an_swslot;
    999          1.73      yamt 			} else {
   1000          1.73      yamt 				slot = uao_find_swslot(uobj, pageidx);
   1001           1.8       mrg 			}
   1002          1.89        ad 			mutex_exit(slock);
   1003           1.8       mrg 
   1004          1.73      yamt 			if (slot > 0) {
   1005          1.73      yamt 				/* this page is now only in swap. */
   1006          1.87        ad 				mutex_enter(&uvm_swap_data_lock);
   1007          1.73      yamt 				KASSERT(uvmexp.swpgonly < uvmexp.swpginuse);
   1008          1.73      yamt 				uvmexp.swpgonly++;
   1009          1.87        ad 				mutex_exit(&uvm_swap_data_lock);
   1010          1.37       chs 			}
   1011          1.73      yamt 			continue;
   1012          1.73      yamt 		}
   1013          1.37       chs 
   1014          1.77      yamt #if defined(VMSWAP)
   1015          1.73      yamt 		/*
    1016          1.73      yamt 		 * this page is dirty; skip it if we'll have met our
   1017          1.73      yamt 		 * free target when all the current pageouts complete.
   1018          1.73      yamt 		 */
   1019          1.24       chs 
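                                 		/*
                                 		 * pgrp_paging counts pageouts already in flight; the
                                 		 * "<< 2" (4x the free target) presumably lets the
                                 		 * daemon batch extra pageouts per wakeup instead of
                                 		 * stopping exactly at the target.
                                 		 */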
   1020  1.93.4.2.4.3      matt 		if (grp->pgrp_free + grp->pgrp_paging > grp->pgrp_freetarg << 2) {
   1021          1.89        ad 			mutex_exit(slock);
   1022          1.73      yamt 			continue;
   1023          1.73      yamt 		}
   1024          1.14       chs 
   1025          1.73      yamt 		/*
   1026          1.73      yamt 		 * free any swap space allocated to the page since
   1027          1.73      yamt 		 * we'll have to write it again with its new data.
   1028          1.73      yamt 		 */
   1029          1.24       chs 
   1030  1.93.4.2.4.3      matt 		uvmpd_dropswap(pg);
   1031          1.14       chs 
   1032          1.73      yamt 		/*
   1033          1.73      yamt 		 * start new swap pageout cluster (if necessary).
   1034      1.93.4.1       snj 		 *
    1035      1.93.4.1       snj 		 * if swap is full, reactivate this page so that
   1036      1.93.4.1       snj 		 * we eventually cycle all pages through the
   1037      1.93.4.1       snj 		 * inactive queue.
   1038           1.8       mrg 		 */
   1039          1.24       chs 
   1040          1.73      yamt 		if (swapcluster_allocslots(&swc)) {
   1041      1.93.4.1       snj 			dirtyreacts++;
   1042  1.93.4.2.4.3      matt 			uvm_pageactivate(pg);
   1043          1.89        ad 			mutex_exit(slock);
   1044          1.73      yamt 			continue;
   1045           1.8       mrg 		}
   1046           1.8       mrg 
   1047           1.8       mrg 		/*
    1048          1.73      yamt 		 * at this point, we're definitely going to reuse this
   1049          1.73      yamt 		 * page.  mark the page busy and delayed-free.
   1050          1.73      yamt 		 * we should remove the page from the page queues
   1051          1.73      yamt 		 * so we don't ever look at it again.
   1052          1.73      yamt 		 * adjust counters and such.
   1053           1.8       mrg 		 */
   1054           1.8       mrg 
   1055  1.93.4.2.4.3      matt 		pg->flags |= PG_BUSY;
   1056  1.93.4.2.4.3      matt 		UVM_PAGE_OWN(pg, "scan_queue");
   1057          1.73      yamt 
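                                 		/*
                                 		 * PG_PAGEOUT asks the pageout completion path to free
                                 		 * the page once the swap write finishes; dequeueing it
                                 		 * keeps the scan from looking at it again meanwhile.
                                 		 */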
   1058  1.93.4.2.4.3      matt 		pg->flags |= PG_PAGEOUT;
   1059  1.93.4.2.4.3      matt 		uvm_pagedequeue(pg);
   1060          1.73      yamt 
   1061  1.93.4.2.4.3      matt 		grp->pgrp_pgswapout++;
   1062          1.89        ad 		mutex_exit(&uvm_pageqlock);
   1063           1.8       mrg 
   1064           1.8       mrg 		/*
   1065          1.73      yamt 		 * add the new page to the cluster.
   1066           1.8       mrg 		 */
   1067           1.8       mrg 
   1068  1.93.4.2.4.3      matt 		if (swapcluster_add(&swc, pg)) {
   1069  1.93.4.2.4.3      matt 			pg->flags &= ~(PG_BUSY|PG_PAGEOUT);
   1070  1.93.4.2.4.3      matt 			UVM_PAGE_OWN(pg, NULL);
   1071          1.89        ad 			mutex_enter(&uvm_pageqlock);
   1072          1.77      yamt 			dirtyreacts++;
   1073  1.93.4.2.4.3      matt 			uvm_pageactivate(pg);
   1074          1.89        ad 			mutex_exit(slock);
   1075          1.73      yamt 			continue;
   1076          1.73      yamt 		}
   1077          1.89        ad 		mutex_exit(slock);
   1078          1.73      yamt 
   1079  1.93.4.2.4.3      matt 		swapcluster_flush(grp, &swc, false);
   1080          1.89        ad 		mutex_enter(&uvm_pageqlock);
   1081          1.73      yamt 
   1082           1.8       mrg 		/*
   1083          1.31       chs 		 * the pageout is in progress.  bump counters and set up
   1084          1.31       chs 		 * for the next loop.
   1085           1.8       mrg 		 */
   1086           1.8       mrg 
   1087          1.31       chs 		uvmexp.pdpending++;
   1088          1.77      yamt #else /* defined(VMSWAP) */
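                                 		/*
                                 		 * without VMSWAP a dirty swap-backed page can never
                                 		 * be cleaned, so just reactivate it and move on.
                                 		 */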
   1089  1.93.4.2.4.3      matt 		uvm_pageactivate(pg);
   1090          1.89        ad 		mutex_exit(slock);
   1091          1.77      yamt #endif /* defined(VMSWAP) */
   1092          1.73      yamt 	}
   1093          1.73      yamt 
   1094          1.73      yamt #if defined(VMSWAP)
   1095          1.89        ad 	mutex_exit(&uvm_pageqlock);
   1096  1.93.4.2.4.3      matt 	swapcluster_flush(grp, &swc, true);
   1097          1.89        ad 	mutex_enter(&uvm_pageqlock);
   1098          1.68      yamt #endif /* defined(VMSWAP) */
   1099           1.1       mrg }
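                                 
                                 /*
                                  * in outline, uvmpd_scan_queue() disposes of each inactive page thus:
                                  *
                                  *	not swap-backed	-> pgo_put(PGO_CLEANIT|PGO_FREE)
                                  *	clean		-> free now, account for any swap-only copy
                                  *	dirty, no slot	-> reactivate (counted in dirtyreacts)
                                  *	dirty		-> mark PG_BUSY|PG_PAGEOUT, add to swap cluster
                                  *
                                  * the cluster is flushed as it fills and once more when the scan ends.
                                  */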
   1100           1.1       mrg 
   1101           1.1       mrg /*
   1102           1.1       mrg  * uvmpd_scan: scan the page queues and attempt to meet our targets.
   1103           1.1       mrg  *
   1104           1.1       mrg  * => called with pageq's locked
   1105           1.1       mrg  */
   1106           1.1       mrg 
   1107          1.65   thorpej static void
   1108  1.93.4.2.4.3      matt uvmpd_scan(struct uvm_pggroup *grp)
   1109           1.1       mrg {
   1110  1.93.4.2.4.3      matt 	u_int swap_shortage, pages_freed;
   1111           1.8       mrg 	UVMHIST_FUNC("uvmpd_scan"); UVMHIST_CALLED(pdhist);
   1112           1.1       mrg 
   1113  1.93.4.2.4.3      matt 	grp->pgrp_pdrevs++;
   1114           1.1       mrg 
   1115           1.8       mrg 	/*
   1116          1.93        ad 	 * work on meeting our targets.   first we work on our free target
   1117          1.93        ad 	 * by converting inactive pages into free pages.  then we work on
   1118          1.93        ad 	 * meeting our inactive target by converting active pages to
   1119          1.93        ad 	 * inactive ones.
   1120           1.8       mrg 	 */
   1121           1.8       mrg 
   1122           1.8       mrg 	UVMHIST_LOG(pdhist, "  starting 'free' loop",0,0,0,0);
   1123           1.8       mrg 
   1124  1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed;
   1125  1.93.4.2.4.3      matt 	uvmpd_scan_queue(grp);
   1126  1.93.4.2.4.3      matt 	pages_freed = grp->pgrp_pdfreed - pages_freed;
   1127           1.8       mrg 
   1128           1.8       mrg 	/*
   1129          1.14       chs 	 * detect if we're not going to be able to page anything out
   1130          1.14       chs 	 * until we free some swap resources from active pages.
   1131          1.14       chs 	 */
   1132          1.24       chs 
   1133          1.14       chs 	swap_shortage = 0;
   1134  1.93.4.2.4.3      matt 	if (grp->pgrp_free < grp->pgrp_freetarg &&
   1135          1.52        pk 	    uvmexp.swpginuse >= uvmexp.swpgavail &&
   1136          1.52        pk 	    !uvm_swapisfull() &&
   1137          1.14       chs 	    pages_freed == 0) {
   1138  1.93.4.2.4.3      matt 		swap_shortage = grp->pgrp_freetarg - grp->pgrp_free;
   1139          1.14       chs 	}
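                                 	/*
                                 	 * the swap tests above mean: every available swap page is
                                 	 * allocated, yet not every in-use slot is the sole copy of
                                 	 * its page, so releasing slots that still shadow in-core
                                 	 * pages would let pageouts proceed again.
                                 	 */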
   1140          1.24       chs 
   1141  1.93.4.2.4.3      matt 	uvmpdpol_balancequeue(grp, swap_shortage);
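                                 	/*
                                 	 * uvmpdpol_balancequeue() moves active pages toward the
                                 	 * inactive list to meet the inactive target; a nonzero
                                 	 * swap_shortage also asks it to reclaim swap slots from
                                 	 * in-core pages (the details are policy-dependent, see
                                 	 * uvm_pdpolicy_clock.c).
                                 	 */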
   1142          1.93        ad 
   1143          1.93        ad 	/*
   1144          1.93        ad 	 * swap out some processes if we are still below the minimum
   1145          1.93        ad 	 * free target.  we need to unlock the page queues for this.
   1146          1.93        ad 	 */
   1147          1.93        ad 
   1148  1.93.4.2.4.3      matt 	if (grp->pgrp_free < grp->pgrp_freemin
   1149  1.93.4.2.4.3      matt 	    && uvmexp.nswapdev != 0 && uvm.swapout_enabled) {
   1150  1.93.4.2.4.3      matt 		grp->pgrp_pdswout++;
   1151          1.93        ad 		UVMHIST_LOG(pdhist,"  free %d < min %d: swapout",
    1152          1.93        ad 		    grp->pgrp_free, grp->pgrp_freemin, 0, 0);
   1153          1.93        ad 		mutex_exit(&uvm_pageqlock);
   1154          1.93        ad 		uvm_swapout_threads();
   1155          1.93        ad 		mutex_enter(&uvm_pageqlock);
    1157          1.93        ad 	}
   1158           1.1       mrg }
   1159          1.62      yamt 
   1160          1.62      yamt /*
   1161          1.62      yamt  * uvm_reclaimable: decide whether to wait for pagedaemon.
   1162          1.62      yamt  *
    1163          1.84   thorpej  * => return true if it seems worthwhile to do uvm_wait.
   1164          1.62      yamt  *
   1165          1.62      yamt  * XXX should be tunable.
   1166          1.62      yamt  * XXX should consider pools, etc?
   1167          1.62      yamt  */
   1168          1.62      yamt 
   1169          1.83   thorpej bool
   1170          1.62      yamt uvm_reclaimable(void)
   1171          1.62      yamt {
   1172          1.62      yamt 	int filepages;
    1173          1.77      yamt 	u_int active, inactive;
   1174          1.62      yamt 
   1175          1.62      yamt 	/*
   1176          1.62      yamt 	 * if swap is not full, no problem.
   1177          1.62      yamt 	 */
   1178          1.62      yamt 
   1179          1.62      yamt 	if (!uvm_swapisfull()) {
   1180          1.84   thorpej 		return true;
   1181          1.62      yamt 	}
   1182          1.62      yamt 
   1183          1.62      yamt 	/*
   1184          1.62      yamt 	 * file-backed pages can be reclaimed even when swap is full.
    1185          1.62      yamt 	 * if we have more than min(1/16 of pageable memory, 5MB), try to reclaim.
   1186          1.62      yamt 	 *
    1187          1.62      yamt 	 * XXX assume the worst case, i.e. all wired pages are file-backed.
    1188          1.63      yamt 	 *
    1189          1.63      yamt 	 * XXX should consider other reclaimable memory,
    1190          1.63      yamt 	 * XXX i.e. pools, traditional buffer cache.
   1191          1.62      yamt 	 */
   1192          1.62      yamt 
   1193          1.62      yamt 	filepages = uvmexp.filepages + uvmexp.execpages - uvmexp.wired;
   1194          1.77      yamt 	uvm_estimatepageable(&active, &inactive);
    1195          1.77      yamt 	if (filepages >= (int)MIN((active + inactive) >> 4,
   1196          1.62      yamt 	    5 * 1024 * 1024 >> PAGE_SHIFT)) {
   1197          1.84   thorpej 		return true;
   1198          1.62      yamt 	}
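                                 	/*
                                 	 * e.g. with 4KB pages the 5MB bound above is 1280 pages, so
                                 	 * once more than 80MB (20480 pages) is pageable, MIN() picks
                                 	 * the fixed 5MB bound rather than the 1/16 fraction.
                                 	 */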
   1199          1.62      yamt 
   1200          1.62      yamt 	/*
   1201          1.62      yamt 	 * kill the process, fail allocation, etc..
   1202          1.62      yamt 	 */
   1203          1.62      yamt 
   1204          1.84   thorpej 	return false;
   1205          1.62      yamt }
   1206          1.77      yamt 
   1207          1.77      yamt void
   1208  1.93.4.2.4.3      matt uvm_estimatepageable(u_int *active, u_int *inactive)
   1209          1.77      yamt {
   1210          1.77      yamt 
   1211          1.77      yamt 	uvmpdpol_estimatepageable(active, inactive);
   1212          1.77      yamt }
   1213